language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 6975,
"end": 7462
} | class ____(serializers.Serializer):
id = serializers.SlugField()
header = serializers.CharField(source="get_rendered_header")
body = serializers.CharField(source="get_rendered_body")
type = serializers.CharField()
icon_classes = serializers.CharField(source="get_display_icon_classes")
class Meta:
fields = [
"id",
"header",
"body",
"type",
"icon_classes",
]
| NotificationMessageSerializer |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/tfr/python/op_reg_gen.py | {
"start": 4219,
"end": 5076
} | class ____(transpiler.GenericTranspiler):
"""Transforms Python objects into TFR MLIR source code."""
def transform_ast(self, node, ctx):
gen = OpRegGenImpl(ctx)
gen.visit(node)
return gen.code_buffer
def op_reg_gen(func):
"""Parse a function and emit the TFR functions."""
op_reg_code, _ = OpRegGen().transform(func, None)
return op_reg_code
def gen_register_op(source, method_prefix=None):
"""Parse a python code and emit the TFR functions from a target class."""
mlir_funcs = [
op_reg_gen(func)
for name, func in tf_inspect.getmembers(source, tf_inspect.isfunction)
if not method_prefix or name.startswith(method_prefix)
]
headers = r"""
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
"""
code = '\n'.join(mlir_funcs)
return headers + code + '} // namespace tensorflow\n'
| OpRegGen |
python | pyodide__pyodide | src/py/_pyodide/_core_docs.py | {
"start": 23845,
"end": 27484
} | class ____(JsIterable[T_co], Generic[T_co, T_contra, V_co]):
"""A JavaScript generator
A JavaScript object is treated as a generator if its
:js:data:`Symbol.toStringTag` is ``"Generator"``. Most likely this will be
because it is a true :js:class:`Generator` produced by the JavaScript
runtime, but it may be a custom object trying hard to pretend to be a
generator. It should have :js:meth:`~Generator.next`,
:js:meth:`~Generator.return` and :js:meth:`~Generator.throw` methods.
"""
_js_type_flags = ["IS_GENERATOR"]
def send(self, value: T_contra) -> T_co:
"""
Resumes the execution and "sends" a value into the generator function.
The ``value`` argument becomes the result of the current yield
expression. The ``send()`` method returns the next value yielded by the
generator, or raises :py:exc:`StopIteration` if the generator exits without
yielding another value. When ``send()`` is called to start the
generator, the argument will be ignored. Unlike in Python, we cannot
detect that the generator hasn't started yet, and no error will be
thrown if the argument of a not-started generator is not ``None``.
"""
raise NotImplementedError
@overload
def throw(
self,
typ: type[BaseException],
val: BaseException | object = ...,
tb: TracebackType | None = ...,
/,
) -> T_co: ...
@overload
def throw(
self,
typ: BaseException,
val: None = ...,
tb: TracebackType | None = ...,
/,
) -> T_co: ...
@docs_argspec("(self, error: BaseException, /) -> T_co")
def throw(
self,
*args: Any,
) -> T_co:
"""
Raises an exception at the point where the generator was paused, and
returns the next value yielded by the generator function.
If the generator exits without yielding another value, a
:py:exc:`StopIteration` exception is raised. If the generator function does
not catch the passed-in exception, or raises a different exception, then
that exception propagates to the caller.
In typical use, this is called with a single exception instance similar
to the way the raise keyword is used.
For backwards compatibility, however, a second signature is supported,
following a convention from older versions of Python. The type argument
should be an exception class, and value should be an exception instance.
If the value is not provided, the type constructor is called to get an
instance. If traceback is provided, it is set on the exception,
otherwise any existing ``__traceback__`` attribute stored in value may
be cleared.
"""
raise NotImplementedError
def close(self) -> None:
"""Raises a :py:exc:`GeneratorExit` at the point where the generator
function was paused.
If the generator function then exits gracefully, is already closed, or
raises :py:exc:`GeneratorExit` (by not catching the exception), ``close()``
returns to its caller. If the generator yields a value, a
:py:exc:`RuntimeError` is raised. If the generator raises any other
exception, it is propagated to the caller. ``close()`` does nothing if
the generator has already exited due to an exception or normal exit.
"""
def __next__(self) -> T_co:
raise NotImplementedError
def __iter__(self) -> "JsGenerator[T_co, T_contra, V_co]":
raise NotImplementedError
| JsGenerator |
python | scipy__scipy | scipy/spatial/tests/test_distance.py | {
"start": 57809,
"end": 61090
} | class ____:
def setup_method(self):
# 1D arrays
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
self.cases = [(x, y)]
def test_minkowski(self):
for x, y in self.cases:
dist1 = minkowski(x, y, p=1)
assert_almost_equal(dist1, 3.0)
dist1p5 = minkowski(x, y, p=1.5)
assert_almost_equal(dist1p5, (1.0 + 2.0**1.5)**(2. / 3))
dist2 = minkowski(x, y, p=2)
assert_almost_equal(dist2, 5.0 ** 0.5)
dist0p25 = minkowski(x, y, p=0.25)
assert_almost_equal(dist0p25, (1.0 + 2.0 ** 0.25) ** 4)
# Check that casting input to minimum scalar type doesn't affect result
# (issue #10262). This could be extended to more test inputs with
# np.min_scalar_type(np.max(input_matrix)).
a = np.array([352, 916])
b = np.array([350, 660])
assert_equal(minkowski(a, b),
minkowski(a.astype('uint16'), b.astype('uint16')))
def test_euclidean(self):
for x, y in self.cases:
dist = weuclidean(x, y)
assert_almost_equal(dist, np.sqrt(5))
def test_sqeuclidean(self):
for x, y in self.cases:
dist = wsqeuclidean(x, y)
assert_almost_equal(dist, 5.0)
def test_cosine(self):
for x, y in self.cases:
dist = wcosine(x, y)
assert_almost_equal(dist, 1.0 - 18.0 / (np.sqrt(14) * np.sqrt(27)))
def test_cosine_output_dtype(self):
# Regression test for gh-19541
assert isinstance(wcorrelation([1, 1], [1, 1], centered=False), float)
assert isinstance(wcosine([1, 1], [1, 1]), float)
def test_correlation(self):
xm = np.array([-1.0, 0, 1.0])
ym = np.array([-4.0 / 3, -4.0 / 3, 5.0 - 7.0 / 3])
for x, y in self.cases:
dist = wcorrelation(x, y)
assert_almost_equal(dist, 1.0 - np.dot(xm, ym) / (norm(xm) * norm(ym)))
def test_correlation_positive(self):
# Regression test for gh-12320 (negative return value due to rounding
x = np.array([0., 0., 0., 0., 0., 0., -2., 0., 0., 0., -2., -2., -2.,
0., -2., 0., -2., 0., 0., -1., -2., 0., 1., 0., 0., -2.,
0., 0., -2., 0., -2., -2., -2., -2., -2., -2., 0.])
y = np.array([1., 1., 1., 1., 1., 1., -1., 1., 1., 1., -1., -1., -1.,
1., -1., 1., -1., 1., 1., 0., -1., 1., 2., 1., 1., -1.,
1., 1., -1., 1., -1., -1., -1., -1., -1., -1., 1.])
dist = correlation(x, y)
assert 0 <= dist <= 10 * np.finfo(np.float64).eps
@pytest.mark.filterwarnings('ignore:Casting complex')
@pytest.mark.parametrize("func", [correlation, cosine])
def test_corr_dep_complex(self, func):
x = [1+0j, 2+0j]
y = [3+0j, 4+0j]
with pytest.raises(TypeError, match="real"):
func(x, y)
def test_mahalanobis(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
vi = np.array([[2.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
for x, y in self.cases:
dist = mahalanobis(x, y, vi)
assert_almost_equal(dist, np.sqrt(6.0))
| TestSomeDistanceFunctions |
python | PyCQA__isort | tests/unit/profiles/test_django.py | {
"start": 1682,
"end": 3687
} | class ____:
def __repr__(self):
return '<Deferred field>'
def __str__(self):
return '<Deferred field>'"""
)
def test_django_snippet_two():
django_isort_test(
'''from django.utils.version import get_version
VERSION = (3, 2, 0, 'alpha', 0)
__version__ = get_version(VERSION)
def setup(set_prefix=True):
"""
Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
Set the thread-local urlresolvers script prefix if `set_prefix` is True.
"""
from django.apps import apps
from django.conf import settings
from django.urls import set_script_prefix
from django.utils.log import configure_logging
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
if set_prefix:
set_script_prefix(
'/' if settings.FORCE_SCRIPT_NAME is None else settings.FORCE_SCRIPT_NAME
)
apps.populate(settings.INSTALLED_APPS)'''
)
def test_django_snippet_three():
django_isort_test(
"""import cgi
import codecs
import copy
import warnings
from io import BytesIO
from itertools import chain
from urllib.parse import quote, urlencode, urljoin, urlsplit
from django.conf import settings
from django.core import signing
from django.core.exceptions import (
DisallowedHost, ImproperlyConfigured, RequestDataTooBig,
)
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils.datastructures import (
CaseInsensitiveMapping, ImmutableList, MultiValueDict,
)
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.encoding import escape_uri_path, iri_to_uri
from django.utils.functional import cached_property
from django.utils.http import is_same_domain, limited_parse_qsl
from django.utils.regex_helper import _lazy_re_compile
from .multipartparser import parse_header
RAISE_ERROR = object()
| Deferred |
python | kamyu104__LeetCode-Solutions | Python/merge-similar-items.py | {
"start": 71,
"end": 357
} | class ____(object):
def mergeSimilarItems(self, items1, items2):
"""
:type items1: List[List[int]]
:type items2: List[List[int]]
:rtype: List[List[int]]
"""
return sorted((Counter(dict(items1))+Counter(dict(items2))).iteritems())
| Solution |
python | gevent__gevent | src/gevent/events.py | {
"start": 15170,
"end": 15569
} | class ____(_PatchAllMixin, GeventWillPatchEvent):
"""
Implementation of `IGeventWillPatchAllEvent`.
"""
#: The name of the setuptools entry point that is called when this
#: event is emitted.
ENTRY_POINT_NAME = 'gevent.plugins.monkey.will_patch_all'
def will_patch_module(self, module_name):
return self.patch_all_arguments.get(module_name)
| GeventWillPatchAllEvent |
python | networkx__networkx | networkx/utils/tests/test_mapped_queue.py | {
"start": 5344,
"end": 7354
} | class ____(TestMappedQueue):
def _make_mapped_queue(self, h):
priority_dict = {elt: elt for elt in h}
return MappedQueue(priority_dict)
def test_init(self):
d = {5: 0, 4: 1, "a": 2, 2: 3, 1: 4}
q = MappedQueue(d)
assert q.position == d
def test_ties(self):
d = {5: 0, 4: 1, 3: 2, 2: 3, 1: 4}
q = MappedQueue(d)
assert q.position == {elt: pos for pos, elt in enumerate(q.heap)}
def test_pop(self):
d = {5: 0, 4: 1, 3: 2, 2: 3, 1: 4}
q = MappedQueue(d)
assert q.pop() == _HeapElement(0, 5)
assert q.position == {elt: pos for pos, elt in enumerate(q.heap)}
def test_empty_pop(self):
q = MappedQueue()
pytest.raises(IndexError, q.pop)
def test_incomparable_ties(self):
d = {5: 0, 4: 0, "a": 0, 2: 0, 1: 0}
pytest.raises(TypeError, MappedQueue, d)
def test_push(self):
to_push = [6, 1, 4, 3, 2, 5, 0]
h_sifted = [0, 2, 1, 6, 3, 5, 4]
q = MappedQueue()
for elt in to_push:
q.push(elt, priority=elt)
assert q.heap == h_sifted
self._check_map(q)
def test_push_duplicate(self):
to_push = [2, 1, 0]
h_sifted = [0, 2, 1]
q = MappedQueue()
for elt in to_push:
inserted = q.push(elt, priority=elt)
assert inserted
assert q.heap == h_sifted
self._check_map(q)
inserted = q.push(1, priority=1)
assert not inserted
def test_update_leaf(self):
h = [0, 20, 10, 60, 30, 50, 40]
h_updated = [0, 15, 10, 60, 20, 50, 40]
q = self._make_mapped_queue(h)
removed = q.update(30, 15, priority=15)
assert q.heap == h_updated
def test_update_root(self):
h = [0, 20, 10, 60, 30, 50, 40]
h_updated = [10, 20, 35, 60, 30, 50, 40]
q = self._make_mapped_queue(h)
removed = q.update(0, 35, priority=35)
assert q.heap == h_updated
| TestMappedDict |
python | altair-viz__altair | altair/utils/_vegafusion_data.py | {
"start": 2059,
"end": 9660
} | class ____(TypedDict):
url: str
_VegaFusionReturnType = Union[_ToVegaFusionReturnUrlDict, ToValuesReturnType]
@overload
def vegafusion_data_transformer(
data: None = ..., max_rows: int = ...
) -> Callable[..., Any]: ...
@overload
def vegafusion_data_transformer(
data: DataFrameLike, max_rows: int = ...
) -> ToValuesReturnType: ...
@overload
def vegafusion_data_transformer(
data: dict | IntoDataFrame | SupportsGeoInterface, max_rows: int = ...
) -> _VegaFusionReturnType: ...
def vegafusion_data_transformer(
data: DataType | None = None, max_rows: int = 100000
) -> Callable[..., Any] | _VegaFusionReturnType:
"""VegaFusion Data Transformer."""
if data is None:
return vegafusion_data_transformer
if is_supported_by_vf(data) and not isinstance(data, SupportsGeoInterface):
table_name = f"table_{uuid.uuid4()}".replace("-", "_")
extracted_inline_tables[table_name] = data
return {"url": VEGAFUSION_PREFIX + table_name}
else:
# Use default transformer for geo interface objects
# # (e.g. a geopandas GeoDataFrame)
# Or if we don't recognize data type
return default_data_transformer(data)
def get_inline_table_names(vega_spec: dict[str, Any]) -> set[str]:
"""
Get a set of the inline datasets names in the provided Vega spec.
Inline datasets are encoded as URLs that start with the table://
prefix.
Parameters
----------
vega_spec: dict
A Vega specification dict
Returns
-------
set of str
Set of the names of the inline datasets that are referenced
in the specification.
Examples
--------
>>> spec = {
... "data": [
... {"name": "foo", "url": "https://path/to/file.csv"},
... {"name": "bar", "url": "vegafusion+dataset://inline_dataset_123"},
... ]
... }
>>> get_inline_table_names(spec)
{'inline_dataset_123'}
"""
table_names = set()
# Process datasets
for data in vega_spec.get("data", []):
url = data.get("url", "")
if url.startswith(VEGAFUSION_PREFIX):
name = url[len(VEGAFUSION_PREFIX) :]
table_names.add(name)
# Recursively process child marks, which may have their own datasets
for mark in vega_spec.get("marks", []):
table_names.update(get_inline_table_names(mark))
return table_names
def get_inline_tables(vega_spec: dict[str, Any]) -> dict[str, DataFrameLike]:
"""
Get the inline tables referenced by a Vega specification.
Note: This function should only be called on a Vega spec that corresponds
to a chart that was processed by the vegafusion_data_transformer.
Furthermore, this function may only be called once per spec because
the returned dataframes are deleted from internal storage.
Parameters
----------
vega_spec: dict
A Vega specification dict
Returns
-------
dict from str to dataframe
dict from inline dataset name to dataframe object
"""
inline_names = get_inline_table_names(vega_spec)
# exclude named dataset that was provided by the user,
# or dataframes that have been deleted.
table_names = inline_names.intersection(extracted_inline_tables)
return {k: extracted_inline_tables.pop(k) for k in table_names}
def compile_to_vegafusion_chart_state(
vegalite_spec: dict[str, Any], local_tz: str
) -> ChartState:
"""
Compile a Vega-Lite spec to a VegaFusion ChartState.
Note: This function should only be called on a Vega-Lite spec
that was generated with the "vegafusion" data transformer enabled.
In particular, this spec may contain references to extract datasets
using table:// prefixed URLs.
Parameters
----------
vegalite_spec: dict
A Vega-Lite spec that was generated from an Altair chart with
the "vegafusion" data transformer enabled
local_tz: str
Local timezone name (e.g. 'America/New_York')
Returns
-------
ChartState
A VegaFusion ChartState object
"""
# Local import to avoid circular ImportError
from altair import data_transformers, vegalite_compilers
vf = import_vegafusion()
# Compile Vega-Lite spec to Vega
compiler = vegalite_compilers.get()
if compiler is None:
msg = "No active vega-lite compiler plugin found"
raise ValueError(msg)
vega_spec = compiler(vegalite_spec)
# Retrieve dict of inline tables referenced by the spec
inline_tables = get_inline_tables(vega_spec)
# Pre-evaluate transforms in vega spec with vegafusion
row_limit = data_transformers.options.get("max_rows", None)
chart_state = vf.runtime.new_chart_state(
vega_spec,
local_tz=local_tz,
inline_datasets=inline_tables,
row_limit=row_limit,
)
# Check from row limit warning and convert to MaxRowsError
handle_row_limit_exceeded(row_limit, chart_state.get_warnings())
return chart_state
def compile_with_vegafusion(vegalite_spec: dict[str, Any]) -> dict[str, Any]:
"""
Compile a Vega-Lite spec to Vega and pre-transform with VegaFusion.
Note: This function should only be called on a Vega-Lite spec
that was generated with the "vegafusion" data transformer enabled.
In particular, this spec may contain references to extract datasets
using table:// prefixed URLs.
Parameters
----------
vegalite_spec: dict
A Vega-Lite spec that was generated from an Altair chart with
the "vegafusion" data transformer enabled
Returns
-------
dict
A Vega spec that has been pre-transformed by VegaFusion
"""
# Local import to avoid circular ImportError
from altair import data_transformers, vegalite_compilers
vf = import_vegafusion()
# Compile Vega-Lite spec to Vega
compiler = vegalite_compilers.get()
if compiler is None:
msg = "No active vega-lite compiler plugin found"
raise ValueError(msg)
vega_spec = compiler(vegalite_spec)
# Retrieve dict of inline tables referenced by the spec
inline_tables = get_inline_tables(vega_spec)
# Pre-evaluate transforms in vega spec with vegafusion
row_limit = data_transformers.options.get("max_rows", None)
transformed_vega_spec, warnings = vf.runtime.pre_transform_spec(
vega_spec,
vf.get_local_tz(),
inline_datasets=inline_tables,
row_limit=row_limit,
)
# Check from row limit warning and convert to MaxRowsError
handle_row_limit_exceeded(row_limit, warnings)
return transformed_vega_spec
def handle_row_limit_exceeded(row_limit: int | None, warnings: list):
for warning in warnings:
if warning.get("type") == "RowLimitExceeded":
msg = (
"The number of dataset rows after filtering and aggregation exceeds\n"
f"the current limit of {row_limit}. Try adding an aggregation to reduce\n"
"the size of the dataset that must be loaded into the browser. Or, disable\n"
"the limit by calling alt.data_transformers.disable_max_rows(). Note that\n"
"disabling this limit may cause the browser to freeze or crash."
)
raise MaxRowsError(msg)
def using_vegafusion() -> bool:
"""Check whether the vegafusion data transformer is enabled."""
# Local import to avoid circular ImportError
from altair import data_transformers
return data_transformers.active == "vegafusion"
| _ToVegaFusionReturnUrlDict |
python | bokeh__bokeh | src/bokeh/protocol/messages/pull_doc_reply.py | {
"start": 1591,
"end": 1635
} | class ____(TypedDict):
doc: DocJson
| PullDoc |
python | ray-project__ray | release/ray_release/tests/test_test.py | {
"start": 787,
"end": 17740
} | class ____(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_name(self) -> str:
return self.get("name", "")
def get_test_results(self, limit: int) -> List[TestResult]:
return self.get("test_results", [])
def is_high_impact(self) -> bool:
return self.get(Test.KEY_IS_HIGH_IMPACT, "false") == "true"
def _stub_test(val: dict) -> Test:
test = Test(
{
"name": "test",
"cluster": {},
}
)
test.update(val)
return test
def _stub_test_result(
status: ResultStatus = ResultStatus.SUCCESS, rayci_step_id="123", commit="456"
) -> TestResult:
return TestResult(
status=status.value,
commit=commit,
branch="master",
url="url",
timestamp=0,
pull_request="1",
rayci_step_id=rayci_step_id,
duration_ms=5.0,
)
def test_convert_env_list_to_dict():
with mock.patch.dict(os.environ, {"ENV": "env"}):
assert _convert_env_list_to_dict(["a=b", "c=d=e", "ENV"]) == {
"a": "b",
"c": "d=e",
"ENV": "env",
}
def test_get_python_version():
assert _stub_test({}).get_python_version() == "3.9"
assert _stub_test({"python": "3.11"}).get_python_version() == "3.11"
def test_get_ray_image():
os.environ["RAYCI_BUILD_ID"] = "a1b2c3d4"
# These images are NOT saved on Docker Hub, but on private ECR.
assert (
_stub_test(
{
"python": "3.9",
"cluster": {"byod": {}},
}
).get_ray_image()
== "rayproject/ray:a1b2c3d4-py39-cpu"
)
assert (
_stub_test(
{
"python": "3.9",
"cluster": {
"byod": {
"type": "gpu",
}
},
}
).get_ray_image()
== "rayproject/ray-ml:a1b2c3d4-py39-gpu"
)
assert (
_stub_test(
{
"python": "3.11",
"cluster": {
"byod": {
"type": "llm-cu124",
}
},
}
).get_ray_image()
== "rayproject/ray-llm:a1b2c3d4-py311-cu124"
)
# When RAY_IMAGE_TAG is set, we use the RAYCI_BUILD_ID.
with mock.patch.dict(os.environ, {"RAY_IMAGE_TAG": "my_tag"}):
assert (
_stub_test({"cluster": {"byod": {}}}).get_ray_image()
== "rayproject/ray:my_tag"
)
with mock.patch.dict(os.environ, {"BUILDKITE_BRANCH": "releases/1.0.0"}):
# Even on release branches, we also use the RAYCI_BUILD_ID.
assert (
_stub_test({"cluster": {"byod": {}}}).get_ray_image()
== "rayproject/ray:a1b2c3d4-py39-cpu"
)
with mock.patch.dict(os.environ, {"BUILDKITE_PULL_REQUEST": "123"}):
assert (
_stub_test({"cluster": {"byod": {}}}).get_ray_image()
== "rayproject/ray:a1b2c3d4-py39-cpu"
)
# Unless RAY_IMAGE_TAG is set, we use the RAYCI_BUILD_ID.
with mock.patch.dict(os.environ, {"RAY_IMAGE_TAG": "my_tag"}):
assert (
_stub_test({"cluster": {"byod": {}}}).get_ray_image()
== "rayproject/ray:my_tag"
)
def test_get_byod_runtime_env():
test = _stub_test(
{
"python": "3.11",
"cluster": {
"byod": {
"runtime_env": ["a=b"],
},
},
}
)
runtime_env = test.get_byod_runtime_env()
assert runtime_env.get("RAY_BACKEND_LOG_JSON") == "1"
assert runtime_env.get("a") == "b"
def test_get_anyscale_byod_image():
os.environ["RAYCI_BUILD_ID"] = "a1b2c3d4"
assert (
_stub_test({"python": "3.7", "cluster": {"byod": {}}}).get_anyscale_byod_image()
== f"{get_global_config()['byod_ecr']}/{DATAPLANE_ECR_REPO}:a1b2c3d4-py37-cpu"
)
assert _stub_test(
{
"python": "3.8",
"cluster": {
"byod": {
"type": "gpu",
}
},
}
).get_anyscale_byod_image() == (
f"{get_global_config()['byod_ecr']}/"
f"{DATAPLANE_ECR_ML_REPO}:a1b2c3d4-py38-gpu"
)
assert _stub_test(
{
"python": "3.8",
"cluster": {
"byod": {
"type": "gpu",
"post_build_script": "foo.sh",
}
},
}
).get_anyscale_byod_image() == (
f"{get_global_config()['byod_ecr']}"
f"/{DATAPLANE_ECR_ML_REPO}:a1b2c3d4-py38-gpu-"
"5f311914c59730d72cee8e2a015c5d6eedf6523bfbf5abe2494e0cb85a5a7b70"
)
def test_get_anyscale_byod_image_ray_version():
os.environ["RAYCI_BUILD_ID"] = "a1b2c3d4"
assert (
_stub_test({"python": "3.7", "cluster": {"byod": {}}}).get_anyscale_byod_image()
== f"{get_global_config()['byod_ecr']}/{DATAPLANE_ECR_REPO}:a1b2c3d4-py37-cpu"
)
assert _stub_test(
{
"python": "3.8",
"cluster": {
"ray_version": "2.50.0",
"byod": {
"type": "gpu",
},
},
}
).get_anyscale_byod_image() == (f"{ANYSCALE_RAY_IMAGE_PREFIX}:2.50.0-py38-cu121")
assert _stub_test(
{
"python": "3.8",
"cluster": {
"ray_version": "2.50.0",
"byod": {
"type": "gpu",
"post_build_script": "foo.sh",
},
},
}
).get_anyscale_byod_image() == (
f"{get_global_config()['byod_ecr']}"
f"/{DATAPLANE_ECR_ML_REPO}:a1b2c3d4-py38-gpu-"
"5f311914c59730d72cee8e2a015c5d6eedf6523bfbf5abe2494e0cb85a5a7b70"
"-2.50.0"
)
@patch("github.Repository")
@patch("github.Issue")
def test_is_jailed_with_open_issue(mock_repo, mock_issue) -> None:
assert not Test(state="passing").is_jailed_with_open_issue(mock_repo)
mock_repo.get_issue.return_value = mock_issue
mock_issue.state = "open"
assert Test(state="jailed", github_issue_number="1").is_jailed_with_open_issue(
mock_repo
)
mock_issue.state = "closed"
assert not Test(state="jailed", github_issue_number="1").is_jailed_with_open_issue(
mock_repo
)
def test_is_stable() -> None:
assert Test().is_stable()
assert Test(stable=True).is_stable()
assert not Test(stable=False).is_stable()
@patch.dict(
os.environ,
{
"BUILDKITE_BRANCH": "food",
"BUILDKITE_PULL_REQUEST": "1",
"RAYCI_STEP_ID": "g4_s5",
},
)
def test_result_from_bazel_event() -> None:
result = TestResult.from_bazel_event(
{
"testResult": {"status": "PASSED", "testAttemptDurationMillis": "5"},
}
)
assert result.is_passing()
assert result.branch == "food"
assert result.pull_request == "1"
assert result.rayci_step_id == "g4_s5"
assert result.duration_ms == 5
result = TestResult.from_bazel_event(
{
"testResult": {"status": "FAILED"},
}
)
assert result.is_failing()
assert result.duration_ms is None
def test_from_bazel_event() -> None:
test = Test.from_bazel_event(
{
"id": {"testResult": {"label": "//ray/ci:test"}},
},
"ci",
)
assert test.get_name() == f"{platform.system().lower()}://ray/ci:test"
assert test.get_oncall() == "ci"
@patch.object(boto3, "client")
@patch.dict(
os.environ,
{"BUILDKITE_PIPELINE_ID": get_global_config()["ci_pipeline_postmerge"][0]},
)
def test_update_from_s3(mock_client) -> None:
mock_object = mock.Mock()
mock_object.return_value.get.return_value.read.return_value = json.dumps(
{
"state": "failing",
"team": "core",
"github_issue_number": "1234",
}
).encode("utf-8")
mock_client.return_value.get_object = mock_object
test = _stub_test({"team": "ci"})
test.update_from_s3()
assert test.get_state() == TestState.FAILING
assert test.get_oncall() == "ci"
assert test["github_issue_number"] == "1234"
@patch("ray_release.test.Test._get_s3_name")
@patch("ray_release.test.Test.gen_from_s3")
def test_gen_from_name(mock_gen_from_s3, _) -> None:
mock_gen_from_s3.return_value = [
_stub_test({"name": "a"}),
_stub_test({"name": "good"}),
_stub_test({"name": "test"}),
]
assert Test.gen_from_name("good").get_name() == "good"
def test_get_test_type() -> None:
assert (
_stub_test({"name": f"{LINUX_TEST_PREFIX}_test"}).get_test_type()
== TestType.LINUX_TEST
)
assert (
_stub_test({"name": f"{MACOS_TEST_PREFIX}_test"}).get_test_type()
== TestType.MACOS_TEST
)
assert (
_stub_test({"name": f"{WINDOWS_TEST_PREFIX}_test"}).get_test_type()
== TestType.WINDOWS_TEST
)
assert _stub_test({"name": "release_test"}).get_test_type() == TestType.RELEASE_TEST
def test_get_bisect_daily_rate_limit() -> None:
assert (
_stub_test({"name": f"{MACOS_TEST_PREFIX}_test"}).get_bisect_daily_rate_limit()
) == MACOS_BISECT_DAILY_RATE_LIMIT
def test_get_s3_name() -> None:
assert Test._get_s3_name("linux://python/ray/test") == "linux:__python_ray_test"
def test_is_high_impact() -> None:
assert _stub_test(
{"name": "test", Test.KEY_IS_HIGH_IMPACT: "true"}
).is_high_impact()
assert not _stub_test(
{"name": "test", Test.KEY_IS_HIGH_IMPACT: "false"}
).is_high_impact()
assert not _stub_test({"name": "test"}).is_high_impact()
@patch("ray_release.test.Test._gen_test_result")
def test_gen_test_results(mock_gen_test_result) -> None:
def _mock_gen_test_result(
client: aioboto3.Session.client,
bucket: str,
key: str,
) -> TestResult:
return (
_stub_test_result(ResultStatus.SUCCESS)
if key == "good"
else _stub_test_result(ResultStatus.ERROR)
)
mock_gen_test_result.side_effect = AsyncMock(side_effect=_mock_gen_test_result)
results = asyncio.run(
_stub_test({})._gen_test_results(
bucket="bucket",
keys=["good", "bad", "bad", "good"],
)
)
assert [result.status for result in results] == [
ResultStatus.SUCCESS.value,
ResultStatus.ERROR.value,
ResultStatus.ERROR.value,
ResultStatus.SUCCESS.value,
]
@patch("ray_release.test.Test.gen_microcheck_test")
@patch("ray_release.test.Test.gen_from_name")
def gen_microcheck_step_ids(mock_gen_from_name, mock_gen_microcheck_test) -> None:
core_test = MockTest(
{
"name": "linux://core_test",
Test.KEY_IS_HIGH_IMPACT: "false",
"test_results": [
_stub_test_result(rayci_step_id="corebuild", commit="123"),
],
}
)
data_test_01 = MockTest(
{
"name": "linux://data_test_01",
Test.KEY_IS_HIGH_IMPACT: "true",
"test_results": [
_stub_test_result(rayci_step_id="databuild", commit="123"),
],
}
)
data_test_02 = MockTest(
{
"name": "linux://data_test_02",
Test.KEY_IS_HIGH_IMPACT: "true",
"test_results": [
_stub_test_result(rayci_step_id="data15build", commit="123"),
_stub_test_result(rayci_step_id="databuild", commit="123"),
_stub_test_result(rayci_step_id="databuild", commit="456"),
],
}
)
all_tests = [core_test, data_test_01, data_test_02]
mock_gen_microcheck_test.return_value = [test.get_target() for test in all_tests]
mock_gen_from_name.side_effect = lambda x: [
test for test in all_tests if test.get_name() == x
][0]
assert Test.gen_microcheck_step_ids("linux", "") == {"databuild"}
def test_get_test_target():
input_to_output = {
"linux://test": "//test",
"darwin://test": "//test",
"windows://test": "//test",
"test": "test",
}
for input, output in input_to_output.items():
assert Test({"name": input}).get_target() == output
@mock.patch.dict(
os.environ,
{"BUILDKITE_PULL_REQUEST_BASE_BRANCH": "base", "BUILDKITE_COMMIT": "commit"},
)
@mock.patch("subprocess.check_call")
@mock.patch("subprocess.check_output")
def test_get_changed_files(mock_check_output, mock_check_call) -> None:
mock_check_output.return_value = b"file1\nfile2\n"
assert Test._get_changed_files("") == {"file1", "file2"}
@mock.patch("ray_release.test.Test._get_test_targets_per_file")
@mock.patch("ray_release.test.Test._get_changed_files")
def test_get_changed_tests(
mock_get_changed_files, mock_get_test_targets_per_file
) -> None:
mock_get_changed_files.return_value = {"test_src", "build_src"}
mock_get_test_targets_per_file.side_effect = (
lambda x, _: {"//t1", "//t2"} if x == "test_src" else {}
)
assert Test._get_changed_tests("") == {"//t1", "//t2"}
@mock.patch.dict(
os.environ,
{"BUILDKITE_PULL_REQUEST_BASE_BRANCH": "base", "BUILDKITE_COMMIT": "commit"},
)
@mock.patch("subprocess.check_call")
@mock.patch("subprocess.check_output")
def test_get_human_specified_tests(mock_check_output, mock_check_call) -> None:
mock_check_output.return_value = b"hi\n@microcheck //test01 //test02\nthere"
assert Test._get_human_specified_tests("") == {"//test01", "//test02"}
def test_gen_microcheck_tests() -> None:
test_harness = [
{
"input": [],
"changed_tests": set(),
"human_tests": set(),
"output": set(),
},
{
"input": [
_stub_test(
{
"name": "linux://core_good",
"team": "core",
Test.KEY_IS_HIGH_IMPACT: "true",
}
),
_stub_test(
{
"name": "linux://serve_good",
"team": "serve",
Test.KEY_IS_HIGH_IMPACT: "true",
}
),
],
"changed_tests": {"//core_new"},
"human_tests": {"//human_test"},
"output": {
"//core_good",
"//core_new",
"//human_test",
},
},
]
for test in test_harness:
with mock.patch(
"ray_release.test.Test.gen_from_s3",
return_value=test["input"],
), mock.patch(
"ray_release.test.Test._get_changed_tests",
return_value=test["changed_tests"],
), mock.patch(
"ray_release.test.Test._get_human_specified_tests",
return_value=test["human_tests"],
):
assert (
Test.gen_microcheck_tests(
prefix="linux",
bazel_workspace_dir="",
team="core",
)
== test["output"]
)
@patch("ray_release.test.Test.get_byod_base_image_tag")
def test_get_byod_image_tag(mock_get_byod_base_image_tag):
test = _stub_test(
{
"name": "linux://test",
"cluster": {
"byod": {
"post_build_script": "test_post_build_script.sh",
"python_depset": "test_python_depset.lock",
},
},
}
)
mock_get_byod_base_image_tag.return_value = "test-image"
custom_info = {
"post_build_script": "test_post_build_script.sh",
"python_depset": "test_python_depset.lock",
}
hash_value = dict_hash(custom_info)
assert test.get_byod_image_tag() == f"test-image-{hash_value}"
@patch("ray_release.test.Test.get_byod_base_image_tag")
def test_get_byod_image_tag_ray_version(mock_get_byod_base_image_tag):
test = _stub_test(
{
"name": "linux://test",
"cluster": {
"ray_version": "2.50.0",
"byod": {
"post_build_script": "test_post_build_script.sh",
"python_depset": "test_python_depset.lock",
},
},
}
)
mock_get_byod_base_image_tag.return_value = "test-image"
custom_info = {
"post_build_script": "test_post_build_script.sh",
"python_depset": "test_python_depset.lock",
}
hash_value = dict_hash(custom_info)
assert test.get_byod_image_tag() == f"test-image-{hash_value}-2.50.0"
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| MockTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/annotation.py | {
"start": 6911,
"end": 18370
} | class ____(SupportsAnnotations):
"""clones a SupportsAnnotations and applies an 'annotations' dictionary.
Unlike regular clones, this clone also mimics __hash__() and
__eq__() of the original element so that it takes its place
in hashed collections.
A reference to the original element is maintained, for the important
reason of keeping its hash value current. When GC'ed, the
hash value may be reused, causing conflicts.
.. note:: The rationale for Annotated producing a brand new class,
rather than placing the functionality directly within ClauseElement,
is **performance**. The __hash__() method is absent on plain
ClauseElement which leads to significantly reduced function call
overhead, as the use of sets and dictionaries against ClauseElement
objects is prevalent, but most are not "annotated".
"""
_is_column_operators = False
@classmethod
def _as_annotated_instance(
cls, element: SupportsWrappingAnnotations, values: _AnnotationDict
) -> Annotated:
try:
cls = annotated_classes[element.__class__]
except KeyError:
cls = _new_annotation_type(element.__class__, cls)
return cls(element, values)
_annotations: util.immutabledict[str, Any]
__element: SupportsWrappingAnnotations
_hash: int
def __new__(cls: Type[Self], *args: Any) -> Self:
return object.__new__(cls)
def __init__(
self, element: SupportsWrappingAnnotations, values: _AnnotationDict
):
self.__dict__ = element.__dict__.copy()
self.__dict__.pop("_annotations_cache_key", None)
self.__dict__.pop("_generate_cache_key", None)
self.__element = element
self._annotations = util.immutabledict(values)
self._hash = hash(element)
def _annotate(self, values: _AnnotationDict) -> Self:
_values = self._annotations.union(values)
new = self._with_annotations(_values)
return new
def _with_annotations(self, values: _AnnotationDict) -> Self:
clone = self.__class__.__new__(self.__class__)
clone.__dict__ = self.__dict__.copy()
clone.__dict__.pop("_annotations_cache_key", None)
clone.__dict__.pop("_generate_cache_key", None)
clone._annotations = util.immutabledict(values)
return clone
@overload
def _deannotate(
self,
values: Literal[None] = ...,
clone: bool = ...,
) -> Self: ...
@overload
def _deannotate(
self,
values: Sequence[str] = ...,
clone: bool = ...,
) -> Annotated: ...
def _deannotate(
self,
values: Optional[Sequence[str]] = None,
clone: bool = True,
) -> SupportsAnnotations:
if values is None:
return self.__element
else:
return self._with_annotations(
util.immutabledict(
{
key: value
for key, value in self._annotations.items()
if key not in values
}
)
)
if not typing.TYPE_CHECKING:
# manually proxy some methods that need extra attention
def _compiler_dispatch(self, visitor: Any, **kw: Any) -> Any:
return self.__element.__class__._compiler_dispatch(
self, visitor, **kw
)
@property
def _constructor(self):
return self.__element._constructor
def _clone(self, **kw: Any) -> Self:
clone = self.__element._clone(**kw)
if clone is self.__element:
# detect immutable, don't change anything
return self
else:
# update the clone with any changes that have occurred
# to this object's __dict__.
clone.__dict__.update(self.__dict__)
return self.__class__(clone, self._annotations)
def __reduce__(self) -> Tuple[Type[Annotated], Tuple[Any, ...]]:
return self.__class__, (self.__element, self._annotations)
def __hash__(self) -> int:
return self._hash
def __eq__(self, other: Any) -> bool:
if self._is_column_operators:
return self.__element.__class__.__eq__(self, other)
else:
return hash(other) == hash(self)
@util.ro_non_memoized_property
def entity_namespace(self) -> _EntityNamespace:
if "entity_namespace" in self._annotations:
return cast(
SupportsWrappingAnnotations,
self._annotations["entity_namespace"],
).entity_namespace
else:
return self.__element.entity_namespace
# hard-generate Annotated subclasses. this technique
# is used instead of on-the-fly types (i.e. type.__new__())
# so that the resulting objects are pickleable; additionally, other
# decisions can be made up front about the type of object being annotated
# just once per class rather than per-instance.
annotated_classes: Dict[Type[SupportsWrappingAnnotations], Type[Annotated]] = (
{}
)
_SA = TypeVar("_SA", bound="SupportsAnnotations")
def _safe_annotate(to_annotate: _SA, annotations: _AnnotationDict) -> _SA:
try:
_annotate = to_annotate._annotate
except AttributeError:
# skip objects that don't actually have an `_annotate`
# attribute, namely QueryableAttribute inside of a join
# condition
return to_annotate
else:
return _annotate(annotations)
def _deep_annotate(
element: _SA,
annotations: _AnnotationDict,
exclude: Optional[Sequence[SupportsAnnotations]] = None,
*,
detect_subquery_cols: bool = False,
ind_cols_on_fromclause: bool = False,
annotate_callable: Optional[
Callable[[SupportsAnnotations, _AnnotationDict], SupportsAnnotations]
] = None,
) -> _SA:
"""Deep copy the given ClauseElement, annotating each element
with the given annotations dictionary.
Elements within the exclude collection will be cloned but not annotated.
"""
# annotated objects hack the __hash__() method so if we want to
# uniquely process them we have to use id()
cloned_ids: Dict[int, SupportsAnnotations] = {}
def clone(elem: SupportsAnnotations, **kw: Any) -> SupportsAnnotations:
# ind_cols_on_fromclause means make sure an AnnotatedFromClause
# has its own .c collection independent of that which its proxying.
# this is used specifically by orm.LoaderCriteriaOption to break
# a reference cycle that it's otherwise prone to building,
# see test_relationship_criteria->
# test_loader_criteria_subquery_w_same_entity. logic here was
# changed for #8796 and made explicit; previously it occurred
# by accident
kw["detect_subquery_cols"] = detect_subquery_cols
id_ = id(elem)
if id_ in cloned_ids:
return cloned_ids[id_]
if (
exclude
and hasattr(elem, "proxy_set")
and elem.proxy_set.intersection(exclude)
):
newelem = elem._clone(clone=clone, **kw)
elif annotations != elem._annotations:
if detect_subquery_cols and elem._is_immutable:
to_annotate = elem._clone(clone=clone, **kw)
else:
to_annotate = elem
if annotate_callable:
newelem = annotate_callable(to_annotate, annotations)
else:
newelem = _safe_annotate(to_annotate, annotations)
else:
newelem = elem
newelem._copy_internals(
clone=clone,
ind_cols_on_fromclause=ind_cols_on_fromclause,
_annotations_traversal=True,
)
cloned_ids[id_] = newelem
return newelem
if element is not None:
element = cast(_SA, clone(element))
clone = None # type: ignore # remove gc cycles
return element
@overload
def _deep_deannotate(
element: Literal[None], values: Optional[Sequence[str]] = None
) -> Literal[None]: ...
@overload
def _deep_deannotate(
element: _SA, values: Optional[Sequence[str]] = None
) -> _SA: ...
def _deep_deannotate(
element: Optional[_SA], values: Optional[Sequence[str]] = None
) -> Optional[_SA]:
"""Deep copy the given element, removing annotations."""
cloned: Dict[Any, SupportsAnnotations] = {}
def clone(elem: SupportsAnnotations, **kw: Any) -> SupportsAnnotations:
key: Any
if values:
key = id(elem)
else:
key = elem
if key not in cloned:
newelem = elem._deannotate(values=values, clone=True)
newelem._copy_internals(clone=clone, _annotations_traversal=True)
cloned[key] = newelem
return newelem
else:
return cloned[key]
if element is not None:
element = cast(_SA, clone(element))
clone = None # type: ignore # remove gc cycles
return element
def _shallow_annotate(element: _SA, annotations: _AnnotationDict) -> _SA:
"""Annotate the given ClauseElement and copy its internals so that
internal objects refer to the new annotated object.
Basically used to apply a "don't traverse" annotation to a
selectable, without digging throughout the whole
structure wasting time.
"""
element = element._annotate(annotations)
element._copy_internals(_annotations_traversal=True)
return element
def _new_annotation_type(
cls: Type[SupportsWrappingAnnotations], base_cls: Type[Annotated]
) -> Type[Annotated]:
"""Generates a new class that subclasses Annotated and proxies a given
element type.
"""
if issubclass(cls, Annotated):
return cls
elif cls in annotated_classes:
return annotated_classes[cls]
for super_ in cls.__mro__:
# check if an Annotated subclass more specific than
# the given base_cls is already registered, such
# as AnnotatedColumnElement.
if super_ in annotated_classes:
base_cls = annotated_classes[super_]
break
annotated_classes[cls] = anno_cls = cast(
Type[Annotated],
type("Annotated%s" % cls.__name__, (base_cls, cls), {}),
)
globals()["Annotated%s" % cls.__name__] = anno_cls
if "_traverse_internals" in cls.__dict__:
anno_cls._traverse_internals = list(cls._traverse_internals) + [
("_annotations", InternalTraversal.dp_annotations_key)
]
elif cls.__dict__.get("inherit_cache", False):
anno_cls._traverse_internals = list(cls._traverse_internals) + [
("_annotations", InternalTraversal.dp_annotations_key)
]
# some classes include this even if they have traverse_internals
# e.g. BindParameter, add it if present.
if cls.__dict__.get("inherit_cache", False):
anno_cls.inherit_cache = True # type: ignore
elif "inherit_cache" in cls.__dict__:
anno_cls.inherit_cache = cls.__dict__["inherit_cache"] # type: ignore
anno_cls._is_column_operators = issubclass(cls, operators.ColumnOperators)
return anno_cls
def _prepare_annotations(
target_hierarchy: Type[SupportsWrappingAnnotations],
base_cls: Type[Annotated],
) -> None:
for cls in util.walk_subclasses(target_hierarchy):
_new_annotation_type(cls, base_cls)
| Annotated |
python | getsentry__sentry | src/sentry/overwatch_webhooks/overwatch_consent/impl.py | {
"start": 563,
"end": 1930
} | class ____(OverwatchConsentService):
def get_organization_consent_status(
self, *, organization_ids: list[int], region_name: str
) -> dict[int, RpcOrganizationConsentStatus]:
"""
Get consent status for multiple organizations in a region.
Consent is determined by the combination of 2 different organization option values:
- sentry:hide_ai_features should be False (default)
- sentry:enable_pr_review_test_generation should be True (default is False)
"""
organizations = Organization.objects.filter(id__in=organization_ids)
result: dict[int, RpcOrganizationConsentStatus] = {}
for org in organizations:
hide_ai_features = bool(
org.get_option("sentry:hide_ai_features", HIDE_AI_FEATURES_DEFAULT)
)
pr_review_test_generation_enabled = bool(
org.get_option(
"sentry:enable_pr_review_test_generation",
ENABLE_PR_REVIEW_TEST_GENERATION_DEFAULT,
)
)
has_consent = not hide_ai_features and pr_review_test_generation_enabled
result[org.id] = RpcOrganizationConsentStatus(
organization_id=org.id,
has_consent=has_consent,
)
return result
| DatabaseBackedOverwatchConsentService |
python | tornadoweb__tornado | tornado/httpserver.py | {
"start": 1510,
"end": 10551
} | class ____(TCPServer, Configurable, httputil.HTTPServerConnectionDelegate):
r"""A non-blocking, single-threaded HTTP server.
A server is defined by a subclass of `.HTTPServerConnectionDelegate`,
or, for backwards compatibility, a callback that takes an
`.HTTPServerRequest` as an argument. The delegate is usually a
`tornado.web.Application`.
`HTTPServer` supports keep-alive connections by default
(automatically for HTTP/1.1, or for HTTP/1.0 when the client
requests ``Connection: keep-alive``).
If ``xheaders`` is ``True``, we support the
``X-Real-Ip``/``X-Forwarded-For`` and
``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
remote IP and URI scheme/protocol for all requests. These headers
are useful when running Tornado behind a reverse proxy or load
balancer. The ``protocol`` argument can also be set to ``https``
if Tornado is run behind an SSL-decoding proxy that does not set one of
the supported ``xheaders``.
By default, when parsing the ``X-Forwarded-For`` header, Tornado will
select the last (i.e., the closest) address on the list of hosts as the
remote host IP address. To select the next server in the chain, a list of
trusted downstream hosts may be passed as the ``trusted_downstream``
argument. These hosts will be skipped when parsing the ``X-Forwarded-For``
header.
To make this server serve SSL traffic, send the ``ssl_options`` keyword
argument with an `ssl.SSLContext` object. For compatibility with older
versions of Python ``ssl_options`` may also be a dictionary of keyword
arguments for the `ssl.SSLContext.wrap_socket` method.::
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
os.path.join(data_dir, "mydomain.key"))
HTTPServer(application, ssl_options=ssl_ctx)
`HTTPServer` initialization follows one of three patterns (the
initialization methods are defined on `tornado.tcpserver.TCPServer`):
1. `~tornado.tcpserver.TCPServer.listen`: single-process::
async def main():
server = HTTPServer()
server.listen(8888)
await asyncio.Event().wait()
asyncio.run(main())
In many cases, `tornado.web.Application.listen` can be used to avoid
the need to explicitly create the `HTTPServer`.
While this example does not create multiple processes on its own, when
the ``reuse_port=True`` argument is passed to ``listen()`` you can run
the program multiple times to create a multi-process service.
2. `~tornado.tcpserver.TCPServer.add_sockets`: multi-process::
sockets = bind_sockets(8888)
tornado.process.fork_processes(0)
async def post_fork_main():
server = HTTPServer()
server.add_sockets(sockets)
await asyncio.Event().wait()
asyncio.run(post_fork_main())
The ``add_sockets`` interface is more complicated, but it can be used with
`tornado.process.fork_processes` to run a multi-process service with all
worker processes forked from a single parent. ``add_sockets`` can also be
used in single-process servers if you want to create your listening
sockets in some way other than `~tornado.netutil.bind_sockets`.
Note that when using this pattern, nothing that touches the event loop
can be run before ``fork_processes``.
3. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
simple **deprecated** multi-process::
server = HTTPServer()
server.bind(8888)
server.start(0) # Forks multiple sub-processes
IOLoop.current().start()
This pattern is deprecated because it requires interfaces in the
`asyncio` module that have been deprecated since Python 3.10. Support for
creating multiple processes in the ``start`` method will be removed in a
future version of Tornado.
.. versionchanged:: 4.0
Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
arguments. Added support for `.HTTPServerConnectionDelegate`
instances as ``request_callback``.
.. versionchanged:: 4.1
`.HTTPServerConnectionDelegate.start_request` is now called with
two arguments ``(server_conn, request_conn)`` (in accordance with the
documentation) instead of one ``(request_conn)``.
.. versionchanged:: 4.2
`HTTPServer` is now a subclass of `tornado.util.Configurable`.
.. versionchanged:: 4.5
Added the ``trusted_downstream`` argument.
.. versionchanged:: 5.0
The ``io_loop`` argument has been removed.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
# Ignore args to __init__; real initialization belongs in
# initialize since we're Configurable. (there's something
# weird in initialization order between this class,
# Configurable, and TCPServer so we can't leave __init__ out
# completely)
pass
def initialize(
self,
request_callback: Union[
httputil.HTTPServerConnectionDelegate,
Callable[[httputil.HTTPServerRequest], None],
],
no_keep_alive: bool = False,
xheaders: bool = False,
ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
protocol: Optional[str] = None,
decompress_request: bool = False,
chunk_size: Optional[int] = None,
max_header_size: Optional[int] = None,
idle_connection_timeout: Optional[float] = None,
body_timeout: Optional[float] = None,
max_body_size: Optional[int] = None,
max_buffer_size: Optional[int] = None,
trusted_downstream: Optional[List[str]] = None,
) -> None:
# This method's signature is not extracted with autodoc
# because we want its arguments to appear on the class
# constructor. When changing this signature, also update the
# copy in httpserver.rst.
self.request_callback = request_callback
self.xheaders = xheaders
self.protocol = protocol
self.conn_params = HTTP1ConnectionParameters(
decompress=decompress_request,
chunk_size=chunk_size,
max_header_size=max_header_size,
header_timeout=idle_connection_timeout or 3600,
max_body_size=max_body_size,
body_timeout=body_timeout,
no_keep_alive=no_keep_alive,
)
TCPServer.__init__(
self,
ssl_options=ssl_options,
max_buffer_size=max_buffer_size,
read_chunk_size=chunk_size,
)
self._connections = set() # type: Set[HTTP1ServerConnection]
self.trusted_downstream = trusted_downstream
@classmethod
def configurable_base(cls) -> Type[Configurable]:
return HTTPServer
@classmethod
def configurable_default(cls) -> Type[Configurable]:
return HTTPServer
async def close_all_connections(self) -> None:
"""Close all open connections and asynchronously wait for them to finish.
This method is used in combination with `~.TCPServer.stop` to
support clean shutdowns (especially for unittests). Typical
usage would call ``stop()`` first to stop accepting new
connections, then ``await close_all_connections()`` to wait for
existing connections to finish.
This method does not currently close open websocket connections.
Note that this method is a coroutine and must be called with ``await``.
"""
while self._connections:
# Peek at an arbitrary element of the set
conn = next(iter(self._connections))
await conn.close()
def handle_stream(self, stream: iostream.IOStream, address: Tuple) -> None:
context = _HTTPRequestContext(
stream, address, self.protocol, self.trusted_downstream
)
conn = HTTP1ServerConnection(stream, self.conn_params, context)
self._connections.add(conn)
conn.start_serving(self)
def start_request(
self, server_conn: object, request_conn: httputil.HTTPConnection
) -> httputil.HTTPMessageDelegate:
if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate):
delegate = self.request_callback.start_request(server_conn, request_conn)
else:
delegate = _CallableAdapter(self.request_callback, request_conn)
if self.xheaders:
delegate = _ProxyAdapter(delegate, request_conn)
return delegate
def on_close(self, server_conn: object) -> None:
self._connections.remove(typing.cast(HTTP1ServerConnection, server_conn))
| HTTPServer |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 39558,
"end": 47570
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
pca_ = helper_functions.get_value("PCA")
if pca_ is None:
return np.NaN
components = pca_.components_
pca_.components_ = components[:1]
transformed = pca_.transform(X)
pca_.components_ = components
skewness = scipy.stats.skew(transformed)
return skewness[0]
def calculate_all_metafeatures_encoded_labels(
X, y, feat_type, dataset_name, logger, calculate=None, dont_calculate=None
):
"""
Calculate only metafeatures for which a 1HotEncoded feature matrix is necessery.
"""
calculate = set()
calculate.update(npy_metafeatures)
return calculate_all_metafeatures(
X,
y,
feat_type,
dataset_name,
calculate=calculate,
dont_calculate=dont_calculate,
logger=logger,
)
def calculate_all_metafeatures_with_labels(
X, y, feat_type, dataset_name, logger, calculate=None, dont_calculate=None
):
if dont_calculate is None:
dont_calculate = set()
else:
dont_calculate = copy.deepcopy(dont_calculate)
dont_calculate.update(npy_metafeatures)
return calculate_all_metafeatures(
X,
y,
feat_type,
dataset_name,
calculate=calculate,
dont_calculate=dont_calculate,
logger=logger,
)
def calculate_all_metafeatures(
X,
y,
feat_type,
dataset_name,
logger,
calculate=None,
dont_calculate=None,
densify_threshold=1000,
):
"""Calculate all metafeatures."""
helper_functions.clear()
metafeatures.clear()
mf_ = dict()
visited = set()
to_visit = deque()
to_visit.extend(metafeatures)
X_transformed = None
y_transformed = None
# TODO calculate the numpy metafeatures after all others to consume less
# memory
while len(to_visit) > 0:
name = to_visit.pop()
if calculate is not None and name not in calculate:
continue
if dont_calculate is not None and name in dont_calculate:
continue
if name in npy_metafeatures:
if X_transformed is None:
# TODO make sure this is done as efficient as possible (no copy for
# sparse matrices because of wrong sparse format)
sparse = scipy.sparse.issparse(X)
# TODO make this more cohesive to the overall structure (quick bug fix)
if isinstance(X, pd.DataFrame):
for key in X.select_dtypes(include="string").columns:
feat_type[key] = "string"
DPP = FeatTypeSplit(
# The difference between feat_type and categorical, is that
# categorical has True/False instead of categorical/numerical
feat_type=feat_type,
force_sparse_output=True,
)
X_transformed = DPP.fit_transform(X)
feat_type_transformed = {
i: "numerical" for i in range(X_transformed.shape[1])
}
# Densify the transformed matrix
if not sparse and scipy.sparse.issparse(X_transformed):
bytes_per_float = X_transformed.dtype.itemsize
num_elements = X_transformed.shape[0] * X_transformed.shape[1]
megabytes_required = num_elements * bytes_per_float / 1000 / 1000
if megabytes_required < densify_threshold:
X_transformed = X_transformed.todense()
# This is not only important for datasets which are somehow
# sorted in a strange way, but also prevents lda from failing in
# some cases.
# Because this is advanced indexing, a copy of the data is returned!!!
X_transformed = check_array(
X_transformed, force_all_finite=True, accept_sparse="csr"
)
indices = np.arange(X_transformed.shape[0])
rs = np.random.RandomState(42)
rs.shuffle(indices)
# TODO Shuffle inplace
X_transformed = X_transformed[indices]
y_transformed = y[indices]
X_ = X_transformed
y_ = y_transformed
feat_type_ = feat_type_transformed
else:
X_ = X
y_ = y
feat_type_ = feat_type
dependency = metafeatures.get_dependency(name)
if dependency is not None:
is_metafeature = dependency in metafeatures
is_helper_function = dependency in helper_functions
if is_metafeature and is_helper_function:
raise NotImplementedError()
elif not is_metafeature and not is_helper_function:
raise ValueError(dependency)
elif is_metafeature and not metafeatures.is_calculated(dependency):
to_visit.appendleft(name)
continue
elif is_helper_function and not helper_functions.is_calculated(dependency):
logger.info("%s: Going to calculate: %s", dataset_name, dependency)
value = helper_functions[dependency](
X_, y_, feat_type=feat_type_, logger=logger
)
helper_functions.set_value(dependency, value)
mf_[dependency] = value
logger.info("%s: Going to calculate: %s", dataset_name, name)
value = metafeatures[name](X_, y_, logger, feat_type_)
metafeatures.set_value(name, value)
mf_[name] = value
visited.add(name)
mf_ = DatasetMetafeatures(dataset_name, mf_)
return mf_
npy_metafeatures = set(
[
"LandmarkLDA",
"LandmarkNaiveBayes",
"LandmarkDecisionTree",
"LandmarkDecisionNodeLearner",
"LandmarkRandomNodeLearner",
"LandmarkWorstNodeLearner",
"Landmark1NN",
"PCAFractionOfComponentsFor95PercentVariance",
"PCAKurtosisFirstPC",
"PCASkewnessFirstPC",
"Skewnesses",
"SkewnessMin",
"SkewnessMax",
"SkewnessMean",
"SkewnessSTD",
"Kurtosisses",
"KurtosisMin",
"KurtosisMax",
"KurtosisMean",
"KurtosisSTD",
]
)
subsets = dict()
# All implemented metafeatures
subsets["all"] = set(metafeatures.functions.keys())
# Metafeatures used by Pfahringer et al. (2000) in the first experiment
subsets["pfahringer_2000_experiment1"] = set(
[
"number_of_features",
"number_of_numeric_features",
"number_of_categorical_features",
"number_of_classes",
"class_probability_max",
"landmark_lda",
"landmark_naive_bayes",
"landmark_decision_tree",
]
)
# Metafeatures used by Pfahringer et al. (2000) in the second experiment
# worst node learner not implemented yet
"""
pfahringer_2000_experiment2 = set(["landmark_decision_node_learner",
"landmark_random_node_learner",
"landmark_worst_node_learner",
"landmark_1NN"])
"""
# Metafeatures used by Yogatama and Mann (2014)
subsets["yogotama_2014"] = set(
["log_number_of_features", "log_number_of_instances", "number_of_classes"]
)
# Metafeatures used by Bardenet et al. (2013) for the AdaBoost.MH experiment
subsets["bardenet_2013_boost"] = set(
[
"number_of_classes",
"log_number_of_features",
"log_inverse_dataset_ratio",
"pca_95percent",
]
)
# Metafeatures used by Bardenet et al. (2013) for the Neural Net experiment
subsets["bardenet_2013_nn"] = set(
[
"number_of_classes",
"log_number_of_features",
"log_inverse_dataset_ratio",
"pca_kurtosis_first_pc",
"pca_skewness_first_pc",
]
)
| PCASkewnessFirstPC |
python | mlflow__mlflow | mlflow/genai/evaluation/context.py | {
"start": 1418,
"end": 4020
} | class ____(Context):
"""
Context for eval execution.
NOTE: This class is not covered by unit tests and is meant to be tested through
smoke tests that run this code on an actual Databricks cluster.
"""
def __init__(self):
self._run_id = None
self._context_tags = context_registry.resolve_tags()
def get_mlflow_experiment_id(self) -> str | None:
# Note `_get_experiment_id` is thread-safe
return mlflow.tracking.fluent._get_experiment_id()
def get_mlflow_run_id(self) -> str | None:
"""
Gets the MLflow run_id the evaluation harness is running under.
Warning: This run_id may not be active. This happens when `get_mlflow_run_id` is called from
a different thread than the one that started the MLflow run.
"""
# First check if a run ID is specified explicitly by the parent thread
if self._run_id:
return self._run_id
# Otherwise fall back to the active run in the current thread
if run := mlflow.active_run():
return run.info.run_id
return None
def set_mlflow_run_id(self, run_id: str) -> None:
"""
Set the MLflow run ID explicitly.
This method should be called when running code in a different thread than the one that
started the MLflow run. It sets the run ID in a thread-local variable so that it can be
accessed from the thread.
"""
self._run_id = run_id
def get_user_name(self) -> str:
return self._context_tags.get(MLFLOW_USER, "unknown")
# Context is a singleton.
_context_singleton = NoneContext()
def context_is_active() -> bool:
"""
Check if a context is active.
"""
return not isinstance(get_context(), NoneContext)
def get_context() -> Context:
"""
Get the context.
"""
return _context_singleton
def eval_context(func: Callable[P, R]) -> Callable[P, R]:
"""
Decorator for wrapping all eval APIs with setup and closure logic.
Sets up a context singleton with RealContext if there isn't one already.
"""
@functools.wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
# Set up the context singleton if it doesn't exist
if not context_is_active():
global _context_singleton
_context_singleton = RealContext()
return func(*args, **kwargs)
return wrapper
def _set_context(context: Context) -> None:
"""SHOULD ONLY BE USED FOR TESTING."""
global _context_singleton
_context_singleton = context
| RealContext |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 43293,
"end": 44687
} | class ____(ColumnConstraint):
"""A column constraint that ensures all values in a pandas column are greater than the provided
lower bound [inclusive].
Args:
min_value (Union[int, float, datetime.datetime]): The lower bound.
ignore_missing_vals (bool): If true, this constraint will enforce the constraint on non missing values.
"""
def __init__(self, min_value, ignore_missing_vals):
self.min_value = check.inst_param(min_value, "min_value", (int, float, datetime))
self.ignore_missing_vals = check.bool_param(ignore_missing_vals, "ignore_missing_vals")
super().__init__(
markdown_description=f"values > {self.min_value}",
error_description=f"Column must have values > {self.min_value}",
)
def validate(self, dataframe, column_name):
invalid = dataframe[column_name] < self.min_value
if self.ignore_missing_vals:
invalid = apply_ignore_missing_data_to_mask(invalid, dataframe[column_name])
out_of_bounds_rows = dataframe[invalid]
if not out_of_bounds_rows.empty:
raise ColumnConstraintViolationException(
constraint_name=self.name,
constraint_description=self.error_description,
column_name=column_name,
offending_rows=out_of_bounds_rows,
)
| MinValueColumnConstraint |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 90572,
"end": 91019
} | class ____(object):
''' This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). '''
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
| _closeiter |
python | pypa__pip | src/pip/_vendor/rich/pretty.py | {
"start": 8147,
"end": 14333
} | class ____(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
margin (int, optional): Subtrace a margin from width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: Optional["HighlighterType"] = None,
*,
indent_size: int = 4,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
max_depth: Optional[int] = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify: Optional["JustifyMethod"] = justify
self.overflow: Optional["OverflowMethod"] = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.max_depth = max_depth
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
max_depth=self.max_depth,
expand_all=self.expand_all,
)
pretty_text = Text.from_ansi(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
max_depth=self.max_depth,
expand_all=self.expand_all,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_deque(_object: Deque[Any]) -> Tuple[str, str, str]:
if _object.maxlen is None:
return ("deque([", "])", "deque()")
return (
"deque([",
f"], maxlen={_object.maxlen})",
f"deque(maxlen={_object.maxlen})",
)
def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
    """Return (open, close, empty-repr) brace fragments for array.array."""
    code = _object.typecode
    return (f"array({code!r}, [", "])", f"array({code!r})")
# Maps container types to a callable producing (open, close, empty-repr)
# brace fragments for that instance. Callables take the instance so types
# like defaultdict/deque/array can embed per-instance details (factory,
# maxlen, typecode) in the fragments.
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
    os._Environ: lambda _object: ("environ({", "})", "environ({})"),
    array: _get_braces_for_array,
    defaultdict: _get_braces_for_defaultdict,
    Counter: lambda _object: ("Counter({", "})", "Counter()"),
    deque: _get_braces_for_deque,
    dict: lambda _object: ("{", "}", "{}"),
    UserDict: lambda _object: ("{", "}", "{}"),
    frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
    list: lambda _object: ("[", "]", "[]"),
    UserList: lambda _object: ("[", "]", "[]"),
    set: lambda _object: ("{", "}", "set()"),
    tuple: lambda _object: ("(", ")", "()"),
    MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
}
# All container types eligible for expansion (used as an isinstance() tuple).
_CONTAINERS = tuple(_BRACES.keys())
# Subset of containers rendered as key/value mappings.
_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
def is_expandable(obj: Any) -> bool:
    """Check if an object may be expanded by pretty print."""
    # Classes themselves are never expanded, only instances.
    if isclass(obj):
        return False
    return (
        _safe_isinstance(obj, _CONTAINERS)
        or is_dataclass(obj)
        or hasattr(obj, "__rich_repr__")
        or _is_attr_object(obj)
    )
@dataclass
| Pretty |
python | RaRe-Technologies__gensim | gensim/test/test_aggregation.py | {
"start": 372,
"end": 797
} | class ____(unittest.TestCase):
    def setUp(self):
        # Shared fixture: an arbitrary list of confirmation measures consumed
        # by every test in this case.
        self.confirmed_measures = [1.1, 2.2, 3.3, 4.4]
    def test_arithmetic_mean(self):
        """Test arithmetic_mean()"""
        obtained = aggregation.arithmetic_mean(self.confirmed_measures)
        # mean([1.1, 2.2, 3.3, 4.4]) == 11.0 / 4 == 2.75
        expected = 2.75
        self.assertEqual(obtained, expected)
if __name__ == '__main__':
    # Quiet sub-WARNING log noise during the run, then hand off to unittest.
    logging.root.setLevel(logging.WARNING)
    unittest.main()
| TestAggregation |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/base_streams.py | {
"start": 20685,
"end": 21108
} | class ____(IncrementalShopifySubstream):
slice_key = "id"
data_field = "metafields"
parent_stream_class: Union[ShopifyStream, IncrementalShopifyStream] = None
    def path(self, stream_slice: Optional[Mapping[str, Any]] = None, **kwargs) -> str:
        """Build the REST path for one parent record's metafields.

        ``stream_slice[self.slice_key]`` carries the parent object's id,
        e.g. ``orders/123/metafields.json``.
        """
        object_id = stream_slice[self.slice_key]
        return f"{self.parent_stream_class.data_field}/{object_id}/{self.data_field}.json"
| MetafieldShopifySubstream |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/plugin/pytestplugin.py | {
"start": 5907,
"end": 21646
} | class ____:
    def pytest_configure_node(self, node):
        """pytest-xdist hook: provision a follower test database for a worker
        before it starts, keyed by a fresh random ident."""
        from sqlalchemy.testing import provision
        from sqlalchemy.testing import asyncio

        # the master for each node fills workerinput dictionary
        # which pytest-xdist will transfer to the subprocess
        plugin_base.memoize_important_follower_config(node.workerinput)

        node.workerinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]

        asyncio._maybe_async_provisioning(
            provision.create_follower_db, node.workerinput["follower_ident"]
        )
    def pytest_testnodedown(self, node, error):
        """pytest-xdist hook: drop the worker's follower database when the
        worker exits."""
        from sqlalchemy.testing import provision
        from sqlalchemy.testing import asyncio

        asyncio._maybe_async_provisioning(
            provision.drop_follower_db, node.workerinput["follower_ident"]
        )
def pytest_collection_modifyitems(session, config, items):
# look for all those classes that specify __backend__ and
# expand them out into per-database test cases.
# this is much easier to do within pytest_pycollect_makeitem, however
# pytest is iterating through cls.__dict__ as makeitem is
# called which causes a "dictionary changed size" error on py3k.
# I'd submit a pullreq for them to turn it into a list first, but
# it's to suit the rather odd use case here which is that we are adding
# new classes to a module on the fly.
from sqlalchemy.testing import asyncio
rebuilt_items = collections.defaultdict(
lambda: collections.defaultdict(list)
)
items[:] = [
item
for item in items
if item.getparent(pytest.Class) is not None
and not item.getparent(pytest.Class).name.startswith("_")
]
test_classes = {item.getparent(pytest.Class) for item in items}
def collect(element):
for inst_or_fn in element.collect():
if isinstance(inst_or_fn, pytest.Collector):
yield from collect(inst_or_fn)
else:
yield inst_or_fn
def setup_test_classes():
for test_class in test_classes:
# transfer legacy __backend__ and __sparse_backend__ symbols
# to be markers
if getattr(test_class.cls, "__backend__", False) or getattr(
test_class.cls, "__only_on__", False
):
add_markers = {"backend"}
elif getattr(test_class.cls, "__sparse_backend__", False):
add_markers = {"sparse_backend", "backend"}
elif getattr(test_class.cls, "__sparse_driver_backend__", False):
add_markers = {"sparse_driver_backend", "backend"}
else:
add_markers = frozenset()
existing_markers = {
mark.name for mark in test_class.iter_markers()
}
add_markers = add_markers - existing_markers
all_markers = existing_markers.union(add_markers)
for marker in add_markers:
test_class.add_marker(marker)
sub_tests = list(
plugin_base.generate_sub_tests(
test_class.cls, test_class.module, all_markers
)
)
if not sub_tests:
rebuilt_items[test_class.cls]
for sub_cls in sub_tests:
if sub_cls is not test_class.cls:
per_cls_dict = rebuilt_items[test_class.cls]
module = test_class.getparent(pytest.Module)
new_cls = pytest.Class.from_parent(
name=sub_cls.__name__, parent=module
)
for marker in add_markers:
new_cls.add_marker(marker)
for fn in collect(new_cls):
per_cls_dict[fn.name].append(fn)
# class requirements will sometimes need to access the DB to check
# capabilities, so need to do this for async
asyncio._maybe_async_provisioning(setup_test_classes)
newitems = []
for item in items:
cls_ = item.cls
if cls_ in rebuilt_items:
newitems.extend(rebuilt_items[cls_][item.name])
else:
newitems.append(item)
# seems like the functions attached to a test class aren't sorted already?
# is that true and why's that? (when using unittest, they're sorted)
items[:] = sorted(
newitems,
key=lambda item: (
item.getparent(pytest.Module).name,
item.getparent(pytest.Class).name,
item.name,
),
)
def pytest_pycollect_makeitem(collector, name, obj):
if inspect.isclass(obj) and plugin_base.want_class(name, obj):
from sqlalchemy.testing import config
if config.any_async:
obj = _apply_maybe_async(obj)
return [
pytest.Class.from_parent(
name=parametrize_cls.__name__, parent=collector
)
for parametrize_cls in _parametrize_cls(collector.module, obj)
]
elif (
inspect.isfunction(obj)
and collector.cls is not None
and plugin_base.want_method(collector.cls, obj)
):
# None means, fall back to default logic, which includes
# method-level parametrize
return None
else:
# empty list means skip this item
return []
def _is_wrapped_coroutine_function(fn):
    """Return True if *fn*, after peeling decorator ``__wrapped__`` layers,
    is a coroutine function."""
    target = fn
    while hasattr(target, "__wrapped__"):
        target = target.__wrapped__
    return inspect.iscoroutinefunction(target)
def _apply_maybe_async(obj, recurse=True):
from sqlalchemy.testing import asyncio
for name, value in vars(obj).items():
if (
(callable(value) or isinstance(value, classmethod))
and not getattr(value, "_maybe_async_applied", False)
and (name.startswith("test_"))
and not _is_wrapped_coroutine_function(value)
):
is_classmethod = False
if isinstance(value, classmethod):
value = value.__func__
is_classmethod = True
@_pytest_fn_decorator
def make_async(fn, *args, **kwargs):
return asyncio._maybe_async(fn, *args, **kwargs)
do_async = make_async(value)
if is_classmethod:
do_async = classmethod(do_async)
do_async._maybe_async_applied = True
setattr(obj, name, do_async)
if recurse:
for cls in obj.mro()[1:]:
if cls != object:
_apply_maybe_async(cls, False)
return obj
def _parametrize_cls(module, cls):
"""implement a class-based version of pytest parametrize."""
if "_sa_parametrize" not in cls.__dict__:
return [cls]
_sa_parametrize = cls._sa_parametrize
classes = []
for full_param_set in itertools.product(
*[params for argname, params in _sa_parametrize]
):
cls_variables = {}
for argname, param in zip(
[_sa_param[0] for _sa_param in _sa_parametrize], full_param_set
):
if not argname:
raise TypeError("need argnames for class-based combinations")
argname_split = re.split(r",\s*", argname)
for arg, val in zip(argname_split, param.values):
cls_variables[arg] = val
parametrized_name = "_".join(
re.sub(r"\W", "", token)
for param in full_param_set
for token in param.id.split("-")
)
name = "%s_%s" % (cls.__name__, parametrized_name)
newcls = type.__new__(type, name, (cls,), cls_variables)
setattr(module, name, newcls)
classes.append(newcls)
return classes
# The pytest Class node currently being run (None between classes); set in
# pytest_runtest_setup, cleared in pytest_runtest_teardown.
_current_class = None
# Active warnings.catch_warnings() context entered for the current class's
# __warnings__ filters, if any.
_current_warning_context = None
def pytest_runtest_setup(item):
from sqlalchemy.testing import asyncio
# pytest_runtest_setup runs *before* pytest fixtures with scope="class".
# plugin_base.start_test_class_outside_fixtures may opt to raise SkipTest
# for the whole class and has to run things that are across all current
# databases, so we run this outside of the pytest fixture system altogether
# and ensure asyncio greenlet if any engines are async
global _current_class, _current_warning_context
if isinstance(item, pytest.Function) and _current_class is None:
asyncio._maybe_async_provisioning(
plugin_base.start_test_class_outside_fixtures,
item.cls,
)
_current_class = item.getparent(pytest.Class)
if hasattr(_current_class.cls, "__warnings__"):
import warnings
_current_warning_context = warnings.catch_warnings()
_current_warning_context.__enter__()
for warning_message in _current_class.cls.__warnings__:
warnings.filterwarnings("ignore", warning_message)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_teardown(item, nextitem):
# runs inside of pytest function fixture scope
# after test function runs
from sqlalchemy.testing import asyncio
asyncio._maybe_async(plugin_base.after_test, item)
yield
# this is now after all the fixture teardown have run, the class can be
# finalized. Since pytest v7 this finalizer can no longer be added in
# pytest_runtest_setup since the class has not yet been setup at that
# time.
# See https://github.com/pytest-dev/pytest/issues/9343
global _current_class, _current_report, _current_warning_context
if _current_class is not None and (
# last test or a new class
nextitem is None
or nextitem.getparent(pytest.Class) is not _current_class
):
if _current_warning_context is not None:
_current_warning_context.__exit__(None, None, None)
_current_warning_context = None
_current_class = None
try:
asyncio._maybe_async_provisioning(
plugin_base.stop_test_class_outside_fixtures, item.cls
)
except Exception as e:
# in case of an exception during teardown attach the original
# error to the exception message, otherwise it will get lost
if _current_report.failed:
if not e.args:
e.args = (
"__Original test failure__:\n"
+ _current_report.longreprtext,
)
elif e.args[-1] and isinstance(e.args[-1], str):
args = list(e.args)
args[-1] += (
"\n__Original test failure__:\n"
+ _current_report.longreprtext
)
e.args = tuple(args)
else:
e.args += (
"__Original test failure__",
_current_report.longreprtext,
)
raise
finally:
_current_report = None
def pytest_runtest_call(item):
# runs inside of pytest function fixture scope
# before test function runs
from sqlalchemy.testing import asyncio
asyncio._maybe_async(
plugin_base.before_test,
item,
item.module.__name__,
item.cls,
item.name,
)
# Most recent "call"-phase TestReport, captured by pytest_runtest_logreport
# so teardown can attach the original failure text to teardown exceptions.
_current_report = None
def pytest_runtest_logreport(report):
    # Remember only the call-phase report (the test body result); setup and
    # teardown phases are not interesting for failure-context chaining.
    global _current_report
    if report.when == "call":
        _current_report = report
@pytest.fixture(scope="class")
def setup_class_methods(request):
from sqlalchemy.testing import asyncio
cls = request.cls
if hasattr(cls, "setup_test_class"):
asyncio._maybe_async(cls.setup_test_class)
yield
if hasattr(cls, "teardown_test_class"):
asyncio._maybe_async(cls.teardown_test_class)
asyncio._maybe_async(plugin_base.stop_test_class, cls)
@pytest.fixture(scope="function")
def setup_test_methods(request):
from sqlalchemy.testing import asyncio
# called for each test
self = request.instance
# before this fixture runs:
# 1. function level "autouse" fixtures under py3k (examples: TablesTest
# define tables / data, MappedTest define tables / mappers / data)
# 2. was for p2k. no longer applies
# 3. run outer xdist-style setup
if hasattr(self, "setup_test"):
asyncio._maybe_async(self.setup_test)
# alembic test suite is using setUp and tearDown
# xdist methods; support these in the test suite
# for the near term
if hasattr(self, "setUp"):
asyncio._maybe_async(self.setUp)
# inside the yield:
# 4. function level fixtures defined on test functions themselves,
# e.g. "connection", "metadata" run next
# 5. pytest hook pytest_runtest_call then runs
# 6. test itself runs
yield
# yield finishes:
# 7. function level fixtures defined on test functions
# themselves, e.g. "connection" rolls back the transaction, "metadata"
# emits drop all
# 8. pytest hook pytest_runtest_teardown hook runs, this is associated
# with fixtures close all sessions, provisioning.stop_test_class(),
# engines.testing_reaper -> ensure all connection pool connections
# are returned, engines created by testing_engine that aren't the
# config engine are disposed
asyncio._maybe_async(plugin_base.after_test_fixtures, self)
# 10. run xdist-style teardown
if hasattr(self, "tearDown"):
asyncio._maybe_async(self.tearDown)
if hasattr(self, "teardown_test"):
asyncio._maybe_async(self.teardown_test)
# 11. was for p2k. no longer applies
# 12. function level "autouse" fixtures under py3k (examples: TablesTest /
# MappedTest delete table data, possibly drop tables and clear mappers
# depending on the flags defined by the test class)
def _pytest_fn_decorator(target):
"""Port of langhelpers.decorator with pytest-specific tricks."""
from sqlalchemy.util.langhelpers import format_argspec_plus
from sqlalchemy.util.compat import inspect_getfullargspec
def _exec_code_in_env(code, env, fn_name):
# note this is affected by "from __future__ import annotations" at
# the top; exec'ed code will use non-evaluated annotations
# which allows us to be more flexible with code rendering
# in format_argpsec_plus()
exec(code, env)
return env[fn_name]
def decorate(fn, add_positional_parameters=()):
spec = inspect_getfullargspec(fn)
if add_positional_parameters:
spec.args.extend(add_positional_parameters)
metadata = dict(
__target_fn="__target_fn", __orig_fn="__orig_fn", name=fn.__name__
)
metadata.update(format_argspec_plus(spec, grouped=False))
code = (
"""\
def %(name)s%(grouped_args)s:
return %(__target_fn)s(%(__orig_fn)s, %(apply_kw)s)
"""
% metadata
)
decorated = _exec_code_in_env(
code, {"__target_fn": target, "__orig_fn": fn}, fn.__name__
)
if not add_positional_parameters:
decorated.__defaults__ = getattr(fn, "__func__", fn).__defaults__
decorated.__wrapped__ = fn
return update_wrapper(decorated, fn)
else:
# this is the pytest hacky part. don't do a full update wrapper
# because pytest is really being sneaky about finding the args
# for the wrapped function
decorated.__module__ = fn.__module__
decorated.__name__ = fn.__name__
if hasattr(fn, "pytestmark"):
decorated.pytestmark = fn.pytestmark
return decorated
return decorate
| XDistHooks |
python | urllib3__urllib3 | src/urllib3/util/ssl_match_hostname.py | {
"start": 479,
"end": 5845
} | class ____(ValueError):
pass
def _dnsname_match(
    dn: typing.Any, hostname: str, max_wildcards: int = 1
) -> typing.Match[str] | None | bool:
    """Matching according to RFC 6125, section 6.4.3
    http://tools.ietf.org/html/rfc6125#section-6.4.3
    :param dn: dNSName value taken from the certificate (may hold one ``*``).
    :param hostname: Host name presented for verification.
    :param max_wildcards: Wildcard count above which matching is refused with
        :class:`CertificateError` (denial-of-service guard, see below).
    :returns: Truthy (``True`` or an ``re.Match``) on a match; falsy
        (``False`` or ``None``) otherwise.
    """
    pats = []
    if not dn:
        return False
    # Ported from python3-syntax:
    # leftmost, *remainder = dn.split(r'.')
    parts = dn.split(r".")
    leftmost = parts[0]
    remainder = parts[1:]
    wildcards = leftmost.count("*")
    if wildcards > max_wildcards:
        # Issue #17980: avoid denials of service by refusing more
        # than one wildcard per fragment. A survey of established
        # policy among SSL implementations showed it to be a
        # reasonable choice.
        raise CertificateError(
            "too many wildcards in certificate DNS name: " + repr(dn)
        )
    # speed up common case w/o wildcards
    if not wildcards:
        return bool(dn.lower() == hostname.lower())
    # RFC 6125, section 6.4.3, subitem 1.
    # The client SHOULD NOT attempt to match a presented identifier in which
    # the wildcard character comprises a label other than the left-most label.
    if leftmost == "*":
        # When '*' is a fragment by itself, it matches a non-empty dotless
        # fragment.
        pats.append("[^.]+")
    elif leftmost.startswith("xn--") or hostname.startswith("xn--"):
        # RFC 6125, section 6.4.3, subitem 3.
        # The client SHOULD NOT attempt to match a presented identifier
        # where the wildcard character is embedded within an A-label or
        # U-label of an internationalized domain name.
        pats.append(re.escape(leftmost))
    else:
        # Otherwise, '*' matches any dotless string, e.g. www*
        pats.append(re.escape(leftmost).replace(r"\*", "[^.]*"))
    # add the remaining fragments, ignore any wildcards
    for frag in remainder:
        pats.append(re.escape(frag))
    # Case-insensitive full-string match of the assembled label-by-label
    # pattern against the presented hostname.
    pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)
    return pat.match(hostname)
def _ipaddress_match(ipname: str, host_ip: IPv4Address | IPv6Address) -> bool:
"""Exact matching of IP addresses.
RFC 9110 section 4.3.5: "A reference identity of IP-ID contains the decoded
bytes of the IP address. An IP version 4 address is 4 octets, and an IP
version 6 address is 16 octets. [...] A reference identity of type IP-ID
matches if the address is identical to an iPAddress value of the
subjectAltName extension of the certificate."
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
# Divergence from upstream: ipaddress can't handle byte str
ip = ipaddress.ip_address(ipname.rstrip())
return bool(ip.packed == host_ip.packed)
def match_hostname(
    cert: _TYPE_PEER_CERT_RET_DICT | None,
    hostname: str,
    hostname_checks_common_name: bool = False,
) -> None:
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.
    CertificateError is raised on failure. On success, the function
    returns nothing.
    NOTE(review): despite the sentence above, this implementation *does*
    parse *hostname* as an IP literal and match it against "IP Address"
    SAN entries -- see the ``host_ip`` branches below; confirm which
    statement is authoritative.
    :param cert: Decoded peer certificate dict; ``subjectAltName`` and,
        optionally, ``subject`` entries are consulted.
    :param hostname: DNS name or IP address literal to verify.
    :param hostname_checks_common_name: When True, fall back to the subject
        ``commonName`` only if the certificate carries no SAN entries at all.
    """
    if not cert:
        raise ValueError(
            "empty or no certificate, match_hostname needs a "
            "SSL socket or SSL context with either "
            "CERT_OPTIONAL or CERT_REQUIRED"
        )
    try:
        # Divergence from upstream: ipaddress can't handle byte str
        #
        # The ipaddress module shipped with Python < 3.9 does not support
        # scoped IPv6 addresses so we unconditionally strip the Zone IDs for
        # now. Once we drop support for Python 3.9 we can remove this branch.
        if "%" in hostname:
            host_ip = ipaddress.ip_address(hostname[: hostname.rfind("%")])
        else:
            host_ip = ipaddress.ip_address(hostname)
    except ValueError:
        # Not an IP address (common case)
        host_ip = None
    dnsnames = []
    san: tuple[tuple[str, str], ...] = cert.get("subjectAltName", ())
    key: str
    value: str
    for key, value in san:
        if key == "DNS":
            if host_ip is None and _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
        elif key == "IP Address":
            if host_ip is not None and _ipaddress_match(value, host_ip):
                return
            dnsnames.append(value)
    # We only check 'commonName' if it's enabled and we're not verifying
    # an IP address. IP addresses aren't valid within 'commonName'.
    if hostname_checks_common_name and host_ip is None and not dnsnames:
        for sub in cert.get("subject", ()):
            for key, value in sub:
                if key == "commonName":
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)  # Defensive: for Python < 3.9.3
    # Nothing matched: report all candidate names we collected.
    if len(dnsnames) > 1:
        raise CertificateError(
            "hostname %r "
            "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
        )
    elif len(dnsnames) == 1:
        raise CertificateError(f"hostname {hostname!r} doesn't match {dnsnames[0]!r}")
    else:
        raise CertificateError("no appropriate subjectAltName fields were found")
python | tensorflow__tensorflow | tensorflow/python/keras/layers/core.py | {
"start": 55730,
"end": 56442
} | class ____(dispatch.GlobalOpDispatcher):
"""A global dispatcher that allows building a functional model with TF Ops."""
    def handle(self, op, args, kwargs):
        """Handle the specified operation with the specified arguments."""
        # If any argument is a Keras symbolic tensor, wrap the TF op in a
        # TFOpLambda layer so the call is recorded when building a functional
        # model; otherwise decline so default op dispatch proceeds.
        if any(
            isinstance(x, keras_tensor.KerasTensor)
            for x in nest.flatten([args, kwargs])):
            return TFOpLambda(op)(*args, **kwargs)
        else:
            return self.NOT_SUPPORTED
KerasOpDispatcher().register()
def _slice_to_dict(x):
    """Convert a slice into a serializable dict; pass anything else through."""
    if not isinstance(x, slice):
        return x
    return {'start': x.start, 'stop': x.stop, 'step': x.step}
def _dict_to_slice(x):
    """Inverse of `_slice_to_dict`: rebuild a slice from its dict form."""
    if not isinstance(x, dict):
        return x
    return slice(x['start'], x['stop'], x['step'])
| KerasOpDispatcher |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/tests/test_rest.py | {
"start": 1381,
"end": 1464
} | class ____(BaseModel):
some_float: float
some_bool: bool
| TestAnotherBaseModel |
python | huggingface__transformers | src/transformers/models/squeezebert/modeling_squeezebert.py | {
"start": 4987,
"end": 5371
} | class ____(nn.Module):
"""
ConvActivation: Conv, Activation
"""
    def __init__(self, cin, cout, groups, act):
        # cin/cout: input/output channel counts for the 1x1 grouped conv;
        # groups: number of convolution groups; act: key into ACT2FN selecting
        # the activation function.
        super().__init__()
        self.conv1d = nn.Conv1d(in_channels=cin, out_channels=cout, kernel_size=1, groups=groups)
        self.act = ACT2FN[act]
def forward(self, x):
output = self.conv1d(x)
return self.act(output)
| ConvActivation |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/system_config/objects.py | {
"start": 1605,
"end": 2328
} | class ____(NamedTuple):
"""Outputs are configured as a dict if any of the outputs have an output manager with an
output_config_schema, and a list otherwise.
"""
config: Optional[Union[dict, list]]
@property
def output_names(self) -> AbstractSet[str]:
if isinstance(self.config, list):
return {key for entry in self.config for key in entry.keys()}
elif isinstance(self.config, dict):
return self.config.keys()
else:
return set()
def get_output_manager_config(self, output_name: str) -> object:
if isinstance(self.config, dict):
return self.config.get(output_name)
else:
return None
| OutputsConfig |
python | getsentry__sentry | tests/sentry/models/test_organization.py | {
"start": 7042,
"end": 19747
} | class ____(TestCase, HybridCloudTestMixin):
def setUp(self) -> None:
self.owner = self.create_user("foo@example.com")
with assume_test_silo_mode(SiloMode.CONTROL):
TotpInterface().enroll(self.owner)
self.org = self.create_organization(owner=self.owner)
self.request = self.make_request(user=self.owner)
def _create_user(self, has_email=True):
if not has_email:
return self.create_user("")
return self.create_user()
    def _create_user_and_member(self, has_2fa=False, has_user_email=True):
        """Create a user (optionally TOTP-enrolled for 2FA) and add them as a
        member of self.org; returns (user, member)."""
        user = self._create_user(has_email=has_user_email)
        if has_2fa:
            with assume_test_silo_mode(SiloMode.CONTROL):
                TotpInterface().enroll(user)
        member = self.create_member(organization=self.org, user=user)
        return user, member
def is_organization_member(self, user_id, member_id):
member = OrganizationMember.objects.get(id=member_id)
with assume_test_silo_mode(SiloMode.CONTROL):
user = User.objects.get(id=user_id)
assert not member.is_pending
assert not member.email
assert member.user_id == user.id
def is_pending_organization_member(self, user_id, member_id, was_booted=True):
member = OrganizationMember.objects.get(id=member_id)
if user_id:
with assume_test_silo_mode(SiloMode.CONTROL):
assert User.objects.filter(id=user_id).exists()
assert member.is_pending
assert member.email
if was_booted:
assert member.token
assert member.token_expires_at
else:
assert member.token is None
assert member.token_expires_at is None
def test_handle_2fa_required__compliant_and_non_compliant_members(self) -> None:
compliant_user, compliant_member = self._create_user_and_member(has_2fa=True)
non_compliant_user, non_compliant_member = self._create_user_and_member()
self.assert_org_member_mapping(org_member=compliant_member)
self.assert_org_member_mapping(org_member=non_compliant_member)
with (
self.options({"system.url-prefix": "http://example.com"}),
self.tasks(),
outbox_runner(),
):
self.org.handle_2fa_required(self.request)
self.is_organization_member(compliant_user.id, compliant_member.id)
self.is_pending_organization_member(non_compliant_user.id, non_compliant_member.id)
self.assert_org_member_mapping(org_member=compliant_member)
self.assert_org_member_mapping(org_member=non_compliant_member)
assert len(mail.outbox) == 1
assert mail.outbox[0].to == [non_compliant_user.email]
with assume_test_silo_mode(SiloMode.CONTROL):
audit_logs = AuditLogEntry.objects.filter(
event=audit_log.get_event_id("MEMBER_PENDING"),
organization_id=self.org.id,
actor=self.owner,
)
assert audit_logs.count() == 1
assert audit_logs[0].data["email"] == non_compliant_user.email
assert audit_logs[0].target_user_id == non_compliant_user.id
def test_handle_2fa_required__compliant_members(self) -> None:
compliant = []
for num in range(0, 4):
user, member = self._create_user_and_member(has_2fa=True)
compliant.append((user, member))
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.org.handle_2fa_required(self.request)
for user, member in compliant:
self.is_organization_member(user.id, member.id)
assert len(mail.outbox) == 0
with assume_test_silo_mode(SiloMode.CONTROL):
assert not AuditLogEntry.objects.filter(
event=audit_log.get_event_id("MEMBER_PENDING"),
organization_id=self.org.id,
actor=self.owner,
).exists()
def test_handle_2fa_required__non_compliant_members(self) -> None:
non_compliant = []
for num in range(0, 4):
user, member = self._create_user_and_member()
self.assert_org_member_mapping(org_member=member)
non_compliant.append((user, member))
with (
self.options({"system.url-prefix": "http://example.com"}),
self.tasks(),
outbox_runner(),
):
self.org.handle_2fa_required(self.request)
for user, member in non_compliant:
self.is_pending_organization_member(user.id, member.id)
self.assert_org_member_mapping(org_member=member)
assert len(mail.outbox) == len(non_compliant)
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
event=audit_log.get_event_id("MEMBER_PENDING"),
organization_id=self.org.id,
actor=self.owner,
).count() == len(non_compliant)
def test_handle_2fa_required__pending_member__ok(self) -> None:
member = self.create_member(organization=self.org, email="bob@zombo.com")
assert not member.user_id
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.org.handle_2fa_required(self.request)
self.is_pending_organization_member(user_id=None, member_id=member.id, was_booted=False)
assert len(mail.outbox) == 0
with assume_test_silo_mode(SiloMode.CONTROL):
assert not AuditLogEntry.objects.filter(
event=audit_log.get_event_id("MEMBER_PENDING"),
organization_id=self.org.id,
actor=self.owner,
).exists()
@mock.patch("sentry.tasks.auth.auth.logger")
def test_handle_2fa_required__no_email__warning(self, auth_log: mock.MagicMock) -> None:
user, member = self._create_user_and_member(has_user_email=False)
with assume_test_silo_mode(SiloMode.CONTROL):
assert not user.has_2fa()
assert not user.email
assert not member.email
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.org.handle_2fa_required(self.request)
self.is_organization_member(user.id, member.id)
auth_log.warning.assert_called_with(
"Could not remove %s noncompliant user from org",
"2FA",
extra={"organization_id": self.org.id, "user_id": user.id, "member_id": member.id},
)
@mock.patch("sentry.tasks.auth.auth.logger")
def test_handle_2fa_required__no_actor_and_api_key__ok(self, auth_log: mock.MagicMock) -> None:
user, member = self._create_user_and_member()
self.assert_org_member_mapping(org_member=member)
with (
self.options({"system.url-prefix": "http://example.com"}),
self.tasks(),
outbox_runner(),
):
with assume_test_silo_mode(SiloMode.CONTROL):
api_key = ApiKey.objects.create(
organization_id=self.org.id,
scope_list=["org:read", "org:write", "member:read", "member:write"],
)
request = copy.deepcopy(self.request)
request.user = AnonymousUser()
request.auth = AuthenticatedToken.from_token(api_key)
self.org.handle_2fa_required(request)
self.is_pending_organization_member(user.id, member.id)
self.assert_org_member_mapping(org_member=member)
assert len(mail.outbox) == 1
with assume_test_silo_mode(SiloMode.CONTROL):
assert (
AuditLogEntry.objects.filter(
event=audit_log.get_event_id("MEMBER_PENDING"),
organization_id=self.org.id,
actor=None,
actor_key=api_key,
).count()
== 1
)
@mock.patch("sentry.tasks.auth.auth.logger")
def test_handle_2fa_required__no_ip_address__ok(self, auth_log: mock.MagicMock) -> None:
user, member = self._create_user_and_member()
self.assert_org_member_mapping(org_member=member)
with (
self.options({"system.url-prefix": "http://example.com"}),
self.tasks(),
outbox_runner(),
):
request = copy.deepcopy(self.request)
request.META["REMOTE_ADDR"] = None
self.org.handle_2fa_required(request)
self.is_pending_organization_member(user.id, member.id)
self.assert_org_member_mapping(org_member=member)
assert len(mail.outbox) == 1
with assume_test_silo_mode(SiloMode.CONTROL):
assert (
AuditLogEntry.objects.filter(
event=audit_log.get_event_id("MEMBER_PENDING"),
organization_id=self.org.id,
actor=self.owner,
actor_key=None,
ip_address=None,
).count()
== 1
)
def test_get_audit_log_data(self) -> None:
org = self.create_organization()
result = org.get_audit_log_data()
assert result["flags"] == int(org.flags)
def test_absolute_url_no_customer_domain(self) -> None:
org = self.create_organization(owner=self.user, slug="acme")
url = org.absolute_url("/organizations/acme/restore/")
assert url == "http://testserver/organizations/acme/restore/"
url = org.absolute_url("/organizations/acme/issues/", query="project=123", fragment="ref")
assert url == "http://testserver/organizations/acme/issues/?project=123#ref"
@with_feature("system:multi-region")
def test_absolute_url_with_customer_domain(self) -> None:
org = self.create_organization(owner=self.user, slug="acme")
url = org.absolute_url("/organizations/acme/restore/")
assert url == "http://acme.testserver/restore/"
url = org.absolute_url("/organizations/acme/issues/", query="project=123", fragment="ref")
assert url == "http://acme.testserver/issues/?project=123#ref"
url = org.absolute_url("/organizations/acme/issues/", query="?project=123", fragment="#ref")
assert url == "http://acme.testserver/issues/?project=123#ref"
def test_absolute_api_url(self) -> None:
org = self.create_organization(owner=self.user, slug="acme")
url = org.absolute_api_url("/restore/")
assert url == "http://testserver/api/0/organizations/acme/restore/"
url = org.absolute_api_url("/issues/", query="project=123", fragment="ref")
assert url == "http://testserver/api/0/organizations/acme/issues/?project=123#ref"
def test_absolute_api_url_mising_slashes(self) -> None:
org = self.create_organization(owner=self.user, slug="acme")
url = org.absolute_api_url("restore")
assert url == "http://testserver/api/0/organizations/acme/restore/"
url = org.absolute_api_url("/issues", query="project=123", fragment="ref")
assert url == "http://testserver/api/0/organizations/acme/issues/?project=123#ref"
url = org.absolute_api_url("issues/", query="project=123", fragment="ref")
assert url == "http://testserver/api/0/organizations/acme/issues/?project=123#ref"
def test_absolute_api_url_extraneous_slashes(self) -> None:
org = self.create_organization(owner=self.user, slug="acme")
url = org.absolute_api_url("/////restore/////")
assert url == "http://testserver/api/0/organizations/acme/restore/"
url = org.absolute_api_url("////issues", query="project=123", fragment="ref")
assert url == "http://testserver/api/0/organizations/acme/issues/?project=123#ref"
url = org.absolute_api_url("issues////", query="project=123", fragment="ref")
assert url == "http://testserver/api/0/organizations/acme/issues/?project=123#ref"
def test_get_bulk_owner_profiles(self) -> None:
u1, u2, u3 = (self.create_user() for _ in range(3))
o1, o2, o3 = (self.create_organization(owner=u) for u in (u1, u2, u3))
o2.get_default_owner() # populate _default_owner
with assume_test_silo_mode_of(User):
u3.delete()
bulk_owner_profiles = Organization.get_bulk_owner_profiles([o1, o2, o3])
assert set(bulk_owner_profiles.keys()) == {o1.id, o2.id}
assert bulk_owner_profiles[o1.id].id == u1.id
assert bulk_owner_profiles[o2.id].id == u2.id
assert bulk_owner_profiles[o2.id].name == u2.name
assert bulk_owner_profiles[o2.id].email == u2.email
| Require2fa |
python | readthedocs__readthedocs.org | readthedocs/organizations/tests/test_forms.py | {
"start": 1126,
"end": 5550
} | class ____(OrganizationTestCase):
def test_add_team_member_by_name(self):
url = reverse(
"organization_team_member_add",
args=[self.organization.slug, self.team.slug],
)
resp = self.client.post(url, data={"username_or_email": self.user.username})
self.assertEqual(resp.status_code, 302)
self.assertEqual(self.team.members.count(), 0)
self.assertEqual(Invitation.objects.for_object(self.team).count(), 1)
def test_add_duplicate_member_by_username(self):
url = reverse(
"organization_team_member_add",
args=[self.organization.slug, self.team.slug],
)
resp = self.client.post(url, data={"username_or_email": self.user.username})
self.assertEqual(resp.status_code, 302)
resp = self.client.post(url, data={"username_or_email": self.user.username})
self.assertEqual(resp.status_code, 302)
self.assertEqual(self.team.members.count(), 0)
self.assertEqual(Invitation.objects.for_object(self.team).count(), 1)
def test_add_team_member_by_email(self):
"""User with verified email is just added to team."""
user = fixture.get(User)
emailaddress = fixture.get(EmailAddress, user=user, verified=True)
self.assertEqual(Invitation.objects.all().count(), 0)
url = reverse(
"organization_team_member_add",
args=[self.organization.slug, self.team.slug],
)
resp = self.client.post(url, data={"username_or_email": emailaddress.email})
self.assertEqual(resp.status_code, 302)
invitation = Invitation.objects.for_object(self.team).get()
self.assertEqual(invitation.from_user, self.owner)
self.assertEqual(invitation.to_user, user)
self.assertEqual(invitation.to_email, None)
self.assertEqual(self.team.members.count(), 0)
def test_add_team_invite_unverified_email(self):
"""Team member with unverified email is invited by email."""
user = fixture.get(User)
fixture.get(EmailAddress, user=user, verified=False)
self.assertEqual(Invitation.objects.all().count(), 0)
url = reverse(
"organization_team_member_add",
args=[self.organization.slug, self.team.slug],
)
resp = self.client.post(url, data={"username_or_email": user.email})
self.assertEqual(resp.status_code, 302)
invitation = Invitation.objects.for_object(self.team).get()
self.assertEqual(invitation.from_user, self.owner)
self.assertEqual(invitation.to_user, None)
self.assertEqual(invitation.to_email, user.email)
self.assertEqual(self.team.members.count(), 0)
def test_add_fresh_member_by_email(self):
"""Add team member with email that is not associated with a user."""
self.assertEqual(self.organization.teams.count(), 1)
email = "testalsdkgh@example.com"
self.assertEqual(Invitation.objects.all().count(), 0)
url = reverse(
"organization_team_member_add",
args=[self.organization.slug, self.team.slug],
)
resp = self.client.post(url, data={"username_or_email": email})
self.assertEqual(resp.status_code, 302)
invitation = Invitation.objects.for_object(self.team).get()
self.assertEqual(invitation.from_user, self.owner)
self.assertEqual(invitation.to_user, None)
self.assertEqual(invitation.to_email, email)
self.assertEqual(self.team.members.count(), 0)
def test_add_duplicate_invite_by_email(self):
"""Add duplicate invite by email."""
self.assertEqual(self.organization.teams.count(), 1)
email = "non-existant@example.com"
self.assertEqual(Invitation.objects.all().count(), 0)
url = reverse(
"organization_team_member_add",
args=[self.organization.slug, self.team.slug],
)
resp = self.client.post(url, data={"username_or_email": email})
self.assertEqual(resp.status_code, 302)
self.assertEqual(Invitation.objects.all().count(), 1)
resp = self.client.post(url, data={"username_or_email": email})
self.assertEqual(resp.status_code, 302)
self.assertEqual(Invitation.objects.all().count(), 1)
self.assertEqual(self.team.members.count(), 0)
| OrganizationTeamMemberFormTests |
python | joke2k__faker | faker/providers/date_time/en_PH/__init__.py | {
"start": 46,
"end": 144
} | class ____(DateTimeProvider):
"""No difference from default DateTimeProvider"""
pass
| Provider |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1146664,
"end": 1147107
} | class ____(ScaleInvalidDataShowAsstrokeWidth):
"""
ScaleInvalidDataShowAsValuestrokeWidth schema wrapper.
Parameters
----------
value : float
The stroke width, in pixels.
"""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAsValue<"strokeWidth">'}
def __init__(self, value: Optional[float] = Undefined, **kwds):
super().__init__(value=value, **kwds)
| ScaleInvalidDataShowAsValuestrokeWidth |
python | getsentry__sentry | src/sentry/web/frontend/react_page.py | {
"start": 9015,
"end": 9235
} | class ____(GenericReactPageView):
auth_required = False
def handle_auth_required(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
raise Exception("This should not be called")
| AuthV2ReactPageView |
python | neetcode-gh__leetcode | python/0088-merge-sorted-array.py | {
"start": 0,
"end": 455
} | class ____:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
while m > 0 and n > 0:
if nums1[m-1] >= nums2[n-1]:
nums1[m+n-1] = nums1[m-1]
m -= 1
else:
nums1[m+n-1] = nums2[n-1]
n -= 1
if n > 0:
nums1[:n] = nums2[:n] | Solution |
python | huggingface__transformers | src/transformers/models/phimoe/modular_phimoe.py | {
"start": 12689,
"end": 12955
} | class ____(MixtralPreTrainedModel):
_can_record_outputs = {
"router_logits": OutputRecorder(PhimoeTopKRouter, layer_name="mlp.router", index=0),
"hidden_states": PhimoeDecoderLayer,
"attentions": PhimoeAttention,
}
| PhimoePreTrainedModel |
python | mkdocs__mkdocs | mkdocs/structure/nav.py | {
"start": 532,
"end": 1401
} | class ____:
def __init__(self, items: list, pages: list[Page]) -> None:
self.items = items # Nested List with full navigation of Sections, Pages, and Links.
self.pages = pages # Flat List of subset of Pages in nav, in order.
self.homepage = None
for page in pages:
if page.is_homepage:
self.homepage = page
break
homepage: Page | None
"""The [page][mkdocs.structure.pages.Page] object for the homepage of the site."""
pages: list[Page]
"""A flat list of all [page][mkdocs.structure.pages.Page] objects contained in the navigation."""
def __str__(self) -> str:
return '\n'.join(item._indent_print() for item in self)
def __iter__(self) -> Iterator:
return iter(self.items)
def __len__(self) -> int:
return len(self.items)
| Navigation |
python | huggingface__transformers | tests/models/swinv2/test_modeling_swinv2.py | {
"start": 17364,
"end": 20373
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
if is_vision_available()
else None
)
@slow
def test_inference_image_classification_head(self):
model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
torch_device
)
image_processor = self.default_image_processor
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_fp16(self):
model = Swinv2ForImageClassification.from_pretrained(
"microsoft/swinv2-tiny-patch4-window8-256", dtype=torch.float16
).to(torch_device)
image_processor = self.default_image_processor
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt").to(model.dtype).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-0.3938, -0.4290, 0.0020], dtype=model.dtype).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_interpolate_pos_encoding(self):
# Swinv2 models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions.
model = Swinv2Model.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(torch_device)
image_processor = self.default_image_processor
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, size={"height": 481, "width": 481}, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(pixel_values, interpolate_pos_encoding=True)
# verify the logits
expected_shape = torch.Size((1, 256, 768))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
@require_torch
| Swinv2ModelIntegrationTest |
python | django__django | django/contrib/gis/geos/prototypes/io.py | {
"start": 2274,
"end": 3059
} | class ____(GEOSFuncFactory):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
# take Python strings directly as parameters. Inside Python there
# is not a difference between signed and unsigned characters, so
# it is not a problem.
argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
wkb_reader_read = WKBReadFunc("GEOSWKBReader_read")
wkb_reader_read_hex = WKBReadFunc("GEOSWKBReader_readHEX")
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory("GEOSWKBWriter_create", restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory("GEOSWKBWriter_destroy", argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
| WKBReadFunc |
python | getsentry__sentry | src/sentry/workflow_engine/models/detector.py | {
"start": 1785,
"end": 8398
} | class ____(DefaultFieldsModel, OwnerModel, JSONConfigBase):
__relocation_scope__ = RelocationScope.Organization
objects: ClassVar[DetectorManager] = DetectorManager()
objects_for_deletion: ClassVar[BaseManager] = BaseManager()
project = FlexibleForeignKey("sentry.Project", on_delete=models.CASCADE)
name = models.CharField(max_length=200)
# The data sources that the detector is watching
data_sources = models.ManyToManyField(
"workflow_engine.DataSource", through="workflow_engine.DataSourceDetector"
)
# If the detector is not enabled, it will not be evaluated. This is how we "snooze" a detector
enabled = models.BooleanField(db_default=True)
# The detector's status - used for tracking deletion state
status = models.SmallIntegerField(db_default=ObjectStatus.ACTIVE)
# Optionally set a description of the detector, this will be used in notifications
description = models.TextField(null=True)
# This will emit an event for the workflow to process
workflow_condition_group = FlexibleForeignKey(
"workflow_engine.DataConditionGroup",
blank=True,
null=True,
unique=True,
on_delete=models.SET_NULL,
)
# maps to registry (sentry.issues.grouptype.registry) entries for GroupType.slug in sentry.issues.grouptype.GroupType
type = models.CharField(max_length=200)
# The user that created the detector
created_by_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL")
class Meta(OwnerModel.Meta):
constraints = OwnerModel.Meta.constraints
error_detector_project_options = {
"fingerprinting_rules": "sentry:fingerprinting_rules",
"resolve_age": "sentry:resolve_age",
}
CACHE_TTL = 60 * 10
@classmethod
def _get_detector_project_type_cache_key(cls, project_id: int, detector_type: str) -> str:
"""Generate cache key for detector lookup by project and type."""
return f"detector:by_proj_type:{project_id}:{detector_type}"
@classmethod
def get_default_detector_for_project(cls, project_id: int, detector_type: str) -> Detector:
cache_key = cls._get_detector_project_type_cache_key(project_id, detector_type)
detector = cache.get(cache_key)
if detector is None:
detector = cls.objects.get(project_id=project_id, type=detector_type)
cache.set(cache_key, detector, cls.CACHE_TTL)
return detector
@classmethod
def get_error_detector_for_project(cls, project_id: int) -> Detector:
from sentry.grouping.grouptype import ErrorGroupType
return cls.get_default_detector_for_project(project_id, ErrorGroupType.slug)
@classmethod
def get_issue_stream_detector_for_project(cls, project_id: int) -> Detector:
from sentry.workflow_engine.typings.grouptype import IssueStreamGroupType
return cls.get_default_detector_for_project(project_id, IssueStreamGroupType.slug)
@property
def group_type(self) -> builtins.type[GroupType]:
group_type = grouptype.registry.get_by_slug(self.type)
if not group_type:
raise ValueError(f"Group type '{self.type}' not registered")
return group_type
@property
def detector_handler(self) -> DetectorHandler | None:
group_type = self.group_type
if self.settings.handler is None:
logger.error(
"Registered grouptype for detector has no detector_handler",
extra={
"group_type": str(group_type),
"detector_id": self.id,
"detector_type": self.type,
},
)
return None
return self.settings.handler(self)
@property
def settings(self) -> DetectorSettings:
settings = self.group_type.detector_settings
if settings is None:
raise ValueError("Registered grouptype has no detector settings")
return settings
def get_snapshot(self) -> DetectorSnapshot:
trigger_condition = None
if self.workflow_condition_group:
trigger_condition = self.workflow_condition_group.get_snapshot()
return {
"id": self.id,
"type": self.type,
"enabled": self.enabled,
"status": self.status,
"trigger_condition": trigger_condition,
}
def get_audit_log_data(self) -> dict[str, Any]:
return {"name": self.name}
def get_option(
self, key: str, default: Any | None = None, validate: Callable[[object], bool] | None = None
) -> Any:
if not self.project:
raise ValueError("Detector must have a project to get options")
return self.project.get_option(key, default=default, validate=validate)
def get_conditions(self) -> BaseQuerySet[DataCondition]:
has_cached_condition_group = is_model_attr_cached(self, "workflow_condition_group")
conditions = None
if has_cached_condition_group:
if self.workflow_condition_group is not None:
has_cached_conditions = is_model_attr_cached(
self.workflow_condition_group, "conditions"
)
if has_cached_conditions:
conditions = self.workflow_condition_group.conditions.all()
if conditions is None:
# if we don't have the information cached execute a single query to return them
# (accessing as self.workflow_condition_group.conditions.all() issues 2 queries)
conditions = DataCondition.objects.filter(condition_group__detector=self)
return conditions
def enforce_config_schema(instance: Detector) -> None:
"""
Ensures the detector type is valid in the grouptype registry.
This needs to be available independently so callers can validate configs
without saving.
"""
group_type = instance.group_type
if not group_type:
raise ValueError(f"No group type found with type {instance.type}")
if not group_type.detector_settings:
return
if not isinstance(instance.config, dict):
raise ValidationError("Detector config must be a dictionary")
instance.validate_config(group_type.detector_settings.config_schema)
@receiver(pre_save, sender=Detector)
def enforce_config_schema_signal(sender, instance: Detector, **kwargs):
"""
This needs to be a signal because the grouptype registry's entries are not available at import time.
"""
enforce_config_schema(instance)
| Detector |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 26227,
"end": 26360
} | class ____(Exception):
"""Indicates that an error occurred during the execution of an external process."""
| DagsterPipesExecutionError |
python | numba__numba | numba/core/typing/npydecl.py | {
"start": 807,
"end": 7336
} | class ____(AbstractTemplate):
@classmethod
def _handle_inputs(cls, ufunc, args, kws):
"""
Process argument types to a given *ufunc*.
Returns a (base types, explicit outputs, ndims, layout) tuple where:
- `base types` is a tuple of scalar types for each input
- `explicit outputs` is a tuple of explicit output types (arrays)
- `ndims` is the number of dimensions of the loop and also of
any outputs, explicit or implicit
- `layout` is the layout for any implicit output to be allocated
"""
nin = ufunc.nin
nout = ufunc.nout
nargs = ufunc.nargs
# preconditions
assert nargs == nin + nout
if len(args) < nin:
msg = "ufunc '{0}': not enough arguments ({1} found, {2} required)"
raise TypingError(msg=msg.format(ufunc.__name__, len(args), nin))
if len(args) > nargs:
msg = "ufunc '{0}': too many arguments ({1} found, {2} maximum)"
raise TypingError(msg=msg.format(ufunc.__name__, len(args), nargs))
args = [a.as_array if isinstance(a, types.ArrayCompatible) else a
for a in args]
arg_ndims = [a.ndim if isinstance(a, types.ArrayCompatible) else 0
for a in args]
ndims = max(arg_ndims)
# explicit outputs must be arrays (no explicit scalar return values supported)
explicit_outputs = args[nin:]
if not all(isinstance(output, types.ArrayCompatible)
for output in explicit_outputs):
msg = "ufunc '{0}' called with an explicit output that is not an array"
raise TypingError(msg=msg.format(ufunc.__name__))
if not all(output.mutable for output in explicit_outputs):
msg = "ufunc '{0}' called with an explicit output that is read-only"
raise TypingError(msg=msg.format(ufunc.__name__))
# find the kernel to use, based only in the input types (as does NumPy)
base_types = [x.dtype if isinstance(x, types.ArrayCompatible) else x
for x in args]
# Figure out the output array layout, if needed.
layout = None
if ndims > 0 and (len(explicit_outputs) < ufunc.nout):
layout = 'C'
layouts = [x.layout if isinstance(x, types.ArrayCompatible) else ''
for x in args]
# Prefer C contig if any array is C contig.
# Next, prefer F contig.
# Defaults to C contig if not layouts are C/F.
if 'C' not in layouts and 'F' in layouts:
layout = 'F'
return base_types, explicit_outputs, ndims, layout
@property
def ufunc(self):
return self.key
def generic(self, args, kws):
# First, strip optional types, ufunc loops are typed on concrete types
args = [x.type if isinstance(x, types.Optional) else x for x in args]
ufunc = self.ufunc
base_types, explicit_outputs, ndims, layout = self._handle_inputs(
ufunc, args, kws)
ufunc_loop = ufunc_find_matching_loop(ufunc, base_types)
if ufunc_loop is None:
raise TypingError("can't resolve ufunc {0} for types {1}".format(ufunc.__name__, args))
# check if all the types involved in the ufunc loop are supported in this mode
if not supported_ufunc_loop(ufunc, ufunc_loop):
msg = "ufunc '{0}' using the loop '{1}' not supported in this mode"
raise TypingError(msg=msg.format(ufunc.__name__, ufunc_loop.ufunc_sig))
# if there is any explicit output type, check that it is valid
explicit_outputs_np = [as_dtype(tp.dtype) for tp in explicit_outputs]
# Numpy will happily use unsafe conversions (although it will actually warn)
if not all (np.can_cast(fromty, toty, 'unsafe') for (fromty, toty) in
zip(ufunc_loop.numpy_outputs, explicit_outputs_np)):
msg = "ufunc '{0}' can't cast result to explicit result type"
raise TypingError(msg=msg.format(ufunc.__name__))
# A valid loop was found that is compatible. The result of type inference should
# be based on the explicit output types, and when not available with the type given
# by the selected NumPy loop
out = list(explicit_outputs)
implicit_output_count = ufunc.nout - len(explicit_outputs)
if implicit_output_count > 0:
# XXX this is sometimes wrong for datetime64 and timedelta64,
# as ufunc_find_matching_loop() doesn't do any type inference
ret_tys = ufunc_loop.outputs[-implicit_output_count:]
if ndims > 0:
assert layout is not None
# If either of the types involved in the ufunc operation have a
# __array_ufunc__ method then invoke the first such one to
# determine the output type of the ufunc.
array_ufunc_type = None
for a in args:
if hasattr(a, "__array_ufunc__"):
array_ufunc_type = a
break
output_type = types.Array
if array_ufunc_type is not None:
output_type = array_ufunc_type.__array_ufunc__(ufunc, "__call__", *args, **kws)
if output_type is NotImplemented:
msg = (f"unsupported use of ufunc {ufunc} on "
f"{array_ufunc_type}")
# raise TypeError here because
# NumpyRulesArrayOperator.generic is capturing
# TypingError
raise NumbaTypeError(msg)
elif not issubclass(output_type, types.Array):
msg = (f"ufunc {ufunc} on {array_ufunc_type}"
f"cannot return non-array {output_type}")
# raise TypeError here because
# NumpyRulesArrayOperator.generic is capturing
# TypingError
raise NumbaTypeError(msg)
ret_tys = [output_type(dtype=ret_ty, ndim=ndims, layout=layout)
for ret_ty in ret_tys]
ret_tys = [resolve_output_type(self.context, args, ret_ty)
for ret_ty in ret_tys]
out.extend(ret_tys)
return _ufunc_loop_sig(out, args)
| Numpy_rules_ufunc |
python | tensorflow__tensorflow | tensorflow/python/ops/variable_scope.py | {
"start": 2037,
"end": 6470
} | class ____:
"""Holds partition info used by initializer functions."""
__slots__ = ["_full_shape", "_var_offset"]
def __init__(self, full_shape, var_offset):
"""Constructor.
Args:
full_shape: Tuple or list of `int` indicating the full combined shape of
the partitioned variables.
var_offset: Tuple or list of `int` specifying offset of this partition
with respect to the full variable for each dimension.
Raises:
TypeError: If `full_shape` or `var_offset` is not a sequence.
ValueError: If `full_shape` or `var_offset` differ in length. If
`var_offset` exceeds `full_shape` in any dimension.
"""
if not isinstance(full_shape, (list, tuple)):
raise TypeError(
"`full_shape` must be a sequence (like tuple or list) instead of " +
type(full_shape).__name__)
if not isinstance(var_offset, (list, tuple)):
raise TypeError(
"`var_offset` must be a sequence (like tuple or list) instead of " +
type(var_offset).__name__)
if len(var_offset) != len(full_shape):
raise ValueError(
"Expected equal length, but `var_offset` is of length {} while "
"full_shape is of length {}.".format(
len(var_offset), len(full_shape)))
for offset, shape in zip(var_offset, full_shape):
if offset < 0 or offset >= shape:
raise ValueError(
"Expected 0 <= offset < shape but found offset={}, shape={} for "
"var_offset={}, full_shape={}".format(offset, shape, var_offset,
full_shape))
self._full_shape = full_shape
self._var_offset = var_offset
@property
def full_shape(self):
return self._full_shape
@property
def var_offset(self):
return self._var_offset
def single_offset(self, shape):
"""Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
"""
single_slice_dim = self.single_slice_dim(shape)
# If this variable is not being partitioned at all, single_slice_dim() could
# return None.
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
def single_slice_dim(self, shape):
"""Returns the slice dim when the variable is partitioned only in one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the dimension that the variable is partitioned in, or
`None` if the variable doesn't seem to be partitioned at all.
Raises:
TypeError: If `shape` is not a sequence.
ValueError: If `shape` is not the same length as `self.full_shape`. If
the variable is partitioned in more than one dimension.
"""
if not isinstance(shape, (tuple, list)):
raise TypeError(
"`shape` must be a sequence (like tuple or list) instead of " +
type(shape).__name__)
if len(shape) != len(self.full_shape):
raise ValueError(
"Expected equal length, but received shape={} of length {} while "
"self.full_shape={} is of length {}.".format(shape, len(shape),
self.full_shape,
len(self.full_shape)))
for i in range(len(shape)):
if self.var_offset[i] + shape[i] > self.full_shape[i]:
raise ValueError(
"With self.var_offset={}, a partition of shape={} would exceed "
"self.full_shape={} in dimension {}.".format(
self.var_offset, shape, self.full_shape, i))
slice_dim = None
for i in range(len(shape)):
if shape[i] == self.full_shape[i]:
continue
if slice_dim is not None:
raise ValueError(
"Cannot use single_slice_dim() with shape={} and "
"self.full_shape={} since slice dim could be either dimension {} "
"or {}.".format(shape, self.full_shape, i, slice_dim))
slice_dim = i
return slice_dim
| _PartitionInfo |
python | pytorch__pytorch | torch/_dynamo/variables/ctx_manager.py | {
"start": 9836,
"end": 11197
} | class ____(ContextWrappingVariable):
"""represents torch._functorch.pyfunction.temporarily_pop_interpreter_stack()"""
@staticmethod
def create(
tx: "InstructionTranslator", target_values: Any, **kwargs: Any
) -> "TemporarilyPopInterpreterStackCtxManagerVariable":
return TemporarilyPopInterpreterStackCtxManagerVariable(
target_values=target_values,
initial_values=None,
**kwargs,
)
def enter(self, tx: "InstructionTranslator") -> VariableTracker:
self.saved = torch._C._functorch.pop_dynamic_layer_stack()
self.set_cleanup_hook(
tx,
lambda: torch._C._functorch.push_dynamic_layer_stack(self.saved),
)
self.proxy = tx.output.create_node(
"call_function",
torch._C._functorch.pop_dynamic_layer_stack,
(),
{},
)
return variables.ConstantVariable.create(None)
def exit(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
self.cleanup()
tx.output.create_node(
"call_function",
torch._C._functorch.push_dynamic_layer_stack,
(self.proxy,),
{},
)
return variables.ConstantVariable.create(None)
| TemporarilyPopInterpreterStackCtxManagerVariable |
python | pyinstaller__pyinstaller | tests/unit/test_modulegraph/test_imports.py | {
"start": 2400,
"end": 12782
} | class ____ (unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertIsInstance'):
def assertIsInstance(self, value, types):
if not isinstance(value, types):
self.fail("%r is not an instance of %r"%(value, types))
def setUp(self):
root = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'testpkg-relimport')
self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
#self.mf.debug = 999
self.script_name = os.path.join(root, 'script.py')
self.mf.add_script(self.script_name)
def testGraphStructure(self):
# 1. Script to imported modules
n = self.mf.find_node(self.script_name)
self.assertIsInstance(n, modulegraph.Script)
imported = ('mod', 'pkg', 'pkg.mod', 'pkg.oldstyle',
'pkg.relative', 'pkg.toplevel', 'pkg.subpkg.relative',
'pkg.subpkg.relative2', 'pkg.subpkg.mod2')
for nm in imported:
n2 = self.mf.find_node(nm)
ed = self.mf.edgeData(n, n2)
self.assertIsInstance(ed, modulegraph.DependencyInfo)
self.assertEqual(ed, modulegraph.DependencyInfo(
fromlist=False, conditional=False, function=False, tryexcept=False))
refs = self.mf.outgoing(n)
self.assertEqual(set(refs), set(self.mf.find_node(nm) for nm in imported))
refs = list(self.mf.incoming(n))
# The script is a toplevel item and is therefore referred to from the graph root (aka 'None')
# FIXME fails since PyInstaller skips edges pointing to the current
# graph, see change 49c725e9f5a79b65923b8e1bfdd794f0f6f7c4bf
#self.assertEqual(refs, [None])
# 2. 'mod'
n = self.mf.find_node('mod')
self.assertIsInstance(n, modulegraph.SourceModule)
refs = list(self.mf.outgoing(n))
self.assertEqual(refs, [])
#refs = list(self.mf.incoming(n))
#self.assertEquals(refs, [])
# 3. 'pkg'
n = self.mf.find_node('pkg')
self.assertIsInstance(n, modulegraph.Package)
refs = list(self.mf.outgoing(n))
self.maxDiff = None
self.assertEqual(refs, [n])
#refs = list(self.mf.incoming(n))
#self.assertEquals(refs, [])
# 4. pkg.mod
n = self.mf.find_node('pkg.mod')
self.assertIsInstance(n, modulegraph.SourceModule)
refs = set(self.mf.outgoing(n))
self.assertEqual(refs, set([self.mf.find_node('pkg')]))
ed = self.mf.edgeData(n, self.mf.find_node('pkg'))
self.assertIsInstance(ed, modulegraph.DependencyInfo)
self.assertEqual(ed, modulegraph.DependencyInfo(
fromlist=False, conditional=False, function=False, tryexcept=False))
# 5. pkg.oldstyle
n = self.mf.find_node('pkg.oldstyle')
self.assertIsInstance(n, modulegraph.SourceModule)
refs = set(self.mf.outgoing(n))
n2 = self.mf.find_node('mod')
self.assertEqual(refs, set([self.mf.find_node('pkg'), n2]))
ed = self.mf.edgeData(n, n2)
self.assertIsInstance(ed, modulegraph.DependencyInfo)
self.assertEqual(ed, modulegraph.DependencyInfo(
fromlist=False, conditional=False, function=False, tryexcept=False))
# 6. pkg.relative
n = self.mf.find_node('pkg.relative')
self.assertIsInstance(n, modulegraph.SourceModule)
refs = set(self.mf.outgoing(n))
self.assertEqual(refs, set([self.mf.find_node('__future__'), self.mf.find_node('pkg'), self.mf.find_node('pkg.mod')]))
ed = self.mf.edgeData(n, self.mf.find_node('pkg.mod'))
self.assertIsInstance(ed, modulegraph.DependencyInfo)
self.assertEqual(ed, modulegraph.DependencyInfo(
fromlist=True, conditional=False, function=False, tryexcept=False))
ed = self.mf.edgeData(n, self.mf.find_node('__future__'))
self.assertIsInstance(ed, modulegraph.DependencyInfo)
self.assertEqual(ed, modulegraph.DependencyInfo(
fromlist=False, conditional=False, function=False, tryexcept=False))
#ed = self.mf.edgeData(n, self.mf.find_node('__future__.absolute_import'))
#self.assertIsInstance(ed, modulegraph.DependencyInfo)
#self.assertEqual(ed, modulegraph.DependencyInfo(
#fromlist=True, conditional=False, function=False, tryexcept=False))
# 7. pkg.toplevel
n = self.mf.find_node('pkg.toplevel')
self.assertIsInstance(n, modulegraph.SourceModule)
refs = set(self.mf.outgoing(n))
self.assertEqual(refs, set([self.mf.find_node('__future__'), self.mf.find_node('pkg'), self.mf.find_node('mod')]))
ed = self.mf.edgeData(n, self.mf.find_node('mod'))
self.assertIsInstance(ed, modulegraph.DependencyInfo)
self.assertEqual(ed, modulegraph.DependencyInfo(
fromlist=False, conditional=False, function=False, tryexcept=False))
ed = self.mf.edgeData(n, self.mf.find_node('__future__'))
self.assertIsInstance(ed, modulegraph.DependencyInfo)
self.assertEqual(ed, modulegraph.DependencyInfo(
fromlist=False, conditional=False, function=False, tryexcept=False))
#ed = self.mf.edgeData(n, self.mf.find_node('__future__.absolute_import'))
#self.assertIsInstance(ed, modulegraph.DependencyInfo)
#self.assertEqual(ed, modulegraph.DependencyInfo(
#fromlist=True, conditional=False, function=False, tryexcept=False))
# 8. pkg.subpkg
n = self.mf.find_node('pkg.subpkg')
self.assertIsInstance(n, modulegraph.Package)
refs = set(self.mf.outgoing(n))
self.assertEqual(refs, set([self.mf.find_node('pkg')]))
ed = self.mf.edgeData(n, self.mf.find_node('pkg'))
self.assertIsInstance(ed, modulegraph.DependencyInfo)
self.assertEqual(ed, modulegraph.DependencyInfo(
fromlist=False, conditional=False, function=False, tryexcept=False))
# 9. pkg.subpkg.relative
n = self.mf.find_node('pkg.subpkg.relative')
self.assertIsInstance(n, modulegraph.SourceModule)
refs = set(self.mf.outgoing(n))
self.assertEqual(refs, set([self.mf.find_node('__future__'), self.mf.find_node('pkg'), self.mf.find_node('pkg.subpkg'), self.mf.find_node('pkg.mod')]))
ed = self.mf.edgeData(n, self.mf.find_node('pkg.subpkg'))
self.assertIsInstance(ed, modulegraph.DependencyInfo)
self.assertEqual(ed, modulegraph.DependencyInfo(
fromlist=False, conditional=False, function=False, tryexcept=False))
ed = self.mf.edgeData(n, self.mf.find_node('pkg.mod'))
self.assertIsInstance(ed, modulegraph.DependencyInfo)
self.assertEqual(ed, modulegraph.DependencyInfo(
fromlist=True, conditional=False, function=False, tryexcept=False))
# 10. pkg.subpkg.relative2
n = self.mf.find_node('pkg.subpkg.relative2')
self.assertIsInstance(n, modulegraph.SourceModule)
refs = set(self.mf.outgoing(n))
self.assertEqual(refs, set([self.mf.find_node('pkg.subpkg'), self.mf.find_node('pkg.relimport'), self.mf.find_node('__future__')]))
# 10. pkg.subpkg.mod2
n = self.mf.find_node('pkg.subpkg.mod2')
self.assertIsInstance(n, modulegraph.SourceModule)
refs = set(self.mf.outgoing(n))
self.assertEqual(refs, set([
self.mf.find_node('__future__'),
self.mf.find_node('pkg.subpkg'),
self.mf.find_node('pkg.sub2.mod'),
self.mf.find_node('pkg.sub2'),
]))
def testRootModule(self):
node = self.mf.find_node('mod')
self.assertIsInstance(node, modulegraph.SourceModule)
self.assertEqual(node.identifier, 'mod')
def testRootPkg(self):
node = self.mf.find_node('pkg')
self.assertIsInstance(node, modulegraph.Package)
self.assertEqual(node.identifier, 'pkg')
def testSubModule(self):
node = self.mf.find_node('pkg.mod')
self.assertIsInstance(node, modulegraph.SourceModule)
self.assertEqual(node.identifier, 'pkg.mod')
def testOldStyle(self):
node = self.mf.find_node('pkg.oldstyle')
self.assertIsInstance(node, modulegraph.SourceModule)
self.assertEqual(node.identifier, 'pkg.oldstyle')
sub = [ n for n in self.mf.get_edges(node)[0] if n.identifier != '__future__' ][0]
self.assertEqual(sub.identifier, 'mod')
def testNewStyle(self):
node = self.mf.find_node('pkg.toplevel')
self.assertIsInstance(node, modulegraph.SourceModule)
self.assertEqual(node.identifier, 'pkg.toplevel')
sub = [ n for n in self.mf.get_edges(node)[0] if not n.identifier.startswith('__future__')][0]
self.assertEqual(sub.identifier, 'mod')
def testRelativeImport(self):
node = self.mf.find_node('pkg.relative')
self.assertIsInstance(node, modulegraph.SourceModule)
self.assertEqual(node.identifier, 'pkg.relative')
sub = [ n for n in self.mf.get_edges(node)[0] if not n.identifier.startswith('__future__') ][0]
self.assertIsInstance(sub, modulegraph.Package)
self.assertEqual(sub.identifier, 'pkg')
node = self.mf.find_node('pkg.subpkg.relative')
self.assertIsInstance(node, modulegraph.SourceModule)
self.assertEqual(node.identifier, 'pkg.subpkg.relative')
sub = [ n for n in self.mf.get_edges(node)[0] if not n.identifier.startswith('__future__') ][0]
self.assertIsInstance(sub, modulegraph.Package)
self.assertEqual(sub.identifier, 'pkg')
node = self.mf.find_node('pkg.subpkg.mod2')
self.assertIsInstance(node, modulegraph.SourceModule)
self.assertEqual(node.identifier, 'pkg.subpkg.mod2')
sub = [ n for n in self.mf.get_edges(node)[0] if not n.identifier.startswith('__future__') ][0]
self.assertIsInstance(sub, modulegraph.SourceModule)
self.assertEqual(sub.identifier, 'pkg.sub2.mod')
node = self.mf.find_node('pkg.subpkg.relative2')
self.assertIsInstance(node, modulegraph.SourceModule)
self.assertEqual(node.identifier, 'pkg.subpkg.relative2')
node = self.mf.find_node('pkg.relimport')
self.assertIsInstance(node, modulegraph.SourceModule)
| TestModuleGraphImport |
python | huggingface__transformers | tests/tokenization/test_tokenization_utils.py | {
"start": 1238,
"end": 15710
} | class ____(unittest.TestCase):
def check_tokenizer_from_pretrained(self, tokenizer_class):
# max_model_input_sizes is a legacy attribute that may not exist on all tokenizers
if not hasattr(tokenizer_class, "max_model_input_sizes"):
return
s3_models = list(tokenizer_class.max_model_input_sizes.keys())
for model_name in s3_models[:1]:
tokenizer = tokenizer_class.from_pretrained(model_name)
self.assertIsNotNone(tokenizer)
self.assertIsInstance(tokenizer, tokenizer_class)
self.assertIsInstance(tokenizer, PythonBackend)
for special_tok in tokenizer.all_special_tokens:
self.assertIsInstance(special_tok, str)
special_tok_id = tokenizer.convert_tokens_to_ids(special_tok)
self.assertIsInstance(special_tok_id, int)
@slow
def test_pretrained_tokenizers(self):
self.check_tokenizer_from_pretrained(GPT2Tokenizer)
def test_tensor_type_from_str(self):
self.assertEqual(TensorType("pt"), TensorType.PYTORCH)
self.assertEqual(TensorType("np"), TensorType.NUMPY)
@require_tokenizers
def test_batch_encoding_word_to_tokens(self):
tokenizer_r = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
encoded = tokenizer_r(["Test", "\xad", "test"], is_split_into_words=True)
self.assertEqual(encoded.word_to_tokens(0), TokenSpan(start=1, end=2))
self.assertEqual(encoded.word_to_tokens(1), None)
self.assertEqual(encoded.word_to_tokens(2), TokenSpan(start=2, end=3))
def test_batch_encoding_with_labels(self):
batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
tensor_batch = batch.convert_to_tensors(tensor_type="np")
self.assertEqual(tensor_batch["inputs"].shape, (2, 3))
self.assertEqual(tensor_batch["labels"].shape, (2,))
# test converting the converted
with CaptureStderr() as cs:
tensor_batch = batch.convert_to_tensors(tensor_type="np")
self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}")
batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
tensor_batch = batch.convert_to_tensors(tensor_type="np", prepend_batch_axis=True)
self.assertEqual(tensor_batch["inputs"].shape, (1, 3))
self.assertEqual(tensor_batch["labels"].shape, (1,))
@require_torch
def test_batch_encoding_with_labels_pt(self):
batch = BatchEncoding({"inputs": [[1, 2, 3], [4, 5, 6]], "labels": [0, 1]})
tensor_batch = batch.convert_to_tensors(tensor_type="pt")
self.assertEqual(tensor_batch["inputs"].shape, (2, 3))
self.assertEqual(tensor_batch["labels"].shape, (2,))
# test converting the converted
with CaptureStderr() as cs:
tensor_batch = batch.convert_to_tensors(tensor_type="pt")
self.assertFalse(len(cs.err), msg=f"should have no warning, but got {cs.err}")
batch = BatchEncoding({"inputs": [1, 2, 3], "labels": 0})
tensor_batch = batch.convert_to_tensors(tensor_type="pt", prepend_batch_axis=True)
self.assertEqual(tensor_batch["inputs"].shape, (1, 3))
self.assertEqual(tensor_batch["labels"].shape, (1,))
def test_padding_accepts_tensors(self):
features = [{"input_ids": np.array([0, 1, 2])}, {"input_ids": np.array([0, 1, 2, 3])}]
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
batch = tokenizer.pad(features, padding=True)
self.assertTrue(isinstance(batch["input_ids"], np.ndarray))
self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
batch = tokenizer.pad(features, padding=True, return_tensors="np")
self.assertTrue(isinstance(batch["input_ids"], np.ndarray))
self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
@require_tokenizers
def test_decoding_single_token(self):
for tokenizer_class in [BertTokenizer, BertTokenizer]:
with self.subTest(f"{tokenizer_class}"):
tokenizer = tokenizer_class.from_pretrained("google-bert/bert-base-cased")
token_id = 2300
decoded_flat = tokenizer.decode(token_id)
decoded_list = tokenizer.decode([token_id])
self.assertEqual(decoded_flat, "Force")
self.assertEqual(decoded_list, "Force")
token_id = 0
decoded_flat = tokenizer.decode(token_id)
decoded_list = tokenizer.decode([token_id])
self.assertEqual(decoded_flat, "[PAD]")
self.assertEqual(decoded_list, "[PAD]")
last_item_id = tokenizer.vocab_size - 1
decoded_flat = tokenizer.decode(last_item_id)
decoded_list = tokenizer.decode([last_item_id])
self.assertEqual(decoded_flat, "##:")
self.assertEqual(decoded_list, "##:")
def test_extra_special_tokens_multimodal(self):
attribute_special_tokens_list = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
llama_tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b")
llama_tokenizer.extra_special_tokens = {
"boi_token": "<image_start>",
"eoi_token": "<image_end>",
"image_token": "<image>",
}
multimodal_special_tokens_list = attribute_special_tokens_list + ["boi_token", "eoi_token", "image_token"]
self.assertListEqual(llama_tokenizer.SPECIAL_TOKENS_ATTRIBUTES, multimodal_special_tokens_list)
with tempfile.TemporaryDirectory() as tmpdirname:
llama_tokenizer.save_pretrained(tmpdirname)
# load back and check we have extra special tokens set
loaded_tokenizer = LlamaTokenizer.from_pretrained(tmpdirname)
multimodal_special_tokens_list = attribute_special_tokens_list + ["boi_token", "eoi_token", "image_token"]
self.assertListEqual(loaded_tokenizer.SPECIAL_TOKENS_ATTRIBUTES, multimodal_special_tokens_list)
# We set an image_token_id before, so we can get an "image_token" as str that matches the id
self.assertTrue(loaded_tokenizer.image_token == "<image>")
self.assertTrue(loaded_tokenizer.image_token_id == loaded_tokenizer.convert_tokens_to_ids("<image>"))
# save one more time and make sure the image token can get loaded back
with tempfile.TemporaryDirectory() as tmpdirname:
loaded_tokenizer.save_pretrained(tmpdirname)
loaded_tokenizer_with_extra_tokens = LlamaTokenizer.from_pretrained(tmpdirname)
self.assertTrue(loaded_tokenizer_with_extra_tokens.image_token == "<image>")
# test that we can also indicate extra tokens during load time
extra_special_tokens = {
"boi_token": "<image_start>",
"eoi_token": "<image_end>",
"image_token": "<image>",
}
tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", extra_special_tokens=extra_special_tokens)
self.assertTrue(tokenizer.image_token == "<image>")
self.assertTrue(tokenizer.image_token_id == loaded_tokenizer.convert_tokens_to_ids("<image>"))
@require_tokenizers
def test_decoding_skip_special_tokens(self):
for tokenizer_class in [BertTokenizer, BertTokenizer]:
with self.subTest(f"{tokenizer_class}"):
tokenizer = tokenizer_class.from_pretrained("google-bert/bert-base-cased")
tokenizer.add_tokens(["ஐ"], special_tokens=True)
# test special token with other tokens, skip the special tokens
sentence = "This is a beautiful flower ஐ"
ids = tokenizer(sentence)["input_ids"]
decoded_sent = tokenizer.decode(ids, skip_special_tokens=True)
self.assertEqual(decoded_sent, "This is a beautiful flower")
# test special token with other tokens, do not skip the special tokens
ids = tokenizer(sentence)["input_ids"]
decoded_sent = tokenizer.decode(ids, skip_special_tokens=False)
self.assertEqual(decoded_sent, "[CLS] This is a beautiful flower ஐ [SEP]")
# test special token stand alone, skip the special tokens
sentence = "ஐ"
ids = tokenizer(sentence)["input_ids"]
decoded_sent = tokenizer.decode(ids, skip_special_tokens=True)
self.assertEqual(decoded_sent, "")
# test special token stand alone, do not skip the special tokens
ids = tokenizer(sentence)["input_ids"]
decoded_sent = tokenizer.decode(ids, skip_special_tokens=False)
self.assertEqual(decoded_sent, "[CLS] ஐ [SEP]")
# test single special token alone, skip
pad_id = 0
decoded_sent = tokenizer.decode(pad_id, skip_special_tokens=True)
self.assertEqual(decoded_sent, "")
# test single special token alone, do not skip
decoded_sent = tokenizer.decode(pad_id, skip_special_tokens=False)
self.assertEqual(decoded_sent, "[PAD]")
@require_torch
def test_padding_accepts_tensors_pt(self):
import torch
features = [{"input_ids": torch.tensor([0, 1, 2])}, {"input_ids": torch.tensor([0, 1, 2, 3])}]
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
batch = tokenizer.pad(features, padding=True)
self.assertTrue(isinstance(batch["input_ids"], torch.Tensor))
self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
batch = tokenizer.pad(features, padding=True, return_tensors="pt")
self.assertTrue(isinstance(batch["input_ids"], torch.Tensor))
self.assertEqual(batch["input_ids"].tolist(), [[0, 1, 2, tokenizer.pad_token_id], [0, 1, 2, 3]])
@require_tokenizers
def test_instantiation_from_tokenizers(self):
bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
PreTrainedTokenizerFast(tokenizer_object=bert_tokenizer)
@require_tokenizers
def test_instantiation_from_tokenizers_json_file(self):
bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
with tempfile.TemporaryDirectory() as tmpdirname:
bert_tokenizer.save(os.path.join(tmpdirname, "tokenizer.json"))
PreTrainedTokenizerFast(tokenizer_file=os.path.join(tmpdirname, "tokenizer.json"))
def test_len_tokenizer(self):
for tokenizer_class in [BertTokenizer, BertTokenizer]:
with self.subTest(f"{tokenizer_class}"):
tokenizer = tokenizer_class.from_pretrained("bert-base-uncased")
added_tokens_size = len(tokenizer.added_tokens_decoder)
self.assertEqual(len(tokenizer), tokenizer.vocab_size)
tokenizer.add_tokens(["<test_token>"])
self.assertEqual(len(tokenizer), tokenizer.vocab_size + 1)
self.assertEqual(len(tokenizer.added_tokens_decoder), added_tokens_size + 1)
self.assertEqual(len(tokenizer.added_tokens_encoder), added_tokens_size + 1)
@require_sentencepiece
def test_sentencepiece_cohabitation(self):
from sentencepiece import sentencepiece_model_pb2 as _original_protobuf # noqa: F401
from transformers.convert_slow_tokenizer import import_protobuf # noqa: F401
# Now this will try to import sentencepiece_model_pb2_new.py. This should not fail even if the protobuf
# was already imported.
import_protobuf()
def test_training_new_tokenizer_edge_cases(self):
_tokenizer = Tokenizer(tokenizers.models.BPE(vocab={"a": 1, "b": 2, "ab": 3}, merges=[("a", "b")]))
_tokenizer.pre_tokenizer = None
tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer)
toy_text_iterator = ("a" for _ in range(1000))
tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)
_tokenizer.normalizer = None
tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer)
toy_text_iterator = ("a" for _ in range(1000))
tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)
_tokenizer.post_processor = None
tokenizer = PreTrainedTokenizerFast(tokenizer_object=_tokenizer)
toy_text_iterator = ("a" for _ in range(1000))
tokenizer.train_new_from_iterator(text_iterator=toy_text_iterator, length=1000, vocab_size=50)
def test_encode_message(self):
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
conversation = [
{"role": "system", "content": "You are a helpful assistant"},
{"role": "user", "content": "Hey there, how are you?"},
{"role": "assistant", "content": "Thank you for asking, I am doing well"},
{"role": "user", "content": "What's the weather like today?"},
{"role": "assistant", "content": "Today the weather is nice"},
]
# First, test the default case, where we encode the whole conversation at once
whole_conversation_tokens = tokenizer.apply_chat_template(conversation, tokenize=True, return_dict=False)
# Now, test the message-by-message encoding
tokens = []
for i, message in enumerate(conversation):
tokens += tokenizer.encode_message_with_chat_template(message, conversation_history=conversation[:i])
self.assertEqual(whole_conversation_tokens, tokens)
def test_encode_message_raises_on_add_generation_prompt(self):
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
conversation = [
{"role": "system", "content": "You are a helpful assistant"},
{"role": "user", "content": "Hey there, how are you?"},
]
with self.assertRaises(ValueError):
tokenizer.encode_message_with_chat_template(conversation[0], add_generation_prompt=True)
| TokenizerUtilsTest |
python | numba__llvmlite | llvmlite/binding/module.py | {
"start": 7097,
"end": 7305
} | class ____(_Iterator):
kind = 'global'
def _dispose(self):
self._capi.LLVMPY_DisposeGlobalsIter(self)
def _next(self):
return ffi.lib.LLVMPY_GlobalsIterNext(self)
| _GlobalsIterator |
python | kamyu104__LeetCode-Solutions | Python/number-of-increasing-paths-in-a-grid.py | {
"start": 70,
"end": 1467
} | class ____(object):
def countPaths(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
in_degree = [[0]*len(grid[0]) for _ in xrange(len(grid))]
q = []
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
for di, dj in directions:
ni, nj = i+di, j+dj
if 0 <= ni < len(grid) and 0 <= nj < len(grid[0]) and grid[i][j] > grid[ni][nj]:
in_degree[i][j] += 1
if not in_degree[i][j]:
q.append((i, j))
dp = [[1]*len(grid[0]) for _ in xrange(len(grid))]
result = 0
while q:
new_q = []
for i, j in q:
result = (result+dp[i][j])%MOD
for di, dj in directions:
ni, nj = i+di, j+dj
if not (0 <= ni < len(grid) and 0 <= nj < len(grid[0]) and grid[i][j] < grid[ni][nj]):
continue
dp[ni][nj] = (dp[ni][nj]+dp[i][j])%MOD
in_degree[ni][nj] -= 1
if not in_degree[ni][nj]:
new_q.append((ni, nj))
q = new_q
return result
# Time: O(m * n)
# Space: O(m * n)
# top-down dp, memoization
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 4671,
"end": 4757
} | class ____(Protocol):
@property
def prop1(self) -> int:
return 0
| Proto15 |
python | mlflow__mlflow | mlflow/server/auth/config.py | {
"start": 143,
"end": 1036
} | class ____(NamedTuple):
default_permission: str
database_uri: str
admin_username: str
admin_password: str
authorization_function: str
def _get_auth_config_path() -> str:
return (
MLFLOW_AUTH_CONFIG_PATH.get() or Path(__file__).parent.joinpath("basic_auth.ini").resolve()
)
def read_auth_config() -> AuthConfig:
config_path = _get_auth_config_path()
config = configparser.ConfigParser()
config.read(config_path)
return AuthConfig(
default_permission=config["mlflow"]["default_permission"],
database_uri=config["mlflow"]["database_uri"],
admin_username=config["mlflow"]["admin_username"],
admin_password=config["mlflow"]["admin_password"],
authorization_function=config["mlflow"].get(
"authorization_function", "mlflow.server.auth:authenticate_request_basic_auth"
),
)
| AuthConfig |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-service-now/llama_index/readers/service_now/event.py | {
"start": 982,
"end": 1349
} | class ____(BaseEvent):
"""Event fired when page data fetch completes successfully."""
page_id: str = Field(description="ID of the page that was fetched")
document: Document = Field(description="The processed document")
metadata: Dict[str, Any] = Field(
default_factory=dict, description="Additional metadata"
)
| SNOWKBPageFetchCompletedEvent |
python | pytorch__pytorch | torch/testing/_internal/distributed/multi_threaded_pg.py | {
"start": 3429,
"end": 5070
} | class ____:
@torch.no_grad()
def work(self, data):
world_size = len(data)
for dest_rank in range(world_size):
output_buffer, _, output_split_sizes, _ = data[dest_rank]
output_indexes = self._size_cumsum(
output_buffer.size(0), output_split_sizes, world_size
)
for src_rank in range(world_size):
_, input_buffer, _, input_split_sizes = data[src_rank]
input_indexes = self._size_cumsum(
input_buffer.size(0), input_split_sizes, world_size
)
# See Note [Hide collectives mutation from autograd]
output_buffer[
output_indexes[src_rank] : output_indexes[src_rank + 1]
].detach().copy_(
input_buffer[
input_indexes[dest_rank] : input_indexes[dest_rank + 1]
]
)
def _size_cumsum(
self,
buf_size: int,
sizes: Union[torch.Tensor, list[int], None],
world_size: int,
) -> torch.Tensor:
if sizes is None or len(sizes) == 0:
sizes = torch.full((world_size,), buf_size // world_size, dtype=torch.int64)
if not isinstance(sizes, torch.Tensor):
sizes = torch.tensor(sizes, dtype=torch.int64)
assert sizes.dtype == torch.int64
sizes = torch.cumsum(
torch.cat(
(torch.tensor([0], dtype=torch.int64, device=sizes.device), sizes),
dim=0,
),
dim=0,
)
return sizes
| AllToAllBase |
python | numpy__numpy | numpy/f2py/symbolic.py | {
"start": 1294,
"end": 1613
} | class ____(Enum):
"""
Used as Expr op attribute.
"""
INTEGER = 10
REAL = 12
COMPLEX = 15
STRING = 20
ARRAY = 30
SYMBOL = 40
TERNARY = 100
APPLY = 200
INDEXING = 210
CONCAT = 220
RELATIONAL = 300
TERMS = 1000
FACTORS = 2000
REF = 3000
DEREF = 3001
| Op |
python | ray-project__ray | python/ray/llm/_internal/common/callbacks/base.py | {
"start": 398,
"end": 1505
} | class ____:
"""
Context object passed to all callback hooks.
Callbacks can read and modify fields as needed.
"""
worker_node_download_model: Optional["NodeModelDownloadable"] = None
"""Model download configuration for worker nodes. Used to specify how
models should be downloaded and cached on worker nodes in distributed
deployments."""
placement_group: Optional[Any] = None
"""Ray placement group for resource allocation and scheduling. Controls
where and how resources are allocated across the cluster."""
runtime_env: Optional[Dict[str, Any]] = None
"""Runtime environment configuration for the Ray workers. Includes
dependencies, environment variables, and other runtime settings."""
custom_data: Dict[str, Any] = field(default_factory=dict)
"""Flexible dictionary for callback-specific state and data. Allows
callbacks to store and share custom information during initialization."""
run_init_node: bool = True
"""Whether to run model downloads during initialization. Set to False
to skip downloading models."""
| CallbackCtx |
python | huggingface__transformers | src/transformers/models/nystromformer/modeling_nystromformer.py | {
"start": 17406,
"end": 17592
} | class ____(PreTrainedModel):
config: NystromformerConfig
base_model_prefix = "nystromformer"
supports_gradient_checkpointing = True
@auto_docstring
| NystromformerPreTrainedModel |
python | encode__django-rest-framework | tests/test_generics.py | {
"start": 24270,
"end": 25231
} | class ____(TestCase):
def test_serializer_class_not_provided(self):
class NoSerializerClass(generics.GenericAPIView):
pass
with pytest.raises(AssertionError) as excinfo:
NoSerializerClass().get_serializer_class()
assert str(excinfo.value) == (
"'NoSerializerClass' should either include a `serializer_class` "
"attribute, or override the `get_serializer_class()` method.")
def test_given_context_not_overridden(self):
context = object()
class View(generics.ListAPIView):
serializer_class = serializers.Serializer
def list(self, request):
response = Response()
response.serializer = self.get_serializer(context=context)
return response
response = View.as_view()(factory.get('/'))
serializer = response.serializer
assert serializer.context is context
| TestSerializer |
python | PyCQA__pylint | tests/functional/e/enum_self_defined_member_6805.py | {
"start": 317,
"end": 582
} | class ____(metaclass=Foo):
def __new__(cls):
return Parent.__new__(cls)
def __getattr__(self, item):
return item
def magic(self):
return self.dynamic
NotEnumHasDynamicGetAttrMetaclass().magic()
| NotEnumHasDynamicGetAttrMetaclass |
python | sympy__sympy | sympy/stats/joint_rv_types.py | {
"start": 4652,
"end": 8222
} | class ____(JointDistribution):
_argnames = ('mu', 'sigma')
is_Continuous=True
@property
def set(self):
k = self.mu.shape[0]
return S.Reals**k
@staticmethod
def check(mu, sigma):
_value_check(mu.shape[0] == sigma.shape[0],
"Size of the mean vector and covariance matrix are incorrect.")
#check if covariance matrix is positive semi definite or not.
if not isinstance(sigma, MatrixSymbol):
_value_check(sigma.is_positive_semidefinite,
"The covariance matrix must be positive semi definite. ")
def pdf(self, *args):
mu, sigma = self.mu, self.sigma
k = mu.shape[0]
if len(args) == 1 and args[0].is_Matrix:
args = args[0]
else:
args = ImmutableMatrix(args)
x = args - mu
density = S.One/sqrt((2*pi)**(k)*det(sigma))*exp(
Rational(-1, 2)*x.transpose()*(sigma.inv()*x))
return MatrixElement(density, 0, 0)
def _marginal_distribution(self, indices, sym):
sym = ImmutableMatrix([Indexed(sym, i) for i in indices])
_mu, _sigma = self.mu, self.sigma
k = self.mu.shape[0]
for i in range(k):
if i not in indices:
_mu = _mu.row_del(i)
_sigma = _sigma.col_del(i)
_sigma = _sigma.row_del(i)
return Lambda(tuple(sym), S.One/sqrt((2*pi)**(len(_mu))*det(_sigma))*exp(
Rational(-1, 2)*(_mu - sym).transpose()*(_sigma.inv()*\
(_mu - sym)))[0])
def MultivariateNormal(name, mu, sigma):
r"""
Creates a continuous random variable with Multivariate Normal
Distribution.
The density of the multivariate normal distribution can be found at [1].
Parameters
==========
mu : List representing the mean or the mean vector
sigma : Positive semidefinite square matrix
Represents covariance Matrix.
If `\sigma` is noninvertible then only sampling is supported currently
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import MultivariateNormal, density, marginal_distribution
>>> from sympy import symbols, MatrixSymbol
>>> X = MultivariateNormal('X', [3, 4], [[2, 1], [1, 2]])
>>> y, z = symbols('y z')
>>> density(X)(y, z)
sqrt(3)*exp(-y**2/3 + y*z/3 + 2*y/3 - z**2/3 + 5*z/3 - 13/3)/(6*pi)
>>> density(X)(1, 2)
sqrt(3)*exp(-4/3)/(6*pi)
>>> marginal_distribution(X, X[1])(y)
exp(-(y - 4)**2/4)/(2*sqrt(pi))
>>> marginal_distribution(X, X[0])(y)
exp(-(y - 3)**2/4)/(2*sqrt(pi))
The example below shows that it is also possible to use
symbolic parameters to define the MultivariateNormal class.
>>> n = symbols('n', integer=True, positive=True)
>>> Sg = MatrixSymbol('Sg', n, n)
>>> mu = MatrixSymbol('mu', n, 1)
>>> obs = MatrixSymbol('obs', n, 1)
>>> X = MultivariateNormal('X', mu, Sg)
The density of a multivariate normal can be
calculated using a matrix argument, as shown below.
>>> density(X)(obs)
(exp(((1/2)*mu.T - (1/2)*obs.T)*Sg**(-1)*(-mu + obs))/sqrt((2*pi)**n*Determinant(Sg)))[0, 0]
References
==========
.. [1] https://en.wikipedia.org/wiki/Multivariate_normal_distribution
"""
return multivariate_rv(MultivariateNormalDistribution, name, mu, sigma)
#-------------------------------------------------------------------------------
# Multivariate Laplace distribution --------------------------------------------
| MultivariateNormalDistribution |
python | python__mypy | mypyc/test/test_run.py | {
"start": 16379,
"end": 18037
} | class ____(TestRun):
"""Run the tests with strict dunder typing."""
strict_dunder_typing = True
test_name_suffix = "_dunder_typing"
files = ["run-dunders.test", "run-floats.test"]
def fix_native_line_number(message: str, fnam: str, delta: int) -> str:
"""Update code locations in test case output to point to the .test file.
The description of the test case is written to native.py, and line numbers
in test case output often are relative to native.py. This translates the
line numbers to be relative to the .test file that contains the test case
description, and also updates the file name to the .test file name.
Args:
message: message to update
fnam: path of the .test file
delta: line number of the beginning of the test case in the .test file
Returns updated message (or original message if we couldn't find anything).
"""
fnam = os.path.basename(fnam)
message = re.sub(
r"native\.py:([0-9]+):", lambda m: "%s:%d:" % (fnam, int(m.group(1)) + delta), message
)
message = re.sub(
r'"native.py", line ([0-9]+),',
lambda m: '"%s", line %d,' % (fnam, int(m.group(1)) + delta),
message,
)
return message
def copy_output_files(target_dir: str) -> None:
try:
os.mkdir(target_dir)
except OSError:
# Only copy data for the first failure, to avoid excessive output in case
# many tests fail
return
for fnam in glob.glob("build/*.[ch]"):
shutil.copy(fnam, target_dir)
sys.stderr.write(f"\nGenerated files: {target_dir} (for first failure only)\n\n")
| TestRunStrictDunderTyping |
python | pytorch__pytorch | test/test_testing.py | {
"start": 32686,
"end": 33664
} | class ____(TestCase):
@deviceCountAtLeast(1)
def test_mismatching_device(self, devices):
for actual_device, expected_device in itertools.permutations(("cpu", *devices), 2):
actual = torch.empty((), device=actual_device)
expected = actual.clone().to(expected_device)
for fn in assert_close_with_inputs(actual, expected):
with self.assertRaisesRegex(AssertionError, "device"):
fn()
@deviceCountAtLeast(1)
def test_mismatching_device_no_check(self, devices):
for actual_device, expected_device in itertools.permutations(("cpu", *devices), 2):
actual = torch.rand((), device=actual_device)
expected = actual.clone().to(expected_device)
for fn in assert_close_with_inputs(actual, expected):
fn(check_device=False)
instantiate_device_type_tests(TestAssertCloseMultiDevice, globals(), only_for="cuda")
| TestAssertCloseMultiDevice |
python | networkx__networkx | networkx/readwrite/text.py | {
"start": 221,
"end": 424
} | class ____:
@classmethod
def as_dict(cls):
return {
a: getattr(cls, a)
for a in dir(cls)
if not a.startswith("_") and a != "as_dict"
}
| BaseGlyphs |
python | doocs__leetcode | solution/0300-0399/0398.Random Pick Index/Solution.py | {
"start": 0,
"end": 469
} | class ____:
def __init__(self, nums: List[int]):
self.nums = nums
def pick(self, target: int) -> int:
n = ans = 0
for i, v in enumerate(self.nums):
if v == target:
n += 1
x = random.randint(1, n)
if x == n:
ans = i
return ans
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.pick(target)
| Solution |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 59281,
"end": 59659
} | class ____(Elemwise):
_parameters = ["frame", "skipna", "ddof", "numeric_only"]
_defaults = {"skipna": True, "ddof": 1, "numeric_only": False}
_keyword_only = ["skipna", "ddof", "numeric_only"]
operation = M.var
_is_length_preserving = True
@functools.cached_property
def _kwargs(self) -> dict:
return {"axis": 1, **super()._kwargs}
| VarColumns |
python | huggingface__transformers | tests/models/gpt_neox/test_modeling_gpt_neox.py | {
"start": 11245,
"end": 14626
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
test_missing_keys = False
def setUp(self):
self.model_tester = GPTNeoXModelTester(self)
self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(config, input_ids, input_mask)
def test_model_as_decoder(self):
config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
def test_model_as_decoder_with_default_input_mask(self):
config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
def test_decoder_model_past_large_inputs(self):
config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)
def test_model_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_model_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_model_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_model_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_cached_forward_with_and_without_attention_mask(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_cached_forward_with_and_without_attention_mask(*config_and_inputs)
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@require_torch
| GPTNeoXModelTest |
python | jina-ai__jina | jina/serve/runtimes/asyncio.py | {
"start": 1133,
"end": 15380
} | class ____:
"""
Runtime to make sure that a server can asynchronously run inside a new asynchronous loop. It will make sure that the server is run forever while handling the TERMINATE signals
to be received by the orchestrator to shutdown the server and its resources.
"""
def __init__(
self,
args: 'argparse.Namespace',
cancel_event: Optional[
Union['asyncio.Event', 'multiprocessing.Event', 'threading.Event']
] = None,
signal_handlers_installed_event: Optional[
Union['asyncio.Event', 'multiprocessing.Event', 'threading.Event']
] = None,
req_handler_cls=None,
gateway_load_balancer: bool = False,
**kwargs,
):
self.req_handler_cls = req_handler_cls
self.gateway_load_balancer = gateway_load_balancer
self.args = args
if args.name:
self.name = f'{args.name}/{self.__class__.__name__}'
else:
self.name = self.__class__.__name__
self.logger = JinaLogger(self.name, **vars(self.args))
self._loop = asyncio.new_event_loop()
asyncio.set_event_loop(self._loop)
self.is_cancel = cancel_event or asyncio.Event()
self.is_signal_handlers_installed = (
signal_handlers_installed_event or asyncio.Event()
)
self.logger.debug(f'Setting signal handlers')
if not __windows__:
def _cancel(sig):
def _inner_cancel(*args, **kwargs):
self.logger.debug(f'Received signal {sig.name}')
self.is_cancel.set(),
return _inner_cancel
for sig in HANDLED_SIGNALS:
self._loop.add_signal_handler(sig, _cancel(sig), sig, None)
else:
def _cancel(signum, frame):
self.logger.debug(f'Received signal {signum}')
self.is_cancel.set(),
for sig in HANDLED_SIGNALS:
signal.signal(sig, _cancel)
self.logger.debug(f'Signal handlers already set')
self.is_signal_handlers_installed.set()
self._start_time = time.time()
self._loop.run_until_complete(self.async_setup())
self._send_telemetry_event(event='start')
def run_forever(self):
"""
Running method to block the main thread.
Run the event loop until a Future is done.
"""
self._loop.run_until_complete(self._loop_body())
def teardown(self):
"""Call async_teardown() and stop and close the event loop."""
self._loop.run_until_complete(self.async_teardown())
self._loop.stop()
self._loop.close()
self.logger.close()
self._stop_time = time.time()
self._send_telemetry_event(
event='stop', extra_kwargs={'duration': self._stop_time - self._start_time}
)
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# threads are not using asyncio.Event, but threading.Event
if isinstance(self.is_cancel, asyncio.Event) and not hasattr(
self.server, '_should_exit'
):
await self.is_cancel.wait()
else:
while not self.is_cancel.is_set() and not getattr(
self.server, '_should_exit', False
):
await asyncio.sleep(0.1)
await self.async_teardown()
async def _loop_body(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
try:
await asyncio.gather(self.async_run_forever(), self._wait_for_cancel())
except asyncio.CancelledError:
self.logger.warning('received terminate ctrl message from main process')
def _cancel(self):
"""
Signal the runtime to terminate
"""
self.is_cancel.set()
def _get_server(self):
# construct server type based on protocol (and potentially req handler class to keep backwards compatibility)
from jina.enums import ProtocolType, ProviderType
if self.req_handler_cls.__name__ == 'GatewayRequestHandler':
self.timeout_send = self.args.timeout_send
if self.timeout_send:
self.timeout_send /= 1e3 # convert ms to seconds
if not self.args.port:
self.args.port = random_ports(len(self.args.protocol))
_set_gateway_uses(
self.args, gateway_load_balancer=self.gateway_load_balancer
)
uses_with = self.args.uses_with or {}
non_defaults = ArgNamespace.get_non_defaults_args(
self.args, set_gateway_parser()
)
if 'title' not in non_defaults:
uses_with['title'] = self.args.title
if 'description' not in non_defaults:
uses_with['description'] = self.args.description
if 'no_debug_endpoints' not in non_defaults:
uses_with['no_debug_endpoints'] = self.args.no_debug_endpoints
if 'no_crud_endpoints' not in non_defaults:
uses_with['no_crud_endpoints'] = self.args.no_crud_endpoints
if 'expose_endpoints' not in non_defaults:
uses_with['expose_endpoints'] = self.args.expose_endpoints
if 'expose_graphql_endpoint' not in non_defaults:
uses_with['expose_graphql_endpoint'] = self.args.expose_graphql_endpoint
if 'cors' not in non_defaults:
uses_with['cors'] = self.args.cors
server = BaseGateway.load_config(
self.args.uses,
uses_with=dict(
**non_defaults,
**uses_with,
),
uses_metas={},
runtime_args={ # these are not parsed to the yaml config file but are pass directly during init
**vars(self.args),
'default_port': getattr(self.args, 'default_port', False),
'gateway_load_balancer': self.gateway_load_balancer,
'timeout_send': self.timeout_send,
},
py_modules=self.args.py_modules,
extra_search_paths=self.args.extra_search_paths,
)
if isinstance(server, BaseServer):
server.is_cancel = self.is_cancel
return server
elif (
hasattr(self.args, 'provider')
and self.args.provider == ProviderType.SAGEMAKER
):
from jina.serve.runtimes.servers.http import SagemakerHTTPServer
return SagemakerHTTPServer(
name=self.args.name,
runtime_args=self.args,
req_handler_cls=self.req_handler_cls,
proxy=getattr(self.args, 'proxy', None),
uvicorn_kwargs=getattr(self.args, 'uvicorn_kwargs', None),
ssl_keyfile=getattr(self.args, 'ssl_keyfile', None),
ssl_certfile=getattr(self.args, 'ssl_certfile', None),
cors=getattr(self.args, 'cors', None),
is_cancel=self.is_cancel,
)
elif (
hasattr(self.args, 'provider')
and self.args.provider == ProviderType.AZURE
):
from jina.serve.runtimes.servers.http import AzureHTTPServer
return AzureHTTPServer(
name=self.args.name,
runtime_args=self.args,
req_handler_cls=self.req_handler_cls,
proxy=getattr(self.args, 'proxy', None),
uvicorn_kwargs=getattr(self.args, 'uvicorn_kwargs', None),
ssl_keyfile=getattr(self.args, 'ssl_keyfile', None),
ssl_certfile=getattr(self.args, 'ssl_certfile', None),
cors=getattr(self.args, 'cors', None),
is_cancel=self.is_cancel,
)
elif not hasattr(self.args, 'protocol') or (
len(self.args.protocol) == 1 and self.args.protocol[0] == ProtocolType.GRPC
):
from jina.serve.runtimes.servers.grpc import GRPCServer
return GRPCServer(
name=self.args.name,
runtime_args=self.args,
req_handler_cls=self.req_handler_cls,
grpc_server_options=self.args.grpc_server_options,
ssl_keyfile=getattr(self.args, 'ssl_keyfile', None),
ssl_certfile=getattr(self.args, 'ssl_certfile', None),
proxy=getattr(self.args, 'proxy', None),
)
elif (
len(self.args.protocol) == 1 and self.args.protocol[0] == ProtocolType.HTTP
):
from jina.serve.runtimes.servers.http import (
HTTPServer, # we need a concrete implementation of this
)
return HTTPServer(
name=self.args.name,
runtime_args=self.args,
req_handler_cls=self.req_handler_cls,
proxy=getattr(self.args, 'proxy', None),
uvicorn_kwargs=getattr(self.args, 'uvicorn_kwargs', None),
ssl_keyfile=getattr(self.args, 'ssl_keyfile', None),
ssl_certfile=getattr(self.args, 'ssl_certfile', None),
cors=getattr(self.args, 'cors', None),
is_cancel=self.is_cancel,
)
elif (
len(self.args.protocol) == 1
and self.args.protocol[0] == ProtocolType.WEBSOCKET
):
from jina.serve.runtimes.servers.websocket import (
WebSocketServer, # we need a concrete implementation of this
)
return WebSocketServer(
name=self.args.name,
runtime_args=self.args,
req_handler_cls=self.req_handler_cls,
proxy=getattr(self.args, 'proxy', None),
uvicorn_kwargs=getattr(self.args, 'uvicorn_kwargs', None),
ssl_keyfile=getattr(self.args, 'ssl_keyfile', None),
ssl_certfile=getattr(self.args, 'ssl_certfile', None),
is_cancel=self.is_cancel,
)
elif len(self.args.protocol) > 1:
from jina.serve.runtimes.servers.composite import (
CompositeServer, # we need a concrete implementation of this
)
return CompositeServer(
name=self.args.name,
runtime_args=self.args,
req_handler_cls=self.req_handler_cls,
ssl_keyfile=getattr(self.args, 'ssl_keyfile', None),
ssl_certfile=getattr(self.args, 'ssl_certfile', None),
is_cancel=self.is_cancel,
)
def _send_telemetry_event(self, event, extra_kwargs=None):
gateway_kwargs = {}
if self.req_handler_cls.__name__ == 'WorkerRequestHandler':
runtime_cls_name = 'WorkerRuntime'
elif self.req_handler_cls.__name__ == 'HeaderRequestHandler':
runtime_cls_name = 'HeadRuntime'
else:
runtime_cls_name = self.server.__class__.__name__
gateway_kwargs['is_custom_gateway'] = self.server.__class__ not in [
CompositeGateway,
GRPCGateway,
HTTPGateway,
WebSocketGateway,
]
gateway_kwargs['protocol'] = self.args.protocol
extra_kwargs = extra_kwargs or {}
send_telemetry_event(
event=event,
obj_cls_name=runtime_cls_name,
entity_id=self._entity_id,
**gateway_kwargs,
**extra_kwargs,
)
async def async_setup(self):
"""
The async method setup the runtime.
Setup the uvicorn server.
"""
if not (is_port_free(self.args.host, self.args.port)):
raise PortAlreadyUsed(f'port:{self.args.port}')
self.server = self._get_server()
await self.server.setup_server()
async def async_teardown(self):
"""Shutdown the server."""
await self.server.shutdown()
async def async_run_forever(self):
"""Running method of the server."""
await self.server.run_server()
self.is_cancel.set()
@property
def _entity_id(self):
import uuid
if hasattr(self, '_entity_id_'):
return self._entity_id_
self._entity_id_ = uuid.uuid1().hex
return self._entity_id_
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type == RuntimeTerminated:
self.logger.debug(f'{self!r} is ended')
elif exc_type == KeyboardInterrupt:
self.logger.debug(f'{self!r} is interrupted by user')
elif exc_type and issubclass(exc_type, Exception):
self.logger.error(
(
f'{exc_val!r} during {self.run_forever!r}'
+ f'\n add "--quiet-error" to suppress the exception details'
if not self.args.quiet_error
else ''
),
exc_info=not self.args.quiet_error,
)
try:
self.teardown()
except OSError:
# OSError(Stream is closed) already
pass
except Exception as ex:
self.logger.error(
(
f'{ex!r} during {self.teardown!r}'
+ f'\n add "--quiet-error" to suppress the exception details'
if not self.args.quiet_error
else ''
),
exc_info=not self.args.quiet_error,
)
# https://stackoverflow.com/a/28158006
# return True will silent all exception stack trace here, silence is desired here as otherwise it is too
# noisy
#
# doc: If an exception is supplied, and the method wishes to suppress the exception (i.e., prevent it
# from being propagated), it should return a true value. Otherwise, the exception will be processed normally
# upon exit from this method.
return True
| AsyncNewLoopRuntime |
python | huggingface__transformers | src/transformers/models/video_llama_3/configuration_video_llama_3.py | {
"start": 1256,
"end": 4210
} | class ____(PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`VideoLlama3VisionModel`]. It is used to instantiate a
VideoLLaMA3 vision encoder model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of
VideoLLaMA3-2B [lkhl/VideoLLaMA3-2B-Image-HF](https://huggingface.co/lkhl/VideoLLaMA3-2B-Image-HF).
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
"""
model_type = "video_llama_3_vision"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=768,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=12,
num_channels=3,
patch_size=16,
hidden_act="gelu_pytorch_tanh",
layer_norm_eps=1e-6,
attention_dropout=0.0,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
| VideoLlama3VisionConfig |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 36175,
"end": 37964
} | class ____(CompositeModel):
def __init__(self, dmm, fe_type):
super(GeneratorModel, self).__init__(dmm, fe_type)
# XXX Fold this in DataPacker?
self._arg_models = [self._dmm.lookup(t) for t in fe_type.arg_types
if not isinstance(t, types.Omitted)]
self._state_models = [self._dmm.lookup(t) for t in fe_type.state_types]
self._args_be_type = ir.LiteralStructType(
[t.get_data_type() for t in self._arg_models])
self._state_be_type = ir.LiteralStructType(
[t.get_data_type() for t in self._state_models])
# The whole generator closure
self._be_type = ir.LiteralStructType(
[self._dmm.lookup(types.int32).get_value_type(),
self._args_be_type, self._state_be_type])
self._be_ptr_type = self._be_type.as_pointer()
def get_value_type(self):
"""
The generator closure is passed around as a reference.
"""
return self._be_ptr_type
def get_argument_type(self):
return self._be_ptr_type
def get_return_type(self):
return self._be_type
def get_data_type(self):
return self._be_type
def as_argument(self, builder, value):
return value
def from_argument(self, builder, value):
return value
def as_return(self, builder, value):
return self.as_data(builder, value)
def from_return(self, builder, value):
return self.from_data(builder, value)
def as_data(self, builder, value):
return builder.load(value)
def from_data(self, builder, value):
stack = cgutils.alloca_once(builder, value.type)
builder.store(value, stack)
return stack
@register_default(types.ArrayCTypes)
| GeneratorModel |
python | networkx__networkx | networkx/algorithms/components/tests/test_weakly_connected.py | {
"start": 83,
"end": 3083
} | class ____:
@classmethod
def setup_class(cls):
cls.gc = []
G = nx.DiGraph()
G.add_edges_from(
[
(1, 2),
(2, 3),
(2, 8),
(3, 4),
(3, 7),
(4, 5),
(5, 3),
(5, 6),
(7, 4),
(7, 6),
(8, 1),
(8, 7),
]
)
C = [[3, 4, 5, 7], [1, 2, 8], [6]]
cls.gc.append((G, C))
G = nx.DiGraph()
G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)])
C = [[2, 3, 4], [1]]
cls.gc.append((G, C))
G = nx.DiGraph()
G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)])
C = [[1, 2, 3]]
cls.gc.append((G, C))
# Eppstein's tests
G = nx.DiGraph({0: [1], 1: [2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []})
C = [[0], [1], [2], [3], [4], [5], [6]]
cls.gc.append((G, C))
G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 3: [4], 4: [3]})
C = [[0, 1, 2], [3, 4]]
cls.gc.append((G, C))
def test_weakly_connected_components(self):
for G, C in self.gc:
U = G.to_undirected()
w = {frozenset(g) for g in nx.weakly_connected_components(G)}
c = {frozenset(g) for g in nx.connected_components(U)}
assert w == c
def test_number_weakly_connected_components(self):
for G, C in self.gc:
U = G.to_undirected()
w = nx.number_weakly_connected_components(G)
c = nx.number_connected_components(U)
assert w == c
def test_is_weakly_connected(self):
for G, C in self.gc:
U = G.to_undirected()
assert nx.is_weakly_connected(G) == nx.is_connected(U)
def test_null_graph(self):
G = nx.DiGraph()
assert list(nx.weakly_connected_components(G)) == []
assert nx.number_weakly_connected_components(G) == 0
with pytest.raises(nx.NetworkXPointlessConcept):
next(nx.is_weakly_connected(G))
def test_connected_raise(self):
G = nx.Graph()
with pytest.raises(NetworkXNotImplemented):
next(nx.weakly_connected_components(G))
pytest.raises(NetworkXNotImplemented, nx.number_weakly_connected_components, G)
pytest.raises(NetworkXNotImplemented, nx.is_weakly_connected, G)
def test_connected_mutability(self):
DG = nx.path_graph(5, create_using=nx.DiGraph)
G = nx.disjoint_union(DG, DG)
seen = set()
for component in nx.weakly_connected_components(G):
assert len(seen & component) == 0
seen.update(component)
component.clear()
def test_is_weakly_connected_empty_graph_raises():
G = nx.DiGraph()
with pytest.raises(nx.NetworkXPointlessConcept, match="Connectivity is undefined"):
nx.is_weakly_connected(G)
| TestWeaklyConnected |
python | neetcode-gh__leetcode | python/0025-reverse-nodes-in-k-group.py | {
"start": 0,
"end": 772
} | class ____:
def reverseKGroup(self, head: ListNode, k: int) -> ListNode:
dummy = ListNode(0, head)
groupPrev = dummy
while True:
kth = self.getKth(groupPrev, k)
if not kth:
break
groupNext = kth.next
# reverse group
prev, curr = kth.next, groupPrev.next
while curr != groupNext:
tmp = curr.next
curr.next = prev
prev = curr
curr = tmp
tmp = groupPrev.next
groupPrev.next = kth
groupPrev = tmp
return dummy.next
def getKth(self, curr, k):
while curr and k > 0:
curr = curr.next
k -= 1
return curr
| Solution |
python | tensorflow__tensorflow | tensorflow/python/tools/api/generator2/shared/exported_api.py | {
"start": 1546,
"end": 3278
} | class ____(object):
"""ExportedApi is a collection of ExportedSymbols."""
_docs: set[ExportedDoc]
_symbols: set[ExportedSymbol]
def __init__(
self,
*,
docs: Iterable[ExportedDoc] = (),
symbols: Iterable[ExportedSymbol] = (),
):
self._docs = set(docs)
self._symbols = set(symbols)
def write(self, filename: str, **kwargs) -> None:
"""Writes exports to filename."""
with open(filename, mode="w", encoding="utf-8") as f:
json.dump(
{
"docs": [d._asdict() for d in sorted(self.docs)],
"symbols": [s._asdict() for s in sorted(self.symbols)],
},
f,
**kwargs,
)
def read(self, filename: str) -> None:
"""Reads exports from filename."""
with open(filename, mode="r", encoding="utf-8") as f:
data = json.load(f)
self._docs.update(ExportedDoc.create(**d) for d in data["docs"])
self._symbols.update(ExportedSymbol.create(**s) for s in data["symbols"])
def add_symbol(self, export: ExportedSymbol) -> None:
self._symbols.add(export)
def add_doc(self, export: ExportedDoc) -> None:
self._docs.add(export)
@property
def docs(self) -> Iterable[ExportedDoc]:
return self._docs
@property
def symbols(self) -> Iterable[ExportedSymbol]:
return self._symbols
def __str__(self) -> str:
return json.dumps({
"docs": [d._asdict() for d in sorted(self.docs)],
"symbols": [s._asdict() for s in sorted(self.symbols)],
})
def __repr__(self) -> str:
return str(self)
def __eq__(self, o: Any) -> bool:
return (
type(self) is type(o)
and self.docs == o.docs
and self.symbols == o.symbols
)
| ExportedApi |
python | streamlit__streamlit | lib/streamlit/elements/widgets/color_picker.py | {
"start": 1896,
"end": 10028
} | class ____:
@gather_metrics("color_picker")
def color_picker(
self,
label: str,
value: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: Width = "content",
) -> str:
r"""Display a color picker widget.
Parameters
----------
label : str
A short label explaining to the user what this input is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
value : str
The hex value of this widget when it first renders. If None,
defaults to black.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_change : callable
An optional callback invoked when this color_picker's value
changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
disabled : bool
An optional boolean that disables the color picker if set to
``True``. The default is ``False``.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
If this is ``"collapsed"``, Streamlit displays no label or spacer.
width : "content", "stretch", or int
The width of the color picker widget. This can be one of the
following:
- ``"content"`` (default): The width of the widget matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the widget matches the width of the
parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
str
The selected color as a hex string.
Example
-------
>>> import streamlit as st
>>>
>>> color = st.color_picker("Pick A Color", "#00f900")
>>> st.write("The current color is", color)
.. output::
https://doc-color-picker.streamlit.app/
height: 335px
"""
ctx = get_script_run_ctx()
return self._color_picker(
label=label,
value=value,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
disabled=disabled,
label_visibility=label_visibility,
width=width,
ctx=ctx,
)
def _color_picker(
self,
label: str,
value: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: Width = "content",
ctx: ScriptRunContext | None = None,
) -> str:
key = to_key(key)
check_widget_policies(
self.dg,
key,
on_change,
default_value=value,
)
maybe_raise_label_warnings(label, label_visibility)
# Enforce minimum width of 40px to match the color block's intrinsic size.
# The color block is always 40x40px, so the widget should never be smaller.
min_width_px = 40
if isinstance(width, int) and width < min_width_px:
width = min_width_px
validate_width(width, allow_content=True)
layout_config = LayoutConfig(width=width)
element_id = compute_and_register_element_id(
"color_picker",
user_key=key,
key_as_main_identity=True,
dg=self.dg,
label=label,
value=str(value),
help=help,
width=width,
)
# set value default
if value is None:
value = "#000000"
# make sure the value is a string
if not isinstance(value, str):
raise StreamlitAPIException(f"""
Color Picker Value has invalid type: {type(value).__name__}. Expects a hex string
like '#00FFAA' or '#000'.
""")
# validate the value and expects a hex string
match = re.match(r"^#(?:[0-9a-fA-F]{3}){1,2}$", value)
if not match:
raise StreamlitAPIException(f"""
'{value}' is not a valid hex code for colors. Valid ones are like
'#00FFAA' or '#000'.
""")
color_picker_proto = ColorPickerProto()
color_picker_proto.id = element_id
color_picker_proto.label = label
color_picker_proto.default = str(value)
color_picker_proto.form_id = current_form_id(self.dg)
color_picker_proto.disabled = disabled
color_picker_proto.label_visibility.value = get_label_visibility_proto_value(
label_visibility
)
if help is not None:
color_picker_proto.help = dedent(help)
serde = ColorPickerSerde(value)
widget_state = register_widget(
color_picker_proto.id,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
value_type="string_value",
)
if widget_state.value_changed:
color_picker_proto.value = widget_state.value
color_picker_proto.set_value = True
self.dg._enqueue(
"color_picker", color_picker_proto, layout_config=layout_config
)
return widget_state.value
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| ColorPickerMixin |
python | pydata__xarray | xarray/coding/common.py | {
"start": 1709,
"end": 5083
} | class ____(indexing.ExplicitlyIndexedNDArrayMixin):
"""Lazily computed array holding values of elemwise-function.
Do not construct this object directly: call lazy_elemwise_func instead.
Values are computed upon indexing or coercion to a NumPy array.
"""
def __init__(self, array, func: Callable, dtype: np.typing.DTypeLike | None):
assert not is_chunked_array(array)
self.array = indexing.as_indexable(array)
self.func = func
self._dtype = dtype
@property
def dtype(self) -> np.dtype:
return np.dtype(self._dtype)
def transpose(self, order):
# For elementwise functions, we can compose transpose and function application
return type(self)(self.array.transpose(order), self.func, self.dtype)
def _oindex_get(self, key):
return type(self)(self.array.oindex[key], self.func, self.dtype)
def _vindex_get(self, key):
return type(self)(self.array.vindex[key], self.func, self.dtype)
def __getitem__(self, key):
return type(self)(self.array[key], self.func, self.dtype)
def get_duck_array(self):
return self.func(self.array.get_duck_array())
async def async_get_duck_array(self):
return self.func(await self.array.async_get_duck_array())
def __repr__(self) -> str:
return f"{type(self).__name__}({self.array!r}, func={self.func!r}, dtype={self.dtype!r})"
def lazy_elemwise_func(array, func: Callable, dtype: np.typing.DTypeLike | None):
"""Lazily apply an element-wise function to an array.
Parameters
----------
array : any valid value of Variable._data
func : callable
Function to apply to indexed slices of an array. For use with dask,
this should be a pickle-able object.
dtype : coercible to np.dtype
Dtype for the result of this function.
Returns
-------
Either a dask.array.Array or _ElementwiseFunctionArray.
"""
if is_chunked_array(array):
chunkmanager = get_chunked_array_type(array)
return chunkmanager.map_blocks(func, array, dtype=dtype) # type: ignore[arg-type]
else:
return _ElementwiseFunctionArray(array, func, dtype)
def safe_setitem(dest, key: Hashable, value, name: T_Name = None):
if key in dest:
var_str = f" on variable {name!r}" if name else ""
raise ValueError(
f"failed to prevent overwriting existing key {key} in attrs{var_str}. "
"This is probably an encoding field used by xarray to describe "
"how a variable is serialized. To proceed, remove this key from "
"the variable's attributes manually."
)
dest[key] = value
def pop_to(
source: MutableMapping, dest: MutableMapping, key: Hashable, name: T_Name = None
) -> Any:
"""
A convenience function which pops a key k from source to dest.
None values are not passed on. If k already exists in dest an
error is raised.
"""
value = source.pop(key, None)
if value is not None:
safe_setitem(dest, key, value, name=name)
return value
def unpack_for_encoding(var: Variable) -> T_VarTuple:
return var.dims, var.data, var.attrs.copy(), var.encoding.copy()
def unpack_for_decoding(var: Variable) -> T_VarTuple:
return var.dims, var._data, var.attrs.copy(), var.encoding.copy()
| _ElementwiseFunctionArray |
python | pyparsing__pyparsing | examples/adventureEngine.py | {
"start": 6888,
"end": 7605
} | class ____(Command):
def __init__(self, quals):
super().__init__("EXAMINE", "examining")
self.subject = Item.items[quals.item]
@staticmethod
def help_description():
return "EXAMINE or EX or X - look closely at an object"
def _do_command(self, player):
msg = random.choice(
[
"It's {}.",
"It's just {}.",
"It's a beautiful {1}.",
"It's a rare and beautiful {1}.",
"It's a rare {1}.",
"Just {}, nothing special...",
"{0}, just {0}."
]
)
print(msg.format(a_or_an(self.subject), self.subject).capitalize())
| ExamineCommand |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/__main__.py | {
"start": 744,
"end": 8499
} | class ____:
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
for y in range(0, 5):
for x in range(options.max_width):
h = x / options.max_width
l = 0.1 + ((y / 5) * 0.7)
r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
r2, g2, b2 = colorsys.hls_to_rgb(h, l + 0.7 / 10, 1.0)
bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
yield Segment("▄", Style(color=color, bgcolor=bgcolor))
yield Segment.line()
def __rich_measure__(
self, console: "Console", options: ConsoleOptions
) -> Measurement:
return Measurement(1, options.max_width)
def make_test_card() -> Table:
"""Get a renderable that demonstrates a number of features."""
table = Table.grid(padding=1, pad_edge=True)
table.title = "Rich features"
table.add_column("Feature", no_wrap=True, justify="center", style="bold red")
table.add_column("Demonstration")
color_table = Table(
box=None,
expand=False,
show_header=False,
show_edge=False,
pad_edge=False,
)
color_table.add_row(
(
"✓ [bold green]4-bit color[/]\n"
"✓ [bold blue]8-bit color[/]\n"
"✓ [bold magenta]Truecolor (16.7 million)[/]\n"
"✓ [bold yellow]Dumb terminals[/]\n"
"✓ [bold cyan]Automatic color conversion"
),
ColorBox(),
)
table.add_row("Colors", color_table)
table.add_row(
"Styles",
"All ansi styles: [bold]bold[/], [dim]dim[/], [italic]italic[/italic], [underline]underline[/], [strike]strikethrough[/], [reverse]reverse[/], and even [blink]blink[/].",
)
lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque in metus sed sapien ultricies pretium a at justo. Maecenas luctus velit et auctor maximus."
lorem_table = Table.grid(padding=1, collapse_padding=True)
lorem_table.pad_edge = False
lorem_table.add_row(
Text(lorem, justify="left", style="green"),
Text(lorem, justify="center", style="yellow"),
Text(lorem, justify="right", style="blue"),
Text(lorem, justify="full", style="red"),
)
table.add_row(
"Text",
Group(
Text.from_markup(
"""Word wrap text. Justify [green]left[/], [yellow]center[/], [blue]right[/] or [red]full[/].\n"""
),
lorem_table,
),
)
def comparison(renderable1: RenderableType, renderable2: RenderableType) -> Table:
table = Table(show_header=False, pad_edge=False, box=None, expand=True)
table.add_column("1", ratio=1)
table.add_column("2", ratio=1)
table.add_row(renderable1, renderable2)
return table
table.add_row(
"Asian\nlanguage\nsupport",
":flag_for_china: 该库支持中文,日文和韩文文本!\n:flag_for_japan: ライブラリは中国語、日本語、韓国語のテキストをサポートしています\n:flag_for_south_korea: 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다",
)
markup_example = (
"[bold magenta]Rich[/] supports a simple [i]bbcode[/i]-like [b]markup[/b] for [yellow]color[/], [underline]style[/], and emoji! "
":+1: :apple: :ant: :bear: :baguette_bread: :bus: "
)
table.add_row("Markup", markup_example)
example_table = Table(
show_edge=False,
show_header=True,
expand=False,
row_styles=["none", "dim"],
box=box.SIMPLE,
)
example_table.add_column("[green]Date", style="green", no_wrap=True)
example_table.add_column("[blue]Title", style="blue")
example_table.add_column(
"[cyan]Production Budget",
style="cyan",
justify="right",
no_wrap=True,
)
example_table.add_column(
"[magenta]Box Office",
style="magenta",
justify="right",
no_wrap=True,
)
example_table.add_row(
"Dec 20, 2019",
"Star Wars: The Rise of Skywalker",
"$275,000,000",
"$375,126,118",
)
example_table.add_row(
"May 25, 2018",
"[b]Solo[/]: A Star Wars Story",
"$275,000,000",
"$393,151,347",
)
example_table.add_row(
"Dec 15, 2017",
"Star Wars Ep. VIII: The Last Jedi",
"$262,000,000",
"[bold]$1,332,539,889[/bold]",
)
example_table.add_row(
"May 19, 1999",
"Star Wars Ep. [b]I[/b]: [i]The phantom Menace",
"$115,000,000",
"$1,027,044,677",
)
table.add_row("Tables", example_table)
code = '''\
def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
"""Iterate and generate a tuple with a flag for last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
for value in iter_values:
yield False, previous_value
previous_value = value
yield True, previous_value'''
pretty_data = {
"foo": [
3.1427,
(
"Paul Atreides",
"Vladimir Harkonnen",
"Thufir Hawat",
),
],
"atomic": (False, True, None),
}
table.add_row(
"Syntax\nhighlighting\n&\npretty\nprinting",
comparison(
Syntax(code, "python3", line_numbers=True, indent_guides=True),
Pretty(pretty_data, indent_guides=True),
),
)
markdown_example = """\
# Markdown
Supports much of the *markdown* __syntax__!
- Headers
- Basic formatting: **bold**, *italic*, `code`
- Block quotes
- Lists, and more...
"""
table.add_row(
"Markdown", comparison("[cyan]" + markdown_example, Markdown(markdown_example))
)
table.add_row(
"+more!",
"""Progress bars, columns, styled logging handler, tracebacks, etc...""",
)
return table
if __name__ == "__main__": # pragma: no cover
console = Console(
file=io.StringIO(),
force_terminal=True,
)
test_card = make_test_card()
# Print once to warm cache
start = process_time()
console.print(test_card)
pre_cache_taken = round((process_time() - start) * 1000.0, 1)
console.file = io.StringIO()
start = process_time()
console.print(test_card)
taken = round((process_time() - start) * 1000.0, 1)
c = Console(record=True)
c.print(test_card)
print(f"rendered in {pre_cache_taken}ms (cold cache)")
print(f"rendered in {taken}ms (warm cache)")
from pipenv.patched.pip._vendor.rich.panel import Panel
console = Console()
sponsor_message = Table.grid(padding=1)
sponsor_message.add_column(style="green", justify="right")
sponsor_message.add_column(no_wrap=True)
sponsor_message.add_row(
"Textualize",
"[u blue link=https://github.com/textualize]https://github.com/textualize",
)
sponsor_message.add_row(
"Twitter",
"[u blue link=https://twitter.com/willmcgugan]https://twitter.com/willmcgugan",
)
intro_message = Text.from_markup(
"""\
We hope you enjoy using Rich!
Rich is maintained with [red]:heart:[/] by [link=https://www.textualize.io]Textualize.io[/]
- Will McGugan"""
)
message = Table.grid(padding=2)
message.add_column()
message.add_column(no_wrap=True)
message.add_row(intro_message, sponsor_message)
console.print(
Panel.fit(
message,
box=box.ROUNDED,
padding=(1, 2),
title="[b red]Thanks for trying out Rich!",
border_style="bright_blue",
),
justify="center",
)
| ColorBox |
python | pallets__jinja | src/jinja2/utils.py | {
"start": 22949,
"end": 23239
} | class ____:
"""A joining helper for templates."""
def __init__(self, sep: str = ", ") -> None:
self.sep = sep
self.used = False
def __call__(self) -> str:
if not self.used:
self.used = True
return ""
return self.sep
| Joiner |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/components/workspace_component/component.py | {
"start": 6652,
"end": 7898
} | class ____(dg.Model):
by_id: Annotated[
Sequence[str],
pydantic.Field(..., description="A list of connection IDs to include in the collection."),
]
def resolve_connection_selector(
context: dg.ResolutionContext, model
) -> Optional[Callable[[AirbyteConnection], bool]]:
if isinstance(model, str):
model = context.resolve_value(model)
if isinstance(model, AirbyteConnectionSelectorByName):
return lambda connection: connection.name in model.by_name
elif isinstance(model, AirbyteConnectionSelectorById):
return lambda connection: connection.id in model.by_id
else:
check.failed(f"Unknown connection target type: {type(model)}")
def resolve_airbyte_workspace_type(context: dg.ResolutionContext, model):
if isinstance(model, AirbyteWorkspaceModel):
return AirbyteWorkspace(**resolve_fields(model, AirbyteWorkspaceModel, context))
elif isinstance(model, AirbyteCloudWorkspaceModel):
return AirbyteCloudWorkspace(**resolve_fields(model, AirbyteCloudWorkspaceModel, context))
else:
check.failed(f"Unknown Airbyte workspace type: {type(model)}")
@public
@dg.scaffold_with(AirbyteWorkspaceComponentScaffolder)
| AirbyteConnectionSelectorById |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_validators.py | {
"start": 3420,
"end": 3773
} | class ____(BaseDataConditionGroupValidator):
conditions = serializers.ListField(required=True)
def validate_conditions(self, value: list[dict[str, Any]]) -> list[dict[str, Any]]:
for condition in value:
MockDataConditionValidator(data=condition).is_valid(raise_exception=True)
return value
| MockConditionGroupValidator |
python | google__pytype | pytype/tools/environment_test.py | {
"start": 2092,
"end": 2772
} | class ____(unittest.TestCase):
"""Tests for {do_x}_or_die() methods.
Since whether these functions complete successfully depends on one's
particular environment, these tests allow either succeeding or raising
SystemExit. Any other exception will cause a test failure.
"""
def _test(self, method, *args):
try:
method(*args)
except SystemExit:
pass
def test_pytype(self):
self._test(environment.check_pytype_or_die)
def test_python_exe(self):
self._test(environment.check_python_exe_or_die, 3.0)
def test_typeshed(self):
self._test(environment.initialize_typeshed_or_die)
if __name__ == '__main__':
unittest.main()
| TestDoXOrDie |
python | doocs__leetcode | solution/0200-0299/0278.First Bad Version/Solution.py | {
"start": 95,
"end": 350
} | class ____:
def firstBadVersion(self, n: int) -> int:
l, r = 1, n
while l < r:
mid = (l + r) >> 1
if isBadVersion(mid):
r = mid
else:
l = mid + 1
return l
| Solution |
python | celery__celery | t/unit/utils/test_time.py | {
"start": 8349,
"end": 9229
} | class ____:
def test_standard_tz(self):
tz = tzinfo()
wtz = make_aware(datetime.now(_timezone.utc), tz)
assert wtz.tzinfo == tz
def test_tz_when_zoneinfo(self):
tz = ZoneInfo('US/Eastern')
wtz = make_aware(datetime.now(_timezone.utc), tz)
assert wtz.tzinfo == tz
def test_maybe_make_aware(self):
aware = datetime.now(_timezone.utc).replace(tzinfo=timezone.utc)
assert maybe_make_aware(aware)
naive = datetime.now()
assert maybe_make_aware(naive)
assert maybe_make_aware(naive).tzinfo is ZoneInfo("UTC")
tz = ZoneInfo('US/Eastern')
eastern = datetime.now(_timezone.utc).replace(tzinfo=tz)
assert maybe_make_aware(eastern).tzinfo is tz
utcnow = datetime.now()
assert maybe_make_aware(utcnow, 'UTC').tzinfo is ZoneInfo("UTC")
| test_make_aware |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_dataproc.py | {
"start": 21399,
"end": 25676
} | class ____:
def test_submit_trigger_serialization(self, submit_trigger):
"""Test that the trigger serializes its configuration correctly."""
classpath, kwargs = submit_trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.dataproc.DataprocSubmitTrigger"
assert kwargs == {
"job_id": TEST_JOB_ID,
"project_id": TEST_PROJECT_ID,
"region": TEST_REGION,
"gcp_conn_id": TEST_GCP_CONN_ID,
"polling_interval_seconds": TEST_POLL_INTERVAL,
"cancel_on_kill": True,
"impersonation_chain": None,
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocSubmitTrigger.get_async_hook")
async def test_submit_trigger_run_success(self, mock_get_async_hook, submit_trigger):
"""Test the trigger correctly handles a job completion."""
mock_job = Job(status=JobStatus(state=JobStatus.State.DONE))
future = asyncio.Future()
future.set_result(mock_job)
mock_get_async_hook.return_value.get_job.return_value = future
async_gen = submit_trigger.run()
event = await async_gen.asend(None)
expected_event = TriggerEvent(
{"job_id": TEST_JOB_ID, "job_state": JobStatus.State.DONE.name, "job": Job.to_dict(mock_job)}
)
assert event.payload == expected_event.payload
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocSubmitTrigger.get_async_hook")
async def test_submit_trigger_run_error(self, mock_get_async_hook, submit_trigger):
"""Test the trigger correctly handles a job error."""
mock_job = Job(status=JobStatus(state=JobStatus.State.ERROR))
future = asyncio.Future()
future.set_result(mock_job)
mock_get_async_hook.return_value.get_job.return_value = future
async_gen = submit_trigger.run()
event = await async_gen.asend(None)
expected_event = TriggerEvent(
{"job_id": TEST_JOB_ID, "job_state": JobStatus.State.ERROR.name, "job": Job.to_dict(mock_job)}
)
assert event.payload == expected_event.payload
@pytest.mark.asyncio
@pytest.mark.parametrize("is_safe_to_cancel", [True, False])
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocSubmitTrigger.get_async_hook")
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocSubmitTrigger.get_sync_hook")
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocSubmitTrigger.safe_to_cancel")
async def test_submit_trigger_run_cancelled(
self, mock_safe_to_cancel, mock_get_sync_hook, mock_get_async_hook, submit_trigger, is_safe_to_cancel
):
"""Test the trigger correctly handles an asyncio.CancelledError."""
mock_safe_to_cancel.return_value = is_safe_to_cancel
mock_async_hook = mock_get_async_hook.return_value
mock_async_hook.get_job.side_effect = asyncio.CancelledError
mock_sync_hook = mock_get_sync_hook.return_value
mock_sync_hook.cancel_job = mock.MagicMock()
async_gen = submit_trigger.run()
try:
await async_gen.asend(None)
# Should raise StopAsyncIteration if no more items to yield
await async_gen.asend(None)
except asyncio.CancelledError:
# Handle the cancellation as expected
pass
except StopAsyncIteration:
# The generator should be properly closed after handling the cancellation
pass
except Exception as e:
# Catch any other exceptions that should not occur
pytest.fail(f"Unexpected exception raised: {e}")
# Check if cancel_job was correctly called
if submit_trigger.cancel_on_kill and is_safe_to_cancel:
mock_sync_hook.cancel_job.assert_called_once_with(
job_id=submit_trigger.job_id,
project_id=submit_trigger.project_id,
region=submit_trigger.region,
)
else:
mock_sync_hook.cancel_job.assert_not_called()
# Clean up the generator
await async_gen.aclose()
| TestDataprocSubmitTrigger |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/gcs_upload.py | {
"start": 1226,
"end": 1330
} | class ____:
id: str
uploaded: bool
blob_id: Optional[str]
@dataclass(frozen=True)
| UploadedFile |
python | matplotlib__matplotlib | lib/matplotlib/transforms.py | {
"start": 72455,
"end": 73563
} | class ____(Affine2DBase):
"""
A special class that does one thing, the identity transform, in a
fast way.
"""
_mtx = np.identity(3)
def frozen(self):
# docstring inherited
return self
__str__ = _make_str_method()
def get_matrix(self):
# docstring inherited
return self._mtx
def transform(self, values):
# docstring inherited
return np.asanyarray(values)
def transform_affine(self, values):
# docstring inherited
return np.asanyarray(values)
def transform_non_affine(self, values):
# docstring inherited
return np.asanyarray(values)
def transform_path(self, path):
# docstring inherited
return path
def transform_path_affine(self, path):
# docstring inherited
return path
def transform_path_non_affine(self, path):
# docstring inherited
return path
def get_affine(self):
# docstring inherited
return self
def inverted(self):
# docstring inherited
return self
| IdentityTransform |
python | getsentry__sentry | src/sentry/integrations/slack/service.py | {
"start": 4604,
"end": 26257
} | class ____:
"""
Slack service is the main entry point for all business logic related to Slack.
We will consolidate the Slack logic in here to create an easier interface to interact with, and not worry about
figuring out which specific class or object you need, how to create them, in which order, and what to call.
This service will have plentiful logging, error catching and handling, and be mindful of performance impacts.
There will also be monitoring and alerting in place to give more visibility to the main business logic methods.
"""
def __init__(
self,
issue_alert_repository: IssueAlertNotificationMessageRepository,
notification_action_repository: NotificationActionNotificationMessageRepository,
message_block_builder: BlockSlackMessageBuilder,
activity_thread_notification_handlers: dict[ActivityType, type[GroupActivityNotification]],
logger: Logger,
) -> None:
self._issue_alert_repository = issue_alert_repository
self._notification_action_repository = notification_action_repository
self._slack_block_builder = message_block_builder
self._activity_thread_notification_handlers = activity_thread_notification_handlers
self._logger = logger
@classmethod
def default(cls) -> SlackService:
return SlackService(
issue_alert_repository=get_default_issue_alert_repository(),
notification_action_repository=NotificationActionNotificationMessageRepository.default(),
message_block_builder=BlockSlackMessageBuilder(),
activity_thread_notification_handlers=DEFAULT_SUPPORTED_ACTIVITY_THREAD_NOTIFICATION_HANDLERS,
logger=_default_logger,
)
def notify_all_threads_for_activity(self, activity: Activity) -> None:
"""
For an activity related to an issue group, send notifications in a Slack thread to all parent notifications for
that specific group and project.
If the group is not associated with an activity, return early as there's nothing to do.
If the user is not associated with an activity, return early as we only care about user activities.
"""
log_params = {
"activity_id": activity.id,
"project_id": activity.project.id,
}
if activity.group is None:
self._logger.debug(
"no group associated on the activity, nothing to do",
extra=log_params,
)
return None
log_params["group_id"] = activity.group.id
log_params["organization_id"] = activity.group.organization.id
uptime_resolved_notification = (
activity.type == ActivityType.SET_RESOLVED.value
and activity.group.issue_type.type_id == UptimeDomainCheckFailure.type_id
)
metric_resolved_notification = (
activity.type == ActivityType.SET_RESOLVED.value
and activity.group.issue_type.type_id == MetricIssue.type_id
)
if activity.user_id is None and metric_resolved_notification:
# For Metric Issue resolved notifications triggered by the workflow engine, we don't need to send a notification here -
# It will be sent via action.trigger.
self._logger.info(
"metric resolved notification, will be sent via action.trigger - nothing to do here",
extra=log_params,
)
return None
if activity.user_id is None and not uptime_resolved_notification:
# This is a machine/system update, so we don't need to send a notification
self._logger.debug(
"machine/system updates are ignored at this time, nothing to do",
extra=log_params,
)
return None
organization = activity.group.organization
organization_id = organization.id
# If the feature is turned off for the organization, exit early as there's nothing to do
if not OrganizationOption.objects.get_value(
organization=organization,
key="sentry:issue_alerts_thread_flag",
default=ISSUE_ALERTS_THREAD_DEFAULT,
):
self._logger.info(
"feature is turned off for this organization",
extra=log_params,
)
return None
# The same message is sent to all the threads, so this needs to only happen once
notification_to_send = self._get_notification_message_to_send(activity=activity)
if not notification_to_send:
self._logger.info(
"notification to send is invalid",
extra=log_params,
)
return None
# TODO: This will return None if there are multiple integrations for the same organization, meaning if there are multiple slack installations for the same organization.
# We need to associate integration with the threading logic in notif platform so we don't run into this issue.
integration = get_active_integration_for_organization(
organization_id=organization_id,
provider=ExternalProviderEnum.SLACK,
)
if integration is None:
self._logger.info(
"no integration found for activity",
extra=log_params,
)
return None
slack_client = SlackSdkClient(integration_id=integration.id)
self._notify_all_threads_for_activity(
activity=activity,
group=activity.group,
notification_to_send=notification_to_send,
client=slack_client,
)
def _notify_all_threads_for_activity(
self,
activity: Activity,
group: Group,
notification_to_send: str,
client: SlackSdkClient,
) -> None:
with MessagingInteractionEvent(
interaction_type=MessagingInteractionType.GET_PARENT_NOTIFICATION,
spec=SlackMessagingSpec(),
).capture() as lifecycle:
lifecycle.add_extras(
{
"activity_id": activity.id,
"group_id": group.id,
"project_id": activity.project.id,
"organization_id": group.organization.id,
}
)
use_open_period_start = False
parent_notifications: Generator[
NotificationActionNotificationMessage | IssueAlertNotificationMessage
]
will_fire_workflow_actions = should_fire_workflow_actions(
group.organization, group.type
)
if group.issue_category == GroupCategory.UPTIME:
use_open_period_start = True
open_period_start = open_period_start_for_group(group)
if will_fire_workflow_actions:
parent_notifications = self._notification_action_repository.get_all_parent_notification_messages_by_filters(
group_ids=[group.id],
open_period_start=open_period_start,
)
else:
parent_notifications = self._issue_alert_repository.get_all_parent_notification_messages_by_filters(
group_ids=[group.id],
project_ids=[activity.project.id],
open_period_start=open_period_start,
)
else:
if will_fire_workflow_actions:
parent_notifications = self._notification_action_repository.get_all_parent_notification_messages_by_filters(
group_ids=[group.id],
)
else:
parent_notifications = self._issue_alert_repository.get_all_parent_notification_messages_by_filters(
group_ids=[group.id],
project_ids=[activity.project.id],
)
# We don't wrap this in a lifecycle because _handle_parent_notification is already wrapped in a lifecycle
parent_notification_count = 0
for parent_notification in parent_notifications:
with MessagingInteractionEvent(
interaction_type=MessagingInteractionType.SEND_ACTIVITY_NOTIFICATION,
spec=SlackMessagingSpec(),
).capture() as lifecycle:
parent_notification_count += 1
lifecycle.add_extras(
{
"activity_id": activity.id,
"parent_notification_id": parent_notification.id,
"notification_to_send": notification_to_send,
"integration_id": client.integration_id,
"group_id": group.id,
"project_id": activity.project.id,
}
)
try:
if not parent_notification.message_identifier:
self._logger.info(
"parent notification does not have a message identifier, skipping",
extra={
"parent_notification_id": parent_notification.id,
},
)
lifecycle.record_failure(
"parent notification does not have a message identifier, skipping"
)
continue
if isinstance(parent_notification, NotificationActionNotificationMessage):
channel_id = (
self._get_channel_id_from_parent_notification_notification_action(
parent_notification
)
)
else:
channel_id = self._get_channel_id_from_parent_notification(
parent_notification
)
self._send_notification_to_slack_channel(
channel_id=channel_id,
message_identifier=parent_notification.message_identifier,
notification_to_send=notification_to_send,
client=client,
)
except Exception as err:
if isinstance(err, SlackApiError):
record_lifecycle_termination_level(lifecycle, err)
else:
lifecycle.record_failure(err)
if use_open_period_start and parent_notification_count > 1:
sentry_sdk.capture_message(
f"slack.notify_all_threads_for_activity.multiple_parent_notifications_for_single_open_period Activity: {activity.id}, Group: {group.id}, Project: {activity.project.id}, Integration: {client.integration_id}, Parent Notification Count: {parent_notification_count}"
)
self._logger.error(
"multiple parent notifications found for single open period",
extra={
"activity_id": activity.id,
"parent_notification_count": parent_notification_count,
},
)
def _get_channel_id_from_parent_notification_notification_action(
self,
parent_notification: NotificationActionNotificationMessage,
) -> str:
"""Get the channel ID from a parent notification by looking up the rule action details."""
if not parent_notification.action:
raise ActionDataError(
f"parent notification {parent_notification.id} does not have an action"
)
target_id = parent_notification.action.config.get("target_identifier")
if not target_id:
raise ActionDataError(
f"parent notification {parent_notification.id} does not have a target_identifier"
)
return str(target_id)
def _get_channel_id_from_parent_notification(
self,
parent_notification: IssueAlertNotificationMessage,
) -> str:
"""Get the channel ID from a parent notification by looking up the rule action details."""
if not parent_notification.rule_fire_history:
raise RuleDataError(
f"parent notification {parent_notification.id} does not have a rule_fire_history"
)
if not parent_notification.rule_action_uuid:
raise RuleDataError(
f"parent notification {parent_notification.id} does not have a rule_action_uuid"
)
rule: Rule = parent_notification.rule_fire_history.rule
rule_action = rule.get_rule_action_details_by_uuid(parent_notification.rule_action_uuid)
if not rule_action:
raise RuleDataError(
f"failed to find rule action {parent_notification.rule_action_uuid} for rule {rule.id}"
)
channel_id: str | None = rule_action.get("channel_id", None)
if not channel_id:
raise RuleDataError(
f"failed to get channel_id for rule {rule.id} and rule action {parent_notification.rule_action_uuid}"
)
return channel_id
def _send_notification_to_slack_channel(
self,
channel_id: str,
message_identifier: str,
notification_to_send: str,
client: SlackSdkClient,
) -> None:
block = self._slack_block_builder.get_markdown_block(text=notification_to_send)
payload = {"channel": channel_id, "thread_ts": message_identifier}
slack_payload = self._slack_block_builder._build_blocks(
block, fallback_text=notification_to_send
)
payload.update(slack_payload)
# TODO (Yash): Users should not have to remember to do this, interface should handle serializing the field
json_blocks = orjson.dumps(payload.get("blocks")).decode()
payload["blocks"] = json_blocks
client.chat_postMessage(
channel=channel_id,
thread_ts=message_identifier,
text=notification_to_send,
blocks=json_blocks,
unfurl_links=False,
unfurl_media=False,
)
def _get_notification_message_to_send(self, activity: Activity) -> str | None:
"""
Get the notification message that we need to send in a slack thread based on the activity type.
Apparently the get_context is a very computation heavy call, so make sure to only call this once.
"""
try:
activity_type: ActivityType = ActivityType(activity.type)
except ValueError as err:
self._logger.info(
"there was an error trying to get activity type, assuming activity is unsupported",
exc_info=err,
extra={
"error": str(err),
"activity_id": activity.id,
"activity_type": activity.type,
},
)
return None
notification_cls = self._activity_thread_notification_handlers.get(activity_type, None)
if not notification_cls:
self._logger.info(
"activity type is not currently supported",
extra={
"activity_id": activity.id,
"activity_type": activity.type,
},
)
return None
notification_obj = notification_cls(activity=activity)
context = notification_obj.get_context(provider=ExternalProviders.SLACK)
text_description = context.get("text_description", None)
if not text_description:
self._logger.info(
"context did not contain text_description",
extra={
"activity_id": activity.id,
"notification_type": activity.type,
"notification_cls": notification_cls.__name__,
"context": context,
},
)
return None
return text_description
def notify_recipient(
self,
notification: BaseNotification,
recipient: Actor,
attachments: SlackBlock,
channel: str,
integration: Integration | RpcIntegration,
shared_context: Mapping[str, Any],
) -> None:
from sentry.integrations.slack.tasks.post_message import post_message, post_message_control
"""Send an "activity" or "alert rule" notification to a Slack user or team, but NOT to a channel directly.
This is used in the send_notification_as_slack function."""
with sentry_sdk.start_span(op="notification.send_slack", name="notify_recipient"):
# Make a local copy to which we can append.
local_attachments = copy(attachments)
text = notification.get_notification_title(ExternalProviders.SLACK, shared_context)
blocks: list[SlackBlock] = []
if text:
blocks.append(BlockSlackMessageBuilder.get_markdown_block(text))
attachment_blocks = local_attachments.get("blocks")
if attachment_blocks:
for attachment in attachment_blocks:
blocks.append(attachment)
if len(blocks) >= 2 and blocks[1].get("block_id"):
# block id needs to be in the first block
first_block = blocks[0]
first_block["block_id"] = blocks[1]["block_id"]
del blocks[1]["block_id"]
additional_attachment = get_additional_attachment(
integration, notification.organization
)
if additional_attachment:
for block in additional_attachment:
blocks.append(block)
if (
not text
): # if there isn't a notification title, try using message description as fallback
text = notification.get_message_description(recipient, ExternalProviders.SLACK)
payload = {
"channel": channel,
"unfurl_links": False,
"unfurl_media": False,
"text": text if text else "",
"blocks": orjson.dumps(blocks).decode(),
}
callback_id = local_attachments.get("callback_id")
if callback_id:
# callback_id is now at the same level as blocks, rather than within attachments
if isinstance(callback_id, str):
payload["callback_id"] = callback_id
else:
payload["callback_id"] = orjson.dumps(
local_attachments.get("callback_id")
).decode()
post_message_task = post_message
if SiloMode.get_current_mode() == SiloMode.CONTROL:
post_message_task = post_message_control
log_params = {
"notification": str(notification),
"recipient": recipient.id,
"channel_id": channel,
}
post_message_task.apply_async(
kwargs={
"integration_id": integration.id,
"payload": payload,
"log_error_message": "slack.notify_recipient.fail",
"log_params": log_params,
}
)
# recording data outside of span
notification.record_notification_sent(recipient, ExternalProviders.SLACK)
def get_attachments(
self,
notification: BaseNotification,
recipient: Actor,
shared_context: Mapping[str, Any],
extra_context_by_actor: Mapping[Actor, Mapping[str, Any]] | None,
) -> SlackBlock:
"""Get the message to send in notify_recipient"""
extra_context = (
extra_context_by_actor[recipient] if extra_context_by_actor and recipient else {}
)
context = get_context(notification, recipient, shared_context, extra_context)
cls = get_message_builder(notification.message_builder)
attachments = cls(notification, context, recipient).build()
return attachments
def send_message_to_slack_channel(
self,
integration_id: int,
payload: Mapping[str, Any],
log_error_message: str,
log_params: Mapping[str, Any],
) -> None:
"""Execution of send_notification_as_slack."""
client = SlackSdkClient(integration_id=integration_id)
with MessagingInteractionEvent(
interaction_type=MessagingInteractionType.SEND_GENERIC_NOTIFICATION,
spec=SlackMessagingSpec(),
).capture() as lifecycle:
try:
lifecycle.add_extras({"integration_id": integration_id})
client.chat_postMessage(
blocks=str(payload.get("blocks", "")),
text=str(payload.get("text", "")),
channel=str(payload.get("channel", "")),
unfurl_links=False,
unfurl_media=False,
callback_id=str(payload.get("callback_id", "")),
)
except SlackApiError as e:
lifecycle.add_extras(
{k: str(v) for k, v in log_params.items() if isinstance(v, (int, str))}
)
record_lifecycle_termination_level(lifecycle, e)
| SlackService |
python | coleifer__peewee | peewee.py | {
"start": 25711,
"end": 26489
} | class ____(object):
def __init__(self, table, database):
self.table = table
self.database = database
def __call__(self, fn):
@wraps(fn)
def inner(*args, **kwargs):
with _BoundTableContext(self.table, self.database):
return fn(*args, **kwargs)
return inner
def __enter__(self):
self._orig_database = self.table._database
self.table.bind(self.database)
if self.table._model is not None:
self.table._model.bind(self.database)
return self.table
def __exit__(self, exc_type, exc_val, exc_tb):
self.table.bind(self._orig_database)
if self.table._model is not None:
self.table._model.bind(self._orig_database)
| _BoundTableContext |
python | tensorflow__tensorflow | tensorflow/python/training/monitored_session_test.py | {
"start": 19583,
"end": 24023
} | class ____(test.TestCase):
"""Test distribute coordinator controls summary saving and checkpointing."""
def test_summary_hook_enabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_save_summary=True), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_enabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
summaries = latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_summary_hook_disabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_save_summary=False), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_disabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
# No summary is saved.
summaries = latest_summaries(logdir)
self.assertEqual(len(summaries), 0)
def test_checkpoint_hook_enabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_checkpoint=True), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_enabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(100, session.run(gstep))
def test_checkpoint_hook_disabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_checkpoint=False), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# No checkpoint is saved.
checkpoint = checkpoint_management.latest_checkpoint(logdir)
self.assertIsNone(checkpoint)
def test_checkpoint_hook_enable_on_non_chief_with_collective_ops(self):
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
strategy.extended._is_chief = False
context = distribute_coordinator._WorkerContext(strategy, None, 'worker', 1)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# No checkpoint is saved.
checkpoint = checkpoint_management.latest_checkpoint(logdir)
self.assertIsNone(checkpoint)
# But saved to a temporary directory.
checkpoint = checkpoint_management.latest_checkpoint(
os.path.join(logdir, 'tmp_worker_1'))
self.assertIsNotNone(checkpoint)
| MonitoredTrainingSessionWithDistributeCoordinatorTest |
python | sqlalchemy__sqlalchemy | test/ext/test_associationproxy.py | {
"start": 94426,
"end": 94628
} | class ____(
ScalarRemoveTest, fixtures.DeclarativeMappedTest
):
run_create_tables = None
useobject = False
cascade_scalar_deletes = False
uselist = True
| ScalarRemoveListScalarNoCascade |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 22364,
"end": 25989
} | class ____(DashboardComponent):
"""Size of open data transfers from/to other workers per worker"""
@log_errors
def __init__(self, scheduler, width=600, **kwargs):
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"escaped_worker": [],
"transfer_incoming_bytes": [],
"transfer_outgoing_bytes": [],
"worker": [],
"y_incoming": [],
"y_outgoing": [],
}
)
self.root = figure(
title=f"Bytes transferring: {format_bytes(0)}",
tools="",
width=int(width / 2),
name="workers_transfer_bytes",
min_border_bottom=50,
**kwargs,
)
# transfer_incoming_bytes
self.root.hbar(
name="transfer_incoming_bytes",
y="y_incoming",
right="transfer_incoming_bytes",
line_color=None,
left=0,
height=0.5,
fill_color="red",
source=self.source,
)
# transfer_outgoing_bytes
self.root.hbar(
name="transfer_outgoing_bytes",
y="y_outgoing",
right="transfer_outgoing_bytes",
line_color=None,
left=0,
height=0.5,
fill_color="blue",
source=self.source,
)
self.root.axis[0].ticker = BasicTicker(**TICKS_1024)
self.root.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b")
self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION
self.root.xaxis.minor_tick_line_alpha = 0
self.root.x_range = Range1d(start=0)
self.root.yaxis.visible = False
self.root.ygrid.visible = False
self.root.toolbar_location = None
tap = TapTool(callback=OpenURL(url="./info/worker/@escaped_worker.html"))
hover = HoverTool(
tooltips=[
("Worker", "@worker"),
("Incoming", "@transfer_incoming_bytes{0.00 b}"),
("Outgoing", "@transfer_outgoing_bytes{0.00 b}"),
],
point_policy="follow_mouse",
)
self.root.add_tools(hover, tap)
@without_property_validation
@log_errors
def update(self):
wss = self.scheduler.workers.values()
h = 0.1
y_incoming = [i + 0.75 + i * h for i in range(len(wss))]
y_outgoing = [i + 0.25 + i * h for i in range(len(wss))]
transfer_incoming_bytes = [
ws.metrics["transfer"]["incoming_bytes"] for ws in wss
]
transfer_outgoing_bytes = [
ws.metrics["transfer"]["outgoing_bytes"] for ws in wss
]
workers = [ws.address for ws in wss]
escaped_workers = [url_escape(worker) for worker in workers]
if wss:
x_limit = max(
max(transfer_incoming_bytes),
max(transfer_outgoing_bytes),
max(ws.memory_limit for ws in wss),
)
else:
x_limit = 0
self.root.x_range.end = x_limit
result = {
"escaped_worker": escaped_workers,
"transfer_incoming_bytes": transfer_incoming_bytes,
"transfer_outgoing_bytes": transfer_outgoing_bytes,
"worker": workers,
"y_incoming": y_incoming,
"y_outgoing": y_outgoing,
}
self.root.title.text = (
f"Bytes transferring: {format_bytes(sum(transfer_incoming_bytes))}"
)
update(self.source, result)
| WorkersTransferBytes |
python | huggingface__transformers | src/transformers/models/pop2piano/modeling_pop2piano.py | {
"start": 6642,
"end": 17100
} | class ____(nn.Module):
def __init__(
self,
config: Pop2PianoConfig,
has_relative_attention_bias=False,
layer_idx: Optional[int] = None,
):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.gradient_checkpointing = False
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
def compute_bias(self, query_length, key_length, device=None, cache_position=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
if cache_position is None:
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
else:
context_position = cache_position[:, None].to(device)
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_values=None,
query_length=None,
use_cache=False,
output_attentions=False,
cache_position=None,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder)
batch_size, seq_length = hidden_states.shape[:2]
# if key_value_states are provided this layer is used as a cross-attention layer for the decoder
is_cross_attention = key_value_states is not None
query_states = self.q(hidden_states)
query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
# Check is encoder-decoder model is being used. Otherwise we'll get `DynamicCache`
is_updated = False
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k(current_states)
value_states = self.v(current_states)
key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
# compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
scores = torch.matmul(query_states, key_states.transpose(3, 2))
if position_bias is None:
key_length = key_states.shape[-2]
# cache position is 0-indexed so we add 1 to get the real length of queries (aka with past)
real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(
real_seq_length, key_length, device=scores.device, cache_position=cache_position
)
position_bias = position_bias[:, :, -seq_length:, :]
if mask is not None:
causal_mask = mask[:, :, :, : key_states.shape[-2]]
position_bias = position_bias + causal_mask
position_bias_masked = position_bias
scores += position_bias_masked
# (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(batch_size, -1, self.inner_dim)
attn_output = self.o(attn_output)
outputs = (attn_output, position_bias)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->Pop2Piano,t5->pop2piano
| Pop2PianoAttention |
python | pytest-dev__pytest | testing/test_mark.py | {
"start": 14690,
"end": 24020
} | class ____:
def test_merging_markers_deep(self, pytester: Pytester) -> None:
# issue 199 - propagate markers into nested classes
p = pytester.makepyfile(
"""
import pytest
class TestA(object):
pytestmark = pytest.mark.a
def test_b(self):
assert True
class TestC(object):
# this one didn't get marked
def test_d(self):
assert True
"""
)
items, _rec = pytester.inline_genitems(p)
for item in items:
print(item, item.keywords)
assert [x for x in item.iter_markers() if x.name == "a"]
def test_mark_decorator_subclass_does_not_propagate_to_base(
self, pytester: Pytester
) -> None:
p = pytester.makepyfile(
"""
import pytest
@pytest.mark.a
class Base(object): pass
@pytest.mark.b
class Test1(Base):
def test_foo(self): pass
class Test2(Base):
def test_bar(self): pass
"""
)
items, _rec = pytester.inline_genitems(p)
self.assert_markers(items, test_foo=("a", "b"), test_bar=("a",))
def test_mark_should_not_pass_to_siebling_class(self, pytester: Pytester) -> None:
"""#568"""
p = pytester.makepyfile(
"""
import pytest
class TestBase(object):
def test_foo(self):
pass
@pytest.mark.b
class TestSub(TestBase):
pass
class TestOtherSub(TestBase):
pass
"""
)
items, _rec = pytester.inline_genitems(p)
base_item, sub_item, sub_item_other = items
print(items, [x.nodeid for x in items])
# new api segregates
assert not list(base_item.iter_markers(name="b"))
assert not list(sub_item_other.iter_markers(name="b"))
assert list(sub_item.iter_markers(name="b"))
def test_mark_decorator_baseclasses_merged(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import pytest
@pytest.mark.a
class Base(object): pass
@pytest.mark.b
class Base2(Base): pass
@pytest.mark.c
class Test1(Base2):
def test_foo(self): pass
class Test2(Base2):
@pytest.mark.d
def test_bar(self): pass
"""
)
items, _rec = pytester.inline_genitems(p)
self.assert_markers(items, test_foo=("a", "b", "c"), test_bar=("a", "b", "d"))
def test_mark_closest(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import pytest
@pytest.mark.c(location="class")
class Test:
@pytest.mark.c(location="function")
def test_has_own(self):
pass
def test_has_inherited(self):
pass
"""
)
items, _rec = pytester.inline_genitems(p)
has_own, has_inherited = items
has_own_marker = has_own.get_closest_marker("c")
has_inherited_marker = has_inherited.get_closest_marker("c")
assert has_own_marker is not None
assert has_inherited_marker is not None
assert has_own_marker.kwargs == {"location": "function"}
assert has_inherited_marker.kwargs == {"location": "class"}
assert has_own.get_closest_marker("missing") is None
def test_mark_with_wrong_marker(self, pytester: Pytester) -> None:
reprec = pytester.inline_runsource(
"""
import pytest
class pytestmark(object):
pass
def test_func():
pass
"""
)
values = reprec.getfailedcollections()
assert len(values) == 1
assert "TypeError" in str(values[0].longrepr)
def test_mark_dynamically_in_funcarg(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
@pytest.fixture
def arg(request):
request.applymarker(pytest.mark.hello)
def pytest_terminal_summary(terminalreporter):
values = terminalreporter.stats['passed']
terminalreporter._tw.line("keyword: %s" % values[0].keywords)
"""
)
pytester.makepyfile(
"""
def test_func(arg):
pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["keyword: *hello*"])
def test_no_marker_match_on_unmarked_names(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import pytest
@pytest.mark.shouldmatch
def test_marked():
assert 1
def test_unmarked():
assert 1
"""
)
reprec = pytester.inline_run("-m", "test_unmarked", p)
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) + len(skipped) + len(failed) == 0
dlist = reprec.getcalls("pytest_deselected")
deselected_tests = dlist[0].items
assert len(deselected_tests) == 2
def test_keywords_at_node_level(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope="session", autouse=True)
def some(request):
request.keywords["hello"] = 42
assert "world" not in request.keywords
@pytest.fixture(scope="function", autouse=True)
def funcsetup(request):
assert "world" in request.keywords
assert "hello" in request.keywords
@pytest.mark.world
def test_function():
pass
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
def test_keyword_added_for_session(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
def pytest_collection_modifyitems(session):
session.add_marker("mark1")
session.add_marker(pytest.mark.mark2)
session.add_marker(pytest.mark.mark3)
pytest.raises(ValueError, lambda:
session.add_marker(10))
"""
)
pytester.makepyfile(
"""
def test_some(request):
assert "mark1" in request.keywords
assert "mark2" in request.keywords
assert "mark3" in request.keywords
assert 10 not in request.keywords
marker = request.node.get_closest_marker("mark1")
assert marker.name == "mark1"
assert marker.args == ()
assert marker.kwargs == {}
"""
)
reprec = pytester.inline_run("-m", "mark1")
reprec.assertoutcome(passed=1)
def assert_markers(self, items, **expected) -> None:
"""Assert that given items have expected marker names applied to them.
expected should be a dict of (item name -> seq of expected marker names).
Note: this could be moved to ``pytester`` if proven to be useful
to other modules.
"""
items = {x.name: x for x in items}
for name, expected_markers in expected.items():
markers = {m.name for m in items[name].iter_markers()}
assert markers == set(expected_markers)
@pytest.mark.filterwarnings("ignore")
def test_mark_from_parameters(self, pytester: Pytester) -> None:
"""#1540"""
pytester.makepyfile(
"""
import pytest
pytestmark = pytest.mark.skipif(True, reason='skip all')
# skipifs inside fixture params
params = [pytest.mark.skipif(False, reason='dont skip')('parameter')]
@pytest.fixture(params=params)
def parameter(request):
return request.param
def test_1(parameter):
assert True
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(skipped=1)
def test_reevaluate_dynamic_expr(self, pytester: Pytester) -> None:
"""#7360"""
py_file1 = pytester.makepyfile(
test_reevaluate_dynamic_expr1="""
import pytest
skip = True
@pytest.mark.skipif("skip")
def test_should_skip():
assert True
"""
)
py_file2 = pytester.makepyfile(
test_reevaluate_dynamic_expr2="""
import pytest
skip = False
@pytest.mark.skipif("skip")
def test_should_not_skip():
assert True
"""
)
file_name1 = os.path.basename(py_file1)
file_name2 = os.path.basename(py_file2)
reprec = pytester.inline_run(file_name1, file_name2)
reprec.assertoutcome(passed=1, skipped=1)
| TestFunctional |
python | catalyst-team__catalyst | catalyst/callbacks/optuna.py | {
"start": 303,
"end": 3003
} | class ____(Callback):
"""Optuna callback for pruning unpromising runs.
This callback can be used for early stopping (pruning) unpromising runs.
Args:
trial: Optuna.Trial for the experiment.
loader_key: loader key for best model selection
(based on metric score over the dataset)
metric_key: metric key for best model selection
(based on metric score over the dataset)
minimize: boolean flag to minimize the required metric
min_delta: minimal delta for metric improve
.. code-block:: python
import optuna
from catalyst.dl import SupervisedRunner, OptunaPruningCallback
# some python code ...
def objective(trial: optuna.Trial):
# standard optuna code for model and/or optimizer suggestion ...
runner = SupervisedRunner()
runner.train(
model=model,
loaders=loaders,
criterion=criterion,
optimizer=optimizer,
callbacks=[
OptunaPruningCallback(trial)
# some other callbacks ...
],
num_epochs=num_epochs,
)
return runner.best_valid_metrics[runner.valid_metric]
study = optuna.create_study()
study.optimize(objective, n_trials=100, timeout=600)
"""
def __init__(
self,
trial: "optuna.Trial",
loader_key: str,
metric_key: str,
minimize: bool,
min_delta: float = 1e-6,
):
"""Init."""
super().__init__(CallbackOrder.External)
self.trial = trial
self.loader_key = loader_key
self.metric_key = metric_key
self.minimize = minimize
self.is_better = MetricHandler(minimize=minimize, min_delta=min_delta)
self.best_score = None
def on_epoch_end(self, runner: "IRunner"):
"""Considering prune or not to prune current run at the end of the epoch.
Args:
runner: runner for current experiment
Raises:
TrialPruned: if current run should be pruned
"""
score = runner.epoch_metrics[self.loader_key][self.metric_key]
if self.best_score is None or self.is_better(score, self.best_score):
self.best_score = score
self.trial.report(score, step=runner.epoch_step)
# @TODO: hack
self.trial.best_score = self.best_score
if self.trial.should_prune():
message = "Trial was pruned at epoch {}.".format(runner.epoch_step)
raise optuna.TrialPruned(message)
__all__ = ["OptunaPruningCallback"]
| OptunaPruningCallback |
python | apache__airflow | helm-tests/tests/helm_tests/other/test_resource_quota.py | {
"start": 900,
"end": 1763
} | class ____:
"""Tests resource quota."""
def test_resource_quota_template(self):
docs = render_chart(
values={
"quotas": {
"configmaps": "10",
"persistentvolumeclaims": "4",
"pods": "4",
"replicationcontrollers": "20",
"secrets": "10",
"services": "10",
}
},
show_only=["templates/resourcequota.yaml"],
)
assert jmespath.search("kind", docs[0]) == "ResourceQuota"
assert jmespath.search("spec.hard.replicationcontrollers", docs[0]) == "20"
def test_resource_quota_are_not_added_by_default(self):
docs = render_chart(
show_only=["templates/resourcequota.yaml"],
)
assert docs == []
| TestResourceQuota |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/errors.py | {
"start": 16449,
"end": 16730
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "UnauthorizedError"
def __init__(self, message=None):
super().__init__()
self.message = message if message else "Authorization failed"
| GrapheneUnauthorizedError |
python | pytorch__pytorch | torch/_inductor/compile_fx_ext.py | {
"start": 4268,
"end": 4920
} | class ____(contextlib.ExitStack):
"""
Helper for _LoweringSerializer.patch()
"""
def __init__(self, lowering: _LoweringSerializer) -> None:
super().__init__()
self.lowering = lowering
@override
def __enter__(self) -> Self:
super().__enter__()
from . import lowering
for k, v in lowering.lowerings.items():
name = str(k)
if name in self.lowering.fallbacks:
if not _is_fallback_handler(v):
self.enter_context(lowering.force_fallback(k)) # type: ignore[arg-type]
return self
@dataclass
| _LoweringSerializerContextManager |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 4350,
"end": 4437
} | class ____(models.Model):
f = models.FloatField(blank=True, null=True)
| WithFloatField |
python | dateutil__dateutil | src/dateutil/relativedelta.py | {
"start": 336,
"end": 24903
} | class ____(object):
"""
The relativedelta type is designed to be applied to an existing datetime and
can replace specific components of that datetime, or represents an interval
of time.
It is based on the specification of the excellent work done by M.-A. Lemburg
in his
`mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
However, notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
There are two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes::
relativedelta(datetime1, datetime2)
The second one is passing it any number of the following keyword arguments::
relativedelta(arg1=x,arg2=y,arg3=z...)
year, month, day, hour, minute, second, microsecond:
Absolute information (argument is singular); adding or subtracting a
relativedelta with absolute information does not perform an arithmetic
operation, but rather REPLACES the corresponding value in the
original datetime with the value(s) in relativedelta.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative (argument is plural); adding
or subtracting a relativedelta with relative information performs
the corresponding arithmetic operation on the original datetime value
with the information in the relativedelta.
weekday:
One of the weekday instances (MO, TU, etc) available in the
relativedelta module. These instances may receive a parameter N,
specifying the Nth weekday, which could be positive or negative
(like MO(+1) or MO(-2)). Not specifying it is the same as specifying
+1. You can also use an integer, where 0=MO. This argument is always
relative e.g. if the calculated date is already Monday, using MO(1)
or MO(-1) won't change the day. To effectively make it absolute, use
it in combination with the day argument (e.g. day=1, MO(1) for first
Monday of the month).
leapdays:
Will add given days to the date found, if year is a leap
year, and the date found is post 28 of february.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
There are relative and absolute forms of the keyword
arguments. The plural is relative, and the singular is
absolute. For each argument in the order below, the absolute form
is applied first (by setting each attribute to that value) and
then the relative form (by adding the value to the attribute).
The order of attributes considered when this relativedelta is
added to a datetime is:
1. Year
2. Month
3. Day
4. Hours
5. Minutes
6. Seconds
7. Microseconds
Finally, weekday is applied, using the rule described above.
For example
>>> from datetime import datetime
>>> from dateutil.relativedelta import relativedelta, MO
>>> dt = datetime(2018, 4, 9, 13, 37, 0)
>>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
>>> dt + delta
datetime.datetime(2018, 4, 2, 14, 37)
First, the day is set to 1 (the first of the month), then 25 hours
are added, to get to the 2nd day and 14th hour, finally the
weekday is applied, but since the 2nd is already a Monday there is
no effect.
"""
def __init__(self, dt1=None, dt2=None,
years=0, months=0, days=0, leapdays=0, weeks=0,
hours=0, minutes=0, seconds=0, microseconds=0,
year=None, month=None, day=None, weekday=None,
yearday=None, nlyearday=None,
hour=None, minute=None, second=None, microsecond=None):
if dt1 and dt2:
# datetime is a subclass of date. So both must be date
if not (isinstance(dt1, datetime.date) and
isinstance(dt2, datetime.date)):
raise TypeError("relativedelta only diffs datetime/date")
# We allow two dates, or two datetimes, so we coerce them to be
# of the same type
if (isinstance(dt1, datetime.datetime) !=
isinstance(dt2, datetime.datetime)):
if not isinstance(dt1, datetime.datetime):
dt1 = datetime.datetime.fromordinal(dt1.toordinal())
elif not isinstance(dt2, datetime.datetime):
dt2 = datetime.datetime.fromordinal(dt2.toordinal())
self.years = 0
self.months = 0
self.days = 0
self.leapdays = 0
self.hours = 0
self.minutes = 0
self.seconds = 0
self.microseconds = 0
self.year = None
self.month = None
self.day = None
self.weekday = None
self.hour = None
self.minute = None
self.second = None
self.microsecond = None
self._has_time = 0
# Get year / month delta between the two
months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
self._set_months(months)
# Remove the year/month delta so the timedelta is just well-defined
# time units (seconds, days and microseconds)
dtm = self.__radd__(dt2)
# If we've overshot our target, make an adjustment
if dt1 < dt2:
compare = operator.gt
increment = 1
else:
compare = operator.lt
increment = -1
while compare(dt1, dtm):
months += increment
self._set_months(months)
dtm = self.__radd__(dt2)
# Get the timedelta between the "months-adjusted" date and dt1
delta = dt1 - dtm
self.seconds = delta.seconds + delta.days * 86400
self.microseconds = delta.microseconds
else:
# Check for non-integer values in integer-only quantities
if any(x is not None and x != int(x) for x in (years, months)):
raise ValueError("Non-integer years and months are "
"ambiguous and not currently supported.")
# Relative information
self.years = int(years)
self.months = int(months)
self.days = days + weeks * 7
self.leapdays = leapdays
self.hours = hours
self.minutes = minutes
self.seconds = seconds
self.microseconds = microseconds
# Absolute information
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
if any(x is not None and int(x) != x
for x in (year, month, day, hour,
minute, second, microsecond)):
# For now we'll deprecate floats - later it'll be an error.
warn("Non-integer value passed as absolute information. " +
"This is not a well-defined condition and will raise " +
"errors in future versions.", DeprecationWarning)
if isinstance(weekday, integer_types):
self.weekday = weekdays[weekday]
else:
self.weekday = weekday
yday = 0
if nlyearday:
yday = nlyearday
elif yearday:
yday = yearday
if yearday > 59:
self.leapdays = -1
if yday:
ydayidx = [31, 59, 90, 120, 151, 181, 212,
243, 273, 304, 334, 366]
for idx, ydays in enumerate(ydayidx):
if yday <= ydays:
self.month = idx+1
if idx == 0:
self.day = yday
else:
self.day = yday-ydayidx[idx-1]
break
else:
raise ValueError("invalid year day (%d)" % yday)
self._fix()
def _fix(self):
if abs(self.microseconds) > 999999:
s = _sign(self.microseconds)
div, mod = divmod(self.microseconds * s, 1000000)
self.microseconds = mod * s
self.seconds += div * s
if abs(self.seconds) > 59:
s = _sign(self.seconds)
div, mod = divmod(self.seconds * s, 60)
self.seconds = mod * s
self.minutes += div * s
if abs(self.minutes) > 59:
s = _sign(self.minutes)
div, mod = divmod(self.minutes * s, 60)
self.minutes = mod * s
self.hours += div * s
if abs(self.hours) > 23:
s = _sign(self.hours)
div, mod = divmod(self.hours * s, 24)
self.hours = mod * s
self.days += div * s
if abs(self.months) > 11:
s = _sign(self.months)
div, mod = divmod(self.months * s, 12)
self.months = mod * s
self.years += div * s
if (self.hours or self.minutes or self.seconds or self.microseconds
or self.hour is not None or self.minute is not None or
self.second is not None or self.microsecond is not None):
self._has_time = 1
else:
self._has_time = 0
@property
def weeks(self):
return int(self.days / 7.0)
@weeks.setter
def weeks(self, value):
self.days = self.days - (self.weeks * 7) + value * 7
def _set_months(self, months):
self.months = months
if abs(self.months) > 11:
s = _sign(self.months)
div, mod = divmod(self.months * s, 12)
self.months = mod * s
self.years = div * s
else:
self.years = 0
def normalized(self):
"""
Return a version of this object represented entirely using integer
values for the relative attributes.
>>> relativedelta(days=1.5, hours=2).normalized()
relativedelta(days=+1, hours=+14)
:return:
Returns a :class:`dateutil.relativedelta.relativedelta` object.
"""
# Cascade remainders down (rounding each to roughly nearest microsecond)
days = int(self.days)
hours_f = round(self.hours + 24 * (self.days - days), 11)
hours = int(hours_f)
minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
minutes = int(minutes_f)
seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
seconds = int(seconds_f)
microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
# Constructor carries overflow back up with call to _fix()
return self.__class__(years=self.years, months=self.months,
days=days, hours=hours, minutes=minutes,
seconds=seconds, microseconds=microseconds,
leapdays=self.leapdays, year=self.year,
month=self.month, day=self.day,
weekday=self.weekday, hour=self.hour,
minute=self.minute, second=self.second,
microsecond=self.microsecond)
    def __add__(self, other):
        """Add this relativedelta to another relativedelta, a timedelta,
        or a date/datetime, returning an object of the operand's kind.
        """
        if isinstance(other, relativedelta):
            # relativedelta + relativedelta: relative fields add
            # pairwise; each absolute field takes ``other``'s value
            # when it is set, falling back to ours.
            return self.__class__(years=other.years + self.years,
                                 months=other.months + self.months,
                                 days=other.days + self.days,
                                 hours=other.hours + self.hours,
                                 minutes=other.minutes + self.minutes,
                                 seconds=other.seconds + self.seconds,
                                 microseconds=(other.microseconds +
                                               self.microseconds),
                                 leapdays=other.leapdays or self.leapdays,
                                 year=(other.year if other.year is not None
                                       else self.year),
                                 month=(other.month if other.month is not None
                                        else self.month),
                                 day=(other.day if other.day is not None
                                      else self.day),
                                 weekday=(other.weekday if other.weekday is not None
                                          else self.weekday),
                                 hour=(other.hour if other.hour is not None
                                       else self.hour),
                                 minute=(other.minute if other.minute is not None
                                         else self.minute),
                                 second=(other.second if other.second is not None
                                         else self.second),
                                 microsecond=(other.microsecond if other.microsecond
                                              is not None else
                                              self.microsecond))
        if isinstance(other, datetime.timedelta):
            # relativedelta + timedelta: fold the timedelta's
            # days/seconds/microseconds into our relative fields.
            return self.__class__(years=self.years,
                                  months=self.months,
                                  days=self.days + other.days,
                                  hours=self.hours,
                                  minutes=self.minutes,
                                  seconds=self.seconds + other.seconds,
                                  microseconds=self.microseconds + other.microseconds,
                                  leapdays=self.leapdays,
                                  year=self.year,
                                  month=self.month,
                                  day=self.day,
                                  weekday=self.weekday,
                                  hour=self.hour,
                                  minute=self.minute,
                                  second=self.second,
                                  microsecond=self.microsecond)
        if not isinstance(other, datetime.date):
            return NotImplemented
        elif self._has_time and not isinstance(other, datetime.datetime):
            # We carry time-of-day information, so promote a bare date
            # to a datetime before applying it.
            other = datetime.datetime.fromordinal(other.toordinal())
        # Absolute year/month (when truthy) override the date's own,
        # then the relative ``months`` offset is applied with rollover.
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            # _fix() keeps months within +/-11, so at most one year of
            # rollover is needed here.
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # Clamp the day to the length of the target month (e.g. Jan 31
        # plus one month lands on Feb 28/29).
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            # leapdays only kicks in once the result is past February
            # of a leap year.
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            # Jump to the nth requested weekday: forward for positive n
            # (counting the current day if it already matches), backward
            # for negative n.
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth) - 1) * 7
            if nth > 0:
                jumpdays += (7 - ret.weekday() + weekday) % 7
            else:
                jumpdays += (ret.weekday() - weekday) % 7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret
    def __radd__(self, other):
        # Addition is commutative for the types handled here (date,
        # datetime, timedelta), so delegate straight to __add__.
        return self.__add__(other)
    def __rsub__(self, other):
        # ``other - self`` is ``(-self) + other``: negate, then reuse
        # the addition logic.
        return self.__neg__().__radd__(other)
def __sub__(self, other):
if not isinstance(other, relativedelta):
return NotImplemented # In case the other object defines __rsub__
return self.__class__(years=self.years - other.years,
months=self.months - other.months,
days=self.days - other.days,
hours=self.hours - other.hours,
minutes=self.minutes - other.minutes,
seconds=self.seconds - other.seconds,
microseconds=self.microseconds - other.microseconds,
leapdays=self.leapdays or other.leapdays,
year=(self.year if self.year is not None
else other.year),
month=(self.month if self.month is not None else
other.month),
day=(self.day if self.day is not None else
other.day),
weekday=(self.weekday if self.weekday is not None else
other.weekday),
hour=(self.hour if self.hour is not None else
other.hour),
minute=(self.minute if self.minute is not None else
other.minute),
second=(self.second if self.second is not None else
other.second),
microsecond=(self.microsecond if self.microsecond
is not None else
other.microsecond))
def __abs__(self):
return self.__class__(years=abs(self.years),
months=abs(self.months),
days=abs(self.days),
hours=abs(self.hours),
minutes=abs(self.minutes),
seconds=abs(self.seconds),
microseconds=abs(self.microseconds),
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __neg__(self):
return self.__class__(years=-self.years,
months=-self.months,
days=-self.days,
hours=-self.hours,
minutes=-self.minutes,
seconds=-self.seconds,
microseconds=-self.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __bool__(self):
return not (not self.years and
not self.months and
not self.days and
not self.hours and
not self.minutes and
not self.seconds and
not self.microseconds and
not self.leapdays and
self.year is None and
self.month is None and
self.day is None and
self.weekday is None and
self.hour is None and
self.minute is None and
self.second is None and
self.microsecond is None)
# Compatibility with Python 2.x
__nonzero__ = __bool__
def __mul__(self, other):
try:
f = float(other)
except TypeError:
return NotImplemented
return self.__class__(years=int(self.years * f),
months=int(self.months * f),
days=int(self.days * f),
hours=int(self.hours * f),
minutes=int(self.minutes * f),
seconds=int(self.seconds * f),
microseconds=int(self.microseconds * f),
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
__rmul__ = __mul__
def __eq__(self, other):
if not isinstance(other, relativedelta):
return NotImplemented
if self.weekday or other.weekday:
if not self.weekday or not other.weekday:
return False
if self.weekday.weekday != other.weekday.weekday:
return False
n1, n2 = self.weekday.n, other.weekday.n
if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
return False
return (self.years == other.years and
self.months == other.months and
self.days == other.days and
self.hours == other.hours and
self.minutes == other.minutes and
self.seconds == other.seconds and
self.microseconds == other.microseconds and
self.leapdays == other.leapdays and
self.year == other.year and
self.month == other.month and
self.day == other.day and
self.hour == other.hour and
self.minute == other.minute and
self.second == other.second and
self.microsecond == other.microsecond)
def __hash__(self):
return hash((
self.weekday,
self.years,
self.months,
self.days,
self.hours,
self.minutes,
self.seconds,
self.microseconds,
self.leapdays,
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
))
def __ne__(self, other):
return not self.__eq__(other)
def __div__(self, other):
try:
reciprocal = 1 / float(other)
except TypeError:
return NotImplemented
return self.__mul__(reciprocal)
__truediv__ = __div__
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
"hours", "minutes", "seconds", "microseconds"]:
value = getattr(self, attr)
if value:
l.append("{attr}={value:+g}".format(attr=attr, value=value))
for attr in ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("{attr}={value}".format(attr=attr, value=repr(value)))
return "{classname}({attrs})".format(classname=self.__class__.__name__,
attrs=", ".join(l))
def _sign(x):
    # Sign of *x* as +/-1 via copysign, which (unlike a comparison
    # against zero) maps -0.0 to -1 and +0.0/0 to +1.
    return int(copysign(1, x))
# vim:ts=4:sw=4:et
| relativedelta |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 26254,
"end": 26572
} | class ____(PendingDeprecationWarning):
"""A similar warning as :class:`_exc.SADeprecationWarning`, this warning
is not used in modern versions of SQLAlchemy.
"""
deprecated_since: Optional[str] = None
"Indicates the version that started raising this deprecation warning"
| SAPendingDeprecationWarning |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/pool/base.py | {
"start": 2770,
"end": 3082
} | class ____(Enum):
"""Describe options for "reset on return" behaviors."""
reset_rollback = 0
reset_commit = 1
reset_none = 2
_ResetStyleArgType = Union[
ResetStyle,
Literal[True, None, False, "commit", "rollback"],
]
reset_rollback, reset_commit, reset_none = list(ResetStyle)
| ResetStyle |
python | tornadoweb__tornado | tornado/test/iostream_test.py | {
"start": 7278,
"end": 27107
} | class ____(AsyncTestCase):
# Tests where one stream reads and the other writes.
# These should work for BaseIOStream implementations.
def make_iostream_pair(self, **kwargs):
raise NotImplementedError
def iostream_pair(self, **kwargs):
"""Like make_iostream_pair, but called by ``async with``.
In py37 this becomes simpler with contextlib.asynccontextmanager.
"""
class IOStreamPairContext:
def __init__(self, test, kwargs):
self.test = test
self.kwargs = kwargs
async def __aenter__(self):
self.pair = await self.test.make_iostream_pair(**self.kwargs)
return self.pair
async def __aexit__(self, typ, value, tb):
for s in self.pair:
s.close()
return IOStreamPairContext(self, kwargs)
@gen_test
def test_write_zero_bytes(self):
# Attempting to write zero bytes should run the callback without
# going into an infinite loop.
rs, ws = yield self.make_iostream_pair()
yield ws.write(b"")
ws.close()
rs.close()
@gen_test
def test_future_delayed_close_callback(self):
# Same as test_delayed_close_callback, but with the future interface.
rs, ws = yield self.make_iostream_pair()
try:
ws.write(b"12")
chunks = []
chunks.append((yield rs.read_bytes(1)))
ws.close()
chunks.append((yield rs.read_bytes(1)))
self.assertEqual(chunks, [b"1", b"2"])
finally:
ws.close()
rs.close()
@gen_test
def test_close_buffered_data(self):
# Similar to the previous test, but with data stored in the OS's
# socket buffers instead of the IOStream's read buffer. Out-of-band
# close notifications must be delayed until all data has been
# drained into the IOStream buffer. (epoll used to use out-of-band
# close events with EPOLLRDHUP, but no longer)
#
# This depends on the read_chunk_size being smaller than the
# OS socket buffer, so make it small.
rs, ws = yield self.make_iostream_pair(read_chunk_size=256)
try:
ws.write(b"A" * 512)
data = yield rs.read_bytes(256)
self.assertEqual(b"A" * 256, data)
ws.close()
# Allow the close to propagate to the `rs` side of the
# connection. Using add_callback instead of add_timeout
# doesn't seem to work, even with multiple iterations
yield gen.sleep(0.01)
data = yield rs.read_bytes(256)
self.assertEqual(b"A" * 256, data)
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_close_after_close(self):
# Similar to test_delayed_close_callback, but read_until_close takes
# a separate code path so test it separately.
rs, ws = yield self.make_iostream_pair()
try:
ws.write(b"1234")
# Read one byte to make sure the client has received the data.
# It won't run the close callback as long as there is more buffered
# data that could satisfy a later read.
data = yield rs.read_bytes(1)
ws.close()
self.assertEqual(data, b"1")
data = yield rs.read_until_close()
self.assertEqual(data, b"234")
finally:
ws.close()
rs.close()
@gen_test
def test_large_read_until(self):
# Performance test: read_until used to have a quadratic component
# so a read_until of 4MB would take 8 seconds; now it takes 0.25
# seconds.
rs, ws = yield self.make_iostream_pair()
try:
# This test fails on pypy with ssl. I think it's because
# pypy's gc defeats moves objects, breaking the
# "frozen write buffer" assumption.
if (
isinstance(rs, SSLIOStream)
and platform.python_implementation() == "PyPy"
):
raise unittest.SkipTest("pypy gc causes problems with openssl")
NUM_KB = 4096
for i in range(NUM_KB):
ws.write(b"A" * 1024)
ws.write(b"\r\n")
data = yield rs.read_until(b"\r\n")
self.assertEqual(len(data), NUM_KB * 1024 + 2)
finally:
ws.close()
rs.close()
@gen_test
async def test_read_until_with_close_after_second_packet(self):
# This is a regression test for a regression in Tornado 6.0
# (maybe 6.0.3?) reported in
# https://github.com/tornadoweb/tornado/issues/2717
#
# The data arrives in two chunks; the stream is closed at the
# same time that the second chunk is received. If the second
# chunk is larger than the first, it works, but when this bug
# existed it would fail if the second chunk were smaller than
# the first. This is due to the optimization that the
# read_until condition is only checked when the buffer doubles
# in size
async with self.iostream_pair() as (rs, ws):
rf = asyncio.ensure_future(rs.read_until(b"done"))
# We need to wait for the read_until to actually start. On
# windows that's tricky because the selector runs in
# another thread; sleeping is the simplest way.
await asyncio.sleep(0.1)
await ws.write(b"x" * 2048)
ws.write(b"done")
ws.close()
await rf
@gen_test
async def test_read_until_unsatisfied_after_close(self):
# If a stream is closed while reading, it raises
# StreamClosedError instead of UnsatisfiableReadError (the
# latter should only be raised when byte limits are reached).
# The particular scenario tested here comes from #2717.
async with self.iostream_pair() as (rs, ws):
rf = asyncio.ensure_future(rs.read_until(b"done"))
await ws.write(b"x" * 2048)
ws.write(b"foo")
ws.close()
with self.assertRaises(StreamClosedError):
await rf
@gen_test
def test_close_callback_with_pending_read(self):
# Regression test for a bug that was introduced in 2.3
# where the IOStream._close_callback would never be called
# if there were pending reads.
OK = b"OK\r\n"
rs, ws = yield self.make_iostream_pair()
event = Event()
rs.set_close_callback(event.set)
try:
ws.write(OK)
res = yield rs.read_until(b"\r\n")
self.assertEqual(res, OK)
ws.close()
rs.read_until(b"\r\n")
# If _close_callback (self.stop) is not called,
# an AssertionError: Async operation timed out after 5 seconds
# will be raised.
yield event.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_future_close_callback(self):
# Regression test for interaction between the Future read interfaces
# and IOStream._maybe_add_error_listener.
rs, ws = yield self.make_iostream_pair()
closed = [False]
cond = Condition()
def close_callback():
closed[0] = True
cond.notify()
rs.set_close_callback(close_callback)
try:
ws.write(b"a")
res = yield rs.read_bytes(1)
self.assertEqual(res, b"a")
self.assertFalse(closed[0])
ws.close()
yield cond.wait()
self.assertTrue(closed[0])
finally:
rs.close()
ws.close()
@gen_test
def test_write_memoryview(self):
rs, ws = yield self.make_iostream_pair()
try:
fut = rs.read_bytes(4)
ws.write(memoryview(b"hello"))
data = yield fut
self.assertEqual(data, b"hell")
finally:
ws.close()
rs.close()
@gen_test
def test_read_bytes_partial(self):
rs, ws = yield self.make_iostream_pair()
try:
# Ask for more than is available with partial=True
fut = rs.read_bytes(50, partial=True)
ws.write(b"hello")
data = yield fut
self.assertEqual(data, b"hello")
# Ask for less than what is available; num_bytes is still
# respected.
fut = rs.read_bytes(3, partial=True)
ws.write(b"world")
data = yield fut
self.assertEqual(data, b"wor")
# Partial reads won't return an empty string, but read_bytes(0)
# will.
data = yield rs.read_bytes(0, partial=True)
self.assertEqual(data, b"")
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_max_bytes(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Extra room under the limit
fut = rs.read_until(b"def", max_bytes=50)
ws.write(b"abcdef")
data = yield fut
self.assertEqual(data, b"abcdef")
# Just enough space
fut = rs.read_until(b"def", max_bytes=6)
ws.write(b"abcdef")
data = yield fut
self.assertEqual(data, b"abcdef")
# Not enough space, but we don't know it until all we can do is
# log a warning and close the connection.
with ExpectLog(gen_log, "Unsatisfiable read", level=logging.INFO):
fut = rs.read_until(b"def", max_bytes=5)
ws.write(b"123456")
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_max_bytes_inline(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Similar to the error case in the previous test, but the
# ws writes first so rs reads are satisfied
# inline. For consistency with the out-of-line case, we
# do not raise the error synchronously.
ws.write(b"123456")
with ExpectLog(gen_log, "Unsatisfiable read", level=logging.INFO):
with self.assertRaises(StreamClosedError):
yield rs.read_until(b"def", max_bytes=5)
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_max_bytes_ignores_extra(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Even though data that matches arrives the same packet that
# puts us over the limit, we fail the request because it was not
# found within the limit.
ws.write(b"abcdef")
with ExpectLog(gen_log, "Unsatisfiable read", level=logging.INFO):
rs.read_until(b"def", max_bytes=5)
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_regex_max_bytes(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Extra room under the limit
fut = rs.read_until_regex(b"def", max_bytes=50)
ws.write(b"abcdef")
data = yield fut
self.assertEqual(data, b"abcdef")
# Just enough space
fut = rs.read_until_regex(b"def", max_bytes=6)
ws.write(b"abcdef")
data = yield fut
self.assertEqual(data, b"abcdef")
# Not enough space, but we don't know it until all we can do is
# log a warning and close the connection.
with ExpectLog(gen_log, "Unsatisfiable read", level=logging.INFO):
rs.read_until_regex(b"def", max_bytes=5)
ws.write(b"123456")
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_regex_max_bytes_inline(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Similar to the error case in the previous test, but the
# ws writes first so rs reads are satisfied
# inline. For consistency with the out-of-line case, we
# do not raise the error synchronously.
ws.write(b"123456")
with ExpectLog(gen_log, "Unsatisfiable read", level=logging.INFO):
rs.read_until_regex(b"def", max_bytes=5)
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_read_until_regex_max_bytes_ignores_extra(self):
rs, ws = yield self.make_iostream_pair()
closed = Event()
rs.set_close_callback(closed.set)
try:
# Even though data that matches arrives the same packet that
# puts us over the limit, we fail the request because it was not
# found within the limit.
ws.write(b"abcdef")
with ExpectLog(gen_log, "Unsatisfiable read", level=logging.INFO):
rs.read_until_regex(b"def", max_bytes=5)
yield closed.wait()
finally:
ws.close()
rs.close()
@gen_test
def test_small_reads_from_large_buffer(self):
# 10KB buffer size, 100KB available to read.
# Read 1KB at a time and make sure that the buffer is not eagerly
# filled.
rs, ws = yield self.make_iostream_pair(max_buffer_size=10 * 1024)
try:
ws.write(b"a" * 1024 * 100)
for i in range(100):
data = yield rs.read_bytes(1024)
self.assertEqual(data, b"a" * 1024)
finally:
ws.close()
rs.close()
@gen_test
def test_small_read_untils_from_large_buffer(self):
# 10KB buffer size, 100KB available to read.
# Read 1KB at a time and make sure that the buffer is not eagerly
# filled.
rs, ws = yield self.make_iostream_pair(max_buffer_size=10 * 1024)
try:
ws.write((b"a" * 1023 + b"\n") * 100)
for i in range(100):
data = yield rs.read_until(b"\n", max_bytes=4096)
self.assertEqual(data, b"a" * 1023 + b"\n")
finally:
ws.close()
rs.close()
@gen_test
def test_flow_control(self):
MB = 1024 * 1024
rs, ws = yield self.make_iostream_pair(max_buffer_size=5 * MB)
try:
# Client writes more than the rs will accept.
ws.write(b"a" * 10 * MB)
# The rs pauses while reading.
yield rs.read_bytes(MB)
yield gen.sleep(0.1)
# The ws's writes have been blocked; the rs can
# continue to read gradually.
for i in range(9):
yield rs.read_bytes(MB)
finally:
rs.close()
ws.close()
@gen_test
def test_read_into(self):
rs, ws = yield self.make_iostream_pair()
def sleep_some():
self.io_loop.run_sync(lambda: gen.sleep(0.05))
try:
buf = bytearray(10)
fut = rs.read_into(buf)
ws.write(b"hello")
yield gen.sleep(0.05)
self.assertTrue(rs.reading())
ws.write(b"world!!")
data = yield fut
self.assertFalse(rs.reading())
self.assertEqual(data, 10)
self.assertEqual(bytes(buf), b"helloworld")
# Existing buffer is fed into user buffer
fut = rs.read_into(buf)
yield gen.sleep(0.05)
self.assertTrue(rs.reading())
ws.write(b"1234567890")
data = yield fut
self.assertFalse(rs.reading())
self.assertEqual(data, 10)
self.assertEqual(bytes(buf), b"!!12345678")
# Existing buffer can satisfy read immediately
buf = bytearray(4)
ws.write(b"abcdefghi")
data = yield rs.read_into(buf)
self.assertEqual(data, 4)
self.assertEqual(bytes(buf), b"90ab")
data = yield rs.read_bytes(7)
self.assertEqual(data, b"cdefghi")
finally:
ws.close()
rs.close()
@gen_test
def test_read_into_partial(self):
rs, ws = yield self.make_iostream_pair()
try:
# Partial read
buf = bytearray(10)
fut = rs.read_into(buf, partial=True)
ws.write(b"hello")
data = yield fut
self.assertFalse(rs.reading())
self.assertEqual(data, 5)
self.assertEqual(bytes(buf), b"hello\0\0\0\0\0")
# Full read despite partial=True
ws.write(b"world!1234567890")
data = yield rs.read_into(buf, partial=True)
self.assertEqual(data, 10)
self.assertEqual(bytes(buf), b"world!1234")
# Existing buffer can satisfy read immediately
data = yield rs.read_into(buf, partial=True)
self.assertEqual(data, 6)
self.assertEqual(bytes(buf), b"5678901234")
finally:
ws.close()
rs.close()
@gen_test
def test_read_into_zero_bytes(self):
rs, ws = yield self.make_iostream_pair()
try:
buf = bytearray()
fut = rs.read_into(buf)
self.assertEqual(fut.result(), 0)
finally:
ws.close()
rs.close()
@gen_test
def test_many_mixed_reads(self):
# Stress buffer handling when going back and forth between
# read_bytes() (using an internal buffer) and read_into()
# (using a user-allocated buffer).
r = random.Random(42)
nbytes = 1000000
rs, ws = yield self.make_iostream_pair()
produce_hash = hashlib.sha1()
consume_hash = hashlib.sha1()
@gen.coroutine
def produce():
remaining = nbytes
while remaining > 0:
size = r.randint(1, min(1000, remaining))
data = os.urandom(size)
produce_hash.update(data)
yield ws.write(data)
remaining -= size
assert remaining == 0
@gen.coroutine
def consume():
remaining = nbytes
while remaining > 0:
if r.random() > 0.5:
# read_bytes()
size = r.randint(1, min(1000, remaining))
data = yield rs.read_bytes(size)
consume_hash.update(data)
remaining -= size
else:
# read_into()
size = r.randint(1, min(1000, remaining))
buf = bytearray(size)
n = yield rs.read_into(buf)
assert n == size
consume_hash.update(buf)
remaining -= size
assert remaining == 0
try:
yield [produce(), consume()]
assert produce_hash.hexdigest() == consume_hash.hexdigest()
finally:
ws.close()
rs.close()
@abstract_base_test
| TestReadWriteMixin |
python | mlflow__mlflow | mlflow/entities/model_registry/model_version_deployment_job_run_state.py | {
"start": 143,
"end": 2043
} | class ____:
"""Enum for model version deployment state of an
:py:class:`mlflow.entities.model_registry.ModelVersion`.
"""
NO_VALID_DEPLOYMENT_JOB_FOUND = ProtoModelVersionDeploymentJobState.DeploymentJobRunState.Value(
"NO_VALID_DEPLOYMENT_JOB_FOUND"
)
RUNNING = ProtoModelVersionDeploymentJobState.DeploymentJobRunState.Value("RUNNING")
SUCCEEDED = ProtoModelVersionDeploymentJobState.DeploymentJobRunState.Value("SUCCEEDED")
FAILED = ProtoModelVersionDeploymentJobState.DeploymentJobRunState.Value("FAILED")
PENDING = ProtoModelVersionDeploymentJobState.DeploymentJobRunState.Value("PENDING")
_STRING_TO_STATE = {
k: ProtoModelVersionDeploymentJobState.DeploymentJobRunState.Value(k)
for k in ProtoModelVersionDeploymentJobState.DeploymentJobRunState.keys()
}
_STATE_TO_STRING = {value: key for key, value in _STRING_TO_STATE.items()}
@staticmethod
def from_string(state_str):
if state_str not in ModelVersionDeploymentJobRunState._STRING_TO_STATE:
raise Exception(
f"Could not get deployment job run state corresponding to string {state_str}. "
f"Valid state strings: {ModelVersionDeploymentJobRunState.all_states()}"
)
return ModelVersionDeploymentJobRunState._STRING_TO_STATE[state_str]
@staticmethod
def to_string(state):
if state not in ModelVersionDeploymentJobRunState._STATE_TO_STRING:
raise Exception(
f"Could not get string corresponding to deployment job run {state}. "
f"Valid states: {ModelVersionDeploymentJobRunState.all_states()}"
)
return ModelVersionDeploymentJobRunState._STATE_TO_STRING[state]
@staticmethod
def all_states():
return list(ModelVersionDeploymentJobRunState._STATE_TO_STRING.keys())
| ModelVersionDeploymentJobRunState |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_graph.py | {
"start": 7947,
"end": 8197
} | class ____(graphene.ObjectType):
assetKey = graphene.NonNull(GrapheneAssetKey)
repositories = non_null_list(lambda: external.GrapheneRepository)
class Meta:
name = "AssetNodeDefinitionCollision"
| GrapheneAssetNodeDefinitionCollision |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/conversation/base.py | {
"start": 631,
"end": 5293
} | class ____(LLMChain):
"""Chain to have a conversation and load context from memory.
This class is deprecated in favor of `RunnableWithMessageHistory`. Please refer
to this tutorial for more detail: https://python.langchain.com/docs/tutorials/chatbot/
`RunnableWithMessageHistory` offers several benefits, including:
- Stream, batch, and async support;
- More flexible memory handling, including the ability to manage memory
outside the chain;
- Support for multiple threads.
Below is a minimal implementation, analogous to using `ConversationChain` with
the default `ConversationBufferMemory`:
```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
store = {} # memory is maintained outside the chain
def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
if session_id not in store:
store[session_id] = InMemoryChatMessageHistory()
return store[session_id]
model = ChatOpenAI(model="gpt-3.5-turbo-0125")
chain = RunnableWithMessageHistory(model, get_session_history)
chain.invoke(
"Hi I'm Bob.",
config={"configurable": {"session_id": "1"}},
) # session_id determines thread
```
Memory objects can also be incorporated into the `get_session_history` callable:
```python
from langchain_classic.memory import ConversationBufferWindowMemory
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
store = {} # memory is maintained outside the chain
def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
if session_id not in store:
store[session_id] = InMemoryChatMessageHistory()
return store[session_id]
memory = ConversationBufferWindowMemory(
chat_memory=store[session_id],
k=3,
return_messages=True,
)
assert len(memory.memory_variables) == 1
key = memory.memory_variables[0]
messages = memory.load_memory_variables({})[key]
store[session_id] = InMemoryChatMessageHistory(messages=messages)
return store[session_id]
model = ChatOpenAI(model="gpt-3.5-turbo-0125")
chain = RunnableWithMessageHistory(model, get_session_history)
chain.invoke(
"Hi I'm Bob.",
config={"configurable": {"session_id": "1"}},
) # session_id determines thread
```
Example:
```python
from langchain_classic.chains import ConversationChain
from langchain_openai import OpenAI
conversation = ConversationChain(llm=OpenAI())
```
"""
memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
"""Default memory store."""
prompt: BasePromptTemplate = PROMPT
"""Default conversation prompt to use."""
input_key: str = "input"
output_key: str = "response"
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@classmethod
@override
def is_lc_serializable(cls) -> bool:
return False
@property
def input_keys(self) -> list[str]:
"""Use this since so some prompt vars come from history."""
return [self.input_key]
@model_validator(mode="after")
def validate_prompt_input_variables(self) -> Self:
"""Validate that prompt input variables are consistent."""
memory_keys = self.memory.memory_variables
input_key = self.input_key
if input_key in memory_keys:
msg = (
f"The input key {input_key} was also found in the memory keys "
f"({memory_keys}) - please provide keys that don't overlap."
)
raise ValueError(msg)
prompt_variables = self.prompt.input_variables
expected_keys = [*memory_keys, input_key]
if set(expected_keys) != set(prompt_variables):
msg = (
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but got {memory_keys} as inputs from "
f"memory, and {input_key} as the normal input key."
)
raise ValueError(msg)
return self
| ConversationChain |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.