language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | encode__starlette | starlette/routing.py | {
"start": 11030,
"end": 13792
} | class ____(BaseRoute):
def __init__(
self,
path: str,
endpoint: Callable[..., Any],
*,
name: str | None = None,
middleware: Sequence[Middleware] | None = None,
) -> None:
assert path.startswith("/"), "Routed paths must start with '/'"
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
endpoint_handler = endpoint
while isinstance(endpoint_handler, functools.partial):
endpoint_handler = endpoint_handler.func
if inspect.isfunction(endpoint_handler) or inspect.ismethod(endpoint_handler):
# Endpoint is function or method. Treat it as `func(websocket)`.
self.app = websocket_session(endpoint)
else:
# Endpoint is a class. Treat it as ASGI.
self.app = endpoint
if middleware is not None:
for cls, args, kwargs in reversed(middleware):
self.app = cls(self.app, *args, **kwargs)
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
def matches(self, scope: Scope) -> tuple[Match, Scope]:
path_params: dict[str, Any]
if scope["type"] == "websocket":
route_path = get_route_path(scope)
match = self.path_regex.match(route_path)
if match:
matched_params = match.groupdict()
for key, value in matched_params.items():
matched_params[key] = self.param_convertors[key].convert(value)
path_params = dict(scope.get("path_params", {}))
path_params.update(matched_params)
child_scope = {"endpoint": self.endpoint, "path_params": path_params}
return Match.FULL, child_scope
return Match.NONE, {}
def url_path_for(self, name: str, /, **path_params: Any) -> URLPath:
seen_params = set(path_params.keys())
expected_params = set(self.param_convertors.keys())
if name != self.name or seen_params != expected_params:
raise NoMatchFound(name, path_params)
path, remaining_params = replace_params(self.path_format, self.param_convertors, path_params)
assert not remaining_params
return URLPath(path=path, protocol="websocket")
async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:
await self.app(scope, receive, send)
def __eq__(self, other: Any) -> bool:
return isinstance(other, WebSocketRoute) and self.path == other.path and self.endpoint == other.endpoint
def __repr__(self) -> str:
return f"{self.__class__.__name__}(path={self.path!r}, name={self.name!r})"
| WebSocketRoute |
python | sqlalchemy__sqlalchemy | test/sql/test_types.py | {
"start": 131255,
"end": 133110
} | class ____(fixtures.TestBase):
"""Test what DBAPIs and dialects return without any typing
information supplied at the SQLA level.
"""
__sparse_driver_backend__ = True
def _fixture(self, connection, metadata, type_, data):
t = Table("t", metadata, Column("val", type_))
metadata.create_all(connection)
connection.execute(t.insert(), dict(val=data))
@testing.requires.numeric_received_as_decimal_untyped
@testing.provide_metadata
def test_decimal_fp(self, connection):
metadata = self.metadata
self._fixture(
connection, metadata, Numeric(10, 5), decimal.Decimal("45.5")
)
val = connection.exec_driver_sql("select val from t").scalar()
assert isinstance(val, decimal.Decimal)
eq_(val, decimal.Decimal("45.5"))
@testing.requires.numeric_received_as_decimal_untyped
@testing.provide_metadata
def test_decimal_int(self, connection):
metadata = self.metadata
self._fixture(
connection, metadata, Numeric(10, 5), decimal.Decimal("45")
)
val = connection.exec_driver_sql("select val from t").scalar()
assert isinstance(val, decimal.Decimal)
eq_(val, decimal.Decimal("45"))
@testing.provide_metadata
def test_ints(self, connection):
metadata = self.metadata
self._fixture(connection, metadata, Integer, 45)
val = connection.exec_driver_sql("select val from t").scalar()
assert isinstance(val, int)
eq_(val, 45)
@testing.provide_metadata
def test_float(self, connection):
metadata = self.metadata
self._fixture(connection, metadata, Float, 46.583)
val = connection.exec_driver_sql("select val from t").scalar()
assert isinstance(val, float)
eq_(val, 46.583)
| NumericRawSQLTest |
python | tensorflow__tensorflow | tensorflow/python/saved_model/registration/registration_saving_test.py | {
"start": 4962,
"end": 7720
} | class ____(test.TestCase, parameterized.TestCase):
def test_registered_serializable(self, cycles):
@registration.register_serializable(name=f"SaveAndLoad{cycles}")
class Module(autotrackable.AutoTrackable):
def __init__(self, name="module"):
self.v = variables.Variable(1.)
self.name = name
def _serialize_to_proto(self, **unused_kwargs):
return wrappers_pb2.StringValue(value=self.name)
@classmethod
def _deserialize_from_proto(cls, proto, **unused_kwargs):
if proto.Is(wrappers_pb2.StringValue.DESCRIPTOR):
unpacked = wrappers_pb2.StringValue()
proto.Unpack(unpacked)
return cls(name=unpacked.value)
raise AssertionError(
"Did not receive proto of correct type during deserialization. "
f"Expected type {wrappers_pb2.StringValue.DESCRIPTOR.full_name}, "
f"got {proto.TypeName()}")
m = Module("a")
m.v.assign(5)
loaded = cycle(m, cycles)
self.assertIsInstance(loaded, Module)
self.assertEqual(5, loaded.v.numpy())
self.assertEqual("a", loaded.name)
def test_none_proto(self, cycles):
@registration.register_serializable(name=f"NoneProto{cycles}")
class Module(autotrackable.AutoTrackable):
def __init__(self, name="module"):
self.v = variables.Variable(1.)
self.name = name
# Leave _serialize_to_proto as the default (returns `None`).
@classmethod
def _deserialize_from_proto(cls, proto, **unused_kwargs):
self.assertEqual(proto.ByteSize(), 0)
return cls("deserialized")
m = Module("a")
m.v.assign(5)
loaded = cycle(m, cycles)
self.assertIsInstance(loaded, Module)
self.assertEqual(5, loaded.v.numpy())
self.assertEqual("deserialized", loaded.name)
def test_deserialization_dependencies(self, cycles):
@registration.register_serializable(name=f"Dependency{cycles}")
class Module(autotrackable.AutoTrackable):
def __init__(self, v=None):
self.v = v if v is not None else variables.Variable(1.)
def _deserialization_dependencies(self, children):
del children # Unused.
return {"v": self.v}
@classmethod
def _deserialize_from_proto(cls, dependencies, **unused_kwargs):
self.assertIn("v", dependencies)
return cls(v=dependencies["v"])
m = Module()
m.v.assign(5)
loaded = cycle(m, cycles)
self.assertIsInstance(loaded, Module)
self.assertEqual(5, loaded.v.numpy())
def test_registered_saver(self, cycles):
p1 = Part([1, 4])
p2 = Part([2, 5])
p3 = Part([3, 6])
s = Stack([p1, p2, p3])
loaded = cycle(s, cycles)
self.assertAllEqual(s.value(), loaded.value())
| SavedModelTest |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/attrs/test_inference.py | {
"start": 745,
"end": 2472
} | class ____:
type_ = attr.ib(type=int)
type_converter = attr.ib(converter=bool)
validator_type = attr.ib(validator=attr.validators.instance_of(str))
validator_type_tuple = attr.ib(validator=attr.validators.instance_of((str, int)))
validator_type_multiple = attr.ib(
validator=[
attr.validators.instance_of(str),
attr.validators.instance_of((str, int, bool)),
]
)
validator_type_has_overlap = attr.ib(
validator=[
attr.validators.instance_of(str),
attr.validators.instance_of((str, list)),
attr.validators.instance_of(object),
]
)
validator_optional = attr.ib(
validator=attr.validators.optional(lambda inst, atrib, val: float(val))
)
validator_in = attr.ib(validator=attr.validators.in_([1, 2, 3]))
validator_in_multiple = attr.ib(
validator=[attr.validators.in_(list(range(100))), attr.validators.in_([1, -1])]
)
validator_in_multiple_strings = attr.ib(
validator=[attr.validators.in_("abcd"), attr.validators.in_(["ab", "cd"])]
)
typing_list = attr.ib(type=list[int])
typing_list_of_list = attr.ib(type=list[list[int]])
typing_dict = attr.ib(type=dict[str, int])
typing_optional = attr.ib(type=typing.Optional[bool])
typing_optional_new = attr.ib(type=bool | None)
typing_union = attr.ib(type=typing.Union[str, int])
typing_union_new = attr.ib(type=str | int)
has_default = attr.ib(default=0)
has_default_factory = attr.ib(default=attr.Factory(list))
has_default_factory_takes_self = attr.ib( # uninferrable but has default
default=attr.Factory(lambda _: [], takes_self=True)
)
@attr.s
| Inferrables |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0051_project_urlconf_feature.py | {
"start": 149,
"end": 779
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0050_migrate_external_builds"),
]
operations = [
migrations.AddField(
model_name="project",
name="urlconf",
field=models.CharField(
default=None,
help_text="Supports the following keys: $language, $version, $subproject, $filename. An example: `$language/$version/$filename`.",
max_length=255,
null=True,
verbose_name="Documentation URL Configuration",
),
),
]
| Migration |
python | miyuchina__mistletoe | mistletoe/contrib/toc_renderer.py | {
"start": 204,
"end": 2496
} | class ____(HtmlRenderer):
"""
Extends HtmlRenderer class for table of contents support.
"""
def __init__(self, *extras, depth=5, omit_title=True, filter_conds=[], **kwargs):
"""
Args:
extras (list): allows subclasses to add even more custom tokens.
depth (int): the maximum level of heading to be included in TOC.
omit_title (bool): whether to ignore tokens where token.level == 1.
filter_conds (list): when any of these functions evaluate to true,
current heading will not be included.
**kwargs: additional parameters to be passed to the ancestor's
constructor.
"""
super().__init__(*extras, **kwargs)
self._headings = []
self.depth = depth
self.omit_title = omit_title
self.filter_conds = filter_conds
@property
def toc(self):
"""
Returns table of contents as a block_token.List instance.
"""
def get_indent(level):
if self.omit_title:
level -= 1
return ' ' * 4 * (level - 1)
def build_list_item(heading):
level, content = heading
template = '{indent}- {content}\n'
return template.format(indent=get_indent(level), content=content)
lines = [build_list_item(heading) for heading in self._headings]
items = block_token.tokenize(lines)
return items[0]
def render_heading(self, token):
"""
Overrides super().render_heading; stores rendered heading first,
then returns it.
"""
rendered = super().render_heading(token)
content = self.parse_rendered_heading(rendered)
if not (self.omit_title and token.level == 1
or token.level > self.depth
or any(cond(content) for cond in self.filter_conds)):
self._headings.append((token.level, content))
return rendered
@staticmethod
def parse_rendered_heading(rendered):
"""
Helper method; converts rendered heading to plain text.
"""
return re.sub(r'<.+?>', '', rendered)
TOCRenderer = TocRenderer
"""
Deprecated name of the `TocRenderer` class.
"""
| TocRenderer |
python | Textualize__textual | tests/suggester/test_input_suggestions.py | {
"start": 155,
"end": 3042
} | class ____(App[ComposeResult]):
def __init__(self, suggestions):
self.suggestions = suggestions
self.input = Input(suggester=SuggestFromList(self.suggestions))
super().__init__()
def compose(self) -> ComposeResult:
yield self.input
async def test_no_suggestions():
app = SuggestionsApp([])
async with app.run_test() as pilot:
assert app.input._suggestion == ""
await pilot.press("a")
assert app.input._suggestion == ""
async def test_suggestion():
app = SuggestionsApp(["hello"])
async with app.run_test() as pilot:
for char in "hello":
await pilot.press(char)
assert app.input._suggestion == "hello"
async def test_accept_suggestion():
app = SuggestionsApp(["hello"])
async with app.run_test() as pilot:
await pilot.press("h")
await pilot.press("right")
assert app.input.value == "hello"
async def test_no_suggestion_on_empty_value():
app = SuggestionsApp(["hello"])
async with app.run_test():
assert app.input._suggestion == ""
async def test_no_suggestion_on_empty_value_after_deleting():
app = SuggestionsApp(["hello"])
async with app.run_test() as pilot:
await pilot.press("h", "e", "backspace", "backspace")
assert app.input.value == "" # Sanity check.
assert app.input._suggestion == ""
async def test_suggestion_shows_up_after_deleting_extra_chars():
app = SuggestionsApp(["hello"])
async with app.run_test() as pilot:
await pilot.press(*"help")
assert app.input._suggestion == ""
await pilot.press("backspace")
assert app.input._suggestion == "hello"
async def test_suggestion_shows_up_after_deleting_extra_chars_in_middle_of_word():
app = SuggestionsApp(["hello"])
async with app.run_test() as pilot:
await pilot.press(*"hefl")
assert app.input._suggestion == ""
await pilot.press("left", "backspace")
assert app.input._suggestion == "hello"
@pytest.mark.parametrize(
("suggestion", "truncate_at"),
[
(".......", 3),
("hey there", 3),
("Olá, tudo bem?", 3),
("áàóãõñç", 2),
(string.punctuation, 3),
(string.punctuation[::-1], 5),
(string.punctuation[::3], 5),
],
)
async def test_suggestion_with_special_characters(suggestion: str, truncate_at: int):
app = SuggestionsApp([suggestion])
async with app.run_test() as pilot:
await pilot.press(*suggestion[:truncate_at])
assert app.input._suggestion == suggestion
async def test_suggestion_priority():
app = SuggestionsApp(["dog", "dad"])
async with app.run_test() as pilot:
await pilot.press("d")
assert app.input._suggestion == "dog"
await pilot.press("a")
assert app.input._suggestion == "dad"
| SuggestionsApp |
python | pypa__hatch | tests/cli/fmt/test_fmt.py | {
"start": 7142,
"end": 10793
} | class ____:
def test_fix_flag(self, hatch, helpers, temp_dir, config_file, env_run, mocker, platform, defaults_file_preview):
config_file.model.template.plugins["default"]["tests"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
config_dir = data_path / "env" / ".internal" / "hatch-static-analysis" / ".config" / project_path.id
default_config = config_dir / "ruff_defaults.toml"
user_config = config_dir / "pyproject.toml"
user_config_path = platform.join_command_args([str(user_config)])
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("fmt", "--preview")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
f"""
cmd [1] | ruff check --config {user_config_path} --preview --fix .
cmd [2] | ruff format --config {user_config_path} --preview .
"""
)
assert env_run.call_args_list == [
mocker.call(f"ruff check --config {user_config_path} --preview --fix .", shell=True),
mocker.call(f"ruff format --config {user_config_path} --preview .", shell=True),
]
assert default_config.read_text() == defaults_file_preview
old_contents = (project_path / "pyproject.toml").read_text()
config_path = str(default_config).replace("\\", "\\\\")
assert (
user_config.read_text()
== f"""\
{old_contents}
[tool.ruff]
extend = "{config_path}\""""
)
def test_check_flag(self, hatch, helpers, temp_dir, config_file, env_run, mocker, platform, defaults_file_preview):
config_file.model.template.plugins["default"]["tests"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
config_dir = data_path / "env" / ".internal" / "hatch-static-analysis" / ".config" / project_path.id
default_config = config_dir / "ruff_defaults.toml"
user_config = config_dir / "pyproject.toml"
user_config_path = platform.join_command_args([str(user_config)])
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("fmt", "--check", "--preview")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
f"""
cmd [1] | ruff check --config {user_config_path} --preview .
cmd [2] | ruff format --config {user_config_path} --preview --check --diff .
"""
)
assert env_run.call_args_list == [
mocker.call(f"ruff check --config {user_config_path} --preview .", shell=True),
mocker.call(f"ruff format --config {user_config_path} --preview --check --diff .", shell=True),
]
assert default_config.read_text() == defaults_file_preview
old_contents = (project_path / "pyproject.toml").read_text()
config_path = str(default_config).replace("\\", "\\\\")
assert (
user_config.read_text()
== f"""\
{old_contents}
[tool.ruff]
extend = "{config_path}\""""
)
| TestPreview |
python | gevent__gevent | src/greentest/3.13/test_queue.py | {
"start": 27340,
"end": 27424
} | class ____(FailingQueueTest, unittest.TestCase):
queue = c_queue
| CFailingQueueTest |
python | pennersr__django-allauth | allauth/headless/account/inputs.py | {
"start": 8214,
"end": 8491
} | class ____(SelectEmailInput):
def clean_email(self):
email = super().clean_email()
if not flows.manage_email.can_delete_email(email):
raise get_account_adapter().validation_error("cannot_remove_primary_email")
return email
| DeleteEmailInput |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py | {
"start": 3455,
"end": 7377
} | class ____(test.TestCase, parameterized.TestCase, PythonOpImpl):
"""Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.
This uses the Python compatibility wrapper that forwards to space_to_batch_nd.
"""
def _testPad(self,
inputs,
paddings,
block_size,
outputs,
dtype=dtypes.float32):
with self.cached_session():
# outputs = space_to_batch(inputs)
x_tf = self.space_to_batch(
math_ops.cast(inputs, dtype), paddings, block_size=block_size)
self.assertAllEqual(x_tf, outputs)
# inputs = batch_to_space(outputs)
x_tf = self.batch_to_space(
math_ops.cast(outputs, dtype), paddings, block_size=block_size)
self.assertAllEqual(x_tf, inputs)
def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
paddings = np.zeros((2, 2), dtype=np.int32)
self._testPad(inputs, paddings, block_size, outputs, dtype)
# [1, 2, 2, 1] <-> [4, 1, 1, 1]
@parameterized.parameters(dtypes.float32, dtypes.float16, dtypes.bfloat16,
dtypes.uint8)
@test_util.run_deprecated_v1
def testSmallInput2x2(self, dtype):
x_np = [[[[1], [2]], [[3], [4]]]]
block_size = 2
x_out = [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
self._testOne(x_np, block_size, x_out, dtype)
# [1, 2, 2, 1] <-> [1, 3, 3, 1] (padding) <-> [9, 1, 1, 1]
@test_util.run_deprecated_v1
def testSmallInput2x2Pad1x0(self):
x_np = [[[[1], [2]], [[3], [4]]]]
paddings = np.array([[1, 0], [1, 0]], dtype=np.int32)
block_size = 3
x_out = [[[[0]]], [[[0]]], [[[0]]], [[[0]]], [[[1]]], [[[2]]], [[[0]]],
[[[3]]], [[[4]]]]
self._testPad(x_np, paddings, block_size, x_out)
# Test with depth larger than 1.
# [1, 2, 2, 3] <-> [4, 1, 1, 3]
@test_util.run_deprecated_v1
def testDepthInput2x2(self):
x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Test for larger input dimensions.
# [1, 4, 4, 1] <-> [4, 2, 2, 1]
@test_util.run_deprecated_v1
def testLargerInput2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]],
[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Test with batch larger than 1.
# [2, 2, 4, 1] <-> [8, 1, 2, 1]
@test_util.run_deprecated_v1
def testBatchInput2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]],
[[[9], [10], [11], [12]], [[13], [14], [15], [16]]]]
block_size = 2
x_out = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input spatial dimensions AND batch larger than 1, to ensure
# that elements are correctly laid out spatially and properly interleaved
# along the batch dimension.
# [2, 4, 4, 1] <-> [8, 2, 2, 1]
@test_util.run_deprecated_v1
def testLargerInputBatch2x2(self):
x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]],
[[9], [10], [11], [12]], [[13], [14], [15], [16]]],
[[[17], [18], [19], [20]], [[21], [22], [23], [24]],
[[25], [26], [27], [28]], [[29], [30], [31], [32]]]]
x_out = [[[[1], [3]], [[9], [11]]], [[[17], [19]], [[25], [27]]],
[[[2], [4]], [[10], [12]]], [[[18], [20]], [[26], [28]]],
[[[5], [7]], [[13], [15]]], [[[21], [23]], [[29], [31]]],
[[[6], [8]], [[14], [16]]], [[[22], [24]], [[30], [32]]]]
block_size = 2
self._testOne(x_np, block_size, x_out)
| SpaceToBatchTest |
python | pypa__setuptools | pkg_resources/__init__.py | {
"start": 8385,
"end": 8567
} | class ____(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self) -> str:
return self.__class__.__name__ + repr(self.args)
| ResolutionError |
python | numba__numba | numba/typed/dictobject.py | {
"start": 1490,
"end": 2215
} | class ____(IntEnum):
"""Status code for other dict operations.
"""
OK = 0
OK_REPLACED = 1
ERR_NO_MEMORY = -1
ERR_DICT_MUTATED = -2
ERR_ITER_EXHAUSTED = -3
ERR_DICT_EMPTY = -4
ERR_CMP_FAILED = -5
def new_dict(key, value, n_keys=0):
"""Construct a new dict with enough space for *n_keys* without a resize.
Parameters
----------
key, value : TypeRef
Key type and value type of the new dict.
n_keys : int, default 0
The number of keys to insert without needing a resize.
A value of 0 creates a dict with minimum size.
"""
# With JIT disabled, ignore all arguments and return a Python dict.
return dict()
@register_model(DictType)
| Status |
python | python-openxml__python-docx | src/docx/image/jpeg.py | {
"start": 707,
"end": 1249
} | class ____(Jpeg):
"""Image header parser for Exif image format."""
@classmethod
def from_stream(cls, stream):
"""Return |Exif| instance having header properties parsed from Exif image in
`stream`."""
markers = _JfifMarkers.from_stream(stream)
# print('\n%s' % markers)
px_width = markers.sof.px_width
px_height = markers.sof.px_height
horz_dpi = markers.app1.horz_dpi
vert_dpi = markers.app1.vert_dpi
return cls(px_width, px_height, horz_dpi, vert_dpi)
| Exif |
python | doocs__leetcode | solution/2100-2199/2185.Counting Words With a Given Prefix/Solution2.py | {
"start": 572,
"end": 757
} | class ____:
def prefixCount(self, words: List[str], pref: str) -> int:
tree = Trie()
for w in words:
tree.insert(w)
return tree.search(pref)
| Solution |
python | eth-brownie__brownie | brownie/_config.py | {
"start": 4862,
"end": 13175
} | class ____(Dict[str, Any]):
"""Dict subclass that prevents adding new keys when locked"""
def __init__(self, values: Dict[str, Any] = {}) -> None:
self._locked = False
super().__init__()
self.update(values)
def __setitem__(self, key: str, value: Any) -> None:
if self._locked and key not in self:
raise KeyError(f"{key} is not a known config setting")
if type(value) is dict:
value = ConfigDict(value)
super().__setitem__(key, value)
def update(self, arg: Dict[str, Any]) -> None: # type: ignore [override]
for k, v in arg.items():
self.__setitem__(k, v)
def _lock(self) -> None:
"""Locks the dict so that new keys cannot be added"""
for obj in self.values():
if type(obj) is ConfigDict:
obj._lock()
self._locked = True
def _unlock(self) -> None:
"""Unlocks the dict so that new keys can be added"""
for obj in self.values():
if type(obj) is ConfigDict:
obj._unlock()
self._locked = False
def _copy(self) -> Dict[str, Any]:
config_copy = {}
for key, value in self.items():
if isinstance(value, ConfigDict):
value = value._copy()
config_copy[key] = value
return config_copy
def _get_project_config_path(project_path: pathlib.Path) -> Optional[pathlib.Path]:
if project_path.is_dir():
path = project_path.joinpath("brownie-config")
else:
path = project_path
suffix = next((i for i in (".yml", ".yaml", ".json") if path.with_suffix(i).exists()), None)
return None if suffix is None else path.with_suffix(suffix)
def _load_config(project_path: pathlib.Path) -> Dict:
"""Loads configuration data from a file, returns as a dict"""
path = _get_project_config_path(project_path)
if path is None:
return {}
with path.open() as fp:
if path.suffix in (".yaml", ".yml"):
return yaml.safe_load(fp) or {}
raw_json = fp.read()
valid_json = regex_sub(r'\/\/[^"]*?(?=\n|$)', "", raw_json)
return ujson_loads(valid_json)
def _load_project_config(project_path: pathlib.Path) -> None:
"""Loads configuration settings from a project's brownie-config.yaml"""
config_path = project_path.joinpath("brownie-config")
config_data = _load_config(config_path)
config_vars = _load_project_envvars(project_path)
if "dotenv" in config_data:
if not isinstance(config_data["dotenv"], str):
raise ValueError(f'Invalid value passed to dotenv: {config_data["dotenv"]}')
env_path = project_path.joinpath(config_data["dotenv"])
if not env_path.is_file():
raise ValueError(f"Dotenv specified in config but not found at path: {env_path}")
config_vars.update(dotenv_values(dotenv_path=env_path)) # type: ignore
load_dotenv(dotenv_path=env_path)
config_data = expand_posix_vars(config_data, config_vars)
if not config_data:
return
if "network" in config_data:
warnings.warn(
"The `network` field in `brownie-config.yaml` has been deprecated. "
"Network settings are now handled via `brownie networks` in the CLI. "
f"Remove `network` from {config_path} to silence this warning.",
DeprecationWarning,
)
del config_data["network"]
# Update the network config cmd_settings with project specific cmd_settings
if "networks" in config_data and isinstance(config_data["networks"], dict):
for network, values in config_data["networks"].items():
if (
network != "default"
and network in CONFIG.networks.keys()
and "cmd_settings" in values
and isinstance(values["cmd_settings"], dict)
):
if "cmd_settings" in CONFIG.networks[network]:
_recursive_update(
CONFIG.networks[network]["cmd_settings"], values["cmd_settings"]
)
else:
CONFIG.networks[network]["cmd_settings"] = values["cmd_settings"]
settings = CONFIG.settings
settings._unlock()
_recursive_update(settings, config_data)
_recursive_update(settings, expand_posix_vars(settings, config_vars))
settings._lock()
if "hypothesis" in config_data:
_modify_hypothesis_settings(config_data["hypothesis"], "brownie", "brownie-base")
def _load_project_compiler_config(project_path: Optional[pathlib.Path]) -> Dict:
if not project_path:
return CONFIG.settings["compiler"]
compiler_data = CONFIG.settings["compiler"]._copy()
project_data = _load_config(project_path.joinpath("brownie-config")).get("compiler", {})
_recursive_update(compiler_data, project_data)
return compiler_data
def _load_project_envvars(project_path: pathlib.Path) -> Dict:
config_vars = dict(os.environ)
settings = CONFIG.settings
if settings.get("dotenv"):
dotenv_path = settings["dotenv"]
if not isinstance(dotenv_path, str):
raise ValueError(f"Invalid value passed to dotenv: {dotenv_path}")
env_path = project_path.joinpath(dotenv_path)
if not env_path.is_file():
raise ValueError(f"Dotenv specified in config but not found at path: {env_path}")
config_vars.update(dotenv_values(dotenv_path=env_path)) # type: ignore
return config_vars
def _load_project_structure_config(project_path):
structure = CONFIG.settings["project_structure"]._copy()
path = _get_project_config_path(project_path)
if path is None:
return structure
data = _load_config(project_path).get("project_structure", {})
structure.update(data)
return structure
def _load_project_dependencies(project_path: pathlib.Path) -> List[str]:
data = _load_config(project_path.joinpath("brownie-config"))
dependencies = data.get("dependencies", []) or []
if isinstance(dependencies, str):
dependencies = [dependencies]
return dependencies
def _modify_hypothesis_settings(settings, name, parent=None):
settings = settings.copy()
if parent is None:
parent = hp_settings._current_profile # type: ignore [attr-defined]
if "phases" in settings:
try:
settings["phases"] = [getattr(Phase, k) for k, v in settings["phases"].items() if v]
except AttributeError as exc:
raise ValueError(f"'{exc.args[0]}' is not a valid hypothesis phase setting")
hp_settings.register_profile(
name,
parent=hp_settings.get_profile(parent),
database=DirectoryBasedExampleDatabase(str(_get_data_folder().joinpath("hypothesis"))),
**settings,
)
hp_settings.load_profile(name)
def _recursive_update(original: Dict, new: Dict) -> None:
"""Recursively merges a new dict into the original dict"""
if not original:
original = {}
for k in new:
if k in original and isinstance(new[k], dict):
_recursive_update(original[k], new[k])
else:
original[k] = new[k]
def _update_argv_from_docopt(args: Dict[str, Any]) -> None:
CONFIG.argv.update({k.lstrip("-"): v for k, v in args.items()})
def _get_data_folder() -> pathlib.Path:
return DATA_FOLDER
def _make_data_folders(data_folder: pathlib.Path) -> None:
# create data folder structure
data_folder.mkdir(exist_ok=True)
for folder in DATA_SUBFOLDERS:
data_folder.joinpath(folder).mkdir(exist_ok=True)
if not data_folder.joinpath("network-config.yaml").exists():
shutil.copyfile(
BROWNIE_FOLDER.joinpath("data/network-config.yaml"),
data_folder.joinpath("network-config.yaml"),
)
if not data_folder.joinpath("providers-config.yaml").exists():
shutil.copyfile(
BROWNIE_FOLDER.joinpath("data/providers-config.yaml"),
data_folder.joinpath("providers-config.yaml"),
)
def _None_factory() -> None:
return None
warnings.filterwarnings("once", category=DeprecationWarning, module="brownie")
# create data folders
_make_data_folders(DATA_FOLDER)
CONFIG: Final = Config()
| ConfigDict |
python | run-llama__llama_index | llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-yugabytedb/llama_index/storage/chat_store/yugabytedb/base.py | {
"start": 2428,
"end": 14105
} | class ____(BaseChatStore):
table_name: Optional[str] = Field(
default="chatstore", description="YugabyteDB table name."
)
schema_name: Optional[str] = Field(
default="public", description="YugabyteDB schema name."
)
_table_class: Optional[Any] = PrivateAttr()
_session: Optional[sessionmaker] = PrivateAttr()
def __init__(
self,
session: sessionmaker,
table_name: str,
schema_name: str = "public",
use_jsonb: bool = False,
):
super().__init__(
table_name=table_name.lower(),
schema_name=schema_name.lower(),
)
# sqlalchemy model
base = declarative_base()
self._table_class = get_data_model(
base,
table_name,
schema_name,
use_jsonb=use_jsonb,
)
self._session = session
self._initialize(base)
@classmethod
def from_params(
cls,
host: Optional[str] = None,
port: Optional[str] = None,
database: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
load_balance: Optional[bool] = False,
topology_keys: Optional[str] = None,
yb_servers_refresh_interval: Optional[int] = 300,
fallback_to_topology_keys_only: Optional[bool] = False,
failed_host_ttl_seconds: Optional[int] = 5,
table_name: str = "chatstore",
schema_name: str = "public",
connection_string: Optional[str] = None,
debug: bool = False,
use_jsonb: bool = False,
) -> "YugabyteDBChatStore":
"""
Return connection string from database parameters.
Args:
host (str): YugabyteDB host.
port (str): YugabyteDB port.
database (str): YugabyteDB database name.
user (str): YugabyteDB user.
password (str): YugabyteDB password.
load_balance (bool, optional): Enables uniform load balancing. Defaults to False.
topology_keys (str, optional): Enables topology-aware load balancing.
Specify comma-separated geo-locations in the form of cloud.region.zone:priority.
Ignored if load_balance is false. Defaults to None.
yb_servers_refresh_interval (int, optional): The interval in seconds to refresh the servers list;
ignored if load_balance is false. Defaults to 300.
fallback_to_topology_keys_only (bool, optional): If set to true and topology_keys are specified,
the driver only tries to connect to nodes specified in topology_keys
Defaults to False.
failed_host_ttl_seconds (int, optional): Time, in seconds, to wait before trying to connect to failed nodes.
Defaults to 5.
connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to yugabytedb db.
table_name (str): Table name.
schema_name (str): Schema name.
debug (bool, optional): Debug mode. Defaults to False.
use_jsonb (bool, optional): Use JSONB instead of JSON. Defaults to False.
"""
from urllib.parse import urlencode
query_params = {"load_balance": str(load_balance)}
if topology_keys is not None:
query_params["topology_keys"] = topology_keys
if yb_servers_refresh_interval is not None:
query_params["yb_servers_refresh_interval"] = yb_servers_refresh_interval
if fallback_to_topology_keys_only:
query_params["fallback_to_topology_keys_only"] = (
fallback_to_topology_keys_only
)
if failed_host_ttl_seconds is not None:
query_params["failed_host_ttl_seconds"] = failed_host_ttl_seconds
query_str = urlencode(query_params)
conn_str = (
connection_string
or f"yugabytedb+psycopg2://{user}:{password}@{host}:{port}/{database}?{query_str}"
)
session = cls._connect(conn_str, debug)
return cls(
session=session,
table_name=table_name,
schema_name=schema_name,
use_jsonb=use_jsonb,
)
@classmethod
def from_uri(
cls,
uri: str,
table_name: str = "chatstore",
schema_name: str = "public",
debug: bool = False,
use_jsonb: bool = False,
) -> "YugabyteDBChatStore":
"""Return connection string from database parameters."""
params = params_from_uri(uri)
return cls.from_params(
**params,
table_name=table_name,
schema_name=schema_name,
debug=debug,
use_jsonb=use_jsonb,
)
@classmethod
def _connect(
cls, connection_string: str, debug: bool
) -> tuple[sessionmaker, sessionmaker]:
_engine = create_engine(connection_string, echo=debug)
return sessionmaker(_engine)
def _create_schema_if_not_exists(self) -> None:
with self._session() as session, session.begin():
# Check if the specified schema exists with "CREATE" statement
check_schema_statement = text(
f"SELECT schema_name FROM information_schema.schemata WHERE schema_name = '{self.schema_name}'"
)
result = session.execute(check_schema_statement).fetchone()
# If the schema does not exist, then create it
if not result:
create_schema_statement = text(
f"CREATE SCHEMA IF NOT EXISTS {self.schema_name}"
)
session.execute(create_schema_statement)
session.commit()
def _create_tables_if_not_exists(self, base) -> None:
with self._session() as session, session.begin():
base.metadata.create_all(session.connection())
def _initialize(self, base) -> None:
self._create_schema_if_not_exists()
self._create_tables_if_not_exists(base)
def set_messages(self, key: str, messages: list[ChatMessage]) -> None:
"""Set messages for a key."""
with self._session() as session:
stmt = (
insert(self._table_class)
.values(
key=bindparam("key"), value=cast(bindparam("value"), ARRAY(JSONB))
)
.on_conflict_do_update(
index_elements=["key"],
set_={"value": cast(bindparam("value"), ARRAY(JSONB))},
)
)
params = {
"key": key,
"value": [message.model_dump_json() for message in messages],
}
# Execute the bulk upsert
session.execute(stmt, params)
session.commit()
def get_messages(self, key: str) -> list[ChatMessage]:
"""Get messages for a key."""
with self._session() as session:
result = session.execute(select(self._table_class).filter_by(key=key))
result = result.scalars().first()
if result:
return [
ChatMessage.model_validate(removed_message)
for removed_message in result.value
]
return []
def add_message(self, key: str, message: ChatMessage) -> None:
"""Add a message for a key."""
with self._session() as session:
stmt = (
insert(self._table_class)
.values(
key=bindparam("key"), value=cast(bindparam("value"), ARRAY(JSONB))
)
.on_conflict_do_update(
index_elements=["key"],
set_={"value": cast(bindparam("value"), ARRAY(JSONB))},
)
)
params = {"key": key, "value": [message.model_dump_json()]}
session.execute(stmt, params)
session.commit()
def delete_messages(self, key: str) -> Optional[list[ChatMessage]]:
"""Delete messages for a key."""
with self._session() as session:
session.execute(delete(self._table_class).filter_by(key=key))
session.commit()
return None
def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete specific message for a key."""
with self._session() as session:
# First, retrieve the current list of messages
stmt = select(self._table_class.value).where(self._table_class.key == key)
result = session.execute(stmt).scalar_one_or_none()
if result is None or idx < 0 or idx >= len(result):
# If the key doesn't exist or the index is out of bounds
return None
# Remove the message at the given index
removed_message = result[idx]
stmt = text(
f"""
UPDATE {self._table_class.__tablename__}
SET value = array_cat(
{self._table_class.__tablename__}.value[: :idx],
{self._table_class.__tablename__}.value[:idx+2:]
)
WHERE key = :key;
"""
)
params = {"key": key, "idx": idx}
session.execute(stmt, params)
session.commit()
return ChatMessage.model_validate(removed_message)
def delete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Delete last message for a key."""
with self._session() as session:
# First, retrieve the current list of messages
stmt = select(self._table_class.value).where(self._table_class.key == key)
result = session.execute(stmt).scalar_one_or_none()
if result is None or len(result) == 0:
# If the key doesn't exist or the array is empty
return None
# Remove the message at the given index
removed_message = result[-1]
stmt = text(
f"""
UPDATE {self._table_class.__tablename__}
SET value = value[1:array_length(value, 1) - 1]
WHERE key = :key;
"""
)
params = {"key": key}
session.execute(stmt, params)
session.commit()
return ChatMessage.model_validate(removed_message)
def get_keys(self) -> list[str]:
"""Get all keys."""
with self._session() as session:
stmt = select(self._table_class.key)
return session.execute(stmt).scalars().all()
def params_from_uri(uri: str) -> dict:
result = urlparse(uri)
database = result.path[1:]
query_params = parse_qs(result.query)
port = result.port if result.port else 5433
return {
"database": database,
"user": result.username,
"password": result.password,
"host": result.hostname,
"port": port,
"load_balance": query_params.get("load_balance", ["false"])[0].lower()
== "true",
"topology_keys": query_params.get("topology_keys", [None])[0],
"yb_servers_refresh_interval": int(
query_params.get("yb_servers_refresh_interval", [300])[0]
),
"fallback_to_topology_keys_only": query_params.get(
"fallback_to_topology_keys_only", ["false"]
)[0].lower()
== "true",
"failed_host_ttl_seconds": int(
query_params.get("failed_host_ttl_seconds", [5])[0]
),
}
| YugabyteDBChatStore |
python | davidhalter__parso | parso/python/errors.py | {
"start": 35838,
"end": 37450
} | class ____(SyntaxRule):
_fstring_grammar = None
message_expr = "f-string expression part cannot include a backslash"
message_nested = "f-string: expressions nested too deeply"
message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'"
def _check_format_spec(self, format_spec, depth):
self._check_fstring_contents(format_spec.children[1:], depth)
def _check_fstring_expr(self, fstring_expr, depth):
if depth >= 2:
self.add_issue(fstring_expr, message=self.message_nested)
expr = fstring_expr.children[1]
if '\\' in expr.get_code():
self.add_issue(expr, message=self.message_expr)
children_2 = fstring_expr.children[2]
if children_2.type == 'operator' and children_2.value == '=':
conversion = fstring_expr.children[3]
else:
conversion = children_2
if conversion.type == 'fstring_conversion':
name = conversion.children[1]
if name.value not in ('s', 'r', 'a'):
self.add_issue(name, message=self.message_conversion)
format_spec = fstring_expr.children[-2]
if format_spec.type == 'fstring_format_spec':
self._check_format_spec(format_spec, depth + 1)
def is_issue(self, fstring):
self._check_fstring_contents(fstring.children[1:-1])
def _check_fstring_contents(self, children, depth=0):
for fstring_content in children:
if fstring_content.type == 'fstring_expr':
self._check_fstring_expr(fstring_content, depth)
| _FStringRule |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 883822,
"end": 884566
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for PushAllowance."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("PushAllowanceEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("PushAllowance"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| PushAllowanceConnection |
python | pypa__pip | src/pip/_internal/configuration.py | {
"start": 2601,
"end": 14568
} | class ____:
"""Handles management of configuration.
Provides an interface to accessing and managing configuration files.
This class converts provides an API that takes "section.key-name" style
keys and stores the value associated with it as "key-name" under the
section "section".
This allows for a clean interface wherein the both the section and the
key-name are preserved in an easy to manage form in the configuration files
and the data stored is also nice.
"""
def __init__(self, isolated: bool, load_only: Kind | None = None) -> None:
super().__init__()
if load_only is not None and load_only not in VALID_LOAD_ONLY:
raise ConfigurationError(
"Got invalid value for load_only - should be one of {}".format(
", ".join(map(repr, VALID_LOAD_ONLY))
)
)
self.isolated = isolated
self.load_only = load_only
# Because we keep track of where we got the data from
self._parsers: dict[Kind, list[tuple[str, RawConfigParser]]] = {
variant: [] for variant in OVERRIDE_ORDER
}
self._config: dict[Kind, dict[str, dict[str, Any]]] = {
variant: {} for variant in OVERRIDE_ORDER
}
self._modified_parsers: list[tuple[str, RawConfigParser]] = []
def load(self) -> None:
"""Loads configuration from configuration files and environment"""
self._load_config_files()
if not self.isolated:
self._load_environment_vars()
def get_file_to_edit(self) -> str | None:
"""Returns the file with highest priority in configuration"""
assert self.load_only is not None, "Need to be specified a file to be editing"
try:
return self._get_parser_to_modify()[0]
except IndexError:
return None
def items(self) -> Iterable[tuple[str, Any]]:
"""Returns key-value pairs like dict.items() representing the loaded
configuration
"""
return self._dictionary.items()
def get_value(self, key: str) -> Any:
"""Get a value from the configuration."""
orig_key = key
key = _normalize_name(key)
try:
clean_config: dict[str, Any] = {}
for file_values in self._dictionary.values():
clean_config.update(file_values)
return clean_config[key]
except KeyError:
# disassembling triggers a more useful error message than simply
# "No such key" in the case that the key isn't in the form command.option
_disassemble_key(key)
raise ConfigurationError(f"No such key - {orig_key}")
def set_value(self, key: str, value: Any) -> None:
"""Modify a value in the configuration."""
key = _normalize_name(key)
self._ensure_have_load_only()
assert self.load_only
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
# Modify the parser and the configuration
if not parser.has_section(section):
parser.add_section(section)
parser.set(section, name, value)
self._config[self.load_only].setdefault(fname, {})
self._config[self.load_only][fname][key] = value
self._mark_as_modified(fname, parser)
def unset_value(self, key: str) -> None:
"""Unset a value in the configuration."""
orig_key = key
key = _normalize_name(key)
self._ensure_have_load_only()
assert self.load_only
fname, parser = self._get_parser_to_modify()
if (
key not in self._config[self.load_only][fname]
and key not in self._config[self.load_only]
):
raise ConfigurationError(f"No such key - {orig_key}")
if parser is not None:
section, name = _disassemble_key(key)
if not (
parser.has_section(section) and parser.remove_option(section, name)
):
# The option was not removed.
raise ConfigurationError(
"Fatal Internal error [id=1]. Please report as a bug."
)
# The section may be empty after the option was removed.
if not parser.items(section):
parser.remove_section(section)
self._mark_as_modified(fname, parser)
try:
del self._config[self.load_only][fname][key]
except KeyError:
del self._config[self.load_only][key]
def save(self) -> None:
"""Save the current in-memory state."""
self._ensure_have_load_only()
for fname, parser in self._modified_parsers:
logger.info("Writing to %s", fname)
# Ensure directory exists.
ensure_dir(os.path.dirname(fname))
# Ensure directory's permission(need to be writeable)
try:
with open(fname, "w") as f:
parser.write(f)
except OSError as error:
raise ConfigurationError(
f"An error occurred while writing to the configuration file "
f"{fname}: {error}"
)
#
# Private routines
#
def _ensure_have_load_only(self) -> None:
if self.load_only is None:
raise ConfigurationError("Needed a specific file to be modifying.")
logger.debug("Will be working with %s variant only", self.load_only)
@property
def _dictionary(self) -> dict[str, dict[str, Any]]:
"""A dictionary representing the loaded configuration."""
# NOTE: Dictionaries are not populated if not loaded. So, conditionals
# are not needed here.
retval = {}
for variant in OVERRIDE_ORDER:
retval.update(self._config[variant])
return retval
def _load_config_files(self) -> None:
"""Loads configuration from configuration files"""
config_files = dict(self.iter_config_files())
if config_files[kinds.ENV][0:1] == [os.devnull]:
logger.debug(
"Skipping loading configuration files due to "
"environment's PIP_CONFIG_FILE being os.devnull"
)
return
for variant, files in config_files.items():
for fname in files:
# If there's specific variant set in `load_only`, load only
# that variant, not the others.
if self.load_only is not None and variant != self.load_only:
logger.debug("Skipping file '%s' (variant: %s)", fname, variant)
continue
parser = self._load_file(variant, fname)
# Keeping track of the parsers used
self._parsers[variant].append((fname, parser))
def _load_file(self, variant: Kind, fname: str) -> RawConfigParser:
logger.verbose("For variant '%s', will try loading '%s'", variant, fname)
parser = self._construct_parser(fname)
for section in parser.sections():
items = parser.items(section)
self._config[variant].setdefault(fname, {})
self._config[variant][fname].update(self._normalized_keys(section, items))
return parser
def _construct_parser(self, fname: str) -> RawConfigParser:
parser = configparser.RawConfigParser()
# If there is no such file, don't bother reading it but create the
# parser anyway, to hold the data.
# Doing this is useful when modifying and saving files, where we don't
# need to construct a parser.
if os.path.exists(fname):
locale_encoding = locale.getpreferredencoding(False)
try:
parser.read(fname, encoding=locale_encoding)
except UnicodeDecodeError:
# See https://github.com/pypa/pip/issues/4963
raise ConfigurationFileCouldNotBeLoaded(
reason=f"contains invalid {locale_encoding} characters",
fname=fname,
)
except configparser.Error as error:
# See https://github.com/pypa/pip/issues/4893
raise ConfigurationFileCouldNotBeLoaded(error=error)
return parser
def _load_environment_vars(self) -> None:
"""Loads configuration from environment variables"""
self._config[kinds.ENV_VAR].setdefault(":env:", {})
self._config[kinds.ENV_VAR][":env:"].update(
self._normalized_keys(":env:", self.get_environ_vars())
)
def _normalized_keys(
self, section: str, items: Iterable[tuple[str, Any]]
) -> dict[str, Any]:
"""Normalizes items to construct a dictionary with normalized keys.
This routine is where the names become keys and are made the same
regardless of source - configuration files or environment.
"""
normalized = {}
for name, val in items:
key = section + "." + _normalize_name(name)
normalized[key] = val
return normalized
def get_environ_vars(self) -> Iterable[tuple[str, str]]:
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if key.startswith("PIP_"):
name = key[4:].lower()
if name not in ENV_NAMES_IGNORED:
yield name, val
# XXX: This is patched in the tests.
def iter_config_files(self) -> Iterable[tuple[Kind, list[str]]]:
"""Yields variant and configuration files associated with it.
This should be treated like items of a dictionary. The order
here doesn't affect what gets overridden. That is controlled
by OVERRIDE_ORDER. However this does control the order they are
displayed to the user. It's probably most ergonomic to display
things in the same order as OVERRIDE_ORDER
"""
# SMELL: Move the conditions out of this function
env_config_file = os.environ.get("PIP_CONFIG_FILE", None)
config_files = get_configuration_files()
yield kinds.GLOBAL, config_files[kinds.GLOBAL]
# per-user config is not loaded when env_config_file exists
should_load_user_config = not self.isolated and not (
env_config_file and os.path.exists(env_config_file)
)
if should_load_user_config:
# The legacy config file is overridden by the new config file
yield kinds.USER, config_files[kinds.USER]
# virtualenv config
yield kinds.SITE, config_files[kinds.SITE]
if env_config_file is not None:
yield kinds.ENV, [env_config_file]
else:
yield kinds.ENV, []
def get_values_in_config(self, variant: Kind) -> dict[str, Any]:
"""Get values present in a config file"""
return self._config[variant]
def _get_parser_to_modify(self) -> tuple[str, RawConfigParser]:
# Determine which parser to modify
assert self.load_only
parsers = self._parsers[self.load_only]
if not parsers:
# This should not happen if everything works correctly.
raise ConfigurationError(
"Fatal Internal error [id=2]. Please report as a bug."
)
# Use the highest priority parser.
return parsers[-1]
# XXX: This is patched in the tests.
def _mark_as_modified(self, fname: str, parser: RawConfigParser) -> None:
file_parser_tuple = (fname, parser)
if file_parser_tuple not in self._modified_parsers:
self._modified_parsers.append(file_parser_tuple)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self._dictionary!r})"
| Configuration |
python | django__django | tests/admin_views/models.py | {
"start": 21100,
"end": 21316
} | class ____(models.Model):
order = models.IntegerField()
stuff = models.CharField(max_length=200)
@admin.display(ordering="order")
def some_order(self):
return self.order
| AdminOrderedModelMethod |
python | python__mypy | mypy/types.py | {
"start": 113686,
"end": 116434
} | class ____(ProperType):
"""A synthetic type representing some arbitrary expression that does not cleanly
translate into a type.
This synthetic type is only used at the beginning stages of semantic analysis
and should be completely removing during the process for mapping UnboundTypes to
actual types: we either turn it into a LiteralType or an AnyType.
For example, suppose `Foo[1]` is initially represented as the following:
UnboundType(
name='Foo',
args=[
RawExpressionType(value=1, base_type_name='builtins.int'),
],
)
As we perform semantic analysis, this type will transform into one of two
possible forms.
If 'Foo' was an alias for 'Literal' all along, this type is transformed into:
LiteralType(value=1, fallback=int_instance_here)
Alternatively, if 'Foo' is an unrelated class, we report an error and instead
produce something like this:
Instance(type=typeinfo_for_foo, args=[AnyType(TypeOfAny.from_error))
If the "note" field is not None, the provided note will be reported alongside the
error at this point.
Note: if "literal_value" is None, that means this object is representing some
expression that cannot possibly be a parameter of Literal[...]. For example,
"Foo[3j]" would be represented as:
UnboundType(
name='Foo',
args=[
RawExpressionType(value=None, base_type_name='builtins.complex'),
],
)
"""
__slots__ = ("literal_value", "base_type_name", "note")
def __init__(
self,
literal_value: LiteralValue | None,
base_type_name: str,
line: int = -1,
column: int = -1,
note: str | None = None,
) -> None:
super().__init__(line, column)
self.literal_value = literal_value
self.base_type_name = base_type_name
self.note = note
def simple_name(self) -> str:
return self.base_type_name.replace("builtins.", "")
def accept(self, visitor: TypeVisitor[T]) -> T:
assert isinstance(visitor, SyntheticTypeVisitor)
ret: T = visitor.visit_raw_expression_type(self)
return ret
def serialize(self) -> JsonDict:
assert False, "Synthetic types don't serialize"
def __hash__(self) -> int:
return hash((self.literal_value, self.base_type_name))
def __eq__(self, other: object) -> bool:
if isinstance(other, RawExpressionType):
return (
self.base_type_name == other.base_type_name
and self.literal_value == other.literal_value
)
else:
return NotImplemented
| RawExpressionType |
python | scipy__scipy | scipy/signal/tests/test_fir_filter_design.py | {
"start": 21159,
"end": 24150
} | class ____:
def test_bad_args(self):
assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka')
def test_hilbert(self):
N = 11 # number of taps in the filter
a = 0.1 # width of the transition band
# design an unity gain hilbert bandpass filter from w to 0.5-w
h = remez(11, [a, 0.5-a], [1], type='hilbert')
# make sure the filter has correct # of taps
assert len(h) == N, "Number of Taps"
# make sure it is type III (anti-symmetric tap coefficients)
assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
# Since the requested response is symmetric, all even coefficients
# should be zero (or in this case really small)
assert (abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero"
# now check the frequency response
w, H = freqz(h, 1)
f = w/2/np.pi
Hmag = abs(H)
# should have a zero at 0 and pi (in this case close to zero)
assert (Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi"
# check that the pass band is close to unity
idx = np.logical_and(f > a, f < 0.5-a)
assert (abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity"
def test_compare(self, xp):
# test comparison to MATLAB
k = [0.024590270518440, -0.041314581814658, -0.075943803756711,
-0.003530911231040, 0.193140296954975, 0.373400753484939,
0.373400753484939, 0.193140296954975, -0.003530911231040,
-0.075943803756711, -0.041314581814658, 0.024590270518440]
h = remez(12, xp.asarray([0, 0.3, 0.5, 1]), xp.asarray([1, 0]), fs=2.)
atol_arg = {'atol': 1e-8} if xp_default_dtype(xp) == xp.float32 else {}
xp_assert_close(h, xp.asarray(k, dtype=xp.float64), **atol_arg)
h = [-0.038976016082299, 0.018704846485491, -0.014644062687875,
0.002879152556419, 0.016849978528150, -0.043276706138248,
0.073641298245579, -0.103908158578635, 0.129770906801075,
-0.147163447297124, 0.153302248456347, -0.147163447297124,
0.129770906801075, -0.103908158578635, 0.073641298245579,
-0.043276706138248, 0.016849978528150, 0.002879152556419,
-0.014644062687875, 0.018704846485491, -0.038976016082299]
atol_arg = {'atol': 3e-8} if xp_default_dtype(xp) == xp.float32 else {}
xp_assert_close(
remez(21, xp.asarray([0, 0.8, 0.9, 1]), xp.asarray([0, 1]), fs=2.),
xp.asarray(h, dtype=xp.float64), **atol_arg
)
def test_fs_validation(self):
with pytest.raises(ValueError, match="Sampling.*single scalar"):
remez(11, .1, 1, fs=np.array([10, 20]))
def test_gh_23266(self, xp):
bands = xp.asarray([0.0, 0.2, 0.3, 0.5])
desired = xp.asarray([1.0, 0.0])
weight = xp.asarray([1.0, 2.0])
remez(21, bands, desired, weight=weight)
@make_xp_test_case(firls)
| TestRemez |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/utils/eks_test_constants.py | {
"start": 5378,
"end": 5768
} | class ____:
"""Key names for the dictionaries representing EKS Managed Nodegroups."""
ARN = "nodegroupArn"
AUTOSCALING_GROUPS = "autoScalingGroups"
CREATED_AT = "createdAt"
MODIFIED_AT = "modifiedAt"
NAME = "name"
NODEGROUP_NAME = "nodegroupName"
REMOTE_ACCESS_SG = "remoteAccessSecurityGroup"
RESOURCES = "resources"
TAGS = "tags"
| NodegroupAttributes |
python | astropy__astropy | astropy/io/fits/hdu/compressed/_codecs.py | {
"start": 1243,
"end": 2272
} | class ____(Codec):
"""
A dummy compression/decompression algorithm that stores the data as-is.
While the data is not compressed/decompressed, it is converted to big
endian during encoding as this is what is expected in FITS files.
"""
codec_id = "FITS_NOCOMPRESS"
def decode(self, buf):
"""
Decompress buffer using the NOCOMPRESS algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to decompress.
Returns
-------
buf : np.ndarray
The decompressed buffer.
"""
return np.frombuffer(buf, dtype=np.uint8)
def encode(self, buf):
"""
Compress the data in the buffer using the NOCOMPRESS algorithm.
Parameters
----------
buf : bytes or array_like
The buffer to compress.
Returns
-------
bytes
The compressed bytes.
"""
return _as_big_endian_array(buf).tobytes()
| NoCompress |
python | ray-project__ray | python/ray/util/collective/examples/nccl_allreduce_example.py | {
"start": 97,
"end": 941
} | class ____:
def __init__(self):
self.send = cp.ones((4,), dtype=cp.float32)
self.recv = cp.zeros((4,), dtype=cp.float32)
def setup(self, world_size, rank):
collective.init_collective_group(world_size, rank, "nccl", "default")
return True
def compute(self):
collective.allreduce(self.send, "default")
return self.send
def destroy(self):
collective.destroy_group()
if __name__ == "__main__":
send = cp.ones((4,), dtype=cp.float32)
ray.init(num_gpus=2)
num_workers = 2
workers = []
init_rets = []
for i in range(num_workers):
w = Worker.remote()
workers.append(w)
init_rets.append(w.setup.remote(num_workers, i))
_ = ray.get(init_rets)
results = ray.get([w.compute.remote() for w in workers])
ray.shutdown()
| Worker |
python | kamyu104__LeetCode-Solutions | Python/redistribute-characters-to-make-all-strings-equal.py | {
"start": 50,
"end": 365
} | class ____(object):
def makeEqual(self, words):
"""
:type words: List[str]
:rtype: bool
"""
cnt = collections.defaultdict(int)
for w in words:
for c in w:
cnt[c] += 1
return all(v%len(words) == 0 for v in cnt.itervalues())
| Solution |
python | pytorch__pytorch | torch/distributions/transforms.py | {
"start": 20875,
"end": 21463
} | class ____(Transform):
r"""
Transform via the mapping :math:`\text{Softplus}(x) = \log(1 + \exp(x))`.
The implementation reverts to the linear function when :math:`x > 20`.
"""
domain = constraints.real
codomain = constraints.positive
bijective = True
sign = +1
def __eq__(self, other):
return isinstance(other, SoftplusTransform)
def _call(self, x):
return softplus(x)
def _inverse(self, y):
return (-y).expm1().neg().log() + y
def log_abs_det_jacobian(self, x, y):
return -softplus(-x)
| SoftplusTransform |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/pools.py | {
"start": 2063,
"end": 2310
} | class ____(StrictBaseModel):
"""Pool serializer for patch bodies."""
name: str | None = Field(default=None, alias="pool")
slots: int | None = None
description: str | None = None
include_deferred: bool | None = None
| PoolPatchBody |
python | getsentry__sentry | src/sentry/integrations/discord/requests/base.py | {
"start": 1333,
"end": 10166
} | class ____:
"""
A Request from Discord to our interactions endpoint.
Handles request verification and data access.
Raises DiscordRequestError whenever something goes wrong, including the
appropriate response code that the endpoint should respond with.
"""
def __init__(self, request: Request):
self.request = request
self._body = self.request.body.decode()
self._data: Mapping[str, object] = orjson.loads(self.request.body)
self._integration: RpcIntegration | None = None
self._provider: RpcIdentityProvider | None = None
self._identity: RpcIdentity | None = None
self._user: RpcUser | None = None
self.user: RpcUser | None = None
@property
def integration(self) -> RpcIntegration | None:
return self._integration
@property
def data(self) -> Mapping[str, object]:
"""This is the data object nested within request.data"""
data = self._data.get("data")
if isinstance(data, dict):
return data
else:
return {}
@property
def guild_id(self) -> str | None:
guild_id = self._data.get("guild_id")
return str(guild_id) if guild_id else None
@property
def channel_id(self) -> str | None:
channel_id = self._data.get("channel_id")
return str(channel_id) if channel_id else None
@property
def user_id(self) -> str | None:
try:
# 'member' object is sent when the interaction is invoked in a guild, and 'user' object is sent when
# invoked in a DM.
# See: https://discord.com/developers/docs/interactions/receiving-and-responding#interaction-object
user_source = self._data.get("member", None)
if user_source is None:
user_source = self._data
return user_source["user"]["id"] # type: ignore[index]
except (AttributeError, TypeError, KeyError):
return None
@property
def logging_data(self) -> Mapping[str, str | int]:
# TODO: come back to this later and see what additional metadata makes sense to include here
data: dict[str, str | int | None] = {
"discord_guild_id": self.guild_id,
"discord_channel_id": self.channel_id,
}
if self.integration:
data["integration_id"] = self.integration.id
if self.user_id:
data["discord_user_id"] = self.user_id
if self.user:
data["user"] = self.user.email
if self._identity:
data["has_identity"] = True
if self.has_identity():
data["identity"] = self.get_identity_str()
if self.is_command():
data["command"] = self.get_command_name()
if self.is_message_component():
data["component_custom_id"] = self.get_component_custom_id()
return {k: v for k, v in data.items() if v}
@property
def response_url(self) -> str | None:
"""Used for async responses in DiscordRequestParser"""
application_id = self._data.get("application_id")
token = self._data.get("token")
if not token or not application_id:
return None
return f"{DISCORD_BASE_URL}/webhooks/{application_id}/{token}"
def _get_context(self):
context = integration_service.get_integration_identity_context(
integration_provider=IntegrationProviderSlug.DISCORD.value,
integration_external_id=self.guild_id,
identity_external_id=self.user_id,
identity_provider_external_id=self.guild_id,
)
if not context:
return
self._integration = context.integration
self._provider = context.identity_provider
self._identity = context.identity
self._user = context.user
def validate(self) -> None:
self._log_request()
self._get_context()
self.authorize()
self.validate_integration()
self._validate_identity()
def authorize(self) -> None:
public_key: str = options.get("discord.public-key")
signature: str | None = self.request.META.get("HTTP_X_SIGNATURE_ED25519")
timestamp: str | None = self.request.META.get("HTTP_X_SIGNATURE_TIMESTAMP")
body: str = self._body
if not signature or not timestamp:
self._info(
"discord.authorize.auth.missing.data",
{**self.logging_data, "signature": signature, "timestamp": timestamp},
)
raise DiscordRequestError(status=status.HTTP_401_UNAUTHORIZED)
try:
verify_signature(public_key, signature, timestamp, body)
except InvalidSignature:
self._info(
"discord.authorize.auth.invalid.signature",
{**self.logging_data, "signature": signature, "timestamp": timestamp, "body": body},
)
raise DiscordRequestError(status=status.HTTP_401_UNAUTHORIZED)
except ValueError:
self._info(
"discord.authorize.auth.value.error",
{**self.logging_data, "signature": signature, "timestamp": timestamp, "body": body},
)
raise DiscordRequestError(status=status.HTTP_401_UNAUTHORIZED)
def _validate_identity(self) -> None:
self.user = self.get_identity_user()
if not self.user:
self._info("discord.validate.identity.no.user")
def get_identity_user(self) -> RpcUser | None:
if self._user:
return self._user
identity = self.get_identity()
if not identity:
return None
return user_service.get_user(identity.user_id)
def get_identity(self) -> RpcIdentity | None:
if not self._provider:
self._provider = identity_service.get_provider(
provider_type=IntegrationProviderSlug.DISCORD.value, provider_ext_id=self.guild_id
)
if not self._provider:
self._info("discord.validate.identity.no.provider")
if not self._identity and self._provider is not None:
self._info("discord.validate.identity.no.identity")
self._identity = (
identity_service.get_identity(
filter={"provider_id": self._provider.id, "identity_ext_id": self.user_id}
)
if self._provider
else None
)
if not self._identity:
self._info("discord.validate.identity.get.identity.fail")
self._info("discord.validate.identity")
return self._identity
def get_identity_str(self) -> str | None:
if self.user is None:
return None
return self.user.email if self.user else None
def validate_integration(self) -> None:
if not self._integration:
self._integration = integration_service.get_integration(
provider=IntegrationProviderSlug.DISCORD.value,
external_id=self.guild_id,
status=ObjectStatus.ACTIVE,
)
self._info("discord.validate.integration")
def has_identity(self) -> bool:
return self.user is not None
def _log_request(self) -> None:
self._info("discord.request")
def _info(self, key: str, extra=None) -> None:
if not extra:
extra = {**self.logging_data}
logger.info(key, extra=extra)
def _error(self, key: str) -> None:
logger.error(key, extra={**self.logging_data})
def is_ping(self) -> bool:
return self._data.get("type", 0) == DiscordRequestTypes.PING
def is_command(self) -> bool:
return self._data.get("type", 0) == DiscordRequestTypes.COMMAND
def is_message_component(self) -> bool:
return self._data.get("type", 0) == DiscordRequestTypes.MESSAGE_COMPONENT
def get_command_name(self) -> str:
if not self.is_command():
return ""
return str(self.data.get("name", ""))
def get_component_custom_id(self) -> str:
if not self.is_message_component():
return ""
return str(self.data.get("custom_id", ""))
def is_select_component(self) -> bool:
return self.data.get("component_type", None) == DiscordMessageComponentTypes.SELECT
def get_selected_options(self) -> list[str]:
if not self.is_select_component():
logger.info("discord.interaction.component.not.is_select_component")
return []
values = self.data.get("values", [])
logger.info(
"discord.interaction.component.get_selected_options",
extra={"data": self.data, "values": values},
)
return values # type: ignore[return-value]
| DiscordRequest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/step.py | {
"start": 1690,
"end": 3033
} | class ____:
@property
@abstractmethod
def handle(self) -> Union[StepHandle, UnresolvedStepHandle, ResolvedFromDynamicStepHandle]:
pass
@property
@abstractmethod
def key(self) -> str:
pass
@property
@abstractmethod
def node_handle(self) -> "NodeHandle":
pass
@property
@abstractmethod
def kind(self) -> StepKind:
pass
@property
@abstractmethod
def tags(self) -> Optional[Mapping[str, str]]:
pass
@property
@abstractmethod
def pool(self) -> Optional[str]:
pass
@property
@abstractmethod
def step_inputs(
self,
) -> Sequence[Union[StepInput, UnresolvedCollectStepInput, UnresolvedMappedStepInput]]:
pass
@property
@abstractmethod
def step_outputs(self) -> Sequence[StepOutput]:
pass
@abstractmethod
def step_input_named(
self, name: str
) -> Union[StepInput, UnresolvedCollectStepInput, UnresolvedMappedStepInput]:
pass
@abstractmethod
def step_output_named(self, name: str) -> StepOutput:
pass
@property
@abstractmethod
def step_output_dict(self) -> Mapping[str, StepOutput]:
pass
@property
@abstractmethod
def step_input_dict(self) -> Mapping[str, StepInput]:
pass
| IExecutionStep |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 7574,
"end": 7788
} | class ____(str, BaseEnum):
"""Preset stopwords to use in the `Stopwords` class.
Attributes:
EN: English stopwords.
NONE: No stopwords.
"""
NONE = "none"
EN = "en"
| StopwordsPreset |
python | astropy__astropy | astropy/units/tests/test_quantity_ufuncs.py | {
"start": 1423,
"end": 2441
} | class ____(NamedTuple):
"""A test case for a ufunc that should raise a warning."""
f: Callable
"""The ufunc to test."""
q_in: tuple[u.Quantity]
"""The input quantities."""
wfilter: str
"""The expected warning filter."""
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
# careful of the following line, would break on a function returning
# a single tuple (as opposed to tuple of return values)
results = (results,) if not isinstance(results, tuple) else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.0e-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
| testwarn |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 20075,
"end": 20806
} | class ____(VOTableSpecWarning):
"""
The number of rows explicitly specified in the ``nrows`` attribute
does not match the actual number of rows (``TR`` elements) present
in the ``TABLE``. This may indicate truncation of the file, or an
internal error in the tool that produced it. If ``verify`` is not
``'exception'``, parsing will proceed, with the loss of some performance.
**References:** `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#ToC10>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#ToC10>`__
"""
message_template = "TABLE specified nrows={}, but table contains {} rows"
default_args = ("x", "y")
| W18 |
python | dagster-io__dagster | python_modules/libraries/dagster-sling/dagster_sling/resources.py | {
"start": 1214,
"end": 1548
} | class ____(str, Enum):
"""The mode to use when syncing.
See the Sling docs for more information: https://docs.slingdata.io/sling-cli/run/configuration#modes
"""
INCREMENTAL = "incremental"
TRUNCATE = "truncate"
FULL_REFRESH = "full-refresh"
SNAPSHOT = "snapshot"
BACKFILL = "backfill"
@public
| SlingMode |
python | django__django | tests/utils_tests/test_autoreload.py | {
"start": 699,
"end": 6470
} | class ____(SimpleTestCase):
def import_and_cleanup(self, name):
import_module(name)
self.addCleanup(lambda: sys.path_importer_cache.clear())
self.addCleanup(lambda: sys.modules.pop(name, None))
def clear_autoreload_caches(self):
autoreload.iter_modules_and_files.cache_clear()
def assertFileFound(self, filename):
# Some temp directories are symlinks. Python resolves these fully while
# importing.
resolved_filename = filename.resolve(strict=True)
self.clear_autoreload_caches()
# Test uncached access
self.assertIn(
resolved_filename, list(autoreload.iter_all_python_module_files())
)
# Test cached access
self.assertIn(
resolved_filename, list(autoreload.iter_all_python_module_files())
)
self.assertEqual(autoreload.iter_modules_and_files.cache_info().hits, 1)
def assertFileNotFound(self, filename):
resolved_filename = filename.resolve(strict=True)
self.clear_autoreload_caches()
# Test uncached access
self.assertNotIn(
resolved_filename, list(autoreload.iter_all_python_module_files())
)
# Test cached access
self.assertNotIn(
resolved_filename, list(autoreload.iter_all_python_module_files())
)
self.assertEqual(autoreload.iter_modules_and_files.cache_info().hits, 1)
def temporary_file(self, filename):
dirname = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, dirname)
return Path(dirname) / filename
def test_paths_are_pathlib_instances(self):
for filename in autoreload.iter_all_python_module_files():
self.assertIsInstance(filename, Path)
def test_file_added(self):
"""
When a file is added, it's returned by iter_all_python_module_files().
"""
filename = self.temporary_file("test_deleted_removed_module.py")
filename.touch()
with extend_sys_path(str(filename.parent)):
self.import_and_cleanup("test_deleted_removed_module")
self.assertFileFound(filename.absolute())
def test_check_errors(self):
"""
When a file containing an error is imported in a function wrapped by
check_errors(), gen_filenames() returns it.
"""
filename = self.temporary_file("test_syntax_error.py")
filename.write_text("Ceci n'est pas du Python.")
with extend_sys_path(str(filename.parent)):
try:
with self.assertRaises(SyntaxError):
autoreload.check_errors(import_module)("test_syntax_error")
finally:
autoreload._exception = None
self.assertFileFound(filename)
def test_check_errors_catches_all_exceptions(self):
"""
Since Python may raise arbitrary exceptions when importing code,
check_errors() must catch Exception, not just some subclasses.
"""
filename = self.temporary_file("test_exception.py")
filename.write_text("raise Exception")
with extend_sys_path(str(filename.parent)):
try:
with self.assertRaises(Exception):
autoreload.check_errors(import_module)("test_exception")
finally:
autoreload._exception = None
self.assertFileFound(filename)
def test_zip_reload(self):
"""
Modules imported from zipped files have their archive location included
in the result.
"""
zip_file = self.temporary_file("zip_import.zip")
with zipfile.ZipFile(str(zip_file), "w", zipfile.ZIP_DEFLATED) as zipf:
zipf.writestr("test_zipped_file.py", "")
with extend_sys_path(str(zip_file)):
self.import_and_cleanup("test_zipped_file")
self.assertFileFound(zip_file)
def test_bytecode_conversion_to_source(self):
""".pyc and .pyo files are included in the files list."""
filename = self.temporary_file("test_compiled.py")
filename.touch()
compiled_file = Path(
py_compile.compile(str(filename), str(filename.with_suffix(".pyc")))
)
filename.unlink()
with extend_sys_path(str(compiled_file.parent)):
self.import_and_cleanup("test_compiled")
self.assertFileFound(compiled_file)
def test_weakref_in_sys_module(self):
"""iter_all_python_module_file() ignores weakref modules."""
time_proxy = weakref.proxy(time)
sys.modules["time_proxy"] = time_proxy
self.addCleanup(lambda: sys.modules.pop("time_proxy", None))
list(autoreload.iter_all_python_module_files()) # No crash.
def test_module_without_spec(self):
module = types.ModuleType("test_module")
del module.__spec__
self.assertEqual(
autoreload.iter_modules_and_files((module,), frozenset()), frozenset()
)
def test_main_module_is_resolved(self):
main_module = sys.modules["__main__"]
self.assertFileFound(Path(main_module.__file__))
def test_main_module_without_file_is_not_resolved(self):
fake_main = types.ModuleType("__main__")
self.assertEqual(
autoreload.iter_modules_and_files((fake_main,), frozenset()), frozenset()
)
def test_path_with_embedded_null_bytes(self):
for path in (
"embedded_null_byte\x00.py",
"di\x00rectory/embedded_null_byte.py",
):
with self.subTest(path=path):
self.assertEqual(
autoreload.iter_modules_and_files((), frozenset([path])),
frozenset(),
)
| TestIterModulesAndFiles |
python | ipython__ipython | tests/test_formatters.py | {
"start": 391,
"end": 455
} | class ____(object):
def __repr__(self):
return "A()"
| A |
python | astropy__astropy | astropy/nddata/mixins/ndio.py | {
"start": 3395,
"end": 3689
} | class ____:
"""
Mixin class to connect NDData to the astropy input/output registry.
This mixin adds two methods to its subclasses, ``read`` and ``write``.
"""
read = registry.UnifiedReadWriteMethod(NDDataRead)
write = registry.UnifiedReadWriteMethod(NDDataWrite)
| NDIOMixin |
python | django__django | tests/postgres_tests/models.py | {
"start": 5921,
"end": 5995
} | class ____(models.Model):
number = models.IntegerField(unique=True)
| Room |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 216948,
"end": 217946
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"annotation_level",
"blob_url",
"database_id",
"location",
"message",
"path",
"raw_details",
"title",
)
annotation_level = sgqlc.types.Field(
CheckAnnotationLevel, graphql_name="annotationLevel"
)
blob_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="blobUrl")
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
location = sgqlc.types.Field(
sgqlc.types.non_null("CheckAnnotationSpan"), graphql_name="location"
)
message = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="message")
path = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="path")
raw_details = sgqlc.types.Field(String, graphql_name="rawDetails")
title = sgqlc.types.Field(String, graphql_name="title")
| CheckAnnotation |
python | huggingface__transformers | src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py | {
"start": 1177,
"end": 19974
} | class ____(TokenizersBackend):
"""
Construct a SeamlessM4T tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=bpe#models).
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
The tokenization method is `<language code> <tokens> <eos>` for source language documents, and `<eos> <language
code> <tokens> <eos>` for target language documents.
Examples:
```python
>>> from transformers import SeamlessM4TTokenizer
>>> tokenizer = SeamlessM4TTokenizer.from_pretrained(
... "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra"
... )
>>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
>>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie."
>>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt")
```
Args:
vocab (`list` or `dict`, *optional*):
List of (token, score) tuples or dict mapping tokens to indices. If not provided, uses default vocab.
merges (`list`, *optional*):
List of merge rules for BPE model. If not provided, uses empty list.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
src_lang (`str`, *optional*, defaults to `"eng"`):
The language to use as source language for translation.
tgt_lang (`str`, *optional*, defaults to `"fra"`):
The language to use as target language for translation.
additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
A tuple or a list of additional special tokens.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
prefix_tokens: list[int] = []
suffix_tokens: list[int] = []
def __init__(
self,
vocab: Optional[list] = None,
merges: Optional[list] = None,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
src_lang="eng",
tgt_lang="fra",
additional_special_tokens=None,
keep_accents=None,
vocab_file=None,
**kwargs,
):
if vocab is None:
vocab = {
str(pad_token): 0,
str(unk_token): 1,
str(bos_token): 2,
str(eos_token): 3,
}
# Process vocab - SeamlessM4T uses fairseq vocab alignment: <pad>=0, <unk>=1, <s>=2, </s>=3, then SPM pieces[3:]
if isinstance(vocab, list):
# Convert list of (token, score) tuples to dict {token: idx}
# Check if vocab is already in SeamlessM4T order (pad, unk, s, /s) or tokenizer.json order (unk, s, /s, ...)
first_tokens = [str(item[0]) if isinstance(item, (list, tuple)) else str(item) for item in vocab[:4]]
is_seamless_order = (
len(first_tokens) >= 4
and first_tokens[0] == str(pad_token)
and first_tokens[1] == str(unk_token)
and first_tokens[2] == str(bos_token)
and first_tokens[3] == str(eos_token)
)
if is_seamless_order:
# Already in correct order, use list index directly as token ID
vocab_dict = {}
for idx, item in enumerate(vocab):
token = str(item[0]) if isinstance(item, (list, tuple)) else str(item)
vocab_dict[token] = idx
self._vocab = vocab_dict
else:
# Reorder to fairseq: <pad>, <unk>, <s>, </s>, ... (rest of vocab)
vocab_dict = {}
vocab_dict[str(pad_token)] = 0
vocab_dict[str(unk_token)] = 1
vocab_dict[str(bos_token)] = 2
vocab_dict[str(eos_token)] = 3
# Add rest of vocab starting from index 4, skipping tokens we already added
idx = 4
for item in vocab:
token = str(item[0]) if isinstance(item, (list, tuple)) else str(item)
if token not in vocab_dict:
vocab_dict[token] = idx
idx += 1
self._vocab = vocab_dict
else:
self._vocab = vocab
if merges is None:
self._merges = []
else:
self._merges = [tuple(merge) if isinstance(merge, list) else merge for merge in merges]
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
unk_token=str(unk_token),
fuse_unk=True,
byte_fallback=False,
)
)
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Replace(Regex(r"[\n\r\t]"), " "),
normalizers.NFKC(),
normalizers.Strip(left=False, right=True),
normalizers.Replace(Regex(r" +▁"), "▁"),
normalizers.Replace(Regex(r"^▁+$"), ""),
normalizers.Replace(Regex(r" {2,}"), "▁"),
]
)
self._tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", prepend_scheme="first", split=True)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme="first", split=True)
if "__" not in src_lang:
src_lang = f"__{src_lang}__"
if "__" not in tgt_lang:
tgt_lang = f"__{tgt_lang}__"
# V5: Convert additional_special_tokens parameter to extra_special_tokens for backward compatibility
# PreTrainedTokenizerBase.__init__() will handle the conversion, but we need to pass it via kwargs
if additional_special_tokens is not None:
kwargs.setdefault("additional_special_tokens", additional_special_tokens)
super().__init__(
tokenizer_object=self._tokenizer,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
src_lang=src_lang,
tgt_lang=tgt_lang,
keep_accents=keep_accents,
vocab_file=vocab_file,
**kwargs,
)
# Build fairseq mappings
self.fairseq_offset = 1
self.fairseq_tokens_to_ids = {
"<pad>": 0,
"<unk>": 1,
"<s>": 2,
"</s>": 3,
}
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
self._src_lang = src_lang
self._tgt_lang = tgt_lang
self.set_tgt_lang_special_tokens(self._tgt_lang)
@property
def src_lang(self) -> str:
return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
if "__" not in new_src_lang:
self._src_lang = f"__{new_src_lang}__"
else:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def tgt_lang(self) -> str:
return self._tgt_lang
@tgt_lang.setter
def tgt_lang(self, new_tgt_lang: str) -> None:
if "__" not in new_tgt_lang:
self._tgt_lang = f"__{new_tgt_lang}__"
else:
self._tgt_lang = new_tgt_lang
self.set_tgt_lang_special_tokens(self._tgt_lang)
def _build_translation_inputs(
self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
):
"""Used by translation pipeline, to prepare inputs for the generate function"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
self.src_lang = src_lang
inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
if "__" not in tgt_lang:
tgt_lang = f"__{tgt_lang}__"
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def prepare_seq2seq_batch(
self,
src_texts: list[str],
src_lang: str = "eng",
tgt_texts: Optional[list[str]] = None,
tgt_lang: str = "fra",
max_length: Optional[int] = None,
max_target_length: Optional[int] = None,
padding: str = "longest",
return_tensors: Optional[str] = None,
truncation: bool = True,
**kwargs,
) -> BatchEncoding:
self.src_lang = src_lang
self.tgt_lang = tgt_lang
if max_length is None:
max_length = self.model_max_length
model_inputs = self(
src_texts,
add_special_tokens=True,
return_tensors=return_tensors,
max_length=max_length,
padding=padding,
truncation=truncation,
**kwargs,
)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
max_target_length = max_length
self._switch_to_target_mode()
labels = self(
tgt_texts,
add_special_tokens=True,
return_tensors=return_tensors,
padding=padding,
max_length=max_target_length,
truncation=truncation,
**kwargs,
)
model_inputs["labels"] = labels["input_ids"]
self._switch_to_input_mode()
return model_inputs
def _switch_to_input_mode(self):
return self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
"""Reset the special tokens to the source lang setting.
Prefix=[src_lang_code], suffix = [eos]
"""
self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
if self.cur_lang_code == self.unk_token_id:
logger.warning_once(
f"`src_lang={src_lang}` has not be found in the `vocabulary`. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id."
)
self.prefix_tokens = [self.cur_lang_code]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
)
def set_tgt_lang_special_tokens(self, lang: str) -> None:
"""Reset the special tokens to the target lang setting.
Prefix=[eos, tgt_lang_code] and suffix=[eos].
"""
self.cur_lang_code = self.convert_tokens_to_ids(lang)
if self.cur_lang_code == self.unk_token_id:
logger.warning_once(
f"`tgt_lang={lang}` has not be found in the `vocabulary`. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id."
)
self.prefix_tokens = [self.eos_token_id, self.cur_lang_code]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
)
def __call__(
self,
text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
text_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
text_pair_target: Optional[
Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]
] = None,
padding: Union[bool, str, PaddingStrategy] = False,
pad_to_multiple_of: Optional[int] = None,
src_lang: Optional[str] = None,
tgt_lang: Optional[str] = None,
**kwargs,
):
"""
Args:
text (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
text_pair_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
pad_to_multiple_of (`int`, *optional*, defaults to `None`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
src_lang (`str`, *optional*):
A string representing the source language. If not specified, the last `src_lang` specified (either
during initialization or when calling this tokenizer) will be used.
tgt_lang (`str`, *optional*):
A string representing the target language. If not specified, the last `tgt_lang` specified (either
during initialization or when calling this tokenizer) will be used.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to [`TokenizersBackend.__call__`].
"""
if src_lang is not None:
self.src_lang = src_lang
if tgt_lang is not None:
self.tgt_lang = tgt_lang
output = super().__call__(
text=text,
text_pair=text_pair,
text_target=text_target,
text_pair_target=text_pair_target,
padding=padding,
pad_to_multiple_of=pad_to_multiple_of,
**kwargs,
)
return output
__all__ = ["SeamlessM4TTokenizer"]
| SeamlessM4TTokenizer |
python | django__django | tests/m2m_through/tests.py | {
"start": 15890,
"end": 21536
} | class ____(TestCase):
def test_self_referential_empty_qs(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
self.assertQuerySetEqual(tony.friends.all(), [])
def test_self_referential_non_symmetrical_first_side(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
self.assertQuerySetEqual(tony.friends.all(), ["Chris"], attrgetter("name"))
def test_self_referential_non_symmetrical_second_side(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
self.assertQuerySetEqual(chris.friends.all(), [])
def test_self_referential_non_symmetrical_clear_first_side(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
chris.friends.clear()
self.assertQuerySetEqual(chris.friends.all(), [])
# Since this isn't a symmetrical relation, Tony's friend link still
# exists.
self.assertQuerySetEqual(tony.friends.all(), ["Chris"], attrgetter("name"))
def test_self_referential_non_symmetrical_both(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
Friendship.objects.create(
first=tony, second=chris, date_friended=datetime.now()
)
Friendship.objects.create(
first=chris, second=tony, date_friended=datetime.now()
)
self.assertQuerySetEqual(tony.friends.all(), ["Chris"], attrgetter("name"))
self.assertQuerySetEqual(chris.friends.all(), ["Tony"], attrgetter("name"))
def test_through_fields_self_referential(self):
john = Employee.objects.create(name="john")
peter = Employee.objects.create(name="peter")
mary = Employee.objects.create(name="mary")
harry = Employee.objects.create(name="harry")
Relationship.objects.create(source=john, target=peter, another=None)
Relationship.objects.create(source=john, target=mary, another=None)
Relationship.objects.create(source=john, target=harry, another=peter)
self.assertQuerySetEqual(
john.subordinates.all(), ["peter", "mary", "harry"], attrgetter("name")
)
def test_self_referential_symmetrical(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
SymmetricalFriendship.objects.create(
first=tony,
second=chris,
date_friended=date.today(),
)
self.assertSequenceEqual(tony.sym_friends.all(), [chris])
# Manually created symmetrical m2m relation doesn't add mirror entry
# automatically.
self.assertSequenceEqual(chris.sym_friends.all(), [])
SymmetricalFriendship.objects.create(
first=chris, second=tony, date_friended=date.today()
)
self.assertSequenceEqual(chris.sym_friends.all(), [tony])
def test_add_on_symmetrical_m2m_with_intermediate_model(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
date_friended = date(2017, 1, 3)
tony.sym_friends.add(chris, through_defaults={"date_friended": date_friended})
self.assertSequenceEqual(tony.sym_friends.all(), [chris])
self.assertSequenceEqual(chris.sym_friends.all(), [tony])
friendship = tony.symmetricalfriendship_set.get()
self.assertEqual(friendship.date_friended, date_friended)
def test_set_on_symmetrical_m2m_with_intermediate_model(self):
tony = PersonSelfRefM2M.objects.create(name="Tony")
chris = PersonSelfRefM2M.objects.create(name="Chris")
anne = PersonSelfRefM2M.objects.create(name="Anne")
kate = PersonSelfRefM2M.objects.create(name="Kate")
date_friended_add = date(2013, 1, 5)
date_friended_set = date.today()
tony.sym_friends.add(
anne,
chris,
through_defaults={"date_friended": date_friended_add},
)
tony.sym_friends.set(
[anne, kate],
through_defaults={"date_friended": date_friended_set},
)
self.assertSequenceEqual(tony.sym_friends.all(), [anne, kate])
self.assertSequenceEqual(anne.sym_friends.all(), [tony])
self.assertSequenceEqual(kate.sym_friends.all(), [tony])
self.assertEqual(
kate.symmetricalfriendship_set.get().date_friended,
date_friended_set,
)
# Date is preserved.
self.assertEqual(
anne.symmetricalfriendship_set.get().date_friended,
date_friended_add,
)
# Recreate relationship.
tony.sym_friends.set(
[anne],
clear=True,
through_defaults={"date_friended": date_friended_set},
)
self.assertSequenceEqual(tony.sym_friends.all(), [anne])
self.assertSequenceEqual(anne.sym_friends.all(), [tony])
self.assertEqual(
anne.symmetricalfriendship_set.get().date_friended,
date_friended_set,
)
| M2mThroughReferentialTests |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_quant.py | {
"start": 851,
"end": 3769
} | class ____(TestCase):
backend = ""
def test_simple(self):
def gn(x, y):
return (torch.mul(x, y) + y,)
def fn(x, y):
return invoke_quant_tracer(
gn, x, y, scheme="nf4", quant_options=invoke_quant_tracer
)[0]
x = torch.randn(8, requires_grad=False)
y = torch.randn(8, requires_grad=False)
ref = gn(x, y)[0]
x_clone = x.clone().detach().requires_grad_(False)
y_clone = y.clone().detach().requires_grad_(False)
res = torch.compile(fn, backend=self.backend)(x_clone, y_clone)
self.assertEqual(ref, res)
def test_construct_inline(self):
def gn(x, y):
return (torch.mul(x, y) + y,)
def fn(x, y):
return InvokeQuant(codegen_low_precision=False)(gn, x, y, scheme="nf4")[0]
x = torch.randn(8, requires_grad=False)
y = torch.randn(8, requires_grad=False)
ref = gn(x, y)[0]
x_clone = x.clone().detach().requires_grad_(False)
y_clone = y.clone().detach().requires_grad_(False)
res = torch.compile(fn, backend=self.backend)(x_clone, y_clone)
self.assertEqual(ref, res)
def test_inline(self):
def gn(x, y):
return (torch.mul(x, y) + y,)
def fn(x, y):
return InvokeQuant()(gn, x, y, scheme="nf4")[0]
x = torch.randn(8, requires_grad=False)
y = torch.randn(8, requires_grad=False)
ref = gn(x, y)[0]
x_clone = x.clone().detach().requires_grad_(False)
y_clone = y.clone().detach().requires_grad_(False)
res = torch.compile(fn, backend=self.backend)(x_clone, y_clone)
self.assertEqual(ref, res)
def test_multiple(self):
torch._logging.set_logs(post_grad_graphs=True)
def gn(x, y):
return torch.mul(x, y) + y
def fn(x, y, z):
o1 = invoke_quant_tracer(gn, x, y, scheme="nf4")
o2 = invoke_quant_tracer(gn, y, z, scheme="nf4")
return o1 + o2
x = torch.randn(8, requires_grad=False)
y = torch.randn(8, requires_grad=False)
z = torch.randn(8, requires_grad=False)
ref = fn(x, y, z)
log_context = (
contextlib.nullcontext()
if self.backend != "inductor"
else self.assertLogs(logger="torch._inductor", level=logging.DEBUG)
)
with log_context as log:
res = torch.compile(fn, backend=self.backend)(x, y, z)
self.assertEqual(ref, res)
if self.backend == "inductor":
logs = "\n".join(r.getMessage() for r in log.records)
f = FileCheck()
f.check("AFTER POST GRAD")
f.check("subgraph0").check("subgraph1")
for _ in range(2):
f.check("torch.ops.higher_order.invoke_quant(").check_same("nf4")
f.run(logs)
| TestInvokeQuant |
python | pytorch__pytorch | test/export/test_schema.py | {
"start": 246,
"end": 13554
} | class ____(TestCase):
def test_schema_compatibility(self):
msg = """
Detected an invalidated change to export schema. Please run the following script to update the schema:
Example(s):
python scripts/export/update_schema.py --prefix <path_to_torch_development_directory>
"""
if IS_FBCODE:
msg += """or
buck run caffe2:export_update_schema -- --prefix /data/users/$USER/fbsource/fbcode/caffe2/
"""
try:
commit = update_schema()
except SchemaUpdateError as e:
self.fail(f"Failed to update schema: {e}\n{msg}")
self.assertEqual(commit.checksum_head, commit.checksum_next, msg)
def test_thrift_schema_unchanged(self):
msg = """
Detected an unexpected change to schema.thrift. Please update schema.py instead and run the following script:
Example(s):
python scripts/export/update_schema.py --prefix <path_to_torch_development_directory>
"""
if IS_FBCODE:
msg += """or
buck run caffe2:export_update_schema -- --prefix /data/users/$USER/fbsource/fbcode/caffe2/
"""
try:
commit = update_schema()
except SchemaUpdateError as e:
self.fail(f"Failed to update schema: {e}\n{msg}")
self.assertEqual(commit.thrift_checksum_head, commit.thrift_checksum_real, msg)
self.assertEqual(commit.thrift_checksum_head, commit.thrift_checksum_next, msg)
def test_schema_diff(self):
additions, subtractions = _diff_schema(
{
"Type0": {"kind": "struct", "fields": {}},
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": ""},
"field2": {"type": ""},
"field3": {"type": "", "default": "[]"},
},
},
},
{
"Type2": {
"kind": "struct",
"fields": {
"field1": {"type": "", "default": "0"},
"field2": {"type": "", "default": "[]"},
"field3": {"type": ""},
},
},
"Type1": {"kind": "struct", "fields": {}},
},
)
self.assertEqual(
additions,
{
"Type1": {"kind": "struct", "fields": {}},
"Type2": {
"fields": {
"field1": {"type": "", "default": "0"},
"field2": {"default": "[]"},
},
},
},
)
self.assertEqual(
subtractions,
{
"Type0": {"kind": "struct", "fields": {}},
"Type2": {
"fields": {
"field0": {"type": ""},
"field3": {"default": "[]"},
},
},
},
)
def test_schema_check(self):
# Adding field without default value
dst = {
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": ""},
},
},
"SCHEMA_VERSION": [3, 2],
}
src = {
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": ""},
"field1": {"type": ""},
},
},
"SCHEMA_VERSION": [3, 2],
}
additions, subtractions = _diff_schema(dst, src)
commit = _Commit(
result=src,
checksum_next="",
yaml_path="",
additions=additions,
subtractions=subtractions,
base=dst,
checksum_head="",
cpp_header="",
cpp_header_path="",
thrift_checksum_head="",
thrift_checksum_real="",
thrift_checksum_next="",
thrift_schema="",
thrift_schema_path="",
)
next_version, _ = check(commit)
self.assertEqual(next_version, [4, 1])
# Removing field
dst = {
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": ""},
},
},
"SCHEMA_VERSION": [3, 2],
}
src = {
"Type2": {
"kind": "struct",
"fields": {},
},
"SCHEMA_VERSION": [3, 2],
}
additions, subtractions = _diff_schema(dst, src)
commit = _Commit(
result=src,
checksum_next="",
yaml_path="",
additions=additions,
subtractions=subtractions,
base=dst,
checksum_head="",
cpp_header="",
cpp_header_path="",
thrift_checksum_head="",
thrift_checksum_real="",
thrift_checksum_next="",
thrift_schema="",
thrift_schema_path="",
)
next_version, _ = check(commit)
self.assertEqual(next_version, [4, 1])
# Adding field with default value
dst = {
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": ""},
},
},
"SCHEMA_VERSION": [3, 2],
}
src = {
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": ""},
"field1": {"type": "", "default": "[]"},
},
},
"SCHEMA_VERSION": [3, 2],
}
additions, subtractions = _diff_schema(dst, src)
commit = _Commit(
result=src,
checksum_next="",
yaml_path="",
additions=additions,
subtractions=subtractions,
base=dst,
checksum_head="",
cpp_header="",
cpp_header_path="",
thrift_checksum_head="",
thrift_checksum_real="",
thrift_checksum_next="",
thrift_schema="",
thrift_schema_path="",
)
next_version, _ = check(commit)
self.assertEqual(next_version, [3, 3])
# Changing field type
dst = {
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": ""},
},
},
"SCHEMA_VERSION": [3, 2],
}
src = {
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": "int"},
},
},
"SCHEMA_VERSION": [3, 2],
}
with self.assertRaises(SchemaUpdateError):
_diff_schema(dst, src)
# Adding new type.
dst = {
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": ""},
},
},
"SCHEMA_VERSION": [3, 2],
}
src = {
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": ""},
},
},
"Type1": {"kind": "struct", "fields": {}},
"SCHEMA_VERSION": [3, 2],
}
additions, subtractions = _diff_schema(dst, src)
commit = _Commit(
result=src,
checksum_next="",
yaml_path="",
additions=additions,
subtractions=subtractions,
base=dst,
checksum_head="",
cpp_header="",
cpp_header_path="",
thrift_checksum_head="",
thrift_checksum_real="",
thrift_checksum_next="",
thrift_schema="",
thrift_schema_path="",
)
next_version, _ = check(commit)
self.assertEqual(next_version, [3, 3])
# Removing a type.
dst = {
"Type2": {
"kind": "struct",
"fields": {
"field0": {"type": ""},
},
},
"SCHEMA_VERSION": [3, 2],
}
src = {
"SCHEMA_VERSION": [3, 2],
}
additions, subtractions = _diff_schema(dst, src)
commit = _Commit(
result=src,
checksum_next="",
yaml_path="",
additions=additions,
subtractions=subtractions,
base=dst,
checksum_head="",
cpp_header="",
cpp_header_path="",
thrift_checksum_head="",
thrift_checksum_real="",
thrift_checksum_next="",
thrift_schema="",
thrift_schema_path="",
)
next_version, _ = check(commit)
self.assertEqual(next_version, [3, 3])
# Adding new field in union.
dst = {
"Type2": {
"kind": "union",
"fields": {
"field0": {"type": ""},
},
},
"SCHEMA_VERSION": [3, 2],
}
src = {
"Type2": {
"kind": "union",
"fields": {
"field0": {"type": ""},
"field1": {"type": ""},
},
},
"SCHEMA_VERSION": [3, 2],
}
additions, subtractions = _diff_schema(dst, src)
commit = _Commit(
result=src,
checksum_next="",
yaml_path="",
additions=additions,
subtractions=subtractions,
base=dst,
checksum_head="",
cpp_header="",
cpp_header_path="",
thrift_checksum_head="",
thrift_checksum_real="",
thrift_checksum_next="",
thrift_schema="",
thrift_schema_path="",
)
next_version, _ = check(commit)
self.assertEqual(next_version, [3, 3])
# Removing a field in union.
dst = {
"Type2": {
"kind": "union",
"fields": {
"field0": {"type": ""},
},
},
"SCHEMA_VERSION": [3, 2],
}
src = {
"Type2": {
"kind": "union",
"fields": {},
},
"SCHEMA_VERSION": [3, 2],
}
additions, subtractions = _diff_schema(dst, src)
commit = _Commit(
result=src,
checksum_next="",
yaml_path="",
additions=additions,
subtractions=subtractions,
base=dst,
checksum_head="",
cpp_header="",
cpp_header_path="",
thrift_checksum_head="",
thrift_checksum_real="",
thrift_checksum_next="",
thrift_schema="",
thrift_schema_path="",
)
next_version, _ = check(commit)
self.assertEqual(next_version, [4, 1])
def test_schema_comparison(self):
import torch._export.serde.schema as schema
sig = schema.ModuleCallSignature(
inputs=[
schema.Argument.create(as_none=True),
schema.Argument.create(
as_sym_int=schema.SymIntArgument.create(as_name="s0")
),
],
outputs=[
schema.Argument.create(
as_sym_int=schema.SymIntArgument.create(as_name="s1")
)
],
in_spec="foo",
out_spec="bar",
forward_arg_names=["None", "symint"],
)
# same content as sig
sig_same = schema.ModuleCallSignature(
inputs=[
schema.Argument.create(as_none=True),
schema.Argument.create(
as_sym_int=schema.SymIntArgument.create(as_name="s0")
),
],
outputs=[
schema.Argument.create(
as_sym_int=schema.SymIntArgument.create(as_name="s1")
)
],
in_spec="foo",
out_spec="bar",
forward_arg_names=["None", "symint"],
)
# as_name of symint is different
sig_diff = schema.ModuleCallSignature(
inputs=[
schema.Argument.create(as_none=True),
schema.Argument.create(
as_sym_int=schema.SymIntArgument.create(as_name="s0")
),
],
outputs=[
schema.Argument.create(
as_sym_int=schema.SymIntArgument.create(as_name="s2")
)
],
in_spec="foo",
out_spec="bar",
forward_arg_names=["None", "symint"],
)
self.assertEqual(sig, sig_same)
self.assertNotEqual(sig, sig_diff)
if __name__ == "__main__":
run_tests()
| TestSchema |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_test.py | {
"start": 9315,
"end": 68679
} | class ____(TestModels, parameterized.TestCase):
def testFloatModel(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatModelQuantizedInput(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_input_type = dtypes.uint8
converter.inference_type = dtypes.float32
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization']) # float
def testForgottenCallToAllocateTensors(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
input_index = interpreter.get_input_details()[0]['index']
dummy_tensor = np.ones(shape=[1, 16, 16, 3], dtype=np.float32)
with self.assertRaises(ValueError):
interpreter.set_tensor(input_index, dummy_tensor)
@parameterized.named_parameters(
('_INT8InputOutput', False, False, dtypes.int8),
('_UINT8InputOutput', False, False, dtypes.uint8),
('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16),
('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True),
('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True))
def testIntegerQuantizationWithUnsupportedOps(self,
is_int_only,
is_int16_quantize,
inference_input_output_type,
enable_mlir_quantizer=False):
with ops.Graph().as_default():
in_tensor_a = array_ops.placeholder(shape=[3], dtype=dtypes.float32)
in_tensor_b = array_ops.placeholder(shape=[3], dtype=dtypes.float32)
# ceil kernel does not support int8 nor int16 types neither.
left = math_ops.ceil(in_tensor_a)
out_tensor_b = math_ops.tanh(in_tensor_b)
add = math_ops.add(left, out_tensor_b)
# ceil kernel does not support int8 nor int16 types neither.
out_tensor_a = math_ops.ceil(add)
sess = session.Session()
def calibration_gen():
for _ in range(5):
yield [
np.random.uniform(-1, 1, size=(3)).astype(np.float32),
np.random.uniform(-1, 1, size=(3)).astype(np.float32)
]
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_a, in_tensor_b], [out_tensor_a, out_tensor_b])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
if is_int_only:
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet
.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
lite.OpsSet.TFLITE_BUILTINS
]
else:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.TFLITE_BUILTINS
]
else:
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet
.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
lite.OpsSet.TFLITE_BUILTINS
]
else:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS
]
quantized_converter.inference_input_type = inference_input_output_type
quantized_converter.inference_output_type = inference_input_output_type
quantized_converter.experimental_new_quantizer = enable_mlir_quantizer
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
expected_dtype = inference_input_output_type.as_numpy_dtype
# Allow float32 for fallback on non-quantizable op.
expected_ceil_dtype = (
expected_dtype if enable_mlir_quantizer else dtypes.float32)
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertEqual(input_details[0]['dtype'], expected_ceil_dtype)
self.assertEqual(input_details[1]['dtype'], expected_dtype)
output_details = interpreter.get_output_details()
self.assertLen(output_details, 2)
self.assertEqual(output_details[0]['dtype'], expected_ceil_dtype)
self.assertEqual(output_details[1]['dtype'], expected_dtype)
@parameterized.named_parameters(
('_PerChannelQuant', False, False),
('_PerChannelMlirQuant', False, True),
('_PerTensorQuant', True, False),
('_PerTensorMlirQuant', True, True),
('_PerChannelMlirDynamicRangeQuant', False, False, False),
('_PerTensorMlirDynamicRangeQuant', True, False, False))
def testDisablePerChannelQuantization(self,
disable_per_channel=False,
enable_mlir_quantizer=False,
representative_dataset=True):
if enable_mlir_quantizer:
if disable_per_channel:
k_conv_name = 'output1'
else:
k_conv_name = 'Conv2D1'
else:
k_conv_name = 'Conv2D1'
# Dynamic range quant requires total num elements of filters > 1024.
k_num_filters = 38
with ops.Graph().as_default():
inp, output, calibration_gen = self._getIntegerQuantizeModel(
k_num_filters)
sess = session.Session()
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
if representative_dataset:
quantized_converter.representative_dataset = calibration_gen
quantized_converter.experimental_new_quantizer = enable_mlir_quantizer
if disable_per_channel:
quantized_converter._experimental_disable_per_channel = (
disable_per_channel)
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
# Do not apply delegates as XNNPack converts per tensor to per channel.
interp = Interpreter(
model_content=quantized_tflite_model,
experimental_op_resolver_type=OpResolverType.BUILTIN_WITHOUT_DEFAULT_DELEGATES,
)
interp.allocate_tensors()
detail = next(
(d for d in interp.get_tensor_details() if d['name'] == k_conv_name)
)
quant_params = detail['quantization_parameters']
expected_num_params = 1 if disable_per_channel else k_num_filters
self.assertLen(quant_params['scales'], expected_num_params)
if len(quant_params['zero_points']) != 1:
self.assertLen(quant_params['zero_points'], expected_num_params)
def testString(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.bytes_, input_details[0]['dtype'])
self.assertAllEqual([4], input_details[0]['shape'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('Reshape', output_details[0]['name'])
self.assertEqual(np.bytes_, output_details[0]['dtype'])
self.assertAllEqual([2, 2], output_details[0]['shape'])
# TODO(b/122659643): Test setting/getting string data via the python
# interpreter API after support has been added.
def testIntermediateInputArray(self):
"""Convert a model from an intermediate input array."""
with ops.Graph().as_default():
in_tensor_init = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
in_tensor_final = in_tensor_init + in_tensor_init
out_tensor = in_tensor_final + in_tensor_final
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor_final],
[out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('add', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('add_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSizeNoneInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test None as shape when dynamic shapes are disabled. Run with TOCO in
# order to invoke shape checking code.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_new_converter = False
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual('Provide an input shape for input array \'Placeholder\'.',
str(error.exception))
def testScalarValid(self):
# Construct a graph using a scalar (empty shape) input.
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[])
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test conversion with the scalar input shape.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertEmpty(input_details[0]['shape'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertEmpty(input_details[0]['shape'])
# Validate inference using the scalar inputs/outputs.
test_input = np.array(4.0, dtype=np.float32)
expected_output = np.array(8.0, dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertEqual(expected_output, output_data)
def testSizeInvalid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension. Run with TOCO in order to
# invoke shape checking code.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.experimental_new_converter = False
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'Placeholder\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
def testSizeNone(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test None after 1st dimension.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 1, 16, 3], input_details[0]['shape'])
self.assertAllEqual([1, -1, 16, 3], input_details[0]['shape_signature'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
# Resize tensor with strict checking.
with self.assertRaises(RuntimeError) as error:
interpreter.resize_tensor_input(0, [3, 16, 16, 3], strict=True)
self.assertIn(
'ResizeInputTensorStrict only allows mutating unknown dimensions '
'identified by -1.', str(error.exception))
# Resize tensor and invoke.
interpreter.resize_tensor_input(0, [1, 16, 16, 3], strict=True)
interpreter.allocate_tensors()
test_input = np.full([1, 16, 16, 3], 1.0, dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertAllEqual([1, -1, 16, 3], input_details[0]['shape_signature'])
output_details = interpreter.get_output_details()
self.assertAllEqual([1, -1, 16, 3], output_details[0]['shape_signature'])
def testResizeTensorInputStrict(self):
# Ensures that resize_tensor_input(strict=True) works as expected.
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
# Resize incorrect value.
with self.assertRaises(RuntimeError) as error:
interpreter.resize_tensor_input(0, [3, 16, 16, 3], strict=True)
self.assertIn(
'ResizeInputTensorStrict only allows mutating unknown dimensions '
'identified by -1.', str(error.exception))
# Resize correct value.
interpreter.resize_tensor_input(0, [1, 16, 16, 3], strict=True)
interpreter.allocate_tensors()
def testBatchSizeValid(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testBatchSizeNonZero(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[None, 4], dtype=dtypes.float32, name='input1')
in_tensor_2 = array_ops.placeholder(
shape=[4, 10], dtype=dtypes.float32, name='input2')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2)
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertEqual('input1', input_details[0]['name'])
self.assertAllEqual([1, 4], input_details[0]['shape'])
self.assertEqual('input2', input_details[1]['name'])
self.assertAllEqual([4, 10], input_details[1]['shape'])
def testFreezeGraph(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
# Get the second output to ensure freezing properly processes tensor names
# like 'X:1'.
out_tensor = nn_ops.top_k(in_tensor + var, name='top_k')[1]
sess = session.Session()
sess.run(_global_variables_initializer())
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('top_k:1', output_details[0]['name'])
self.assertEqual(np.int32, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 1], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testGraphviz(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.output_format = lite_constants.GRAPHVIZ_DOT
graphviz_output = converter.convert()
self.assertIsNotNone(graphviz_output)
def testDumpGraphviz(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Ensure interpreter is able to allocate and check graphviz data.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
num_items_graphviz = len(os.listdir(graphviz_dir))
self.assertIsNotNone(num_items_graphviz)
self.assertIsNotNone(
os.path.exists(os.path.join(graphviz_dir, 'toco_AT_IMPORT.dot')))
self.assertIsNotNone(
os.path.exists(
os.path.join(graphviz_dir, 'toco_AFTER_TRANSFORMATIONS.dot')))
def testQuantizeDynamicRange(self):
np.random.seed(0)
with ops.Graph().as_default():
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1],
[out_tensor])
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
# Convert quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
def testQuantizeDynamicRangeDeprecatedPostTrainingQuantizeAttribute(
self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
self.assertFalse(quantized_converter.post_training_quantize)
quantized_converter.post_training_quantize = True
self.assertTrue(quantized_converter.post_training_quantize)
self.assertEqual(quantized_converter.optimizations, [lite.Optimize.DEFAULT])
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
def _getIntegerQuantizeModel(self, num_filters=16):
np.random.seed(0)
inp = array_ops.placeholder(
dtype=dtypes.float32, shape=(1, 5, 5, 3), name='input')
conv = nn_ops.conv2d(
inp,
filter=array_ops.ones([3, 3, 3, num_filters]),
strides=[1, 1, 1, 1],
padding='SAME')
output = nn_ops.relu(conv, name='output')
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
return (inp, output, calibration_gen)
def testQuantizeInt8AllowFloat(self):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getIntegerQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
# Check the conversion metadata.
metadata = get_conversion_metadata(float_tflite_model)
self.assertIsNotNone(metadata)
self.assertEqual(
metadata.environment.tensorflowVersion.decode('utf-8'),
versions.__version__)
self.assertEqual(metadata.environment.apiVersion, 1)
self.assertEqual(metadata.environment.modelType,
metadata_fb.ModelType.TF_SESSION)
self.assertEqual(metadata.options.allowCustomOps, False)
self.assertEqual(metadata.options.enableSelectTfOps, False)
self.assertEqual(metadata.options.forceSelectTfOps, False)
self.assertAllEqual([], metadata.options.modelOptimizationModes)
# Convert quantized model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
# Check the conversion metadata.
metadata = get_conversion_metadata(quantized_tflite_model)
self.assertIsNotNone(metadata)
self.assertAllEqual([metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER],
metadata.options.modelOptimizationModes)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
@parameterized.named_parameters(
# Quantize model to Int8
('UseTfliteBuiltinsInt', [lite.OpsSet.TFLITE_BUILTINS_INT8],
[metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER]),
('UseTfliteBuiltinsInt16', [
lite.OpsSet
.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
], [metadata_fb.ModelOptimizationMode.PTQ_INT16]))
def testQuantizeInt8And16x8(self, supported_ops, expected_opt_modes):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getIntegerQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
# Convert model by specifying target spec (instead of optimizations), since
# when targeting an integer only backend, quantization is mandatory.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.target_spec.supported_ops = supported_ops
quantized_converter.representative_dataset = calibration_gen
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
# Check the conversion metadata.
metadata = get_conversion_metadata(quantized_tflite_model)
self.assertIsNotNone(metadata)
self.assertEqual(
metadata.environment.tensorflowVersion.decode('utf-8'),
versions.__version__)
self.assertEqual(metadata.environment.apiVersion, 1)
self.assertEqual(metadata.environment.modelType,
metadata_fb.ModelType.TF_SESSION)
self.assertEqual(metadata.options.allowCustomOps, False)
self.assertEqual(metadata.options.enableSelectTfOps, False)
self.assertEqual(metadata.options.forceSelectTfOps, False)
self.assertAllEqual(expected_opt_modes,
metadata.options.modelOptimizationModes)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
def testQuantizeInt8InputOutput(self):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getIntegerQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
# Convert quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.inference_input_type = dtypes.int8
quantized_converter.inference_output_type = dtypes.int8
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
# The input and output types should be int8.
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.int8, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.int8, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
def testInvalidQuantizeInt8(self):
np.random.seed(0)
with ops.Graph().as_default():
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Attempt to convert to quantized weights model.
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
# Restricting to int8 type only
quantized_converter.target_spec.supported_types = [dtypes.int8]
# A representative dataset is required for full fixed point quantization.
with self.assertRaises(ValueError) as error:
quantized_converter.convert()
self.assertEqual(
'For full integer quantization, a `representative_dataset` '
'must be specified.', str(error.exception))
def testQuantizeUInt8(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
converter.inference_type = dtypes.uint8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((1., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[1]['shape'])
self.assertEqual((1., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
self.assertGreater(output_details[0]['quantization'][0], 0) # scale
def testQuantizeUInt8UsingDefaultRangeStats(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_type = dtypes.uint8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
converter.default_ranges_stats = (0, 6) # min, max
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
self.assertGreater(output_details[0]['quantization'][0], 0) # scale
@parameterized.named_parameters(
# Quantize to Float16 even if rep data provided.
('UseRepresentativeData', True, False, True, False, False, False,
[metadata_fb.ModelOptimizationMode.PTQ_FLOAT16]),
# Quantize to Float16 if no rep data provided.
('NoRepresentativeData', False, False, True, False, False, False,
[metadata_fb.ModelOptimizationMode.PTQ_FLOAT16]),
# Post training quantization if both rep data and int8 included.
('SampleDataIncludeInt8', True, True, False, False, True, False,
[metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER]),
# Same as above, but using MLIR quantizer
('SampleDataIncludeInt8Quant', True, True, False, False, True, True,
[metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER]))
def testQuantizeFloat16(self, use_rep_data, include_int8,
is_float16_quantized, is_float16_accumulation,
is_post_training_quantized, enable_mlir_quantizer,
expected_opt_modes):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getIntegerQuantizeModel()
sess = session.Session()
bias_idx = 1
bias_name = 'Conv2D'
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
interpreter = Interpreter(model_content=float_tflite_model)
interpreter.allocate_tensors()
self.assertEqual(interpreter.get_tensor_details()[bias_idx]['name'],
bias_name)
self.assertEqual(interpreter.get_tensor_details()[bias_idx]['dtype'],
dtypes.float32)
# Convert model to quantized version
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.experimental_new_quantizer = enable_mlir_quantizer
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.target_spec.supported_types = [dtypes.float16]
if include_int8:
quantized_converter.target_spec.supported_types.append(dtypes.int8)
if use_rep_data:
quantized_converter.representative_dataset = calibration_gen
if is_float16_accumulation:
quantized_converter.target_spec.experimental_supported_accumulation_type = dtypes.float16 # pylint: disable=line-too-long
else:
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
metadata = get_conversion_metadata(quantized_tflite_model)
self.assertIsNotNone(metadata)
self.assertAllEqual(expected_opt_modes,
metadata.options.modelOptimizationModes)
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
# MLIR quantizer has different bias index.
bias_name = 'Conv2D'
bias_tensor = [
tensor for tensor in interpreter.get_tensor_details()
if tensor['name'] == bias_name
]
self.assertLen(bias_tensor, 1)
if is_float16_quantized:
# Verify that bias constant is float16 type.
self.assertEqual(bias_tensor[0]['dtype'], dtypes.float16)
elif is_post_training_quantized:
# Verify that bias constants is int32 type.
self.assertEqual(bias_tensor[0]['dtype'], dtypes.int32)
else:
raise ValueError('Invalid test options.')
def testInvalidQuantizeFloat16(self):
with ops.Graph().as_default():
inp, output, _ = self._getIntegerQuantizeModel()
sess = session.Session()
# Specify float16 quantization
quantized_converter = lite.TFLiteConverter.from_session(
sess, [inp], [output])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.target_spec.supported_types = [dtypes.float16]
# Specify only int8 builtin ops
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
with self.assertRaises(ValueError) as error:
quantized_converter.convert()
self.assertEqual(
'As full integer quantization has been enabled by setting '
'`target_spec.supported_ops`={tf.lite.OpsSet.TFLITE_BUILTINS_INT8}, '
'thus `target_spec.supported_types` should be left uninitizalized '
'or set to {tf.int8}.', str(error.exception))
@parameterized.named_parameters(('InferenceType_INT8', dtypes.int8),
('InferenceType_UINT8', dtypes.uint8))
def testInvalidQuantizeQATModelRequiresInputStats(self, quantized_type):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1.)
sess = session.Session()
quantized_converter = lite.TFLiteConverter.from_session(
sess, [in_tensor], [out_tensor])
with self.assertRaises(ValueError) as error:
quantized_converter.inference_type = quantized_type
quantized_converter.convert()
self.assertEqual(
'The `quantized_input_stats` flag must be defined when either '
'`inference_type` flag or `inference_input_type` flag is set to '
'tf.int8 or tf.uint8. Currently, `inference_type=tf.{}` and '
'`inference_input_type=None`.'.format(quantized_type.name),
str(error.exception))
with self.assertRaises(ValueError) as error:
quantized_converter.inference_type = dtypes.float32
quantized_converter.inference_input_type = quantized_type
quantized_converter.convert()
self.assertEqual(
'The `quantized_input_stats` flag must be defined when either '
'`inference_type` flag or `inference_input_type` flag is set to '
'tf.int8 or tf.uint8. Currently, `inference_type=tf.float32` and '
'`inference_input_type=tf.{}`.'.format(quantized_type.name),
str(error.exception))
quantized_converter.inference_type = quantized_type
quantized_converter.inference_input_type = quantized_type
input_arrays = quantized_converter.get_input_arrays()
quantized_converter.quantized_input_stats = {input_arrays[0]: (0., 1.)}
quantized_converter.convert()
def testInvalidQuantizeQATModelMissingInputStats(self):
with ops.Graph().as_default():
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess,
[in_tensor_1, in_tensor_2],
[out_tensor])
converter.inference_type = dtypes.uint8
converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'Quantization input stats are not available for input tensors '
'\'inputB\'.', str(error.exception))
def testTrainingTimeAndPostTrainingCalibrateAndQuantize(self):
with ops.Graph().as_default():
inp, output, calibration_gen = self._getIntegerQuantizeModel()
sess = session.Session()
# Convert float model.
float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
converter = lite.TFLiteConverter.from_session(sess, [inp], [output])
# extra flags to trigger training time quantization conversion
converter.inference_type = dtypes.int8
converter.inference_input_type = dtypes.float32
converter.inference_output_type = dtypes.float32
input_arrays = converter.get_input_arrays()
converter.quantized_input_stats = {input_arrays[0]: (0., 1.)}
# trigger post-training quantization
converter.optimizations = [lite.Optimize.DEFAULT]
converter.representative_dataset = calibration_gen
converter.experimental_new_quantizer = True
quantized_tflite_model = converter.convert()
self.assertIsNotNone(quantized_tflite_model)
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
# calibration only api
converter._experimental_calibrate_only = True
calibrated_tflite = converter.convert()
quantized_tflite_model = mlir_quantize(
calibrated_tflite, fully_quantize=True)
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(np.int8, input_details[0]['dtype'])
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(np.int8, output_details[0]['dtype'])
def testFloatTocoConverter(self):
"""Tests deprecated test TocoConverter."""
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Ensure the interpreter is able to load.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
def testMultipleOutputNodeNames(self):
"""Tests converting a graph with an op that have multiple outputs."""
with ops.Graph().as_default():
input_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.float32)
out0, out1, out2, out3 = array_ops.split(
input_tensor, [1, 1, 1, 1], axis=0)
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [input_tensor],
[out0, out1, out2, out3])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
interpreter.set_tensor(input_details[0]['index'],
np.asarray([1.0, 2.0, 3.0, 4.0], dtype=np.float32))
interpreter.invoke()
output_details = interpreter.get_output_details()
self.assertLen(output_details, 4)
self.assertEqual(1.0, interpreter.get_tensor(output_details[0]['index']))
self.assertEqual(2.0, interpreter.get_tensor(output_details[1]['index']))
self.assertEqual(3.0, interpreter.get_tensor(output_details[2]['index']))
self.assertEqual(4.0, interpreter.get_tensor(output_details[3]['index']))
@test_util.run_in_graph_and_eager_modes
def testFunctions(self):
"""Tests tf.function in 1.X."""
@def_function.function
def plus_placeholder(x, placeholder):
return x + placeholder
with ops.Graph().as_default():
placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[1], name='input')
variable_node = variables.Variable(1.0, name='variable_node')
defun_node = plus_placeholder(variable_node, placeholder)
output_node = math_ops.multiply(defun_node, 2.0, name='output_node')
# Initialize variables in the model.
sess = session.Session()
sess.run(variables.variables_initializer([variable_node]))
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [placeholder],
[output_node])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('output_node', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertAllEqual([1], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInferenceInputOutputTypeFloatDefault(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
def testInferenceInputOutputTypeQuantizedUint8Default(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.inference_type = dtypes.uint8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
def testReusingConverterWithDifferentPostTrainingQuantization(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor + in_tensor, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
converter.post_training_quantize = True
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
converter.post_training_quantize = False
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
def testResizeWithShape(self):
with ops.Graph().as_default():
# Construct a graph with a dynamically shapped input and an internal node
# that relies on the output of that input's shape.
in_tensor = array_ops.placeholder(
shape=[None, None], dtype=dtypes.float32)
in_tensor2 = [[1, 2], [3, 4]]
out_tensor = array_ops.reshape(in_tensor2, array_ops.shape(in_tensor))
sess = session.Session()
converter = lite.TFLiteConverter.from_session(sess, [in_tensor],
[out_tensor])
tflite_model = converter.convert()
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertAllEqual([1, 1], input_details[0]['shape'])
self.assertAllEqual([-1, -1], input_details[0]['shape_signature'])
# Resize tensor and invoke.
interpreter.resize_tensor_input(0, [4])
interpreter.allocate_tensors()
interpreter.invoke()
# The output should be reshaped properly according to the resized input.
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.int32, output_details[0]['dtype'])
self.assertAllEqual([4], output_details[0]['shape'])
output_data = interpreter.get_tensor(output_details[0]['index'])
self.assertAllEqual([1, 2, 3, 4], output_data)
def testResizingIntermediateDynamicTensor(self):
# This is a regression test for the case where shape of dynamic output
# tensors changes between invocations.
# See also https://github.com/tensorflow/tensorflow/issues/26549
with ops.Graph().as_default():
input_tensor = array_ops.placeholder(shape=[1, 1], dtype=dtypes.float32)
input2_tensor = array_ops.placeholder(shape=[1], dtype=dtypes.float32)
# The bug is triggered only when dynamic tensor is intermediate. Putting
# some other ops around it.
neg = math_ops.negative(input2_tensor)
padding = array_ops.placeholder(shape=[2, 2], dtype=dtypes.int32)
output_tensor = array_ops.pad(input_tensor, padding) + neg
sess = session.Session()
converter = lite.TFLiteConverter.from_session(
sess, [input_tensor, padding, input2_tensor], [output_tensor])
tflite_model = converter.convert()
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
interpreter.set_tensor(input_details[1]['index'],
np.array([[1, 1], [1, 1]], dtype=np.int32))
interpreter.invoke()
# Without the fix, invocation will fail when changing the shape of
# intermediate dynamic tensors.
interpreter.set_tensor(input_details[1]['index'],
np.array([[2, 2], [2, 2]], dtype=np.int32))
interpreter.invoke()
def testGraphDebugInfo(self):
"""Test a session has debug info captured."""
@def_function.function
def plus_placeholder(x, placeholder):
return x + placeholder
with ops.Graph().as_default():
placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[1], name='input')
variable_node = variables.Variable(1.0, name='variable_node')
defun_node = plus_placeholder(variable_node, placeholder)
output_node = math_ops.multiply(defun_node, 2.0, name='output_node')
# Initialize variables in the model.
sess = session.Session()
sess.run(variables.variables_initializer([variable_node]))
converter = lite.TFLiteConverter.from_session(sess, [placeholder],
[output_node])
converter.convert()
self.assertValidDebugInfo(converter._debug_info)
# Check the add node in the inlined function is included.
func = sess.graph.as_graph_def().library.function[0].signature.name
self.assertIn(('add@' + func), repr(converter._debug_info))
def testOutputOnlyModel(self):
with ops.Graph().as_default():
out_tensor = random_ops.random_normal(shape=[3])
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverter.from_session(sess, [], [out_tensor])
converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS,
lite.OpsSet.SELECT_TF_OPS,
]
# Empty input array is a valid input.
self.assertTrue(converter._has_valid_tensors())
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
| FromSessionTest |
python | Netflix__metaflow | metaflow/plugins/pypi/conda_decorator.py | {
"start": 10418,
"end": 14064
} | class ____(FlowDecorator):
"""
Specifies the Conda environment for all steps of the flow.
Use `@conda_base` to set common libraries required by all
steps and use `@conda` to specify step-specific additions.
Parameters
----------
packages : Dict[str, str], default {}
Packages to use for this flow. The key is the name of the package
and the value is the version to use.
libraries : Dict[str, str], default {}
Supported for backward compatibility. When used with packages, packages will take precedence.
python : str, optional, default None
Version of Python to use, e.g. '3.7.4'. A default value of None implies
that the version used will correspond to the version of the Python interpreter used to start the run.
disabled : bool, default False
If set to True, disables Conda.
"""
# TODO: Migrate conda_base keyword to conda for simplicity.
name = "conda_base"
defaults = {
"packages": {},
"libraries": {}, # Deprecated! Use packages going forward.
"python": None,
"disabled": None,
}
def __init__(self, attributes=None, statically_defined=False, inserted_by=None):
self._attributes_with_user_values = (
set(attributes.keys()) if attributes is not None else set()
)
super(CondaFlowDecorator, self).__init__(
attributes, statically_defined, inserted_by
)
def init(self):
# Support legacy 'libraries=' attribute for the decorator.
self.attributes["packages"] = {
**self.attributes["libraries"],
**self.attributes["packages"],
}
# Keep because otherwise make_decorator_spec will fail
self.attributes["libraries"] = {}
if self.attributes["python"]:
self.attributes["python"] = str(self.attributes["python"])
def is_attribute_user_defined(self, name):
return name in self._attributes_with_user_values
def flow_init(
self, flow, graph, environment, flow_datastore, metadata, logger, echo, options
):
# NOTE: Important for extensions implementing custom virtual environments.
# Without this steps will not have an implicit conda step decorator on them unless the environment adds one in its decospecs.
from metaflow import decorators
decorators._attach_decorators(flow, ["conda"])
# @conda uses a conda environment to create a virtual environment.
# The conda environment can be created through micromamba.
_supported_virtual_envs = ["conda"]
# To placate people who don't want to see a shred of conda in UX, we symlink
# --environment=pypi to --environment=conda
_supported_virtual_envs.extend(["pypi"])
# TODO: Hardcoded for now to support the fast bakery environment.
# We should introduce a more robust mechanism for appending supported environments, for example from within extensions.
_supported_virtual_envs.extend(["fast-bakery"])
# The --environment= requirement ensures that valid virtual environments are
# created for every step to execute it, greatly simplifying the @conda
# implementation.
if environment.TYPE not in _supported_virtual_envs:
raise InvalidEnvironmentException(
"@%s decorator requires %s"
% (
self.name,
" or ".join(
["--environment=%s" % env for env in _supported_virtual_envs]
),
)
)
| CondaFlowDecorator |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 28017,
"end": 28893
} | class ____(ASTPostfixOp):
def __init__(self, lst: ASTParenExprList | ASTBracedInitList) -> None:
self.lst = lst
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTPostfixCallExpr):
return NotImplemented
return self.lst == other.lst
def __hash__(self) -> int:
return hash(self.lst)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.lst)
def get_id(self, idPrefix: str, version: int) -> str:
return ''.join([
'cl',
idPrefix,
*(e.get_id(version) for e in self.lst.exprs),
'E',
])
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
self.lst.describe_signature(signode, mode, env, symbol)
| ASTPostfixCallExpr |
python | getsentry__sentry | tests/sentry/runner/commands/test_backup.py | {
"start": 27918,
"end": 29870
} | class ____(TransactionTestCase):
"""
Test that we react properly to the "Are you sure you want to delete all models?" confirm dialog.
"""
@staticmethod
def cli_import_with_confirmation_input(
input: str, *, import_args: list[str] | None = None
) -> str:
with TemporaryDirectory() as tmp_dir:
tmp_in_findings = Path(tmp_dir).joinpath(
f"{''.join(choice(ascii_letters)for _ in range(6))}.json"
)
rv = CliRunner().invoke(
import_,
["global", GOOD_FILE_PATH, "--findings-file", str(tmp_in_findings)]
+ ([] if import_args is None else import_args),
input=input,
catch_exceptions=False,
)
assert rv.exit_code == 0, rv.output
return rv.output
@pytest.mark.skipif(
os.environ.get("SENTRY_USE_MONOLITH_DBS", "0") == "0",
reason="only run when in `SENTRY_USE_MONOLITH_DBS=1` env variable is set",
)
def test_confirm_yes(self) -> None:
output = self.cli_import_with_confirmation_input("y\n")
assert "Import cancelled" not in output
assert Email.objects.count() > 0
@pytest.mark.skipif(
os.environ.get("SENTRY_USE_MONOLITH_DBS", "0") == "0",
reason="only run when in `SENTRY_USE_MONOLITH_DBS=1` env variable is set",
)
def test_confirm_no(self) -> None:
output = self.cli_import_with_confirmation_input("n\n")
assert "Import cancelled" in output
assert Email.objects.count() == 0
# Should ignore the `--silent` flag, and only trigger on `--no-prompt`.
output = self.cli_import_with_confirmation_input("n\n", import_args=["--silent"])
assert "Import cancelled" not in output
assert Email.objects.count() == 0
@patch("sentry.backup.imports.ImportExportService.get_importer_for_model")
| GoodGlobalImportConfirmDialogTests |
python | django-haystack__django-haystack | haystack/fields.py | {
"start": 15245,
"end": 15307
} | class ____(FacetField, IntegerField):
pass
| FacetIntegerField |
python | keras-team__keras | integration_tests/dataset_tests/mnist_test.py | {
"start": 89,
"end": 1165
} | class ____(testing.TestCase):
def test_x_train_shape(self):
(x_train, _), _ = mnist.load_data()
self.assertEqual(x_train.shape, (60000, 28, 28))
def test_y_train_shape(self):
(_, y_train), _ = mnist.load_data()
self.assertEqual(y_train.shape, (60000,))
def test_x_test_shape(self):
_, (x_test, _) = mnist.load_data()
self.assertEqual(x_test.shape, (10000, 28, 28))
def test_y_test_shape(self):
_, (_, y_test) = mnist.load_data()
self.assertEqual(y_test.shape, (10000,))
def test_x_train_dtype(self):
(x_train, _), _ = mnist.load_data()
self.assertEqual(x_train.dtype, np.uint8)
def test_y_train_dtype(self):
(_, y_train), _ = mnist.load_data()
self.assertEqual(y_train.dtype, np.uint8)
def test_x_test_dtype(self):
_, (x_test, _) = mnist.load_data()
self.assertEqual(x_test.dtype, np.uint8)
def test_y_test_dtype(self):
_, (_, y_test) = mnist.load_data()
self.assertEqual(y_test.dtype, np.uint8)
| MnistLoadDataTest |
python | PyCQA__flake8 | tests/integration/test_plugins.py | {
"start": 3130,
"end": 7770
} | class ____(AlwaysErrors):
off_by_default = True
def test_plugin_gets_enabled_by_default(tmp_path, capsys):
cfg_s = f"""\
[flake8:local-plugins]
extension =
ABC = {AlwaysErrors.__module__}:{AlwaysErrors.__name__}
"""
cfg = tmp_path.joinpath("tox.ini")
cfg.write_text(cfg_s)
t_py = tmp_path.joinpath("t.py")
t_py.touch()
assert main((str(t_py), "--config", str(cfg))) == 1
out, err = capsys.readouterr()
assert out == f"{t_py}:1:1: ABC123 error\n"
assert err == ""
def test_plugin_off_by_default(tmp_path, capsys):
cfg_s = f"""\
[flake8:local-plugins]
extension =
ABC = {AlwaysErrorsDisabled.__module__}:{AlwaysErrorsDisabled.__name__}
"""
cfg = tmp_path.joinpath("tox.ini")
cfg.write_text(cfg_s)
t_py = tmp_path.joinpath("t.py")
t_py.touch()
cmd = (str(t_py), "--config", str(cfg))
assert main(cmd) == 0
out, err = capsys.readouterr()
assert out == err == ""
assert main((*cmd, "--enable-extension=ABC")) == 1
out, err = capsys.readouterr()
assert out == f"{t_py}:1:1: ABC123 error\n"
assert err == ""
def yields_physical_line(physical_line):
yield 0, f"T001 {physical_line!r}"
def test_physical_line_plugin_multiline_string(tmpdir, capsys):
cfg_s = f"""\
[flake8:local-plugins]
extension =
T = {yields_physical_line.__module__}:{yields_physical_line.__name__}
"""
cfg = tmpdir.join("tox.ini")
cfg.write(cfg_s)
src = '''\
x = "foo" + """
bar
"""
'''
t_py = tmpdir.join("t.py")
t_py.write_binary(src.encode())
with tmpdir.as_cwd():
assert main(("t.py", "--config", str(cfg))) == 1
expected = '''\
t.py:1:1: T001 'x = "foo" + """\\n'
t.py:2:1: T001 'bar\\n'
t.py:3:1: T001 '"""\\n'
'''
out, err = capsys.readouterr()
assert out == expected
def test_physical_line_plugin_multiline_fstring(tmpdir, capsys):
cfg_s = f"""\
[flake8:local-plugins]
extension =
T = {yields_physical_line.__module__}:{yields_physical_line.__name__}
"""
cfg = tmpdir.join("tox.ini")
cfg.write(cfg_s)
src = '''\
y = 1
x = f"""
hello {y}
"""
'''
t_py = tmpdir.join("t.py")
t_py.write_binary(src.encode())
with tmpdir.as_cwd():
assert main(("t.py", "--config", str(cfg))) == 1
expected = '''\
t.py:1:1: T001 'y = 1\\n'
t.py:2:1: T001 'x = f"""\\n'
t.py:3:1: T001 'hello {y}\\n'
t.py:4:1: T001 '"""\\n'
'''
out, err = capsys.readouterr()
assert out == expected
def yields_logical_line(logical_line):
yield 0, f"T001 {logical_line!r}"
def test_logical_line_plugin(tmpdir, capsys):
cfg_s = f"""\
[flake8]
extend-ignore = F
[flake8:local-plugins]
extension =
T = {yields_logical_line.__module__}:{yields_logical_line.__name__}
"""
cfg = tmpdir.join("tox.ini")
cfg.write(cfg_s)
src = """\
f'hello world'
"""
t_py = tmpdir.join("t.py")
t_py.write_binary(src.encode())
with tmpdir.as_cwd():
assert main(("t.py", "--config", str(cfg))) == 1
expected = """\
t.py:1:1: T001 "f'xxxxxxxxxxx'"
"""
out, err = capsys.readouterr()
assert out == expected
def test_escaping_of_fstrings_in_string_redacter(tmpdir, capsys):
cfg_s = f"""\
[flake8]
extend-ignore = F
[flake8:local-plugins]
extension =
T = {yields_logical_line.__module__}:{yields_logical_line.__name__}
"""
cfg = tmpdir.join("tox.ini")
cfg.write(cfg_s)
src = """\
f'{{"{hello}": "{world}"}}'
"""
t_py = tmpdir.join("t.py")
t_py.write_binary(src.encode())
with tmpdir.as_cwd():
assert main(("t.py", "--config", str(cfg))) == 1
if sys.version_info >= (3, 12): # pragma: >=3.12 cover
expected = """\
t.py:1:1: T001 "f'xxx{hello}xxxx{world}xxx'"
"""
else: # pragma: <3.12 cover
expected = """\
t.py:1:1: T001 "f'xxxxxxxxxxxxxxxxxxxxxxxx'"
"""
out, err = capsys.readouterr()
assert out == expected
@pytest.mark.xfail(sys.version_info < (3, 14), reason="3.14+")
def test_tstring_logical_line(tmpdir, capsys): # pragma: >=3.14 cover
cfg_s = f"""\
[flake8]
extend-ignore = F
[flake8:local-plugins]
extension =
T = {yields_logical_line.__module__}:{yields_logical_line.__name__}
"""
cfg = tmpdir.join("tox.ini")
cfg.write(cfg_s)
src = """\
t'''
hello {world}
'''
t'{{"{hello}": "{world}"}}'
"""
t_py = tmpdir.join("t.py")
t_py.write_binary(src.encode())
with tmpdir.as_cwd():
assert main(("t.py", "--config", str(cfg))) == 1
expected = """\
t.py:1:1: T001 "t'''xxxxxxx{world}x'''"
t.py:4:1: T001 "t'xxx{hello}xxxx{world}xxx'"
"""
out, err = capsys.readouterr()
assert out == expected
| AlwaysErrorsDisabled |
python | langchain-ai__langchain | libs/cli/langchain_cli/integration_template/tests/unit_tests/test_embeddings.py | {
"start": 191,
"end": 472
} | class ____(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[__ModuleName__Embeddings]:
return __ModuleName__Embeddings
@property
def embedding_model_params(self) -> dict:
return {"model": "nest-embed-001"}
| TestParrotLinkEmbeddingsUnit |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/api/client.py | {
"start": 323,
"end": 505
} | class ____(Protocol):
"""Protocol for GraphQL client factories used in testing."""
def __call__(self, config: DagsterPlusCliConfig) -> IGraphQLClient: ...
| GraphQLClientFactory |
python | django__django | tests/postgres_tests/test_operations.py | {
"start": 15153,
"end": 21935
} | class ____(OptimizerTestBase, PostgreSQLTestCase):
app_label = "test_allow_create_collation"
@override_settings(DATABASE_ROUTERS=[NoMigrationRouter()])
def test_no_allow_migrate(self):
operation = CreateCollation("C_test", locale="C")
project_state = ProjectState()
new_state = project_state.clone()
# Don't create a collation.
with CaptureQueriesContext(connection) as captured_queries:
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(
self.app_label, editor, project_state, new_state
)
self.assertEqual(len(captured_queries), 0)
# Reversal.
with CaptureQueriesContext(connection) as captured_queries:
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(
self.app_label, editor, new_state, project_state
)
self.assertEqual(len(captured_queries), 0)
def test_create(self):
operation = CreateCollation("C_test", locale="C")
self.assertEqual(operation.migration_name_fragment, "create_collation_c_test")
self.assertEqual(operation.describe(), "Create collation C_test")
self.assertEqual(operation.formatted_description(), "+ Create collation C_test")
project_state = ProjectState()
new_state = project_state.clone()
# Create a collation.
with CaptureQueriesContext(connection) as captured_queries:
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(
self.app_label, editor, project_state, new_state
)
self.assertEqual(len(captured_queries), 1)
self.assertIn("CREATE COLLATION", captured_queries[0]["sql"])
# Creating the same collation raises an exception.
with self.assertRaisesMessage(ProgrammingError, "already exists"):
with connection.schema_editor(atomic=True) as editor:
operation.database_forwards(
self.app_label, editor, project_state, new_state
)
# Reversal.
with CaptureQueriesContext(connection) as captured_queries:
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(
self.app_label, editor, new_state, project_state
)
self.assertEqual(len(captured_queries), 1)
self.assertIn("DROP COLLATION", captured_queries[0]["sql"])
# Deconstruction.
name, args, kwargs = operation.deconstruct()
self.assertEqual(name, "CreateCollation")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"name": "C_test", "locale": "C"})
def test_create_non_deterministic_collation(self):
operation = CreateCollation(
"case_insensitive_test",
"und-u-ks-level2",
provider="icu",
deterministic=False,
)
project_state = ProjectState()
new_state = project_state.clone()
# Create a collation.
with CaptureQueriesContext(connection) as captured_queries:
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(
self.app_label, editor, project_state, new_state
)
self.assertEqual(len(captured_queries), 1)
self.assertIn("CREATE COLLATION", captured_queries[0]["sql"])
# Reversal.
with CaptureQueriesContext(connection) as captured_queries:
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(
self.app_label, editor, new_state, project_state
)
self.assertEqual(len(captured_queries), 1)
self.assertIn("DROP COLLATION", captured_queries[0]["sql"])
# Deconstruction.
name, args, kwargs = operation.deconstruct()
self.assertEqual(name, "CreateCollation")
self.assertEqual(args, [])
self.assertEqual(
kwargs,
{
"name": "case_insensitive_test",
"locale": "und-u-ks-level2",
"provider": "icu",
"deterministic": False,
},
)
def test_create_collation_alternate_provider(self):
operation = CreateCollation(
"german_phonebook_test",
provider="icu",
locale="de-u-co-phonebk",
)
project_state = ProjectState()
new_state = project_state.clone()
# Create an collation.
with CaptureQueriesContext(connection) as captured_queries:
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(
self.app_label, editor, project_state, new_state
)
self.assertEqual(len(captured_queries), 1)
self.assertIn("CREATE COLLATION", captured_queries[0]["sql"])
# Reversal.
with CaptureQueriesContext(connection) as captured_queries:
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(
self.app_label, editor, new_state, project_state
)
self.assertEqual(len(captured_queries), 1)
self.assertIn("DROP COLLATION", captured_queries[0]["sql"])
def test_writer(self):
operation = CreateCollation(
"sample_collation",
"und-u-ks-level2",
provider="icu",
deterministic=False,
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {"import django.contrib.postgres.operations"})
self.assertEqual(
buff,
"django.contrib.postgres.operations.CreateCollation(\n"
" name='sample_collation',\n"
" locale='und-u-ks-level2',\n"
" provider='icu',\n"
" deterministic=False,\n"
"),",
)
def test_reduce_create_remove(self):
self.assertOptimizesTo(
[
CreateCollation(
"sample_collation",
"und-u-ks-level2",
provider="icu",
deterministic=False,
),
RemoveCollation(
"sample_collation",
# Different locale
"de-u-ks-level1",
),
],
[],
)
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests.")
| CreateCollationTests |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/cli_commands/test_permissions_command.py | {
"start": 1318,
"end": 9790
} | class ____:
"""Test permissions cleanup CLI commands."""
@pytest.fixture(autouse=True)
def _set_attrs(self):
with conf_vars(
{
(
"core",
"auth_manager",
): "airflow.providers.fab.auth_manager.fab_auth_manager.FabAuthManager",
}
):
# Reload the module to use FAB auth manager
reload(cli_parser)
# Clearing the cache before calling it
cli_parser.get_parser.cache_clear()
self.parser = cli_parser.get_parser()
with get_application_builder() as appbuilder:
self.appbuilder = appbuilder
yield
@patch("airflow.providers.fab.auth_manager.cli_commands.permissions_command.cleanup_dag_permissions")
@patch("airflow.providers.fab.auth_manager.models.Resource")
def test_permissions_cleanup_success(self, mock_resource, mock_cleanup_dag_permissions):
"""Test successful cleanup of DAG permissions."""
# Mock args
args = argparse.Namespace()
args.dag_id = None
args.dry_run = False
args.yes = True
args.verbose = True
# Mock orphaned resources
mock_orphaned_resource = MagicMock()
mock_orphaned_resource.name = "DAG:orphaned_dag"
with (
patch("airflow.providers.fab.auth_manager.cli_commands.utils.get_application_builder"),
patch("airflow.utils.session.create_session") as mock_session_ctx,
patch("sqlalchemy.select"),
redirect_stdout(StringIO()),
):
mock_session = MagicMock()
mock_session_ctx.return_value.__enter__.return_value = mock_session
# Mock DagModel query - return existing DAGs
mock_dag_result = MagicMock()
mock_dag_result.all.return_value = [MagicMock(dag_id="existing_dag")]
# Mock Resource query - return orphaned resources
mock_resource_result = MagicMock()
mock_resource_result.all.return_value = [mock_orphaned_resource]
# Setup session.scalars to return different results for different queries
mock_session.scalars.side_effect = [mock_dag_result, mock_resource_result]
permissions_command.permissions_cleanup(args)
# Verify function calls - it should be called exactly once for the orphaned DAG
mock_cleanup_dag_permissions.assert_called_once_with("orphaned_dag", mock_session)
@patch("airflow.providers.fab.auth_manager.cli_commands.permissions_command.cleanup_dag_permissions")
@patch("airflow.providers.fab.auth_manager.models.Resource")
def test_permissions_cleanup_dry_run(self, mock_resource, mock_cleanup_dag_permissions):
"""Test dry run mode for permissions cleanup."""
# Mock args
args = argparse.Namespace()
args.dag_id = None
args.dry_run = True
args.verbose = True
# Mock orphaned resources
mock_orphaned_resource = MagicMock()
mock_orphaned_resource.name = "DAG:orphaned_dag"
with (
patch("airflow.providers.fab.auth_manager.cli_commands.utils.get_application_builder"),
patch("airflow.utils.session.create_session") as mock_session_ctx,
patch("sqlalchemy.select"),
redirect_stdout(StringIO()) as stdout,
):
mock_session = MagicMock()
mock_session_ctx.return_value.__enter__.return_value = mock_session
# Mock DagModel query - return existing DAGs
mock_dag_result = MagicMock()
mock_dag_result.all.return_value = [MagicMock(dag_id="existing_dag")]
# Mock Resource query - return orphaned resources
mock_resource_result = MagicMock()
mock_resource_result.all.return_value = [mock_orphaned_resource]
# Setup session.scalars to return different results for different queries
mock_session.scalars.side_effect = [mock_dag_result, mock_resource_result]
permissions_command.permissions_cleanup(args)
output = stdout.getvalue()
assert "Dry run mode" in output or "No orphaned DAG permissions found" in output
# In dry run mode, cleanup_dag_permissions should NOT be called
mock_cleanup_dag_permissions.assert_not_called()
@patch("airflow.providers.fab.auth_manager.cli_commands.permissions_command.cleanup_dag_permissions")
@patch("airflow.providers.fab.auth_manager.models.Resource")
def test_permissions_cleanup_specific_dag(self, mock_resource, mock_cleanup_dag_permissions):
"""Test cleanup for a specific DAG."""
# Mock args
args = argparse.Namespace()
args.dag_id = "test_dag"
args.dry_run = False
args.yes = True
args.verbose = True
# Mock orphaned resource for the specific DAG
mock_orphaned_resource = MagicMock()
mock_orphaned_resource.name = "DAG:test_dag"
with (
patch("airflow.providers.fab.auth_manager.cli_commands.utils.get_application_builder"),
patch("airflow.utils.session.create_session") as mock_session_ctx,
patch("sqlalchemy.select"),
redirect_stdout(StringIO()),
):
mock_session = MagicMock()
mock_session_ctx.return_value.__enter__.return_value = mock_session
# Mock DagModel query - return existing DAGs (NOT including the target DAG)
mock_dag_result = MagicMock()
mock_dag_result.all.return_value = [
MagicMock(dag_id="existing_dag"),
MagicMock(dag_id="another_existing_dag"),
]
# Mock Resource query - return orphaned resources
mock_resource_result = MagicMock()
mock_resource_result.all.return_value = [mock_orphaned_resource]
# Setup session.scalars to return different results for different queries
mock_session.scalars.side_effect = [mock_dag_result, mock_resource_result]
permissions_command.permissions_cleanup(args)
# Should call cleanup_dag_permissions specifically for test_dag
mock_cleanup_dag_permissions.assert_called_once_with("test_dag", mock_session)
@patch("airflow.providers.fab.auth_manager.cli_commands.permissions_command.cleanup_dag_permissions")
@patch("airflow.providers.fab.auth_manager.models.Resource")
@patch("builtins.input", return_value="n")
def test_permissions_cleanup_no_confirmation(
self, mock_input, mock_resource, mock_cleanup_dag_permissions
):
"""Test cleanup cancellation when user doesn't confirm."""
# Mock args
args = argparse.Namespace()
args.dag_id = None
args.dry_run = False
args.yes = False
args.verbose = False
# Mock orphaned resources
mock_orphaned_resource = MagicMock()
mock_orphaned_resource.name = "DAG:orphaned_dag"
with (
patch("airflow.providers.fab.auth_manager.cli_commands.utils.get_application_builder"),
patch("airflow.utils.session.create_session") as mock_session_ctx,
patch("sqlalchemy.select"),
redirect_stdout(StringIO()) as stdout,
):
mock_session = MagicMock()
mock_session_ctx.return_value.__enter__.return_value = mock_session
# Mock DagModel query - return existing DAGs
mock_dag_result = MagicMock()
mock_dag_result.all.return_value = [MagicMock(dag_id="existing_dag")]
# Mock Resource query - return orphaned resources
mock_resource_result = MagicMock()
mock_resource_result.all.return_value = [mock_orphaned_resource]
# Setup session.scalars to return different results for different queries
mock_session.scalars.side_effect = [mock_dag_result, mock_resource_result]
permissions_command.permissions_cleanup(args)
output = stdout.getvalue()
# Should not call cleanup if user declines or no orphaned permissions found
assert "Cleanup cancelled" in output or "No orphaned DAG permissions found" in output
# cleanup_dag_permissions should NOT be called when user cancels
if "Cleanup cancelled" in output:
mock_cleanup_dag_permissions.assert_not_called()
| TestPermissionsCommand |
python | scrapy__scrapy | scrapy/utils/defer.py | {
"start": 5059,
"end": 19008
} | class ____(Iterator, Generic[_T]):
"""A class that wraps an async iterable into a normal iterator suitable
for using in Cooperator.coiterate(). As it's only needed for parallel_async(),
it calls the callable directly in the callback, instead of providing a more
generic interface.
On the outside, this class behaves as an iterator that yields Deferreds.
Each Deferred is fired with the result of the callable which was called on
the next result from aiterator. It raises StopIteration when aiterator is
exhausted, as expected.
Cooperator calls __next__() multiple times and waits on the Deferreds
returned from it. As async generators (since Python 3.8) don't support
awaiting on __anext__() several times in parallel, we need to serialize
this. It's done by storing the Deferreds returned from __next__() and
firing the oldest one when a result from __anext__() is available.
The workflow:
1. When __next__() is called for the first time, it creates a Deferred, stores it
in self.waiting_deferreds and returns it. It also makes a Deferred that will wait
for self.aiterator.__anext__() and puts it into self.anext_deferred.
2. If __next__() is called again before self.anext_deferred fires, more Deferreds
are added to self.waiting_deferreds.
3. When self.anext_deferred fires, it either calls _callback() or _errback(). Both
clear self.anext_deferred.
3.1. _callback() calls the callable passing the result value that it takes, pops a
Deferred from self.waiting_deferreds, and if the callable result was a Deferred, it
chains those Deferreds so that the waiting Deferred will fire when the result
Deferred does, otherwise it fires it directly. This causes one awaiting task to
receive a result. If self.waiting_deferreds is still not empty, new __anext__() is
called and self.anext_deferred is populated.
3.2. _errback() checks the exception class. If it's StopAsyncIteration it means
self.aiterator is exhausted and so it sets self.finished and fires all
self.waiting_deferreds. Other exceptions are propagated.
4. If __next__() is called after __anext__() was handled, then if self.finished is
True, it raises StopIteration, otherwise it acts like in step 2, but if
self.anext_deferred is now empty is also populates it with a new __anext__().
Note that CooperativeTask ignores the value returned from the Deferred that it waits
for, so we fire them with None when needed.
It may be possible to write an async iterator-aware replacement for
Cooperator/CooperativeTask and use it instead of this adapter to achieve the same
goal.
"""
def __init__(
self,
aiterable: AsyncIterator[_T],
callable_: Callable[Concatenate[_T, _P], Deferred[Any] | None],
*callable_args: _P.args,
**callable_kwargs: _P.kwargs,
):
self.aiterator: AsyncIterator[_T] = aiterable.__aiter__()
self.callable: Callable[Concatenate[_T, _P], Deferred[Any] | None] = callable_
self.callable_args: tuple[Any, ...] = callable_args
self.callable_kwargs: dict[str, Any] = callable_kwargs
self.finished: bool = False
self.waiting_deferreds: list[Deferred[Any]] = []
self.anext_deferred: Deferred[_T] | None = None
def _callback(self, result: _T) -> None:
# This gets called when the result from aiterator.__anext__() is available.
# It calls the callable on it and sends the result to the oldest waiting Deferred
# (by chaining if the result is a Deferred too or by firing if not).
self.anext_deferred = None
callable_result = self.callable(
result, *self.callable_args, **self.callable_kwargs
)
d = self.waiting_deferreds.pop(0)
if isinstance(callable_result, Deferred):
callable_result.chainDeferred(d)
else:
d.callback(None)
if self.waiting_deferreds:
self._call_anext()
def _errback(self, failure: Failure) -> None:
# This gets called on any exceptions in aiterator.__anext__().
# It handles StopAsyncIteration by stopping the iteration and reraises all others.
self.anext_deferred = None
failure.trap(StopAsyncIteration)
self.finished = True
for d in self.waiting_deferreds:
d.callback(None)
def _call_anext(self) -> None:
# This starts waiting for the next result from aiterator.
# If aiterator is exhausted, _errback will be called.
self.anext_deferred = deferred_from_coro(self.aiterator.__anext__())
self.anext_deferred.addCallbacks(self._callback, self._errback)
def __next__(self) -> Deferred[Any]:
# This puts a new Deferred into self.waiting_deferreds and returns it.
# It also calls __anext__() if needed.
if self.finished:
raise StopIteration
d: Deferred[Any] = Deferred()
self.waiting_deferreds.append(d)
if not self.anext_deferred:
self._call_anext()
return d
def parallel_async(
async_iterable: AsyncIterator[_T],
count: int,
callable: Callable[Concatenate[_T, _P], Deferred[Any] | None], # noqa: A002
*args: _P.args,
**named: _P.kwargs,
) -> Deferred[list[tuple[bool, Iterator[Deferred[Any]]]]]:
"""Like ``parallel`` but for async iterators"""
coop = Cooperator()
work: Iterator[Deferred[Any]] = _AsyncCooperatorAdapter(
async_iterable, callable, *args, **named
)
dl: Deferred[list[tuple[bool, Iterator[Deferred[Any]]]]] = DeferredList(
[coop.coiterate(work) for _ in range(count)]
)
return dl
def process_chain(
callbacks: Iterable[Callable[Concatenate[_T, _P], _T]],
input: _T, # noqa: A002
*a: _P.args,
**kw: _P.kwargs,
) -> Deferred[_T]:
"""Return a Deferred built by chaining the given callbacks"""
warnings.warn(
"process_chain() is deprecated.",
category=ScrapyDeprecationWarning,
stacklevel=2,
)
d: Deferred[_T] = Deferred()
for x in callbacks:
d.addCallback(x, *a, **kw)
d.callback(input)
return d
def process_parallel(
callbacks: Iterable[Callable[Concatenate[_T, _P], _T2]],
input: _T, # noqa: A002
*a: _P.args,
**kw: _P.kwargs,
) -> Deferred[list[_T2]]: # pragma: no cover
"""Return a Deferred with the output of all successful calls to the given
callbacks
"""
warnings.warn(
"process_parallel() is deprecated.",
category=ScrapyDeprecationWarning,
stacklevel=2,
)
dfds = [succeed(input).addCallback(x, *a, **kw) for x in callbacks]
d: Deferred[list[tuple[bool, _T2]]] = DeferredList(
dfds, fireOnOneErrback=True, consumeErrors=True
)
d2: Deferred[list[_T2]] = d.addCallback(lambda r: [x[1] for x in r])
def eb(failure: Failure) -> Failure:
return failure.value.subFailure
d2.addErrback(eb)
return d2
def iter_errback(
iterable: Iterable[_T],
errback: Callable[Concatenate[Failure, _P], Any],
*a: _P.args,
**kw: _P.kwargs,
) -> Iterable[_T]:
"""Wrap an iterable calling an errback if an error is caught while
iterating it.
"""
it = iter(iterable)
while True:
try:
yield next(it)
except StopIteration:
break
except Exception:
errback(failure.Failure(), *a, **kw)
async def aiter_errback(
aiterable: AsyncIterator[_T],
errback: Callable[Concatenate[Failure, _P], Any],
*a: _P.args,
**kw: _P.kwargs,
) -> AsyncIterator[_T]:
"""Wrap an async iterable calling an errback if an error is caught while
iterating it. Similar to :func:`scrapy.utils.defer.iter_errback`.
"""
it = aiterable.__aiter__()
while True:
try:
yield await it.__anext__()
except StopAsyncIteration:
break
except Exception:
errback(failure.Failure(), *a, **kw)
@overload
def deferred_from_coro(o: Awaitable[_T]) -> Deferred[_T]: ...
@overload
def deferred_from_coro(o: _T2) -> _T2: ...
def deferred_from_coro(o: Awaitable[_T] | _T2) -> Deferred[_T] | _T2:
"""Convert a coroutine or other awaitable object into a Deferred,
or return the object as is if it isn't a coroutine."""
if isinstance(o, Deferred):
return o
if inspect.isawaitable(o):
if not is_asyncio_available():
# wrapping the coroutine directly into a Deferred, this doesn't work correctly with coroutines
# that use asyncio, e.g. "await asyncio.sleep(1)"
return Deferred.fromCoroutine(cast("Coroutine[Deferred[Any], Any, _T]", o))
# wrapping the coroutine into a Future and then into a Deferred, this requires AsyncioSelectorReactor
return Deferred.fromFuture(asyncio.ensure_future(o))
return o
def deferred_f_from_coro_f(
coro_f: Callable[_P, Awaitable[_T]],
) -> Callable[_P, Deferred[_T]]:
"""Convert a coroutine function into a function that returns a Deferred.
The coroutine function will be called at the time when the wrapper is called. Wrapper args will be passed to it.
This is useful for callback chains, as callback functions are called with the previous callback result.
"""
@wraps(coro_f)
def f(*coro_args: _P.args, **coro_kwargs: _P.kwargs) -> Deferred[_T]:
return deferred_from_coro(coro_f(*coro_args, **coro_kwargs))
return f
def maybeDeferred_coro(
f: Callable[_P, Any], *args: _P.args, **kw: _P.kwargs
) -> Deferred[Any]:
"""Copy of defer.maybeDeferred that also converts coroutines to Deferreds."""
try:
result = f(*args, **kw)
except: # noqa: E722 # pylint: disable=bare-except
return fail(failure.Failure(captureVars=Deferred.debug))
if isinstance(result, Deferred):
return result
if asyncio.isfuture(result) or inspect.isawaitable(result):
return deferred_from_coro(result)
if isinstance(result, failure.Failure):
return fail(result)
return succeed(result)
def deferred_to_future(d: Deferred[_T]) -> Future[_T]:
"""Return an :class:`asyncio.Future` object that wraps *d*.
This function requires
:class:`~twisted.internet.asyncioreactor.AsyncioSelectorReactor` to be
installed.
When :ref:`using the asyncio reactor <install-asyncio>`, you cannot await
on :class:`~twisted.internet.defer.Deferred` objects from :ref:`Scrapy
callables defined as coroutines <coroutine-support>`, you can only await on
``Future`` objects. Wrapping ``Deferred`` objects into ``Future`` objects
allows you to wait on them::
class MySpider(Spider):
...
async def parse(self, response):
additional_request = scrapy.Request('https://example.org/price')
deferred = self.crawler.engine.download(additional_request)
additional_response = await deferred_to_future(deferred)
.. versionadded:: 2.6.0
.. versionchanged:: VERSION
This function no longer installs an asyncio loop if called before the
Twisted asyncio reactor is installed. A :exc:`RuntimeError` is raised
in this case.
"""
if not is_asyncio_available():
raise RuntimeError("deferred_to_future() requires AsyncioSelectorReactor.")
return d.asFuture(asyncio.get_event_loop())
def maybe_deferred_to_future(d: Deferred[_T]) -> Deferred[_T] | Future[_T]:
"""Return *d* as an object that can be awaited from a :ref:`Scrapy callable
defined as a coroutine <coroutine-support>`.
What you can await in Scrapy callables defined as coroutines depends on the
value of :setting:`TWISTED_REACTOR`:
- When :ref:`using the asyncio reactor <install-asyncio>`, you can only
await on :class:`asyncio.Future` objects.
- When not using the asyncio reactor, you can only await on
:class:`~twisted.internet.defer.Deferred` objects.
If you want to write code that uses ``Deferred`` objects but works with any
reactor, use this function on all ``Deferred`` objects::
class MySpider(Spider):
...
async def parse(self, response):
additional_request = scrapy.Request('https://example.org/price')
deferred = self.crawler.engine.download(additional_request)
additional_response = await maybe_deferred_to_future(deferred)
.. versionadded:: 2.6.0
"""
if not is_asyncio_available():
return d
return deferred_to_future(d)
def _schedule_coro(coro: Coroutine[Any, Any, Any]) -> None:
"""Schedule the coroutine as a task or a Deferred.
This doesn't store the reference to the task/Deferred, so a better
alternative is calling :func:`scrapy.utils.defer.deferred_from_coro`,
keeping the result, and adding proper exception handling (e.g. errbacks) to
it.
"""
if not is_asyncio_available():
Deferred.fromCoroutine(coro)
return
loop = asyncio.get_event_loop()
loop.create_task(coro) # noqa: RUF006
@overload
def ensure_awaitable(o: Awaitable[_T]) -> Awaitable[_T]: ...
@overload
def ensure_awaitable(o: _T) -> Awaitable[_T]: ...
def ensure_awaitable(o: _T | Awaitable[_T]) -> Awaitable[_T]:
"""Convert any value to an awaitable object.
For a :class:`~twisted.internet.defer.Deferred` object, use
:func:`maybe_deferred_to_future` to wrap it into a suitable object. For an
awaitable object of a different type, return it as is. For any other
value, return a coroutine that completes with that value.
.. versionadded:: VERSION
"""
if isinstance(o, Deferred):
return maybe_deferred_to_future(o)
if inspect.isawaitable(o):
return o
async def coro() -> _T:
return o
return coro()
| _AsyncCooperatorAdapter |
python | kubernetes-client__python | kubernetes/client/models/v1_volume_attributes_class_list.py | {
"start": 383,
"end": 7190
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1VolumeAttributesClass]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1VolumeAttributesClassList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1VolumeAttributesClassList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1VolumeAttributesClassList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1VolumeAttributesClassList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1VolumeAttributesClassList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1VolumeAttributesClassList. # noqa: E501
items is the list of VolumeAttributesClass objects. # noqa: E501
:return: The items of this V1VolumeAttributesClassList. # noqa: E501
:rtype: list[V1VolumeAttributesClass]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1VolumeAttributesClassList.
items is the list of VolumeAttributesClass objects. # noqa: E501
:param items: The items of this V1VolumeAttributesClassList. # noqa: E501
:type: list[V1VolumeAttributesClass]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1VolumeAttributesClassList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1VolumeAttributesClassList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1VolumeAttributesClassList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1VolumeAttributesClassList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1VolumeAttributesClassList. # noqa: E501
:return: The metadata of this V1VolumeAttributesClassList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1VolumeAttributesClassList.
:param metadata: The metadata of this V1VolumeAttributesClassList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1VolumeAttributesClassList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1VolumeAttributesClassList):
return True
return self.to_dict() != other.to_dict()
| V1VolumeAttributesClassList |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/any1.py | {
"start": 358,
"end": 378
} | class ____(Any): ...
| A |
python | getsentry__sentry | src/sentry/snuba/metrics/naming_layer/public.py | {
"start": 6569,
"end": 6767
} | class ____(Enum):
"""Identifier value for the satisfaction of a transaction."""
SATISFIED = "satisfied"
TOLERATED = "tolerated"
FRUSTRATED = "frustrated"
| TransactionSatisfactionTagValue |
python | numpy__numpy | benchmarks/benchmarks/bench_lib.py | {
"start": 102,
"end": 2455
} | class ____(Benchmark):
"""Benchmarks for `numpy.pad`.
When benchmarking the pad function it is useful to cover scenarios where
the ratio between the size of the input array and the output array differs
significantly (original area vs. padded area). This allows to evaluate for
which scenario a padding algorithm is optimized. Furthermore involving
large range of array sizes ensures that the effects of CPU-bound caching is
visible.
The table below shows the sizes of the arrays involved in this benchmark:
+-----------------+----------+-----------+-----------+-----------------+
| shape | original | padded: 1 | padded: 8 | padded: (0, 32) |
+=================+==========+===========+===========+=================+
| (2 ** 22,) | 32 MiB | 32.0 MiB | 32.0 MiB | 32.0 MiB |
+-----------------+----------+-----------+-----------+-----------------+
| (1024, 1024) | 8 MiB | 8.03 MiB | 8.25 MiB | 8.51 MiB |
+-----------------+----------+-----------+-----------+-----------------+
| (256, 256, 1) | 256 KiB | 786 KiB | 5.08 MiB | 11.6 MiB |
+-----------------+----------+-----------+-----------+-----------------+
| (4, 4, 4, 4) | 2 KiB | 10.1 KiB | 1.22 MiB | 12.8 MiB |
+-----------------+----------+-----------+-----------+-----------------+
| (1, 1, 1, 1, 1) | 8 B | 1.90 MiB | 10.8 MiB | 299 MiB |
+-----------------+----------+-----------+-----------+-----------------+
"""
param_names = ["shape", "pad_width", "mode"]
params = [
# Shape of the input arrays
[(2 ** 22,), (1024, 1024), (256, 128, 1),
(4, 4, 4, 4), (1, 1, 1, 1, 1)],
# Tested pad widths
[1, 8, (0, 32)],
# Tested modes: mean, median, minimum & maximum use the same code path
# reflect & symmetric share a lot of their code path
["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"],
]
def setup(self, shape, pad_width, mode):
# Make sure to fill the array to make the OS page fault
# in the setup phase and not the timed phase
self.array = np.full(shape, fill_value=1, dtype=np.float64)
def time_pad(self, shape, pad_width, mode):
np.pad(self.array, pad_width, mode)
| Pad |
python | pyinstaller__pyinstaller | PyInstaller/building/makespec.py | {
"start": 5068,
"end": 5353
} | class ____(_RemovedFlagAction):
def __call__(self, *args, **kwargs):
from PyInstaller.exceptions import RemovedWinSideBySideSupportError
raise RemovedWinSideBySideSupportError("Please remove your --win-private-assemblies argument.")
| _RemovedWinPrivateAssembliesAction |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/random_perspective.py | {
"start": 599,
"end": 11747
} | class ____(BaseImagePreprocessingLayer):
"""A preprocessing layer that applies random perspective transformations.
This layer distorts the perspective of input images by shifting their
corner points, simulating a 3D-like transformation. The amount of distortion
is controlled by the `factor` and `scale` parameters.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
factor: A float or a tuple of two floats.
Represents the probability of applying the perspective
transformation to each image in the batch.
- `factor=0.0` ensures no transformation is applied.
- `factor=1.0` means the transformation is always applied.
- If a tuple `(min, max)` is provided, a probability is randomly
sampled between `min` and `max` for each image.
- If a single float is given, the probability is sampled between
`0.0` and the provided float.
Default is 1.0.
scale: A float defining the relative amount of perspective shift.
Determines how much the image corners are displaced, affecting
the intensity of the perspective effect.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
seed: Integer. Used to create a random seed.
"""
_USE_BASE_FACTOR = False
_FACTOR_BOUNDS = (0, 1)
_SUPPORTED_INTERPOLATION = ("nearest", "bilinear")
def __init__(
self,
factor=1.0,
scale=1.0,
interpolation="bilinear",
fill_value=0.0,
seed=None,
data_format=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self._set_factor(factor)
self.scale = scale
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self.generator = SeedGenerator(seed)
self.supports_jit = False
if scale < 0.0 or scale > 1.0:
raise ValueError(
"The `scale` argument should be a number "
"in the range "
f"[0,1]. "
f"Received: scale={scale}"
)
if interpolation not in self._SUPPORTED_INTERPOLATION:
raise NotImplementedError(
f"Unknown `interpolation` {interpolation}. Expected of one "
f"{self._SUPPORTED_INTERPOLATION}."
)
if self.data_format == "channels_first":
self.height_axis = -2
self.width_axis = -1
self.channel_axis = -3
else:
self.height_axis = -3
self.width_axis = -2
self.channel_axis = -1
def get_random_transformation(self, data, training=True, seed=None):
if not training:
return None
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
unbatched = len(images_shape) == 3
if unbatched:
batch_size = 1
else:
batch_size = images_shape[0]
height, width = (
images.shape[self.height_axis],
images.shape[self.width_axis],
)
seed = seed or self._get_seed_generator(self.backend._backend)
transformation_probability = self.backend.random.uniform(
shape=(batch_size,),
minval=self.factor[0],
maxval=self.factor[1],
seed=seed,
)
random_threshold = self.backend.random.uniform(
shape=(batch_size,),
minval=0.0,
maxval=1.0,
seed=seed,
)
apply_perspective = random_threshold < transformation_probability
perspective_factor = self.backend.random.uniform(
shape=(batch_size, 4, 2),
minval=-0.5 * self.scale,
maxval=0.5 * self.scale,
seed=seed,
dtype=self.compute_dtype,
)
start_points = self.backend.convert_to_tensor(
[
[
[0.0, 0.0],
[width - 1, 0.0],
[0.0, height - 1],
[width - 1, height - 1],
]
],
dtype=self.compute_dtype,
)
start_points = self.backend.numpy.repeat(
start_points, batch_size, axis=0
)
end_points = start_points + start_points * perspective_factor
return {
"apply_perspective": apply_perspective,
"start_points": start_points,
"end_points": end_points,
"input_shape": images_shape,
}
def transform_images(self, images, transformation, training=True):
images = self.backend.cast(images, self.compute_dtype)
if training and transformation is not None:
images = self._perspective_inputs(images, transformation)
images = self.backend.cast(images, self.compute_dtype)
return images
def _perspective_inputs(self, inputs, transformation):
if transformation is None:
return inputs
inputs_shape = self.backend.shape(inputs)
unbatched = len(inputs_shape) == 3
if unbatched:
inputs = self.backend.numpy.expand_dims(inputs, axis=0)
start_points = transformation["start_points"]
end_points = transformation["end_points"]
outputs = self.backend.image.perspective_transform(
inputs,
start_points,
end_points,
interpolation=self.interpolation,
fill_value=self.fill_value,
data_format=self.data_format,
)
apply_perspective = transformation["apply_perspective"]
outputs = self.backend.numpy.where(
apply_perspective[:, None, None, None],
outputs,
inputs,
)
if unbatched:
outputs = self.backend.numpy.squeeze(outputs, axis=0)
return outputs
def transform_bounding_boxes(
self,
bounding_boxes,
transformation,
training=True,
):
if training and transformation is not None:
if backend_utils.in_tf_graph():
self.backend.set_backend("tensorflow")
input_height, input_width = (
transformation["input_shape"][self.height_axis],
transformation["input_shape"][self.width_axis],
)
bounding_boxes = convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
height=input_height,
width=input_width,
)
boxes = bounding_boxes["boxes"]
x0, y0, x1, y1 = self.backend.numpy.split(boxes, 4, axis=-1)
start_points = transformation["start_points"]
end_points = transformation["end_points"]
transform = self.backend.image.compute_homography_matrix(
start_points, end_points
)
transform = self.backend.numpy.expand_dims(transform, axis=1)
transform = self.backend.cast(transform, dtype=self.compute_dtype)
corners = [
self._get_transformed_coordinates(x, y, transform)
for x, y in [(x0, y0), (x1, y1), (x0, y1), (x1, y0)]
]
x_corners, y_corners = zip(*corners)
xs = self.backend.numpy.stack(x_corners, axis=-1)
ys = self.backend.numpy.stack(y_corners, axis=-1)
min_x, max_x = (
self.backend.numpy.min(xs, axis=-1),
self.backend.numpy.max(xs, axis=-1),
)
min_y, max_y = (
self.backend.numpy.min(ys, axis=-1),
self.backend.numpy.max(ys, axis=-1),
)
min_x = self.backend.numpy.expand_dims(min_x, axis=-1)
max_x = self.backend.numpy.expand_dims(max_x, axis=-1)
min_y = self.backend.numpy.expand_dims(min_y, axis=-1)
max_y = self.backend.numpy.expand_dims(max_y, axis=-1)
boxes = self.backend.numpy.concatenate(
[min_x, min_y, max_x, max_y], axis=-1
)
apply_perspective = self.backend.core.convert_to_tensor(
transformation["apply_perspective"], dtype=boxes.dtype
)
bounding_boxes["boxes"] = self.backend.numpy.where(
apply_perspective[:, None, None],
boxes,
bounding_boxes["boxes"],
)
bounding_boxes = clip_to_image_size(
bounding_boxes=bounding_boxes,
height=input_height,
width=input_width,
bounding_box_format="xyxy",
)
self.backend.reset()
return bounding_boxes
def _get_transformed_coordinates(
self, x_coords, y_coords, transformation_matrix
):
backend = self.backend
batch_size = backend.shape(transformation_matrix)[0]
homogeneous_transform = backend.numpy.concatenate(
[transformation_matrix, backend.numpy.ones((batch_size, 1, 1))],
axis=-1,
)
homogeneous_transform = backend.numpy.reshape(
homogeneous_transform, (batch_size, 3, 3)
)
inverse_transform = backend.linalg.inv(homogeneous_transform)
ones_column = backend.numpy.ones_like(x_coords)
homogeneous_coords = backend.numpy.concatenate(
[x_coords, y_coords, ones_column], axis=-1
)
homogeneous_coords = backend.numpy.moveaxis(homogeneous_coords, -1, -2)
transformed_coords = backend.numpy.matmul(
inverse_transform, homogeneous_coords
)
transformed_coords = backend.numpy.moveaxis(transformed_coords, -1, -2)
x_transformed = transformed_coords[..., 0] / transformed_coords[..., 2]
y_transformed = transformed_coords[..., 1] / transformed_coords[..., 2]
return x_transformed, y_transformed
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return self.transform_images(
segmentation_masks, transformation, training=training
)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"factor": self.factor,
"scale": self.scale,
"interpolation": self.interpolation,
"fill_value": self.fill_value,
"seed": self.seed,
}
return {**base_config, **config}
| RandomPerspective |
python | spack__spack | var/spack/test_repos/spack_repo/edges_test/packages/zlib/package.py | {
"start": 216,
"end": 597
} | class ____(Package):
"""This package has a variant that triggers a condition only if a required dependency is
providing a virtual.
"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("2.0", md5="abcdef0123456789abcdef0123456789")
version("1.0", md5="0123456789abcdef0123456789abcdef")
provides("zlib-api")
| Zlib |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/frontend_widget.py | {
"start": 623,
"end": 3368
} | class ____(PygmentsHighlighter):
""" A PygmentsHighlighter that understands and ignores prompts.
"""
def __init__(self, frontend, lexer=None):
super().__init__(frontend._control.document(), lexer=lexer)
self._current_offset = 0
self._frontend = frontend
self.highlighting_on = False
self._classic_prompt_re = re.compile(
r'^(%s)?([ \t]*>>> |^[ \t]*\.\.\. )' % re.escape(frontend.other_output_prefix)
)
self._ipy_prompt_re = re.compile(
r'^(%s)?([ \t]*In \[\d+\]: |[ \t]*\ \ \ \.\.\.+: )' % re.escape(frontend.other_output_prefix)
)
def transform_classic_prompt(self, line):
"""Handle inputs that start with '>>> ' syntax."""
if not line or line.isspace():
return line
m = self._classic_prompt_re.match(line)
if m:
return line[len(m.group(0)):]
else:
return line
def transform_ipy_prompt(self, line):
"""Handle inputs that start classic IPython prompt syntax."""
if not line or line.isspace():
return line
m = self._ipy_prompt_re.match(line)
if m:
return line[len(m.group(0)):]
else:
return line
def highlightBlock(self, string):
""" Highlight a block of text. Reimplemented to highlight selectively.
"""
if not hasattr(self, 'highlighting_on') or not self.highlighting_on:
return
# The input to this function is a unicode string that may contain
# paragraph break characters, non-breaking spaces, etc. Here we acquire
# the string as plain text so we can compare it.
current_block = self.currentBlock()
string = current_block.text()
# QTextBlock::text() can still return non-breaking spaces
# for the continuation prompt
string = string.replace("\xa0", " ")
# Only highlight if we can identify a prompt, but make sure not to
# highlight the prompt.
without_prompt = self.transform_ipy_prompt(string)
diff = len(string) - len(without_prompt)
if diff > 0:
self._current_offset = diff
super().highlightBlock(without_prompt)
def rehighlightBlock(self, block):
""" Reimplemented to temporarily enable highlighting if disabled.
"""
old = self.highlighting_on
self.highlighting_on = True
super().rehighlightBlock(block)
self.highlighting_on = old
def setFormat(self, start, count, format):
""" Reimplemented to highlight selectively.
"""
start += self._current_offset
super().setFormat(start, count, format)
| FrontendHighlighter |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/data.py | {
"start": 10293,
"end": 11661
} | class ____:
"""Records the series of ``start_span``, ``stop_span``, and
``draw_bits`` calls so that these may be stored in ``Spans`` and
replayed when we need to know about the structure of individual
``Span`` objects.
Note that there is significant similarity between this class and
``DataObserver``, and the plan is to eventually unify them, but
they currently have slightly different functions and implementations.
"""
def __init__(self) -> None:
self.labels: list[int] = []
self.__index_of_labels: dict[int, int] | None = {}
self.trail = IntList()
self.nodes: list[ChoiceNode] = []
def freeze(self) -> None:
self.__index_of_labels = None
def record_choice(self) -> None:
self.trail.append(TrailType.CHOICE)
def start_span(self, label: int) -> None:
assert self.__index_of_labels is not None
try:
i = self.__index_of_labels[label]
except KeyError:
i = self.__index_of_labels.setdefault(label, len(self.labels))
self.labels.append(label)
self.trail.append(TrailType.CHOICE + 1 + i)
def stop_span(self, *, discard: bool) -> None:
if discard:
self.trail.append(TrailType.STOP_SPAN_DISCARD)
else:
self.trail.append(TrailType.STOP_SPAN_NO_DISCARD)
| SpanRecord |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_project_stacktrace_link.py | {
"start": 4079,
"end": 8444
} | class ____(BaseProjectStacktraceLink):
filepath = "usr/src/getsentry/src/sentry/src/sentry/utils/safe.py"
def test_no_filepath(self) -> None:
"""The file query search is missing"""
response = self.get_error_response(
self.organization.slug, self.project.slug, status_code=400
)
assert response.data == {"detail": "Filepath is required"}
def test_no_configs(self) -> None:
"""No code mappings have been set for this project"""
# new project that has no configurations set up for it
project = self.create_project(
name="bloop",
organization=self.organization,
teams=[self.create_team(organization=self.organization)],
)
response = self.get_success_response(
self.organization.slug, project.slug, qs_params={"file": self.filepath}
)
assert response.data == {
"config": None,
"sourceUrl": None,
"integrations": [serialized_integration(self.integration)],
"error": "no_code_mappings_for_project",
}
def test_file_not_found_error(self) -> None:
"""File matches code mapping but it cannot be found in the source repository."""
cm = self._create_code_mapping(stack_root="usr/src/getsentry/", source_root="")
response = self.get_success_response(
self.organization.slug, self.project.slug, qs_params={"file": self.filepath}
)
assert response.data["config"] == self.expected_configurations(cm)
assert not response.data["sourceUrl"]
assert response.data["error"] == "file_not_found"
assert response.data["integrations"] == [serialized_integration(self.integration)]
assert (
response.data["attemptedUrl"]
== f"https://example.com/{self.repo.name}/blob/master/src/sentry/src/sentry/utils/safe.py"
)
def test_stack_root_mismatch_error(self) -> None:
"""Looking for a stacktrace file path that will not match any code mappings"""
# At least one code mapping to produce the stack_root_mismatch error
self._create_code_mapping(stack_root="usr/src/getsentry/", source_root="")
response = self.get_success_response(
self.organization.slug, self.project.slug, qs_params={"file": "wrong/file/path"}
)
assert response.data["config"] is None
assert not response.data["sourceUrl"]
assert response.data["error"] == "stack_root_mismatch"
assert response.data["integrations"] == [serialized_integration(self.integration)]
def test_config_and_source_url(self) -> None:
"""Having a different source url should also work"""
with patch.object(
ExampleIntegration, "get_stacktrace_link", return_value="https://sourceurl.com/"
):
cm = self._create_code_mapping(stack_root="usr/src/getsentry/", source_root="")
response = self.get_success_response(
self.organization.slug, self.project.slug, qs_params={"file": self.filepath}
)
assert response.data["config"] == self.expected_configurations(cm)
assert response.data["sourceUrl"] == "https://sourceurl.com/"
assert response.data["integrations"] == [serialized_integration(self.integration)]
@patch.object(ExampleIntegration, "get_stacktrace_link")
def test_file_no_stack_root_match(self, mock_integration: MagicMock) -> None:
# Pretend that the file was not found in the repository
mock_integration.return_value = None
# At least one code mapping to produce the stack_root_mismatch error
self._create_code_mapping(stack_root="usr/src/getsentry/", source_root="")
response = self.get_success_response(
self.organization.slug,
self.project.slug,
qs_params={"file": "something/else/" + self.filepath},
)
assert mock_integration.call_count == 0 # How many attempts to find the source code
assert response.data["config"] is None # Since no code mapping matched
assert not response.data["sourceUrl"]
assert response.data["error"] == "stack_root_mismatch"
assert response.data["integrations"] == [serialized_integration(self.integration)]
| ProjectStacktraceLinkTest |
python | chroma-core__chroma | chromadb/api/collection_configuration.py | {
"start": 890,
"end": 5521
} | class ____(TypedDict, total=True):
hnsw: Optional[HNSWConfiguration]
spann: Optional[SpannConfiguration]
embedding_function: Optional[EmbeddingFunction] # type: ignore
def load_collection_configuration_from_json_str(
config_json_str: str,
) -> CollectionConfiguration:
config_json_map = json.loads(config_json_str)
return load_collection_configuration_from_json(config_json_map)
# TODO: make warnings prettier and add link to migration docs
def load_collection_configuration_from_json(
config_json_map: Dict[str, Any]
) -> CollectionConfiguration:
if (
config_json_map.get("spann") is not None
and config_json_map.get("hnsw") is not None
):
raise ValueError("hnsw and spann cannot both be provided")
hnsw_config = None
spann_config = None
ef_config = None
# Process vector index configuration (HNSW or SPANN)
if config_json_map.get("hnsw") is not None:
hnsw_config = cast(HNSWConfiguration, config_json_map["hnsw"])
if config_json_map.get("spann") is not None:
spann_config = cast(SpannConfiguration, config_json_map["spann"])
# Process embedding function configuration
if config_json_map.get("embedding_function") is not None:
ef_config = config_json_map["embedding_function"]
if ef_config["type"] == "legacy":
warnings.warn(
"legacy embedding function config",
DeprecationWarning,
stacklevel=2,
)
ef = None
else:
try:
ef_name = ef_config["name"]
except KeyError:
raise ValueError(
f"Embedding function name not found in config: {ef_config}"
)
try:
ef = known_embedding_functions[ef_name]
except KeyError:
raise ValueError(
f"Embedding function {ef_name} not found. Add @register_embedding_function decorator to the class definition."
)
try:
ef = ef.build_from_config(ef_config["config"]) # type: ignore
except Exception as e:
raise ValueError(
f"Could not build embedding function {ef_config['name']} from config {ef_config['config']}: {e}"
)
else:
ef = None
return CollectionConfiguration(
hnsw=hnsw_config,
spann=spann_config,
embedding_function=ef, # type: ignore
)
def collection_configuration_to_json_str(config: CollectionConfiguration) -> str:
return json.dumps(collection_configuration_to_json(config))
def collection_configuration_to_json(config: CollectionConfiguration) -> Dict[str, Any]:
if isinstance(config, dict):
hnsw_config = config.get("hnsw")
spann_config = config.get("spann")
ef = config.get("embedding_function")
else:
try:
hnsw_config = config.get_parameter("hnsw").value
except ValueError:
hnsw_config = None
try:
spann_config = config.get_parameter("spann").value
except ValueError:
spann_config = None
try:
ef = config.get_parameter("embedding_function").value
except ValueError:
ef = None
ef_config: Dict[str, Any] | None = None
if hnsw_config is not None:
try:
hnsw_config = cast(HNSWConfiguration, hnsw_config)
except Exception as e:
raise ValueError(f"not a valid hnsw config: {e}")
if spann_config is not None:
try:
spann_config = cast(SpannConfiguration, spann_config)
except Exception as e:
raise ValueError(f"not a valid spann config: {e}")
if ef is None:
ef = None
ef_config = {"type": "legacy"}
if ef is not None:
try:
if ef.is_legacy():
ef_config = {"type": "legacy"}
else:
ef_config = {
"name": ef.name(),
"type": "known",
"config": ef.get_config(),
}
register_embedding_function(type(ef)) # type: ignore
except Exception as e:
warnings.warn(
f"legacy embedding function config: {e}",
DeprecationWarning,
stacklevel=2,
)
ef = None
ef_config = {"type": "legacy"}
return {
"hnsw": hnsw_config,
"spann": spann_config,
"embedding_function": ef_config,
}
| CollectionConfiguration |
python | realpython__materials | python-class/vehicles.py | {
"start": 686,
"end": 1039
} | class ____(Vehicle):
def __init__(self, make, model, year, num_wheels):
super().__init__(make, model, year)
self.num_wheels = num_wheels
def ride(self):
print(f'Riding my "{self.make} - {self.model}" on the road')
def __str__(self):
return f'"{self.make} - {self.model}" has {self.num_wheels} wheels'
| Motorcycle |
python | neetcode-gh__leetcode | python/1899-merge-triplets-to-form-target-triplet.py | {
"start": 0,
"end": 381
} | class ____:
def mergeTriplets(self, triplets: List[List[int]], target: List[int]) -> bool:
good = set()
for t in triplets:
if t[0] > target[0] or t[1] > target[1] or t[2] > target[2]:
continue
for i, v in enumerate(t):
if v == target[i]:
good.add(i)
return len(good) == 3
| Solution |
python | huggingface__transformers | src/transformers/models/dinat/modeling_dinat.py | {
"start": 12782,
"end": 13484
} | class ____(nn.Module):
def __init__(self, config, dim, num_heads, kernel_size, dilation):
super().__init__()
self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size, dilation)
self.output = NeighborhoodAttentionOutput(config, dim)
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
| NeighborhoodAttentionModule |
python | walkccc__LeetCode | solutions/2289. Steps to Make Array Non-decreasing/2289.py | {
"start": 0,
"end": 379
} | class ____:
def totalSteps(self, nums: list[int]) -> int:
# dp[i] := the number of steps to remove nums[i]
dp = [0] * len(nums)
stack = []
for i, num in enumerate(nums):
step = 1
while stack and nums[stack[-1]] <= num:
step = max(step, dp[stack.pop()] + 1)
if stack:
dp[i] = step
stack.append(i)
return max(dp)
| Solution |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/mode.py | {
"start": 384,
"end": 1870
} | class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.String)
name = graphene.NonNull(graphene.String)
description = graphene.String()
resources = non_null_list(GrapheneResource)
loggers = non_null_list(GrapheneLogger)
class Meta:
name = "Mode"
def __init__(
self,
get_config_type: Callable[[str], ConfigTypeSnap],
job_graphql_id: str,
mode_def_snap: ModeDefSnap,
):
super().__init__()
self._mode_def_snap = check.inst_param(mode_def_snap, "mode_def_snap", ModeDefSnap)
self._get_config_type = get_config_type
self._job_graphql_id = job_graphql_id
def resolve_id(self, _graphene_info: ResolveInfo):
return f"{self._job_graphql_id}-{self._mode_def_snap.name}"
def resolve_name(self, _graphene_info: ResolveInfo):
return self._mode_def_snap.name
def resolve_description(self, _graphene_info: ResolveInfo):
return self._mode_def_snap.description
def resolve_resources(self, _graphene_info: ResolveInfo):
return [
GrapheneResource(self._get_config_type, resource_def_snap)
for resource_def_snap in sorted(self._mode_def_snap.resource_def_snaps)
]
def resolve_loggers(self, _graphene_info: ResolveInfo):
return [
GrapheneLogger(self._get_config_type, logger_def_snap)
for logger_def_snap in sorted(self._mode_def_snap.logger_def_snaps)
]
| GrapheneMode |
python | viewflow__viewflow | viewflow/jsonstore.py | {
"start": 6831,
"end": 6903
} | class ____(JSONFieldMixin, fields.IPAddressField):
pass
| IPAddressField |
python | eventlet__eventlet | tests/semaphore_test.py | {
"start": 44,
"end": 1968
} | class ____(tests.LimitedTestCase):
def test_bounded(self):
sem = eventlet.CappedSemaphore(2, limit=3)
self.assertEqual(sem.acquire(), True)
self.assertEqual(sem.acquire(), True)
gt1 = eventlet.spawn(sem.release)
self.assertEqual(sem.acquire(), True)
self.assertEqual(-3, sem.balance)
sem.release()
sem.release()
sem.release()
gt2 = eventlet.spawn(sem.acquire)
sem.release()
self.assertEqual(3, sem.balance)
gt1.wait()
gt2.wait()
def test_bounded_with_zero_limit(self):
sem = eventlet.CappedSemaphore(0, 0)
gt = eventlet.spawn(sem.acquire)
sem.release()
gt.wait()
def test_non_blocking(self):
sem = eventlet.Semaphore(0)
self.assertEqual(sem.acquire(blocking=False), False)
def test_timeout(self):
sem = eventlet.Semaphore(0)
start = time.time()
self.assertEqual(sem.acquire(timeout=0.1), False)
self.assertTrue(time.time() - start >= 0.1)
def test_timeout_non_blocking(self):
sem = eventlet.Semaphore()
self.assertRaises(ValueError, sem.acquire, blocking=False, timeout=1)
def test_semaphore_contention():
g_mutex = eventlet.Semaphore()
counts = [0, 0]
def worker(no):
while min(counts) < 200:
with g_mutex:
counts[no - 1] += 1
eventlet.sleep(0.001)
t1 = eventlet.spawn(worker, no=1)
t2 = eventlet.spawn(worker, no=2)
eventlet.sleep(0.5)
t1.kill()
t2.kill()
assert abs(counts[0] - counts[1]) < int(min(counts) * 0.1), counts
def test_semaphore_type_check():
eventlet.Semaphore(0)
eventlet.Semaphore(1)
eventlet.Semaphore(1e2)
with tests.assert_raises(TypeError):
eventlet.Semaphore('foo')
with tests.assert_raises(ValueError):
eventlet.Semaphore(-1)
| TestSemaphore |
python | pytorch__pytorch | torch/cuda/_sanitizer.py | {
"start": 2267,
"end": 2368
} | class ____(Exception):
"""Base class for errors detected by CUDA Sanitizer."""
| SynchronizationError |
python | keon__algorithms | algorithms/linkedlist/linkedlist.py | {
"start": 544,
"end": 690
} | class ____(object):
def __init__(self, value):
self.value = value
self.next = None
self.prev = None
| DoublyLinkedListNode |
python | pallets__werkzeug | src/werkzeug/datastructures/structures.py | {
"start": 1160,
"end": 1414
} | class ____(ImmutableListMixin, list[V]): # type: ignore[misc]
"""An immutable :class:`list`.
.. versionadded:: 0.5
:private:
"""
def __repr__(self) -> str:
return f"{type(self).__name__}({list.__repr__(self)})"
| ImmutableList |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol24.py | {
"start": 143,
"end": 209
} | class ____(Protocol):
def meth(_self, x: int) -> int: ...
| ProtoA |
python | openai__openai-python | examples/responses/background_streaming_async.py | {
"start": 179,
"end": 1201
} | class ____(BaseModel):
steps: List[Step]
final_answer: str
async def main() -> None:
client = AsyncOpenAI()
id = None
async with client.responses.stream(
input="solve 8x + 31 = 2",
model="gpt-4o-2024-08-06",
text_format=MathResponse,
background=True,
) as stream:
async for event in stream:
if event.type == "response.created":
id = event.response.id
if "output_text" in event.type:
rich.print(event)
if event.sequence_number == 10:
break
print("Interrupted. Continuing...")
assert id is not None
async with client.responses.stream(
response_id=id,
starting_after=10,
text_format=MathResponse,
) as stream:
async for event in stream:
if "output_text" in event.type:
rich.print(event)
rich.print(stream.get_final_response())
if __name__ == "__main__":
asyncio.run(main())
| MathResponse |
python | huggingface__transformers | src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | {
"start": 86761,
"end": 92030
} | class ____(SeamlessM4TPreTrainedModel, GenerationMixin):
_keys_to_ignore_on_load_missing = [
"vocoder",
"speech_encoder",
"text_encoder",
"text_decoder",
]
_tied_weights_keys = {"lm_head.weight": "model.decoder.embed_tokens.weight"}
def __init__(
self,
config: SeamlessM4TConfig,
embed_tokens_decoder: Optional[nn.Embedding] = None,
):
r"""
embed_tokens_decoder (`nn.Embedding`, *optional*):
input embedding of the decoder.
"""
# update config - used principality for bos_token_id etc.
config = copy.deepcopy(config)
for param, val in config.to_dict().items():
if param.startswith("t2u_"):
config.__setattr__(param[4:], val)
super().__init__(config)
self.model = SeamlessM4TTextToUnitModel(config, embed_tokens_decoder)
self.lm_head = nn.Linear(config.hidden_size, config.t2u_vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_encoder(self):
return self.model.encoder
def get_decoder(self):
return self.model.decoder
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
@auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.t2u_pad_token_id, self.config.t2u_decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
lm_logits = self.lm_head(outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.t2u_pad_token_id, self.config.t2u_decoder_start_token_id)
############ VOCODER related code ################
# Copied from transformers.models.speecht5.modeling_speecht5.HifiGanResidualBlock
| SeamlessM4TTextToUnitForConditionalGeneration |
python | pexpect__pexpect | pexpect/FSM.py | {
"start": 4385,
"end": 13419
} | class ____:
'''This is a Finite State Machine (FSM).
'''
def __init__(self, initial_state, memory=None):
'''This creates the FSM. You set the initial state here. The "memory"
attribute is any object that you want to pass along to the action
functions. It is not used by the FSM. For parsing you would typically
pass a list to be used as a stack. '''
# Map (input_symbol, current_state) --> (action, next_state).
self.state_transitions = {}
# Map (current_state) --> (action, next_state).
self.state_transitions_any = {}
self.default_transition = None
self.input_symbol = None
self.initial_state = initial_state
self.current_state = self.initial_state
self.next_state = None
self.action = None
self.memory = memory
def reset (self):
'''This sets the current_state to the initial_state and sets
input_symbol to None. The initial state was set by the constructor
__init__(). '''
self.current_state = self.initial_state
self.input_symbol = None
def add_transition (self, input_symbol, state, action=None, next_state=None):
'''This adds a transition that associates:
(input_symbol, current_state) --> (action, next_state)
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged.
You can also set transitions for a list of symbols by using
add_transition_list(). '''
if next_state is None:
next_state = state
self.state_transitions[(input_symbol, state)] = (action, next_state)
def add_transition_list (self, list_input_symbols, state, action=None, next_state=None):
'''This adds the same transition for a list of input symbols.
You can pass a list or a string. Note that it is handy to use
string.digits, string.whitespace, string.letters, etc. to add
transitions that match character classes.
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged. '''
if next_state is None:
next_state = state
for input_symbol in list_input_symbols:
self.add_transition (input_symbol, state, action, next_state)
def add_transition_any (self, state, action=None, next_state=None):
'''This adds a transition that associates:
(current_state) --> (action, next_state)
That is, any input symbol will match the current state.
The process() method checks the "any" state associations after it first
checks for an exact match of (input_symbol, current_state).
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged. '''
if next_state is None:
next_state = state
self.state_transitions_any [state] = (action, next_state)
def set_default_transition (self, action, next_state):
'''This sets the default transition. This defines an action and
next_state if the FSM cannot find the input symbol and the current
state in the transition list and if the FSM cannot find the
current_state in the transition_any list. This is useful as a final
fall-through state for catching errors and undefined states.
The default transition can be removed by setting the attribute
default_transition to None. '''
self.default_transition = (action, next_state)
def get_transition (self, input_symbol, state):
'''This returns (action, next state) given an input_symbol and state.
This does not modify the FSM state, so calling this method has no side
effects. Normally you do not call this method directly. It is called by
process().
The sequence of steps to check for a defined transition goes from the
most specific to the least specific.
1. Check state_transitions[] that match exactly the tuple,
(input_symbol, state)
2. Check state_transitions_any[] that match (state)
In other words, match a specific state and ANY input_symbol.
3. Check if the default_transition is defined.
This catches any input_symbol and any state.
This is a handler for errors, undefined states, or defaults.
4. No transition was defined. If we get here then raise an exception.
'''
if (input_symbol, state) in self.state_transitions:
return self.state_transitions[(input_symbol, state)]
elif state in self.state_transitions_any:
return self.state_transitions_any[state]
elif self.default_transition is not None:
return self.default_transition
else:
raise ExceptionFSM ('Transition is undefined: (%s, %s).' %
(str(input_symbol), str(state)) )
def process (self, input_symbol):
'''This is the main method that you call to process input. This may
cause the FSM to change state and call an action. This method calls
get_transition() to find the action and next_state associated with the
input_symbol and current_state. If the action is None then the action
is not called and only the current state is changed. This method
processes one complete input symbol. You can process a list of symbols
(or a string) by calling process_list(). '''
self.input_symbol = input_symbol
(self.action, self.next_state) = self.get_transition (self.input_symbol, self.current_state)
if self.action is not None:
self.action (self)
self.current_state = self.next_state
self.next_state = None
def process_list (self, input_symbols):
'''This takes a list and sends each element to process(). The list may
be a string or any iterable object. '''
for s in input_symbols:
self.process (s)
##############################################################################
# The following is an example that demonstrates the use of the FSM class to
# process an RPN expression. Run this module from the command line. You will
# get a prompt > for input. Enter an RPN Expression. Numbers may be integers.
# Operators are * / + - Use the = sign to evaluate and print the expression.
# For example:
#
# 167 3 2 2 * * * 1 - =
#
# will print:
#
# 2003
##############################################################################
import sys
import string
PY3 = (sys.version_info[0] >= 3)
#
# These define the actions.
# Note that "memory" is a list being used as a stack.
#
def BeginBuildNumber (fsm):
fsm.memory.append (fsm.input_symbol)
def BuildNumber (fsm):
s = fsm.memory.pop ()
s = s + fsm.input_symbol
fsm.memory.append (s)
def EndBuildNumber (fsm):
s = fsm.memory.pop ()
fsm.memory.append (int(s))
def DoOperator (fsm):
ar = fsm.memory.pop()
al = fsm.memory.pop()
if fsm.input_symbol == '+':
fsm.memory.append (al + ar)
elif fsm.input_symbol == '-':
fsm.memory.append (al - ar)
elif fsm.input_symbol == '*':
fsm.memory.append (al * ar)
elif fsm.input_symbol == '/':
fsm.memory.append (al / ar)
def DoEqual (fsm):
print(str(fsm.memory.pop()))
def Error (fsm):
print('That does not compute.')
print(str(fsm.input_symbol))
def main():
'''This is where the example starts and the FSM state transitions are
defined. Note that states are strings (such as 'INIT'). This is not
necessary, but it makes the example easier to read. '''
f = FSM ('INIT', [])
f.set_default_transition (Error, 'INIT')
f.add_transition_any ('INIT', None, 'INIT')
f.add_transition ('=', 'INIT', DoEqual, 'INIT')
f.add_transition_list (string.digits, 'INIT', BeginBuildNumber, 'BUILDING_NUMBER')
f.add_transition_list (string.digits, 'BUILDING_NUMBER', BuildNumber, 'BUILDING_NUMBER')
f.add_transition_list (string.whitespace, 'BUILDING_NUMBER', EndBuildNumber, 'INIT')
f.add_transition_list ('+-*/', 'INIT', DoOperator, 'INIT')
print()
print('Enter an RPN Expression.')
print('Numbers may be integers. Operators are * / + -')
print('Use the = sign to evaluate and print the expression.')
print('For example: ')
print(' 167 3 2 2 * * * 1 - =')
inputstr = (input if PY3 else raw_input)('> ') # analysis:ignore
f.process_list(inputstr)
if __name__ == '__main__':
main()
| FSM |
python | doocs__leetcode | solution/1700-1799/1710.Maximum Units on a Truck/Solution2.py | {
"start": 0,
"end": 423
} | class ____:
def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:
cnt = [0] * 1001
for a, b in boxTypes:
cnt[b] += a
ans = 0
for b in range(1000, 0, -1):
a = cnt[b]
if a:
ans += b * min(truckSize, a)
truckSize -= a
if truckSize <= 0:
break
return ans
| Solution |
python | Textualize__textual | docs/examples/widgets/text_area_custom_language.py | {
"start": 292,
"end": 415
} | class ____ {
public static void main(String[] args) {
System.out.println("Hello, World!");
}
}
"""
| HelloWorld |
python | walkccc__LeetCode | solutions/1352. Product of the Last K Numbers/1352.py | {
"start": 0,
"end": 343
} | class ____:
def __init__(self):
self.prefix = [1]
def add(self, num: int) -> None:
if num == 0:
self.prefix = [1]
else:
self.prefix.append(self.prefix[-1] * num)
def getProduct(self, k: int) -> int:
return 0 if k >= len(self.prefix) else self.prefix[-1] // self.prefix[len(self.prefix) - k - 1]
| ProductOfNumbers |
python | pallets__werkzeug | src/werkzeug/datastructures/accept.py | {
"start": 9371,
"end": 11774
} | class ____(Accept):
"""Like :class:`Accept` but with normalization for language tags."""
def _value_matches(self, value: str, item: str) -> bool:
return item == "*" or _normalize_lang(value) == _normalize_lang(item)
@t.overload
def best_match(self, matches: cabc.Iterable[str]) -> str | None: ...
@t.overload
def best_match(self, matches: cabc.Iterable[str], default: str = ...) -> str: ...
def best_match(
self, matches: cabc.Iterable[str], default: str | None = None
) -> str | None:
"""Given a list of supported values, finds the best match from
the list of accepted values.
Language tags are normalized for the purpose of matching, but
are returned unchanged.
If no exact match is found, this will fall back to matching
the first subtag (primary language only), first with the
accepted values then with the match values. This partial is not
applied to any other language subtags.
The default is returned if no exact or fallback match is found.
:param matches: A list of supported languages to find a match.
:param default: The value that is returned if none match.
"""
# Look for an exact match first. If a client accepts "en-US",
# "en-US" is a valid match at this point.
result = super().best_match(matches)
if result is not None:
return result
# Fall back to accepting primary tags. If a client accepts
# "en-US", "en" is a valid match at this point. Need to use
# re.split to account for 2 or 3 letter codes.
fallback = Accept(
[(_locale_delim_re.split(item[0], 1)[0], item[1]) for item in self]
)
result = fallback.best_match(matches)
if result is not None:
return result
# Fall back to matching primary tags. If the client accepts
# "en", "en-US" is a valid match at this point.
fallback_matches = [_locale_delim_re.split(item, 1)[0] for item in matches]
result = super().best_match(fallback_matches)
# Return a value from the original match list. Find the first
# original value that starts with the matched primary tag.
if result is not None:
return next(item for item in matches if item.startswith(result))
return default
| LanguageAccept |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vector_index.py | {
"start": 4525,
"end": 5183
} | class ____(_VectorIndexConfigCreate):
threshold: Optional[int]
hnsw: Optional[_VectorIndexConfigHNSWCreate]
flat: Optional[_VectorIndexConfigFlatCreate]
@staticmethod
def vector_index_type() -> VectorIndexType:
return VectorIndexType.DYNAMIC
def _to_dict(self) -> dict:
ret_dict = super()._to_dict()
if self.hnsw is not None:
ret_dict["hnsw"] = self.hnsw._to_dict()
if self.flat is not None:
ret_dict["flat"] = self.flat._to_dict()
if self.threshold is not None:
ret_dict["threshold"] = self.threshold
return ret_dict
| _VectorIndexConfigDynamicCreate |
python | pytorch__pytorch | torch/profiler/_pattern_matcher.py | {
"start": 13881,
"end": 16154
} | class ____(Pattern):
"""
This pattern identifies if we are using num_workers=0 in DataLoader.
example:
torch.utils.data.DataLoader(dataset, batch_size=batch_size)
Add num_workers=N to the arguments. N depends on system configuration.
Pattern:
dataloader.py(...): __iter__
dataloader.py(...): _get_iterator
NOT dataloader.py(...): check_worker_number_rationality
Algorithm:
If we don't see check_worker_number_rationality call in the dataloader __iter__,
It is not an asynchronous dataloader.
"""
def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
super().__init__(prof, should_benchmark)
self.name = "Synchronized DataLoader Pattern"
self.description = (
"Detected DataLoader running with synchronized implementation. "
"Please enable asynchronous dataloading by setting num_workers > 0 when initializing DataLoader."
)
self.url = (
"https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
"#enable-async-data-loading-and-augmentation"
)
def match(self, event: _ProfilerEvent) -> bool:
def is_dataloader_function(name: str, function_name: str):
return name.startswith(
os.path.join("torch", "utils", "data", "dataloader.py")
) and name.endswith(function_name)
# TODO: fixme! Due to lifetime issues of the function name, this field might
# actually point to an already freed string when the even is a PyCall.
# Just silently skip this to unblock testing.
try:
event.name
except UnicodeDecodeError:
return False
if not is_dataloader_function(event.name, "__iter__"):
return False
if not event.children:
return False
event = event.children[0]
if not is_dataloader_function(event.name, "_get_iterator"):
return False
if not event.children:
return False
event = event.children[0]
return not is_dataloader_function(event.name, "check_worker_number_rationality")
# TODO: We should also check if the loader is bottleneck.
| SynchronizedDataLoaderPattern |
python | PyCQA__pylint | tests/functional/r/regression/regression_3231_no_member_property.py | {
"start": 78,
"end": 304
} | class ____(metaclass=ABCMeta):
def __init__(self):
pass
@property
@abstractmethod
def values(self):
pass
@classmethod
def some_method(cls):
return cls.values.issubset({2, 3})
| Cls |
python | django__django | django/contrib/gis/db/backends/postgis/adapter.py | {
"start": 264,
"end": 1980
} | class ____:
def __init__(self, obj, geography=False):
"""
Initialize on the spatial object.
"""
self.is_geometry = isinstance(obj, (GEOSGeometry, PostGISAdapter))
# Getting the WKB (in string form, to allow easy pickling of
# the adaptor) and the SRID from the geometry or raster.
if self.is_geometry:
self.ewkb = bytes(obj.ewkb)
else:
self.ewkb = to_pgraster(obj)
self.srid = obj.srid
self.geography = geography
def __conform__(self, proto):
"""Does the given protocol conform to what Psycopg2 expects?"""
from psycopg2.extensions import ISQLQuote
if proto == ISQLQuote:
return self
else:
raise Exception(
"Error implementing psycopg2 protocol. Is psycopg2 installed?"
)
def __eq__(self, other):
return isinstance(other, PostGISAdapter) and self.ewkb == other.ewkb
def __hash__(self):
return hash(self.ewkb)
def __str__(self):
return self.getquoted().decode()
@classmethod
def _fix_polygon(cls, poly):
return poly
def getquoted(self):
"""
Return a properly quoted string for use in PostgreSQL/PostGIS.
"""
if self.is_geometry:
# Psycopg will figure out whether to use E'\\000' or '\000'.
return b"%s(%s)" % (
b"ST_GeogFromWKB" if self.geography else b"ST_GeomFromEWKB",
sql.quote(self.ewkb).encode(),
)
else:
# For rasters, add explicit type cast to WKB string.
return b"'%s'::raster" % self.ewkb.hex().encode()
| PostGISAdapter |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_translate.py | {
"start": 20669,
"end": 21954
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.translate.TranslateHook")
def test_minimal_green_path(self, mock_hook):
m_delete_method_result = mock.MagicMock()
mock_hook.return_value.delete_model.return_value = m_delete_method_result
wait_for_done = mock_hook.return_value.wait_for_operation_done
op = TranslateDeleteModelOperator(
task_id="task_id",
model_id=MODEL_ID,
project_id=PROJECT_ID,
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
timeout=TIMEOUT_VALUE,
retry=DEFAULT,
)
context = mock.MagicMock()
op.execute(context=context)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_model.assert_called_once_with(
model_id=MODEL_ID,
project_id=PROJECT_ID,
location=LOCATION,
timeout=TIMEOUT_VALUE,
retry=DEFAULT,
metadata=(),
)
wait_for_done.assert_called_once_with(operation=m_delete_method_result, timeout=TIMEOUT_VALUE)
| TestTranslateDeleteModel |
python | pytorch__pytorch | torch/_dynamo/package.py | {
"start": 14517,
"end": 15700
} | class ____:
codes: list[_DynamoCodeCacheEntry]
source_info: SourceInfo
device_type: str
system_info: SystemInfo = dataclasses.field(default_factory=SystemInfo.current)
fn_name: Optional[str] = None
fn_first_lineno: Optional[str] = None
@property
def backend_ids(self) -> set[_BackendId]:
return {backend_id for code in self.codes for backend_id in code.backend_ids}
def check_versions(self) -> None:
"""Check if the current system is compatible with the system used to create this cache entry."""
current_system_info = SystemInfo.current()
self.system_info.check_compatibility(current_system_info, self.device_type)
def debug_info(self) -> dict[str, Any]:
assert len(self.codes) > 0
return {
"num_codes": str(len(self.codes)),
"fn_name": self.fn_name,
"fn_first_lineno": self.fn_first_lineno,
"device_type": self.device_type,
"backend_ids": list(self.backend_ids),
}
from torch.compiler._cache import (
CacheArtifact,
CacheArtifactFactory,
CacheArtifactManager,
)
@CacheArtifactFactory.register
| _DynamoCacheEntry |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 28431,
"end": 28707
} | class ____(BinExpr):
"""Short circuited AND."""
operator = "and"
def as_const(self, eval_ctx: EvalContext | None = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
| And |
python | jazzband__tablib | src/tablib/exceptions.py | {
"start": 71,
"end": 179
} | class ____(TablibException, TypeError):
"""Only Datasets can be added to a Databook."""
| InvalidDatasetType |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/base.py | {
"start": 55332,
"end": 56694
} | class ____(Generic[_COL_co]):
__slots__ = ("column",)
column: _COL_co
def __init__(
self, collection: ColumnCollection[Any, _COL_co], col: _COL_co
) -> None:
self.column = col
# proxy_index being non-empty means it was initialized.
# so we need to update it
pi = collection._proxy_index
if pi:
for eps_col in col._expanded_proxy_set:
pi[eps_col].add(self)
def get_expanded_proxy_set(self) -> FrozenSet[ColumnElement[Any]]:
return self.column._expanded_proxy_set
def dispose(self, collection: ColumnCollection[_COLKEY, _COL_co]) -> None:
pi = collection._proxy_index
if not pi:
return
for col in self.column._expanded_proxy_set:
colset = pi.get(col, None)
if colset:
colset.discard(self)
if colset is not None and not colset:
del pi[col]
def embedded(
self,
target_set: Union[
Set[ColumnElement[Any]], FrozenSet[ColumnElement[Any]]
],
) -> bool:
expanded_proxy_set = self.column._expanded_proxy_set
for t in target_set.difference(expanded_proxy_set):
if not expanded_proxy_set.intersection(_expand_cloned([t])):
return False
return True
| _ColumnMetrics |
python | mlflow__mlflow | dev/check_function_signatures.py | {
"start": 1090,
"end": 8369
} | class ____:
message: str
param_name: str
lineno: int
col_offset: int
def parse_signature(args: ast.arguments) -> Signature:
"""Convert ast.arguments to a Signature dataclass for easier processing."""
parameters_positional: list[Parameter] = []
parameters_keyword_only: list[Parameter] = []
# Process positional-only parameters
for i, arg in enumerate(args.posonlyargs):
parameters_positional.append(
Parameter(
name=arg.arg,
position=i,
is_required=True, # All positional-only are required
is_positional_only=True,
is_keyword_only=False,
lineno=arg.lineno,
col_offset=arg.col_offset,
)
)
# Process regular positional parameters
offset = len(args.posonlyargs)
first_optional_idx = len(args.posonlyargs + args.args) - len(args.defaults)
for i, arg in enumerate(args.args):
pos = offset + i
parameters_positional.append(
Parameter(
name=arg.arg,
position=pos,
is_required=pos < first_optional_idx,
is_positional_only=False,
is_keyword_only=False,
lineno=arg.lineno,
col_offset=arg.col_offset,
)
)
# Process keyword-only parameters
for arg, default in zip(args.kwonlyargs, args.kw_defaults):
parameters_keyword_only.append(
Parameter(
name=arg.arg,
position=None,
is_required=default is None,
is_positional_only=False,
is_keyword_only=True,
lineno=arg.lineno,
col_offset=arg.col_offset,
)
)
return Signature(
positional=parameters_positional,
keyword_only=parameters_keyword_only,
has_var_positional=args.vararg is not None,
has_var_keyword=args.kwarg is not None,
)
def check_signature_compatibility(
    old_fn: ast.FunctionDef | ast.AsyncFunctionDef,
    new_fn: ast.FunctionDef | ast.AsyncFunctionDef,
) -> list[ParameterError]:
    """
    Return a list of ParameterError describing every way *new_fn* breaks
    backward compatibility with *old_fn*; the list is empty when the two
    signatures are compatible.

    Compatibility rules
    -------------------
    • Positional / positional-only parameters
        - Cannot be reordered, renamed, or removed.
        - Adding **required** ones is breaking.
        - Adding **optional** ones is allowed only at the end.
        - Making an optional parameter required is breaking.
    • Keyword-only parameters (order does not matter)
        - Cannot be renamed or removed.
        - Making an optional parameter required is breaking.
        - Adding a required parameter is breaking; adding an optional parameter is fine.
    """
    old_sig = parse_signature(old_fn.args)
    new_sig = parse_signature(new_fn.args)

    errors: list[ParameterError] = []

    # ------------------------------------------------------------------ #
    # 1. Positional / pos-only parameters
    # ------------------------------------------------------------------ #
    # (a) existing parameters must line up
    for idx, old_param in enumerate(old_sig.positional):
        if idx >= len(new_sig.positional):
            # Old param has no counterpart at this index: it was removed.
            # `continue` (not `break`) so every removed param is reported.
            errors.append(
                ParameterError(
                    message=f"Positional param '{old_param.name}' was removed.",
                    param_name=old_param.name,
                    lineno=old_param.lineno,
                    col_offset=old_param.col_offset,
                )
            )
            continue

        new_param = new_sig.positional[idx]
        if old_param.name != new_param.name:
            errors.append(
                ParameterError(
                    message=(
                        f"Positional param order/name changed: "
                        f"'{old_param.name}' -> '{new_param.name}'."
                    ),
                    param_name=new_param.name,
                    lineno=new_param.lineno,
                    col_offset=new_param.col_offset,
                )
            )
            # Stop checking further positional params after first order/name mismatch
            # (every later index would otherwise produce a cascading mismatch).
            break

        if (not old_param.is_required) and new_param.is_required:
            errors.append(
                ParameterError(
                    message=f"Optional positional param '{old_param.name}' became required.",
                    param_name=new_param.name,
                    lineno=new_param.lineno,
                    col_offset=new_param.col_offset,
                )
            )

    # (b) any extra new positional params must be optional and appended
    if len(new_sig.positional) > len(old_sig.positional):
        for idx in range(len(old_sig.positional), len(new_sig.positional)):
            new_param = new_sig.positional[idx]
            if new_param.is_required:
                errors.append(
                    ParameterError(
                        message=f"New required positional param '{new_param.name}' added.",
                        param_name=new_param.name,
                        lineno=new_param.lineno,
                        col_offset=new_param.col_offset,
                    )
                )

    # ------------------------------------------------------------------ #
    # 2. Keyword-only parameters (order-agnostic)
    # ------------------------------------------------------------------ #
    old_kw_names = {p.name for p in old_sig.keyword_only}
    new_kw_names = {p.name for p in new_sig.keyword_only}

    # Build mappings for easier lookup
    old_kw_by_name = {p.name: p for p in old_sig.keyword_only}
    new_kw_by_name = {p.name: p for p in new_sig.keyword_only}

    # removed or renamed (a rename is indistinguishable from remove+add here)
    for name in old_kw_names - new_kw_names:
        old_param = old_kw_by_name[name]
        errors.append(
            ParameterError(
                message=f"Keyword-only param '{name}' was removed.",
                param_name=name,
                lineno=old_param.lineno,
                col_offset=old_param.col_offset,
            )
        )

    # optional -> required upgrades
    for name in old_kw_names & new_kw_names:
        if not old_kw_by_name[name].is_required and new_kw_by_name[name].is_required:
            new_param = new_kw_by_name[name]
            errors.append(
                ParameterError(
                    message=f"Keyword-only param '{name}' became required.",
                    param_name=name,
                    lineno=new_param.lineno,
                    col_offset=new_param.col_offset,
                )
            )

    # new required keyword-only params (new *optional* ones are allowed)
    errors.extend(
        ParameterError(
            message=f"New required keyword-only param '{param.name}' added.",
            param_name=param.name,
            lineno=param.lineno,
            col_offset=param.col_offset,
        )
        for param in new_sig.keyword_only
        if param.is_required and param.name not in old_kw_names
    )

    return errors
def _is_private(n: str) -> bool:
return n.startswith("_") and not n.startswith("__") and not n.endswith("__")
| ParameterError |
python | google__jax | jax/experimental/pallas/ops/tpu/splash_attention/splash_attention_mask.py | {
"start": 10398,
"end": 11959
} | class ____(_ComputableMask):
"""Lazy chunked causal mask.
Attention is causal within each chunk (0, K), (K, 2K), (2K, 3K), ... tokens
attend to each other but not across chunks.
Llama4 models use interleaved chunk attention along with global attention.
Attributes:
chunk_size: The size of each attention chunk.
"""
chunk_size: int
def __init__(
self,
shape: tuple[int, int],
chunk_size: int,
shard_count: int = 1,
):
if chunk_size <= 0:
raise ValueError('chunk_size must be positive')
self.chunk_size = chunk_size
# Define the mask function for chunk attention
def chunked_causal_mask_function(q_ids, kv_ids):
"""Computes the mask logic for the given slice indices."""
# Condition 1: Same chunk
same_chunk = (q_ids // self.chunk_size) == (kv_ids // self.chunk_size)
# Condition 2: Causal
causal = q_ids >= kv_ids
return same_chunk & causal
super().__init__(
shape=shape,
mask_function=chunked_causal_mask_function,
shard_count=shard_count,
)
def __eq__(self, other: object):
if not isinstance(other, type(self)):
return NotImplemented
return (
self.shape == other.shape
and self.chunk_size == other.chunk_size
and np.array_equal(self.q_sequence, other.q_sequence)
)
def __hash__(self):
return hash((
type(self),
self.shape,
self.chunk_size,
self.q_sequence.tobytes() if self.q_sequence is not None else None,
))
| ChunkedCausalMask |
python | walkccc__LeetCode | solutions/239. Sliding Window Maximum/239.py | {
"start": 0,
"end": 392
} | class ____:
def maxSlidingWindow(self, nums: list[int], k: int) -> list[int]:
ans = []
maxQ = collections.deque()
for i, num in enumerate(nums):
while maxQ and maxQ[-1] < num:
maxQ.pop()
maxQ.append(num)
if i >= k and nums[i - k] == maxQ[0]: # out-of-bounds
maxQ.popleft()
if i >= k - 1:
ans.append(maxQ[0])
return ans
| Solution |
python | getsentry__sentry | src/sentry/integrations/msteams/webhook.py | {
"start": 3511,
"end": 3641
} | class ____(MsTeamsIntegrationAnalytics):
pass
@analytics.eventclass("integrations.msteams.unassign")
| MsTeamsIntegrationUnresolve |
python | pennersr__django-allauth | allauth/idp/oidc/views.py | {
"start": 14796,
"end": 15533
} | class ____(View):
"""
The UserInfo Endpoint MUST support the use of the HTTP GET and HTTP POST methods
"""
    def get(self, request: HttpRequest) -> HttpResponse:
        """Handle GET by delegating to the shared userinfo responder."""
        return self._respond(request)
    def post(self, request: HttpRequest) -> HttpResponse:
        """Handle POST by delegating to the shared userinfo responder."""
        return self._respond(request)
def _respond(self, request: HttpRequest) -> HttpResponse:
orequest = extract_params(request)
try:
oresponse = get_server().create_userinfo_response(*orequest)
return convert_response(*oresponse)
except OAuth2Error as e:
return respond_json_error(request, e)
user_info = UserInfoView.as_view()
@method_decorator(login_not_required, name="dispatch")
| UserInfoView |
python | coleifer__peewee | tests/extra_fields.py | {
"start": 342,
"end": 1055
} | class ____(ModelTestCase):
requires = [Comp]
def test_compressed_field(self):
a = b'a' * 1024
b = b'b' * 1024
Comp.create(data=a, key='a')
Comp.create(data=b, key='b')
a_db = Comp.get(Comp.key == 'a')
self.assertEqual(a_db.data, a)
b_db = Comp.get(Comp.key == 'b')
self.assertEqual(b_db.data, b)
# Get at the underlying data.
CompTbl = Table('comp', ('id', 'data', 'key')).bind(self.database)
obj = CompTbl.select().where(CompTbl.key == 'a').get()
self.assertEqual(obj['key'], 'a')
# Ensure that the data actually was compressed.
self.assertTrue(len(obj['data']) < 1024)
| TestCompressedField |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.