language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | run-llama__llama_index | llama-index-core/llama_index/core/tools/eval_query_engine.py | {
"start": 632,
"end": 3295
} | class ____(QueryEngineTool):
"""
Evaluating query engine tool.
A tool that makes use of a query engine and an evaluator, where the
evaluation of the query engine response will determine the tool output.
Args:
evaluator (BaseEvaluator): A query engine.
query_engine (BaseQueryEngine): A query engine.
metadata (ToolMetadata): The associated metadata of the query engine.
"""
_evaluator: BaseEvaluator
_failed_tool_output_template: str
def __init__(
self,
evaluator: BaseEvaluator,
*args: Any,
failed_tool_output_template: str = FAILED_TOOL_OUTPUT_TEMPLATE,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self._evaluator = evaluator
self._failed_tool_output_template = failed_tool_output_template
def _process_tool_output(
self,
tool_output: ToolOutput,
evaluation_result: EvaluationResult,
) -> ToolOutput:
if evaluation_result.passing:
return tool_output
tool_output.content = self._failed_tool_output_template.format(
tool_name=self.metadata.name,
reason=evaluation_result.feedback,
)
return tool_output
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
name: Optional[str] = None,
description: Optional[str] = None,
return_direct: bool = False,
resolve_input_errors: bool = True,
evaluator: Optional[BaseEvaluator] = None,
) -> "EvalQueryEngineTool":
return cls(
evaluator=evaluator or AnswerRelevancyEvaluator(),
query_engine=query_engine,
metadata=ToolMetadata(
name=name or DEFAULT_NAME,
description=description or DEFAULT_DESCRIPTION,
return_direct=return_direct,
),
resolve_input_errors=resolve_input_errors,
)
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
tool_output = super().call(*args, **kwargs)
evaluation_results = self._evaluator.evaluate_response(
tool_output.raw_input["input"], tool_output.raw_output
)
return self._process_tool_output(tool_output, evaluation_results)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
tool_output = await super().acall(*args, **kwargs)
evaluation_results = await self._evaluator.aevaluate_response(
tool_output.raw_input["input"], tool_output.raw_output
)
return self._process_tool_output(tool_output, evaluation_results)
| EvalQueryEngineTool |
python | pydantic__pydantic | pydantic/types.py | {
"start": 35268,
"end": 35528
} | class ____(BaseModel):
uuid1: UUID1
Model(uuid1=uuid.uuid1())
```
"""
UUID3 = Annotated[UUID, UuidVersion(3)]
"""A [UUID](https://docs.python.org/3/library/uuid.html) that must be version 3.
```python
import uuid
from pydantic import UUID3, BaseModel
| Model |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 143369,
"end": 144341
} | class ____(TestCase):
def test_basic(self):
def generator():
yield 1
yield 2
sleep(0.2)
yield 3
iterable = mi.time_limited(0.1, generator())
actual = list(iterable)
expected = [1, 2]
self.assertEqual(actual, expected)
self.assertTrue(iterable.timed_out)
def test_complete(self):
iterable = mi.time_limited(2, iter(range(10)))
actual = list(iterable)
expected = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
self.assertEqual(actual, expected)
self.assertFalse(iterable.timed_out)
def test_zero_limit(self):
iterable = mi.time_limited(0, count())
actual = list(iterable)
expected = []
self.assertEqual(actual, expected)
self.assertTrue(iterable.timed_out)
def test_invalid_limit(self):
with self.assertRaises(ValueError):
list(mi.time_limited(-0.1, count()))
| TimeLimitedTests |
python | walkccc__LeetCode | solutions/1309. Decrypt String from Alphabet to Integer Mapping/1309.py | {
"start": 0,
"end": 303
} | class ____:
def freqAlphabets(self, s: str) -> str:
ans = ''
i = 0
while i < len(s):
if i + 2 < len(s) and s[i + 2] == '#':
ans += chr(int(s[i:i + 2]) + ord('a') - 1)
i += 3
else:
ans += chr(int(s[i]) + ord('a') - 1)
i += 1
return ans
| Solution |
python | euske__pdfminer | pdfminer/rijndael.py | {
"start": 45443,
"end": 46223
} | class ____:
"""
>>> key = bytes.fromhex('00010203050607080a0b0c0d0f101112')
>>> plaintext = bytes.fromhex('506812a45f08c889b97f5980038b8359')
>>> RijndaelEncryptor(key, 128).encrypt(plaintext).hex()
'd8f532538289ef7d06b506a4fd5be9c9'
"""
def __init__(self, key, keybits=256):
assert len(key) == KEYLENGTH(keybits)
(self.rk, self.nrounds) = rijndaelSetupEncrypt(key, keybits)
assert len(self.rk) == RKLENGTH(keybits)
assert self.nrounds == NROUNDS(keybits)
return
def encrypt(self, plaintext):
assert len(plaintext) == 16
return rijndaelEncrypt(self.rk, self.nrounds, plaintext)
if __name__ == '__main__':
import doctest
print('pdfminer.rijndael', doctest.testmod())
| RijndaelEncryptor |
python | ipython__ipython | IPython/core/ultratb.py | {
"start": 44622,
"end": 44962
} | class ____(FormattedTB):
"""Deprecated since IPython 9.0."""
def __init__(self, *args, **kwargs):
warnings.warn(
"Deprecated since IPython 9.0 use FormattedTB directly ColorTB is just an alias",
DeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
| ColorTB |
python | wandb__wandb | wandb/vendor/pygments/lexers/matlab.py | {
"start": 5816,
"end": 7475
} | class ____(Lexer):
"""
For Matlab sessions. Modeled after PythonConsoleLexer.
Contributed by Ken Schutte <kschutte@csail.mit.edu>.
.. versionadded:: 0.10
"""
name = 'Matlab session'
aliases = ['matlabsession']
def get_tokens_unprocessed(self, text):
mlexer = MatlabLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:3])]))
curcode += line[3:]
elif line.startswith('>>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:2])]))
curcode += line[2:]
elif line.startswith('???'):
idx = len(curcode)
# without is showing error on same line as before...?
# line = "\n" + line
token = (0, Generic.Traceback, line)
insertions.append((idx, [token]))
else:
if curcode:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode: # or item:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
| MatlabSessionLexer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-microsoft-sharepoint/source_microsoft_sharepoint/utils.py | {
"start": 491,
"end": 600
} | class ____(Enum):
OWN_DRIVES = "OWN_DRIVES"
SHARED_ITEMS = "SHARED_ITEMS"
BOTH = "BOTH"
| SearchScope |
python | viewflow__viewflow | viewflow/forms/renderers.py | {
"start": 8137,
"end": 8224
} | class ____(SelectRenderer):
tag = "vf-field-select-dependent"
| DependentSelectRenderer |
python | python-openxml__python-docx | tests/test_table.py | {
"start": 18978,
"end": 21861
} | class ____:
"""Unit-test suite for `docx.table._Cell` objects."""
def it_provides_access_to_its_cells(self, _index_prop_: Mock, table_prop_: Mock, table_: Mock):
table_prop_.return_value = table_
_index_prop_.return_value = 4
column = _Column(cast(CT_TblGridCol, element("w:gridCol{w:w=500}")), table_)
table_.column_cells.return_value = [3, 2, 1]
cells = column.cells
table_.column_cells.assert_called_once_with(4)
assert cells == (3, 2, 1)
def it_provides_access_to_the_table_it_belongs_to(self, table_: Mock):
table_.table = table_
column = _Column(cast(CT_TblGridCol, element("w:gridCol{w:w=500}")), table_)
assert column.table is table_
@pytest.mark.parametrize(
("gridCol_cxml", "expected_width"),
[
("w:gridCol{w:w=4242}", 2693670),
("w:gridCol{w:w=1440}", 914400),
("w:gridCol{w:w=2.54cm}", 914400),
("w:gridCol{w:w=54mm}", 1944000),
("w:gridCol{w:w=12.5pt}", 158750),
("w:gridCol", None),
],
)
def it_knows_its_width_in_EMU(
self, gridCol_cxml: str, expected_width: int | None, table_: Mock
):
column = _Column(cast(CT_TblGridCol, element(gridCol_cxml)), table_)
assert column.width == expected_width
@pytest.mark.parametrize(
("gridCol_cxml", "new_value", "expected_cxml"),
[
("w:gridCol", Emu(914400), "w:gridCol{w:w=1440}"),
("w:gridCol{w:w=4242}", Inches(0.5), "w:gridCol{w:w=720}"),
("w:gridCol{w:w=4242}", None, "w:gridCol"),
("w:gridCol", None, "w:gridCol"),
],
)
def it_can_change_its_width(
self, gridCol_cxml: str, new_value: Length | None, expected_cxml: str, table_: Mock
):
column = _Column(cast(CT_TblGridCol, element(gridCol_cxml)), table_)
column.width = new_value
assert column.width == new_value
assert column._gridCol.xml == xml(expected_cxml)
def it_knows_its_index_in_table_to_help(self, table_: Mock):
tbl = cast(CT_Tbl, element("w:tbl/w:tblGrid/(w:gridCol,w:gridCol,w:gridCol)"))
gridCol = tbl.tblGrid.gridCol_lst[1]
column = _Column(gridCol, table_)
assert column._index == 1
# fixtures -------------------------------------------------------
@pytest.fixture
def _index_prop_(self, request: FixtureRequest):
return property_mock(request, _Column, "_index")
@pytest.fixture
def parent_(self, request: FixtureRequest):
return instance_mock(request, Table)
@pytest.fixture
def table_(self, request: FixtureRequest):
return instance_mock(request, Table)
@pytest.fixture
def table_prop_(self, request: FixtureRequest):
return property_mock(request, _Column, "table")
| Describe_Column |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/fmt_skip/type_params.py | {
"start": 48,
"end": 226
} | class ____ [ # trailing open paren comment
# leading comment
T # trailing type param comment
# trailing type param own line comment
]: # fmt: skip
pass
| TestTypeParam |
python | getsentry__sentry | src/sentry/integrations/source_code_management/search.py | {
"start": 1027,
"end": 1224
} | class ____(serializers.Serializer[dict[str, str]]):
field = serializers.CharField(required=True)
query = serializers.CharField(required=True)
@control_silo_endpoint
| SourceCodeSearchSerializer |
python | weaviate__weaviate-python-client | weaviate/collections/classes/data.py | {
"start": 763,
"end": 1281
} | class ____(Generic[P, R]):
"""This class represents an entire object within a collection to be used when batching."""
properties: P = None # type: ignore
uuid: Optional[UUID] = None
vector: Optional[VECTORS] = None
references: R = None # type: ignore
# R is clearly bounded to Optional[Any] and defaults to None but mypy doesn't seem to understand that
# throws error: Incompatible types in assignment (expression has type "None", variable has type "R") [assignment]
@dataclass
| DataObject |
python | django__django | django/contrib/postgres/fields/ranges.py | {
"start": 11211,
"end": 11379
} | class ____(models.Transform):
lookup_name = "upper_inc"
function = "UPPER_INC"
output_field = models.BooleanField()
@RangeField.register_lookup
| UpperInclusive |
python | jupyterlab__jupyterlab | examples/console/main.py | {
"start": 565,
"end": 1431
} | class ____(LabServerApp):
extension_url = "/example"
default_url = "/example"
app_url = "/example"
load_other_extensions = False
name = __name__
app_name = "JupyterLab Example Console"
app_settings_dir = os.path.join(HERE, "build", "application_settings")
schemas_dir = os.path.join(HERE, "build", "schemas")
static_dir = os.path.join(HERE, "build")
templates_dir = os.path.join(HERE, "templates")
themes_dir = os.path.join(HERE, "build", "themes")
user_settings_dir = os.path.join(HERE, "build", "user_settings")
workspaces_dir = os.path.join(HERE, "build", "workspaces")
def initialize_settings(self):
super().initialize_settings()
settings = self.serverapp.web_app.settings
settings["terminals_available"] = False
if __name__ == "__main__":
ExampleApp.launch_instance()
| ExampleApp |
python | huggingface__transformers | src/transformers/models/bark/modeling_bark.py | {
"start": 10324,
"end": 10956
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.in_proj = nn.Linear(config.hidden_size, 4 * config.hidden_size, bias=config.bias)
self.out_proj = nn.Linear(4 * config.hidden_size, config.hidden_size, bias=config.bias)
self.dropout = nn.Dropout(config.dropout)
self.gelu = nn.GELU()
def forward(self, hidden_states):
hidden_states = self.in_proj(hidden_states)
hidden_states = self.gelu(hidden_states)
hidden_states = self.out_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
| BarkMLP |
python | pypa__warehouse | tests/unit/test_sessions.py | {
"start": 11406,
"end": 21407
} | class ____:
def test_initialize(self, monkeypatch):
timestamp_signer_obj = pretend.stub()
timestamp_signer_create = pretend.call_recorder(
lambda secret, salt: timestamp_signer_obj
)
monkeypatch.setattr(crypto, "TimestampSigner", timestamp_signer_create)
strict_redis_obj = pretend.stub()
strict_redis_cls = pretend.stub(
from_url=pretend.call_recorder(lambda url: strict_redis_obj)
)
monkeypatch.setattr(redis, "StrictRedis", strict_redis_cls)
session_factory = SessionFactory("mysecret", "my url")
assert session_factory.signer is timestamp_signer_obj
assert session_factory.redis is strict_redis_obj
assert timestamp_signer_create.calls == [
pretend.call("mysecret", salt="session")
]
assert strict_redis_cls.from_url.calls == [pretend.call("my url")]
def test_redis_key(self):
session_factory = SessionFactory("mysecret", "redis://redis://localhost:6379/0")
assert (
session_factory._redis_key("my_session_id")
== "warehouse/session/data/my_session_id"
)
def test_no_current_session(self, pyramid_request):
session_factory = SessionFactory("mysecret", "redis://redis://localhost:6379/0")
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert (
pyramid_request.response_callbacks[0] is session_factory._process_response
)
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_invalid_session_id(self, pyramid_request):
pyramid_request.cookies["session_id"] = "invalid!"
session_factory = SessionFactory("mysecret", "redis://redis://localhost:6379/0")
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert (
pyramid_request.response_callbacks[0] is session_factory._process_response
)
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_no_data(self, pyramid_request):
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory("mysecret", "redis://redis://localhost:6379/0")
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: None)
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert (
pyramid_request.response_callbacks[0] is session_factory._process_response
)
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60)
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456")
]
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_invalid_data(self, pyramid_request):
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory("mysecret", "redis://redis://localhost:6379/0")
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: b"invalid data")
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert (
pyramid_request.response_callbacks[0] is session_factory._process_response
)
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60)
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456")
]
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_valid_data(self, monkeypatch, pyramid_request):
msgpack_unpackb = pretend.call_recorder(
lambda bdata, raw, use_list: {"foo": "bar"}
)
monkeypatch.setattr(msgpack, "unpackb", msgpack_unpackb)
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory("mysecret", "redis://redis://localhost:6379/0")
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: b"valid data")
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert (
pyramid_request.response_callbacks[0] is session_factory._process_response
)
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60)
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456")
]
assert msgpack_unpackb.calls == [
pretend.call(b"valid data", raw=False, use_list=True)
]
assert isinstance(session, Session)
assert session == {"foo": "bar"}
assert session.sid == "123456"
assert not session.new
def test_no_save_invalid_session(self, pyramid_request):
session_factory = SessionFactory("mysecret", "redis://redis://localhost:6379/0")
session_factory.redis = pretend.stub()
pyramid_request.session = InvalidSession()
response = pretend.stub()
session_factory._process_response(pyramid_request, response)
def test_noop_unused_session(self, pyramid_request):
session_factory = SessionFactory("mysecret", "redis://redis://localhost:6379/0")
session_factory.redis = pretend.stub()
pyramid_request.session.invalidated = set()
pyramid_request.session.should_save = pretend.call_recorder(lambda: False)
response = pretend.stub()
session_factory._process_response(pyramid_request, response)
assert pyramid_request.session.should_save.calls == [pretend.call()]
def test_invalidated_deletes_no_save(self, pyramid_request):
session_factory = SessionFactory("mysecret", "redis://redis://localhost:6379/0")
session_factory.redis = pretend.stub(
delete=pretend.call_recorder(lambda key: None)
)
pyramid_request.session.invalidated = ["1", "2"]
pyramid_request.session.should_save = pretend.call_recorder(lambda: False)
response = pretend.stub(
delete_cookie=pretend.call_recorder(lambda cookie: None)
)
session_factory._process_response(pyramid_request, response)
assert session_factory.redis.delete.calls == [
pretend.call("warehouse/session/data/1"),
pretend.call("warehouse/session/data/2"),
]
assert pyramid_request.session.should_save.calls == [
pretend.call(),
pretend.call(),
]
assert response.delete_cookie.calls == [pretend.call("session_id")]
def test_invalidated_deletes_save_non_secure(self, monkeypatch, pyramid_request):
msgpack_packb = pretend.call_recorder(lambda *a, **kw: b"msgpack data")
monkeypatch.setattr(msgpack, "packb", msgpack_packb)
session_factory = SessionFactory("mysecret", "redis://redis://localhost:6379/0")
session_factory.redis = pretend.stub(
delete=pretend.call_recorder(lambda key: None),
setex=pretend.call_recorder(lambda key, age, data: None),
)
session_factory.signer.sign = pretend.call_recorder(lambda data: "cookie data")
pyramid_request.scheme = "http"
pyramid_request.session.sid = "123456"
pyramid_request.session.invalidated = ["1", "2"]
pyramid_request.session.should_save = pretend.call_recorder(lambda: True)
response = pretend.stub(
set_cookie=pretend.call_recorder(
lambda cookie, data, httponly=False, secure=True, samesite=b"none": None
),
delete_cookie=pretend.call_recorder(lambda cookie: None),
)
session_factory._process_response(pyramid_request, response)
assert session_factory.redis.delete.calls == [
pretend.call("warehouse/session/data/1"),
pretend.call("warehouse/session/data/2"),
]
assert msgpack_packb.calls == [
pretend.call(
pyramid_request.session, default=object_encode, use_bin_type=True
)
]
assert session_factory.redis.setex.calls == [
pretend.call("warehouse/session/data/123456", 12 * 60 * 60, b"msgpack data")
]
assert pyramid_request.session.should_save.calls == [
pretend.call(),
pretend.call(),
]
assert session_factory.signer.sign.calls == [pretend.call(b"123456")]
assert response.set_cookie.calls == [
pretend.call(
"session_id",
"cookie data",
httponly=True,
secure=False,
samesite=b"lax",
)
]
assert response.delete_cookie.calls == [
pretend.call("user_id__insecure"),
]
| TestSessionFactory |
python | kamyu104__LeetCode-Solutions | Python/building-boxes.py | {
"start": 43,
"end": 579
} | class ____(object):
def minimumBoxes(self, n):
"""
:type n: int
:rtype: int
"""
# find max h s.t. sum(k*(k+1)//2 for k in xrange(1, h+1)) <= n
# => find max h s.t. h*(h+1)*(h+2)//6 <= n
h = int((6*n)**(1.0/3))
if h*(h+1)*(h+2) > 6*n:
# (h-1)*h*(h+1) < h^3 <= 6n < h*(h+1)*(h+2) < (h+1)^3
h -= 1
n -= h*(h+1)*(h+2)//6
d = int(math.ceil((-1+(1+8*n)**0.5)/2)) # find min d s.t. d*(d+1)//2 >= n
return h*(h+1)//2 + d
| Solution |
python | qdrant__qdrant-client | qdrant_client/local/multi_distances.py | {
"start": 1221,
"end": 1630
} | class ____:
def __init__(self, positive: list[list[float]], negative: list[list[float]]):
self.positive: types.NumpyArray = np.array(positive)
self.negative: types.NumpyArray = np.array(negative)
assert not np.isnan(self.positive).any(), "Positive vector must not contain NaN"
assert not np.isnan(self.negative).any(), "Negative vector must not contain NaN"
| MultiContextPair |
python | readthedocs__readthedocs.org | readthedocs/analytics/migrations/0006_alter_pageview_id.py | {
"start": 149,
"end": 567
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("analytics", "0005_add_unique_constraint"),
]
operations = [
migrations.AlterField(
model_name="pageview",
name="id",
field=models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
]
| Migration |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 26919,
"end": 28126
} | class ____(MixinSequenceOfValues):
"""
x-axis tick labels
Parameters
----------
theme_element : element_text
Notes
-----
Use the `margin` to control the gap between the ticks and the
text. e.g.
```python
theme(axis_text_x=element_text(margin={"t": 5, "units": "pt"}))
```
creates a margin of 5 points.
"""
_omit = ["margin", "va"]
def apply_ax(self, ax: Axes):
super().apply_ax(ax)
# TODO: Remove this code when the minimum matplotlib >= 3.10.0,
# and use the commented one below it
import matplotlib as mpl
from packaging import version
vinstalled = version.parse(mpl.__version__)
v310 = version.parse("3.10.0")
name = "labelbottom" if vinstalled >= v310 else "labelleft"
if not ax.xaxis.get_tick_params()[name]:
return
# if not ax.xaxis.get_tick_params()["labelbottom"]:
# return
labels = [t.label1 for t in ax.xaxis.get_major_ticks()]
self.set(labels)
def blank_ax(self, ax: Axes):
super().blank_ax(ax)
for t in ax.xaxis.get_major_ticks():
t.label1.set_visible(False)
| axis_text_x |
python | getsentry__sentry | src/sentry_plugins/github/webhooks/events/__init__.py | {
"start": 0,
"end": 666
} | class ____:
def __call__(self, event, organization):
raise NotImplementedError
def is_anonymous_email(email):
return email[-25:] == "@users.noreply.github.com"
def get_external_id(username):
return "github:%s" % username
from .installation import InstallationEventWebhook
from .installation_repository import InstallationRepositoryEventWebhook
from .pull_request import PullRequestEventWebhook
from .push import PushEventWebhook
__all__ = (
"InstallationEventWebhook",
"InstallationRepositoryEventWebhook",
"PullRequestEventWebhook",
"PushEventWebhook",
"is_anonymous_email",
"get_external_id",
"Webhook",
)
| Webhook |
python | joke2k__faker | faker/providers/address/es_MX/__init__.py | {
"start": 84,
"end": 4900
} | class ____(AddressProvider):
city_prefixes = ("Sur", "Norte")
city_adjectives = ("Nueva", "Vieja")
city_suffixes = ("de la Montaña", "los bajos", "los altos")
street_prefixes = (
"Ampliación",
"Andador",
"Avenida",
"Boulevard",
"Calle",
"Callejón",
"Calzada",
"Cerrada",
"Circuito",
"Circunvalación",
"Continuación",
"Corredor",
"Diagonal",
"Eje vial",
"Pasaje",
"Peatonal",
"Periférico",
"Privada",
"Prolongación",
"Retorno",
"Viaducto",
)
building_number_formats = ("#####", "####", "###")
postcode_formats = ("#####", "#####-####")
# States and abbrs from Mexico from INEGI
# http://www.inegi.org.mx/geo/contenidos/geoestadistica/CatalogoClaves.aspx
states = (
("AGS", "Aguascalientes"),
("BC", "Baja California"),
("BCS", "Baja California Sur"),
("CAMP", "Campeche"),
("COAH", "Coahuila de Zaragoza"),
("COL", "Colima"),
("CHIS", "Chiapas"),
("CHIH", "Chihuahua"),
("DF", "Distrito Federal"),
("DGO", "Durango"),
("GTO", "Guanajuato"),
("GRO", "Guerrero"),
("HGO", "Hidalgo"),
("JAL", "Jalisco"),
("MEX", "México"),
("MICH", "Michoacán de Ocampo"),
("MOR", "Morelos"),
("NAY", "Nayarit"),
("NL", "Nuevo León"),
("OAX", "Oaxaca"),
("PUE", "Puebla"),
("QRO", "Querétaro"),
("Q. ROO", "Quintana Roo"),
("SLP", "San Luis Potosí"),
("SIN", "Sinaloa"),
("SON", "Sonora"),
("TAB", "Tabasco"),
("TAMPS", "Tamaulipas"),
("TLAX", "Tlaxcala"),
("VER", "Veracruz de Ignacio de la Llave"),
("YUC", "Yucatán"),
("ZAC", "Zacatecas"),
)
zip_codes = OrderedDict(
(
# The ZipCodes has a begin & final range
# Source: Norma Técnica de Domicilios INEGI
("AGS", (20000, 20999)),
("BC", (21000, 22999)),
("BCS", (23000, 23999)),
("CAMP", (24000, 24999)),
("COAH", (25000, 27999)),
("COL", (28000, 28999)),
("CHIS", (29000, 30999)),
("CHIH", (31000, 33999)),
("DF", (1000, 19999)),
("DGO", (36000, 35999)),
("GTO", (36000, 38999)),
("GRO", (39000, 41999)),
("HGO", (42000, 43999)),
("JAL", (44000, 49999)),
("MEX", (50000, 57999)),
("MICH", (58000, 61999)),
("MOR", (62000, 62999)),
("NAY", (63000, 63999)),
("NL", (64000, 67999)),
("OAX", (68000, 71999)),
("PUE", (72000, 75999)),
("QRO", (76000, 76999)),
("Q. ROO", (77000, 75999)),
("SLP", (78000, 79999)),
("SIN", (80000, 82999)),
("SON", (83000, 85999)),
("TAB", (86000, 86999)),
("TAMPS", (87000, 89999)),
("TLAX", (90000, 90999)),
("VER", (91000, 97999)),
("YUC", (97000, 97999)),
("ZAC", (98000, 99999)),
)
)
city_formats = (
"{{city_adjective}} {{country}}",
"San {{first_name}} {{city_suffix}}",
)
street_name_formats = (
"{{street_prefix}} {{last_name}}",
"{{street_prefix}} {{country}}",
"{{street_prefix}} {{state}}",
"{{street_prefix}} {{city_prefix}} {{last_name}}",
)
street_address_formats = ("{{street_name}} {{secondary_address}}",)
address_formats = ("{{street_address}}\n{{city}}, {{state_abbr}} {{postcode}}",)
secondary_address_formats = (
"### ###",
"### Interior ###",
"### Edif. ### , Depto. ###",
)
def city_prefix(self) -> str:
return self.random_element(self.city_prefixes)
def city_suffix(self) -> str:
return self.random_element(self.city_suffixes)
def city_adjective(self) -> str:
return self.random_element(self.city_adjectives)
def street_prefix(self) -> str:
"""
:example 'Avenida'
"""
return self.random_element(self.street_prefixes)
def secondary_address(self) -> str:
"""
:example '020 Interior 999'
"""
return self.numerify(self.random_element(self.secondary_address_formats))
def administrative_unit(self) -> str:
"""
example: u'Guerrero'
"""
return self.random_element(self.states)[1] # type: ignore
state = administrative_unit
def state_abbr(self) -> str:
"""
example: u'GRO'
"""
return self.random_element(self.states)[0] # type: ignore
| Provider |
python | huggingface__transformers | tests/models/trocr/test_modeling_trocr.py | {
"start": 5888,
"end": 7057
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
def setUp(self):
self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
@unittest.skip(reason="Not yet implemented")
def test_inputs_embeds(self):
pass
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
@unittest.skip(reason="Decoder cannot keep gradients")
def test_retain_grad_hidden_states_attentions(self):
return
@unittest.skip(reason="The model doesn't support left padding") # and it's not used enough to be worth fixing :)
def test_left_padding_compatibility(self):
pass
| TrOCRStandaloneDecoderModelTest |
python | django__django | django/contrib/admin/widgets.py | {
"start": 15981,
"end": 20109
} | class ____:
"""
Select widget mixin that loads options from AutocompleteJsonView via AJAX.
Renders the necessary data attributes for select2 and adds the static form
media.
"""
url_name = "%s:autocomplete"
def __init__(self, field, admin_site, attrs=None, choices=(), using=None):
self.field = field
self.admin_site = admin_site
self.db = using
self.choices = choices
self.attrs = {} if attrs is None else attrs.copy()
self.i18n_name = get_select2_language()
def get_url(self):
return reverse(self.url_name % self.admin_site.name)
def build_attrs(self, base_attrs, extra_attrs=None):
"""
Set select2's AJAX attributes.
Attributes can be set using the html5 data attribute.
Nested attributes require a double dash as per
https://select2.org/configuration/data-attributes#nested-subkey-options
"""
attrs = super().build_attrs(base_attrs, extra_attrs=extra_attrs)
attrs.setdefault("class", "")
attrs.update(
{
"data-ajax--cache": "true",
"data-ajax--delay": 250,
"data-ajax--type": "GET",
"data-ajax--url": self.get_url(),
"data-app-label": self.field.model._meta.app_label,
"data-model-name": self.field.model._meta.model_name,
"data-field-name": self.field.name,
"data-theme": "admin-autocomplete",
"data-allow-clear": json.dumps(not self.is_required),
"data-placeholder": "", # Allows clearing of the input.
"lang": self.i18n_name,
"class": attrs["class"]
+ (" " if attrs["class"] else "")
+ "admin-autocomplete",
}
)
return attrs
def optgroups(self, name, value, attr=None):
"""Return selected options based on the ModelChoiceIterator."""
default = (None, [], 0)
groups = [default]
has_selected = False
selected_choices = {
str(v) for v in value if str(v) not in self.choices.field.empty_values
}
if not self.is_required and not self.allow_multiple_selected:
default[1].append(self.create_option(name, "", "", False, 0))
remote_model_opts = self.field.remote_field.model._meta
to_field_name = getattr(
self.field.remote_field, "field_name", remote_model_opts.pk.attname
)
to_field_name = remote_model_opts.get_field(to_field_name).attname
choices = (
(getattr(obj, to_field_name), self.choices.field.label_from_instance(obj))
for obj in self.choices.queryset.using(self.db).filter(
**{"%s__in" % to_field_name: selected_choices}
)
)
for option_value, option_label in choices:
selected = str(option_value) in value and (
has_selected is False or self.allow_multiple_selected
)
has_selected |= selected
index = len(default[1])
subgroup = default[1]
subgroup.append(
self.create_option(
name, option_value, option_label, selected_choices, index
)
)
return groups
@property
def media(self):
extra = "" if settings.DEBUG else ".min"
i18n_file = (
("admin/js/vendor/select2/i18n/%s.js" % self.i18n_name,)
if self.i18n_name
else ()
)
return forms.Media(
js=(
"admin/js/vendor/jquery/jquery%s.js" % extra,
"admin/js/vendor/select2/select2.full%s.js" % extra,
*i18n_file,
"admin/js/jquery.init.js",
"admin/js/autocomplete.js",
),
css={
"screen": (
"admin/css/vendor/select2/select2%s.css" % extra,
"admin/css/autocomplete.css",
),
},
)
| AutocompleteMixin |
python | neetcode-gh__leetcode | python/0442-find-all-duplicates-in-an-array.py | {
"start": 0,
"end": 269
} | class ____:
def findDuplicates(self, nums: List[int]) -> List[int]:
res = []
for n in nums:
n = abs(n)
if nums[n - 1] < 0:
res.append(n)
nums[n - 1] = -nums[n - 1]
return res
| Solution |
python | django__django | django/db/backends/mysql/introspection.py | {
"start": 837,
"end": 14998
class ____(BaseDatabaseIntrospection):
    """Database introspection for the MySQL/MariaDB backend.

    Maps server column types to Django field names and reads tables,
    columns, relations, and constraints out of ``information_schema``.
    """
    # Map of MySQL C-API field-type codes to Django field class names.
    data_types_reverse = {
        FIELD_TYPE.BLOB: "TextField",
        FIELD_TYPE.CHAR: "CharField",
        FIELD_TYPE.DECIMAL: "DecimalField",
        FIELD_TYPE.NEWDECIMAL: "DecimalField",
        FIELD_TYPE.DATE: "DateField",
        FIELD_TYPE.DATETIME: "DateTimeField",
        FIELD_TYPE.DOUBLE: "FloatField",
        FIELD_TYPE.FLOAT: "FloatField",
        FIELD_TYPE.INT24: "IntegerField",
        FIELD_TYPE.JSON: "JSONField",
        FIELD_TYPE.LONG: "IntegerField",
        FIELD_TYPE.LONGLONG: "BigIntegerField",
        FIELD_TYPE.SHORT: "SmallIntegerField",
        FIELD_TYPE.STRING: "CharField",
        FIELD_TYPE.TIME: "TimeField",
        FIELD_TYPE.TIMESTAMP: "DateTimeField",
        FIELD_TYPE.TINY: "IntegerField",
        FIELD_TYPE.TINY_BLOB: "TextField",
        FIELD_TYPE.MEDIUM_BLOB: "TextField",
        FIELD_TYPE.LONG_BLOB: "TextField",
        FIELD_TYPE.VAR_STRING: "CharField",
    }
    def get_field_type(self, data_type, description):
        """Refine the base type using extra column metadata: promote
        auto-increment integers to AutoField variants, unsigned integers to
        Positive*Field variants, and detect UUID/JSON columns."""
        field_type = super().get_field_type(data_type, description)
        if "auto_increment" in description.extra:
            if field_type == "IntegerField":
                return "AutoField"
            elif field_type == "BigIntegerField":
                return "BigAutoField"
            elif field_type == "SmallIntegerField":
                return "SmallAutoField"
        if description.is_unsigned:
            if field_type == "BigIntegerField":
                return "PositiveBigIntegerField"
            elif field_type == "IntegerField":
                return "PositiveIntegerField"
            elif field_type == "SmallIntegerField":
                return "PositiveSmallIntegerField"
        if description.data_type.upper() == "UUID":
            return "UUIDField"
        # JSON data type is an alias for LONGTEXT in MariaDB, use check
        # constraints clauses to introspect JSONField.
        if description.has_json_constraint:
            return "JSONField"
        return field_type
    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        cursor.execute(
            """
            SELECT
                table_name,
                table_type,
                table_comment
            FROM information_schema.tables
            WHERE table_schema = DATABASE()
            """
        )
        return [
            TableInfo(row[0], {"BASE TABLE": "t", "VIEW": "v"}.get(row[1]), row[2])
            for row in cursor.fetchall()
        ]
    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface."
        """
        json_constraints = {}
        if (
            self.connection.mysql_is_mariadb
            and self.connection.features.can_introspect_json_field
        ):
            # JSON data type is an alias for LONGTEXT in MariaDB, select
            # JSON_VALID() constraints to introspect JSONField.
            cursor.execute(
                """
                SELECT c.constraint_name AS column_name
                FROM information_schema.check_constraints AS c
                WHERE
                    c.table_name = %s AND
                    LOWER(c.check_clause) =
                    'json_valid(`' + LOWER(c.constraint_name) + '`)' AND
                    c.constraint_schema = DATABASE()
                """,
                [table_name],
            )
            json_constraints = {row[0] for row in cursor.fetchall()}
        # A default collation for the given table.
        cursor.execute(
            """
            SELECT  table_collation
            FROM    information_schema.tables
            WHERE   table_schema = DATABASE()
            AND     table_name = %s
            """,
            [table_name],
        )
        row = cursor.fetchone()
        default_column_collation = row[0] if row else ""
        # information_schema database gives more accurate results for some
        # figures:
        # - varchar length returned by cursor.description is an internal
        #   length, not visible length (#5725)
        # - precision and scale (for decimal fields) (#5014)
        # - auto_increment is not available in cursor.description
        cursor.execute(
            """
            SELECT
                column_name, data_type, character_maximum_length,
                numeric_precision, numeric_scale, extra, column_default,
                CASE
                    WHEN collation_name = %s THEN NULL
                    ELSE collation_name
                END AS collation_name,
                CASE
                    WHEN column_type LIKE '%% unsigned' THEN 1
                    ELSE 0
                END AS is_unsigned,
                column_comment
            FROM information_schema.columns
            WHERE table_name = %s AND table_schema = DATABASE()
            """,
            [default_column_collation, table_name],
        )
        field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()}
        # One-row SELECT to obtain cursor.description for the table.
        cursor.execute(
            "SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)
        )
        def to_int(i):
            # Convert to int, passing None through unchanged.
            return int(i) if i is not None else i
        fields = []
        for line in cursor.description:
            info = field_info[line[0]]
            fields.append(
                FieldInfo(
                    *line[:2],
                    to_int(info.max_len) or line[2],
                    to_int(info.max_len) or line[3],
                    to_int(info.num_prec) or line[4],
                    to_int(info.num_scale) or line[5],
                    line[6],
                    info.column_default,
                    info.collation,
                    info.extra,
                    info.is_unsigned,
                    line[0] in json_constraints,
                    info.comment,
                    info.data_type,
                )
            )
        return fields
    def get_sequences(self, cursor, table_name, table_fields=()):
        """Return the table's AUTO_INCREMENT column as a one-element list,
        or an empty list if the table has none."""
        for field_info in self.get_table_description(cursor, table_name):
            if "auto_increment" in field_info.extra:
                # MySQL allows only one auto-increment column per table.
                return [{"table": table_name, "column": field_info.name}]
        return []
    def get_relations(self, cursor, table_name):
        """
        Return a dictionary of
        {
            field_name: (field_name_other_table, other_table, db_on_delete)
        }
        representing all foreign keys in the given table.
        """
        cursor.execute(
            """
            SELECT
                kcu.column_name,
                kcu.referenced_column_name,
                kcu.referenced_table_name,
                rc.delete_rule
            FROM
                information_schema.key_column_usage kcu
            JOIN
                information_schema.referential_constraints rc
                ON rc.constraint_name = kcu.constraint_name
                AND rc.constraint_schema = kcu.constraint_schema
            WHERE kcu.table_name = %s
                AND kcu.table_schema = DATABASE()
                AND kcu.referenced_table_schema = DATABASE()
                AND kcu.referenced_table_name IS NOT NULL
                AND kcu.referenced_column_name IS NOT NULL
            """,
            [table_name],
        )
        return {
            field_name: (other_field, other_table, self.on_delete_types.get(on_delete))
            for field_name, other_field, other_table, on_delete in cursor.fetchall()
        }
    def get_storage_engine(self, cursor, table_name):
        """
        Retrieve the storage engine for a given table. Return the default
        storage engine if the table doesn't exist.
        """
        cursor.execute(
            """
            SELECT engine
            FROM information_schema.tables
            WHERE
                table_name = %s AND
                table_schema = DATABASE()
            """,
            [table_name],
        )
        result = cursor.fetchone()
        if not result:
            return self.connection.features._mysql_storage_engine
        return result[0]
    def _parse_constraint_columns(self, check_clause, columns):
        """Return the (ordered) set of column names from ``columns`` that are
        referenced, backquoted, inside the given CHECK clause SQL."""
        check_columns = OrderedSet()
        statement = sqlparse.parse(check_clause)[0]
        tokens = (token for token in statement.flatten() if not token.is_whitespace)
        for token in tokens:
            if (
                token.ttype == sqlparse.tokens.Name
                and self.connection.ops.quote_name(token.value) == token.value
                and token.value[1:-1] in columns
            ):
                check_columns.add(token.value[1:-1])
        return check_columns
    def get_constraints(self, cursor, table_name):
        """
        Retrieve any constraints or keys (unique, pk, fk, check, index) across
        one or more columns.
        """
        constraints = {}
        # Get the actual constraint names and columns
        name_query = """
            SELECT kc.`constraint_name`, kc.`column_name`,
                kc.`referenced_table_name`, kc.`referenced_column_name`,
                c.`constraint_type`
            FROM
                information_schema.key_column_usage AS kc,
                information_schema.table_constraints AS c
            WHERE
                kc.table_schema = DATABASE() AND
                (
                    kc.referenced_table_schema = DATABASE() OR
                    kc.referenced_table_schema IS NULL
                ) AND
                c.table_schema = kc.table_schema AND
                c.constraint_name = kc.constraint_name AND
                c.constraint_type != 'CHECK' AND
                kc.table_name = %s
            ORDER BY kc.`ordinal_position`
        """
        cursor.execute(name_query, [table_name])
        for constraint, column, ref_table, ref_column, kind in cursor.fetchall():
            if constraint not in constraints:
                constraints[constraint] = {
                    "columns": OrderedSet(),
                    "primary_key": kind == "PRIMARY KEY",
                    "unique": kind in {"PRIMARY KEY", "UNIQUE"},
                    "index": False,
                    "check": False,
                    "foreign_key": (ref_table, ref_column) if ref_column else None,
                }
                if self.connection.features.supports_index_column_ordering:
                    constraints[constraint]["orders"] = []
            constraints[constraint]["columns"].add(column)
        # Add check constraints.
        if self.connection.features.can_introspect_check_constraints:
            unnamed_constraints_index = 0
            columns = {
                info.name for info in self.get_table_description(cursor, table_name)
            }
            if self.connection.mysql_is_mariadb:
                type_query = """
                    SELECT c.constraint_name, c.check_clause
                    FROM information_schema.check_constraints AS c
                    WHERE
                        c.constraint_schema = DATABASE() AND
                        c.table_name = %s
                """
            else:
                type_query = """
                    SELECT cc.constraint_name, cc.check_clause
                    FROM
                        information_schema.check_constraints AS cc,
                        information_schema.table_constraints AS tc
                    WHERE
                        cc.constraint_schema = DATABASE() AND
                        tc.table_schema = cc.constraint_schema AND
                        cc.constraint_name = tc.constraint_name AND
                        tc.constraint_type = 'CHECK' AND
                        tc.table_name = %s
                """
            cursor.execute(type_query, [table_name])
            for constraint, check_clause in cursor.fetchall():
                constraint_columns = self._parse_constraint_columns(
                    check_clause, columns
                )
                # Ensure uniqueness of unnamed constraints. Unnamed unique
                # and check columns constraints have the same name as
                # a column.
                if set(constraint_columns) == {constraint}:
                    unnamed_constraints_index += 1
                    constraint = "__unnamed_constraint_%s__" % unnamed_constraints_index
                constraints[constraint] = {
                    "columns": constraint_columns,
                    "primary_key": False,
                    "unique": False,
                    "index": False,
                    "check": True,
                    "foreign_key": None,
                }
        # Now add in the indexes
        cursor.execute(
            "SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name)
        )
        # SHOW INDEX columns used: 0 table, 1 non_unique, 2 key_name,
        # 3 seq_in_index, 4 column_name, 5 collation (A/D), 10 index_type.
        for table, non_unique, index, colseq, column, order, type_ in [
            x[:6] + (x[10],) for x in cursor.fetchall()
        ]:
            if index not in constraints:
                constraints[index] = {
                    "columns": OrderedSet(),
                    "primary_key": False,
                    "unique": not non_unique,
                    "check": False,
                    "foreign_key": None,
                }
                if self.connection.features.supports_index_column_ordering:
                    constraints[index]["orders"] = []
            constraints[index]["index"] = True
            constraints[index]["type"] = (
                Index.suffix if type_ == "BTREE" else type_.lower()
            )
            constraints[index]["columns"].add(column)
            if self.connection.features.supports_index_column_ordering:
                constraints[index]["orders"].append("DESC" if order == "D" else "ASC")
        # Convert the sorted sets to lists
        for constraint in constraints.values():
            constraint["columns"] = list(constraint["columns"])
        return constraints
| DatabaseIntrospection |
python | wandb__wandb | wandb/sdk/lib/asyncio_compat.py | {
"start": 1126,
"end": 4320
class ____:
    """Runs an asyncio event loop allowing cancellation.
    The `run()` method is like `asyncio.run()`. The `cancel()` method may
    be used in a different thread, for instance in a `finally` block, to cancel
    all tasks, and it is a no-op if `run()` completed.
    Without this, it is impossible to make `asyncio.run()` stop if it runs
    in a non-main thread. In particular, a KeyboardInterrupt causes the
    ThreadPoolExecutor above to block until the asyncio thread completes,
    but there is no way to tell the asyncio thread to cancel its work.
    A second KeyboardInterrupt makes ThreadPoolExecutor give up while the
    asyncio thread still runs in the background, with terrible effects if it
    prints to the user's terminal.
    """
    def __init__(self) -> None:
        # _lock guards all of the state below; run() and cancel() may be
        # called from different threads.
        self._lock = threading.Lock()
        self._is_cancelled = False
        self._started = False
        self._done = False
        self._loop: asyncio.AbstractEventLoop | None = None
        self._cancel_event: asyncio.Event | None = None
    def run(self, fn: Callable[[], Coroutine[Any, Any, _T]]) -> _T:
        """Run a coroutine in asyncio, cancelling it on `cancel()`.
        Returns:
            The result of the coroutine returned by `fn`.
        Raises:
            RunnerCancelledError: If `cancel()` is called.
        """
        return asyncio.run(self._run_or_cancel(fn))
    async def _run_or_cancel(
        self,
        fn: Callable[[], Coroutine[Any, Any, _T]],
    ) -> _T:
        # Publish the loop and cancel event under the lock so a concurrent
        # cancel() either sees them or marks _is_cancelled before we start.
        with self._lock:
            if self._is_cancelled:
                raise RunnerCancelledError()
            self._loop = asyncio.get_running_loop()
            self._cancel_event = asyncio.Event()
            self._started = True
        cancellation_task = asyncio.create_task(self._cancel_event.wait())
        fn_task = asyncio.create_task(fn())
        try:
            # Race the user coroutine against the cancellation event.
            await asyncio.wait(
                [cancellation_task, fn_task],
                return_when=asyncio.FIRST_COMPLETED,
            )
            if fn_task.done():
                return fn_task.result()
            else:
                raise RunnerCancelledError()
        finally:
            # NOTE: asyncio.run() cancels all tasks after the main task exits,
            # but this is not documented, so we cancel them explicitly here
            # as well. It also blocks until canceled tasks complete.
            cancellation_task.cancel()
            fn_task.cancel()
            with self._lock:
                self._done = True
    def cancel(self) -> None:
        """Cancel all asyncio work started by `run()`."""
        with self._lock:
            if self._is_cancelled:
                return
            self._is_cancelled = True
            if self._done or not self._started:
                # If the runner already finished, no need to cancel it.
                #
                # If the runner hasn't started the loop yet, then it will not
                # as we already set _is_cancelled.
                return
            assert self._loop
            assert self._cancel_event
            # Wake the loop from another thread; Event.set() itself is not
            # thread-safe, so it must run on the loop via call_soon_threadsafe.
            self._loop.call_soon_threadsafe(self._cancel_event.set)
| CancellableRunner |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_exceptions/invalid_exceptions_caught.py | {
"start": 2358,
"end": 2583
class ____(UnknownError):  # UnknownError is deliberately undefined in this lint fixture
    pass
EXCEPTIONS = (SomeBase, ValueError)  # a tuple of exception classes is legal in an except clause
try:
    raise ValueError
except EXCEPTIONS:  # no message expected: catching a tuple of exception types is valid
    pass
LAMBDA = lambda x: 1, 2  # parses as the tuple ((lambda x: 1), 2), not a lambda returning (1, 2)
try:
    pass
except LAMBDA: # [catching-non-exception]
    pass
| SomeBase |
python | getsentry__sentry | tests/sentry/search/eap/test_spans.py | {
"start": 1078,
"end": 25769
class ____(TestCase):
    """Tests that SearchResolver.resolve_query translates query strings into
    the expected RPC filter protos (``where`` for attribute conditions,
    ``having`` for aggregate conditions) using the span definitions."""
    def setUp(self) -> None:
        self.resolver = SearchResolver(
            params=SnubaParams(), config=SearchResolverConfig(), definitions=SPAN_DEFINITIONS
        )
    def test_simple_query(self) -> None:
        where, having, _ = self.resolver.resolve_query("span.description:foo")
        assert where == TraceItemFilter(
            comparison_filter=ComparisonFilter(
                key=AttributeKey(name="sentry.raw_description", type=AttributeKey.Type.TYPE_STRING),
                op=ComparisonFilter.OP_EQUALS,
                value=AttributeValue(val_str="foo"),
            )
        )
        assert having is None
    def test_negation(self) -> None:
        where, having, _ = self.resolver.resolve_query("!span.description:foo")
        assert where == TraceItemFilter(
            comparison_filter=ComparisonFilter(
                key=AttributeKey(name="sentry.raw_description", type=AttributeKey.Type.TYPE_STRING),
                op=ComparisonFilter.OP_NOT_EQUALS,
                value=AttributeValue(val_str="foo"),
            )
        )
        assert having is None
    def test_numeric_query(self) -> None:
        where, having, _ = self.resolver.resolve_query("ai.total_tokens.used:123")
        assert where == TraceItemFilter(
            comparison_filter=ComparisonFilter(
                key=AttributeKey(name="ai_total_tokens_used", type=AttributeKey.Type.TYPE_INT),
                op=ComparisonFilter.OP_EQUALS,
                value=AttributeValue(val_int=123),
            )
        )
        assert having is None
    def test_in_filter(self) -> None:
        where, having, _ = self.resolver.resolve_query("span.description:[foo,bar,baz]")
        assert where == TraceItemFilter(
            comparison_filter=ComparisonFilter(
                key=AttributeKey(name="sentry.raw_description", type=AttributeKey.Type.TYPE_STRING),
                op=ComparisonFilter.OP_IN,
                value=AttributeValue(val_str_array=StrArray(values=["foo", "bar", "baz"])),
            )
        )
        assert having is None
    def test_uuid_validation(self) -> None:
        where, having, _ = self.resolver.resolve_query(f"id:{'f'*16}")
        assert where == TraceItemFilter(
            comparison_filter=ComparisonFilter(
                key=AttributeKey(name="sentry.item_id", type=AttributeKey.Type.TYPE_STRING),
                op=ComparisonFilter.OP_EQUALS,
                value=AttributeValue(val_str="f" * 16),
            )
        )
        assert having is None
    def test_invalid_uuid_validation(self) -> None:
        # Non-hex values for the id field are rejected outright.
        with pytest.raises(InvalidSearchQuery):
            self.resolver.resolve_query("id:hello")
    def test_not_in_filter(self) -> None:
        where, having, _ = self.resolver.resolve_query("!span.description:[foo,bar,baz]")
        assert where == TraceItemFilter(
            comparison_filter=ComparisonFilter(
                key=AttributeKey(name="sentry.raw_description", type=AttributeKey.Type.TYPE_STRING),
                op=ComparisonFilter.OP_NOT_IN,
                value=AttributeValue(val_str_array=StrArray(values=["foo", "bar", "baz"])),
            )
        )
        assert having is None
    def test_in_numeric_filter(self) -> None:
        where, having, _ = self.resolver.resolve_query("ai.total_tokens.used:[123,456,789]")
        assert where == TraceItemFilter(
            comparison_filter=ComparisonFilter(
                key=AttributeKey(name="ai_total_tokens_used", type=AttributeKey.Type.TYPE_INT),
                op=ComparisonFilter.OP_IN,
                value=AttributeValue(val_int_array=IntArray(values=[123, 456, 789])),
            )
        )
        assert having is None
    def test_greater_than_numeric_filter(self) -> None:
        where, having, _ = self.resolver.resolve_query("ai.total_tokens.used:>123")
        assert where == TraceItemFilter(
            comparison_filter=ComparisonFilter(
                key=AttributeKey(name="ai_total_tokens_used", type=AttributeKey.Type.TYPE_INT),
                op=ComparisonFilter.OP_GREATER_THAN,
                value=AttributeValue(val_int=123),
            )
        )
        assert having is None
    def test_timestamp_relative_filter(self) -> None:
        # Relative timestamps resolve against "now", so freeze the clock.
        with freeze_time("2018-12-11 10:20:00"):
            where, having, _ = self.resolver.resolve_query("timestamp:-24h")
        assert where == TraceItemFilter(
            comparison_filter=ComparisonFilter(
                key=AttributeKey(name="sentry.timestamp", type=AttributeKey.Type.TYPE_DOUBLE),
                op=ComparisonFilter.OP_GREATER_THAN_OR_EQUALS,
                value=AttributeValue(
                    val_double=datetime.fromisoformat("2018-12-10 10:20:00+00:00").timestamp()
                ),
            )
        )
        assert having is None
    def test_query_with_and(self) -> None:
        where, having, _ = self.resolver.resolve_query("span.description:foo span.op:bar")
        assert where == TraceItemFilter(
            and_filter=AndFilter(
                filters=[
                    TraceItemFilter(
                        comparison_filter=ComparisonFilter(
                            key=AttributeKey(
                                name="sentry.raw_description", type=AttributeKey.Type.TYPE_STRING
                            ),
                            op=ComparisonFilter.OP_EQUALS,
                            value=AttributeValue(val_str="foo"),
                        )
                    ),
                    TraceItemFilter(
                        comparison_filter=ComparisonFilter(
                            key=AttributeKey(name="sentry.op", type=AttributeKey.Type.TYPE_STRING),
                            op=ComparisonFilter.OP_EQUALS,
                            value=AttributeValue(val_str="bar"),
                        )
                    ),
                ]
            )
        )
        assert having is None
    def test_query_with_or(self) -> None:
        where, having, _ = self.resolver.resolve_query("span.description:foo or span.op:bar")
        assert where == TraceItemFilter(
            or_filter=OrFilter(
                filters=[
                    TraceItemFilter(
                        comparison_filter=ComparisonFilter(
                            key=AttributeKey(
                                name="sentry.raw_description", type=AttributeKey.Type.TYPE_STRING
                            ),
                            op=ComparisonFilter.OP_EQUALS,
                            value=AttributeValue(val_str="foo"),
                        )
                    ),
                    TraceItemFilter(
                        comparison_filter=ComparisonFilter(
                            key=AttributeKey(name="sentry.op", type=AttributeKey.Type.TYPE_STRING),
                            op=ComparisonFilter.OP_EQUALS,
                            value=AttributeValue(val_str="bar"),
                        )
                    ),
                ]
            )
        )
        assert having is None
    def test_query_with_or_and_brackets(self) -> None:
        where, having, _ = self.resolver.resolve_query(
            "(span.description:123 and span.op:345) or (span.description:foo and span.op:bar)"
        )
        assert where == TraceItemFilter(
            or_filter=OrFilter(
                filters=[
                    TraceItemFilter(
                        and_filter=AndFilter(
                            filters=[
                                TraceItemFilter(
                                    comparison_filter=ComparisonFilter(
                                        key=AttributeKey(
                                            name="sentry.raw_description",
                                            type=AttributeKey.Type.TYPE_STRING,
                                        ),
                                        op=ComparisonFilter.OP_EQUALS,
                                        value=AttributeValue(val_str="123"),
                                    )
                                ),
                                TraceItemFilter(
                                    comparison_filter=ComparisonFilter(
                                        key=AttributeKey(
                                            name="sentry.op", type=AttributeKey.Type.TYPE_STRING
                                        ),
                                        op=ComparisonFilter.OP_EQUALS,
                                        value=AttributeValue(val_str="345"),
                                    )
                                ),
                            ]
                        )
                    ),
                    TraceItemFilter(
                        and_filter=AndFilter(
                            filters=[
                                TraceItemFilter(
                                    comparison_filter=ComparisonFilter(
                                        key=AttributeKey(
                                            name="sentry.raw_description",
                                            type=AttributeKey.Type.TYPE_STRING,
                                        ),
                                        op=ComparisonFilter.OP_EQUALS,
                                        value=AttributeValue(val_str="foo"),
                                    )
                                ),
                                TraceItemFilter(
                                    comparison_filter=ComparisonFilter(
                                        key=AttributeKey(
                                            name="sentry.op", type=AttributeKey.Type.TYPE_STRING
                                        ),
                                        op=ComparisonFilter.OP_EQUALS,
                                        value=AttributeValue(val_str="bar"),
                                    )
                                ),
                            ]
                        )
                    ),
                ]
            )
        )
    def test_empty_query(self) -> None:
        where, having, _ = self.resolver.resolve_query("")
        assert where is None
        assert having is None
    def test_none_query(self) -> None:
        where, having, _ = self.resolver.resolve_query(None)
        assert where is None
        assert having is None
    def test_simple_aggregate_query(self) -> None:
        # Each comparison operator in the query maps to its RPC counterpart.
        operators = [
            ("", AggregationComparisonFilter.OP_EQUALS),
            (">", AggregationComparisonFilter.OP_GREATER_THAN),
            (">=", AggregationComparisonFilter.OP_GREATER_THAN_OR_EQUALS),
            ("<", AggregationComparisonFilter.OP_LESS_THAN),
            ("<=", AggregationComparisonFilter.OP_LESS_THAN_OR_EQUALS),
        ]
        for str_op, rpc_op in operators:
            where, having, _ = self.resolver.resolve_query(f"count():{str_op}2")
            assert where is None
            assert having == AggregationFilter(
                comparison_filter=AggregationComparisonFilter(
                    aggregation=AttributeAggregation(
                        aggregate=Function.FUNCTION_COUNT,
                        key=AttributeKey(name="sentry.project_id", type=AttributeKey.Type.TYPE_INT),
                        label="count()",
                        extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                    ),
                    op=rpc_op,
                    val=2,
                )
            )
    def test_simple_negation_aggregate_query(self) -> None:
        # Negating an aggregate condition inverts each operator
        # (e.g. ``!count():>2`` becomes ``count() <= 2``).
        operators = [
            ("", AggregationComparisonFilter.OP_NOT_EQUALS),
            (">", AggregationComparisonFilter.OP_LESS_THAN_OR_EQUALS),
            (">=", AggregationComparisonFilter.OP_LESS_THAN),
            ("<", AggregationComparisonFilter.OP_GREATER_THAN_OR_EQUALS),
            ("<=", AggregationComparisonFilter.OP_GREATER_THAN),
        ]
        for str_op, rpc_op in operators:
            where, having, _ = self.resolver.resolve_query(f"!count():{str_op}2")
            assert where is None
            assert having == AggregationFilter(
                comparison_filter=AggregationComparisonFilter(
                    aggregation=AttributeAggregation(
                        aggregate=Function.FUNCTION_COUNT,
                        key=AttributeKey(name="sentry.project_id", type=AttributeKey.Type.TYPE_INT),
                        label="count()",
                        extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                    ),
                    op=rpc_op,
                    val=2,
                )
            )
    def test_aggregate_query_on_custom_attributes(self) -> None:
        where, having, _ = self.resolver.resolve_query("avg(tags[foo,number]):>1000")
        assert where is None
        assert having == AggregationFilter(
            comparison_filter=AggregationComparisonFilter(
                aggregation=AttributeAggregation(
                    aggregate=Function.FUNCTION_AVG,
                    key=AttributeKey(name="foo", type=AttributeKey.Type.TYPE_DOUBLE),
                    label="avg(tags[foo,number])",
                    extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                ),
                op=AggregationComparisonFilter.OP_GREATER_THAN,
                val=1000,
            )
        )
    def test_aggregate_query_on_attributes_with_units(self) -> None:
        # "1000", "1s", and "1000ms" all normalize to the same value (1000).
        for value in ["1000", "1s", "1000ms"]:
            where, having, _ = self.resolver.resolve_query(f"avg(measurements.lcp):>{value}")
            assert where is None
            assert having == AggregationFilter(
                comparison_filter=AggregationComparisonFilter(
                    aggregation=AttributeAggregation(
                        aggregate=Function.FUNCTION_AVG,
                        key=AttributeKey(name="lcp", type=AttributeKey.Type.TYPE_DOUBLE),
                        label="avg(measurements.lcp)",
                        extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                    ),
                    op=AggregationComparisonFilter.OP_GREATER_THAN,
                    val=1000,
                )
            )
    def test_aggregate_query_with_multiple_conditions(self) -> None:
        # Adjacent aggregate terms default to AND.
        where, having, _ = self.resolver.resolve_query("count():>1 avg(measurements.lcp):>3000")
        assert where is None
        assert having == AggregationFilter(
            and_filter=AggregationAndFilter(
                filters=[
                    AggregationFilter(
                        comparison_filter=AggregationComparisonFilter(
                            aggregation=AttributeAggregation(
                                aggregate=Function.FUNCTION_COUNT,
                                key=AttributeKey(
                                    name="sentry.project_id", type=AttributeKey.Type.TYPE_INT
                                ),
                                label="count()",
                                extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                            ),
                            op=AggregationComparisonFilter.OP_GREATER_THAN,
                            val=1,
                        ),
                    ),
                    AggregationFilter(
                        comparison_filter=AggregationComparisonFilter(
                            aggregation=AttributeAggregation(
                                aggregate=Function.FUNCTION_AVG,
                                key=AttributeKey(name="lcp", type=AttributeKey.Type.TYPE_DOUBLE),
                                label="avg(measurements.lcp)",
                                extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                            ),
                            op=AggregationComparisonFilter.OP_GREATER_THAN,
                            val=3000,
                        ),
                    ),
                ],
            )
        )
    def test_aggregate_query_with_multiple_conditions_explicit_and(self) -> None:
        where, having, _ = self.resolver.resolve_query("count():>1 AND avg(measurements.lcp):>3000")
        assert where is None
        assert having == AggregationFilter(
            and_filter=AggregationAndFilter(
                filters=[
                    AggregationFilter(
                        comparison_filter=AggregationComparisonFilter(
                            aggregation=AttributeAggregation(
                                aggregate=Function.FUNCTION_COUNT,
                                key=AttributeKey(
                                    name="sentry.project_id", type=AttributeKey.Type.TYPE_INT
                                ),
                                label="count()",
                                extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                            ),
                            op=AggregationComparisonFilter.OP_GREATER_THAN,
                            val=1,
                        ),
                    ),
                    AggregationFilter(
                        comparison_filter=AggregationComparisonFilter(
                            aggregation=AttributeAggregation(
                                aggregate=Function.FUNCTION_AVG,
                                key=AttributeKey(name="lcp", type=AttributeKey.Type.TYPE_DOUBLE),
                                label="avg(measurements.lcp)",
                                extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                            ),
                            op=AggregationComparisonFilter.OP_GREATER_THAN,
                            val=3000,
                        ),
                    ),
                ],
            )
        )
    def test_aggregate_query_with_multiple_conditions_explicit_or(self) -> None:
        where, having, _ = self.resolver.resolve_query("count():>1 or avg(measurements.lcp):>3000")
        assert where is None
        assert having == AggregationFilter(
            or_filter=AggregationOrFilter(
                filters=[
                    AggregationFilter(
                        comparison_filter=AggregationComparisonFilter(
                            aggregation=AttributeAggregation(
                                aggregate=Function.FUNCTION_COUNT,
                                key=AttributeKey(
                                    name="sentry.project_id", type=AttributeKey.Type.TYPE_INT
                                ),
                                label="count()",
                                extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                            ),
                            op=AggregationComparisonFilter.OP_GREATER_THAN,
                            val=1,
                        ),
                    ),
                    AggregationFilter(
                        comparison_filter=AggregationComparisonFilter(
                            aggregation=AttributeAggregation(
                                aggregate=Function.FUNCTION_AVG,
                                key=AttributeKey(name="lcp", type=AttributeKey.Type.TYPE_DOUBLE),
                                label="avg(measurements.lcp)",
                                extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                            ),
                            op=AggregationComparisonFilter.OP_GREATER_THAN,
                            val=3000,
                        ),
                    ),
                ],
            )
        )
    def test_aggregate_query_with_multiple_conditions_nested(self) -> None:
        where, having, _ = self.resolver.resolve_query(
            "(count():>1 AND avg(http.response_content_length):>3000) OR (count():>1 AND avg(measurements.lcp):>3000)"
        )
        assert where is None
        assert having == AggregationFilter(
            or_filter=AggregationOrFilter(
                filters=[
                    AggregationFilter(
                        and_filter=AggregationAndFilter(
                            filters=[
                                AggregationFilter(
                                    comparison_filter=AggregationComparisonFilter(
                                        aggregation=AttributeAggregation(
                                            aggregate=Function.FUNCTION_COUNT,
                                            key=AttributeKey(
                                                name="sentry.project_id",
                                                type=AttributeKey.Type.TYPE_INT,
                                            ),
                                            label="count()",
                                            extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                                        ),
                                        op=AggregationComparisonFilter.OP_GREATER_THAN,
                                        val=1,
                                    ),
                                ),
                                AggregationFilter(
                                    comparison_filter=AggregationComparisonFilter(
                                        aggregation=AttributeAggregation(
                                            aggregate=Function.FUNCTION_AVG,
                                            key=AttributeKey(
                                                name="http.response_content_length",
                                                type=AttributeKey.Type.TYPE_DOUBLE,
                                            ),
                                            label="avg(http.response_content_length)",
                                            extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                                        ),
                                        op=AggregationComparisonFilter.OP_GREATER_THAN,
                                        val=3000,
                                    ),
                                ),
                            ],
                        )
                    ),
                    AggregationFilter(
                        and_filter=AggregationAndFilter(
                            filters=[
                                AggregationFilter(
                                    comparison_filter=AggregationComparisonFilter(
                                        aggregation=AttributeAggregation(
                                            aggregate=Function.FUNCTION_COUNT,
                                            key=AttributeKey(
                                                name="sentry.project_id",
                                                type=AttributeKey.Type.TYPE_INT,
                                            ),
                                            label="count()",
                                            extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                                        ),
                                        op=AggregationComparisonFilter.OP_GREATER_THAN,
                                        val=1,
                                    ),
                                ),
                                AggregationFilter(
                                    comparison_filter=AggregationComparisonFilter(
                                        aggregation=AttributeAggregation(
                                            aggregate=Function.FUNCTION_AVG,
                                            key=AttributeKey(
                                                name="lcp", type=AttributeKey.Type.TYPE_DOUBLE
                                            ),
                                            label="avg(measurements.lcp)",
                                            extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED,
                                        ),
                                        op=AggregationComparisonFilter.OP_GREATER_THAN,
                                        val=3000,
                                    ),
                                ),
                            ],
                        )
                    ),
                ]
            )
        )
| SearchResolverQueryTest |
python | gevent__gevent | src/greentest/3.14/test_urllib2.py | {
"start": 19946,
"end": 25772
class ____(unittest.TestCase):
    """Tests for urllib.request.OpenerDirector's handler registration and
    dispatch, using the module-level MockHandler/add_ordered_mock_handlers
    helpers to record which handler methods get called and in what order."""
    def test_add_non_handler(self):
        # Objects without any handler methods must be rejected.
        class NonHandler(object):
            pass
        self.assertRaises(TypeError,
                          OpenerDirector().add_handler, NonHandler())
    def test_badly_named_methods(self):
        # test work-around for three methods that accidentally follow the
        # naming conventions for handler methods
        # (*_open() / *_request() / *_response())
        # These used to call the accidentally-named methods, causing a
        # TypeError in real code; here, returning self from these mock
        # methods would either cause no exception, or AttributeError.
        from urllib.error import URLError
        o = OpenerDirector()
        meth_spec = [
            [("do_open", "return self"), ("proxy_open", "return self")],
            [("redirect_request", "return self")],
            ]
        add_ordered_mock_handlers(o, meth_spec)
        o.add_handler(urllib.request.UnknownHandler())
        for scheme in "do", "proxy", "redirect":
            self.assertRaises(URLError, o.open, scheme+"://example.com/")
    def test_handled(self):
        # handler returning non-None means no more handlers will be called
        o = OpenerDirector()
        meth_spec = [
            ["http_open", "ftp_open", "http_error_302"],
            ["ftp_open"],
            [("http_open", "return self")],
            [("http_open", "return self")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)
        req = Request("http://example.com/")
        r = o.open(req)
        # Second .http_open() gets called, third doesn't, since second returned
        # non-None.  Handlers without .http_open() never get any methods called
        # on them.
        # In fact, second mock handler defining .http_open() returns self
        # (instead of response), which becomes the OpenerDirector's return
        # value.
        self.assertEqual(r, handlers[2])
        calls = [(handlers[0], "http_open"), (handlers[2], "http_open")]
        for expected, got in zip(calls, o.calls):
            handler, name, args, kwds = got
            self.assertEqual((handler, name), expected)
            self.assertEqual(args, (req,))
    def test_handler_order(self):
        o = OpenerDirector()
        handlers = []
        for meths, handler_order in [([("http_open", "return self")], 500),
                                     (["http_open"], 0)]:
            class MockHandlerSubclass(MockHandler):
                pass
            h = MockHandlerSubclass(meths)
            h.handler_order = handler_order
            handlers.append(h)
            o.add_handler(h)
        o.open("http://example.com/")
        # handlers called in reverse order, thanks to their sort order
        self.assertEqual(o.calls[0][0], handlers[1])
        self.assertEqual(o.calls[1][0], handlers[0])
    def test_raise(self):
        # raising URLError stops processing of request
        o = OpenerDirector()
        meth_spec = [
            [("http_open", "raise")],
            [("http_open", "return self")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)
        req = Request("http://example.com/")
        self.assertRaises(urllib.error.URLError, o.open, req)
        self.assertEqual(o.calls, [(handlers[0], "http_open", (req,), {})])
    def test_http_error(self):
        # XXX http_error_default
        # http errors are a special case
        o = OpenerDirector()
        meth_spec = [
            [("http_open", "error 302")],
            [("http_error_400", "raise"), "http_open"],
            [("http_error_302", "return response"), "http_error_303",
             "http_error"],
            [("http_error_302")],  # NOTE: bare parens — this is just the string
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)
        req = Request("http://example.com/")
        o.open(req)
        assert len(o.calls) == 2
        calls = [(handlers[0], "http_open", (req,)),
                 (handlers[2], "http_error_302",
                  (req, support.ALWAYS_EQ, 302, "", {}))]
        for expected, got in zip(calls, o.calls):
            handler, method_name, args = expected
            self.assertEqual((handler, method_name), got[:2])
            self.assertEqual(args, got[2])
    def test_processors(self):
        # *_request / *_response methods get called appropriately
        o = OpenerDirector()
        meth_spec = [
            [("http_request", "return request"),
             ("http_response", "return response")],
            [("http_request", "return request"),
             ("http_response", "return response")],
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)
        req = Request("http://example.com/")
        o.open(req)
        # processor methods are called on *all* handlers that define them,
        # not just the first handler that handles the request
        calls = [
            (handlers[0], "http_request"), (handlers[1], "http_request"),
            (handlers[0], "http_response"), (handlers[1], "http_response")]
        for i, (handler, name, args, kwds) in enumerate(o.calls):
            if i < 2:
                # *_request
                self.assertEqual((handler, name), calls[i])
                self.assertEqual(len(args), 1)
                self.assertIsInstance(args[0], Request)
            else:
                # *_response
                self.assertEqual((handler, name), calls[i])
                self.assertEqual(len(args), 2)
                self.assertIsInstance(args[0], Request)
                # response from opener.open is None, because there's no
                # handler that defines http_open to handle it
                if args[1] is not None:
                    self.assertIsInstance(args[1], MockResponse)
| OpenerDirectorTests |
python | docker__docker-py | tests/unit/auth_test.py | {
"start": 3116,
"end": 7847
} | class ____(unittest.TestCase):
index_config = {'auth': encode_auth({'username': 'indexuser'})}
private_config = {'auth': encode_auth({'username': 'privateuser'})}
legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
auth_config = auth.AuthConfig({
'auths': auth.parse_auth({
'https://index.docker.io/v1/': index_config,
'my.registry.net': private_config,
'http://legacy.registry.url/v1/': legacy_config,
})
})
def test_resolve_authconfig_hostname_only(self):
assert auth.resolve_authconfig(
self.auth_config, 'my.registry.net'
)['username'] == 'privateuser'
def test_resolve_authconfig_no_protocol(self):
assert auth.resolve_authconfig(
self.auth_config, 'my.registry.net/v1/'
)['username'] == 'privateuser'
def test_resolve_authconfig_no_path(self):
assert auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net'
)['username'] == 'privateuser'
def test_resolve_authconfig_no_path_trailing_slash(self):
assert auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net/'
)['username'] == 'privateuser'
def test_resolve_authconfig_no_path_wrong_secure_proto(self):
assert auth.resolve_authconfig(
self.auth_config, 'https://my.registry.net'
)['username'] == 'privateuser'
def test_resolve_authconfig_no_path_wrong_insecure_proto(self):
assert auth.resolve_authconfig(
self.auth_config, 'http://index.docker.io'
)['username'] == 'indexuser'
def test_resolve_authconfig_path_wrong_proto(self):
assert auth.resolve_authconfig(
self.auth_config, 'https://my.registry.net/v1/'
)['username'] == 'privateuser'
def test_resolve_authconfig_default_registry(self):
assert auth.resolve_authconfig(
self.auth_config
)['username'] == 'indexuser'
def test_resolve_authconfig_default_explicit_none(self):
assert auth.resolve_authconfig(
self.auth_config, None
)['username'] == 'indexuser'
def test_resolve_authconfig_fully_explicit(self):
assert auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net/v1/'
)['username'] == 'privateuser'
def test_resolve_authconfig_legacy_config(self):
assert auth.resolve_authconfig(
self.auth_config, 'legacy.registry.url'
)['username'] == 'legacyauth'
def test_resolve_authconfig_no_match(self):
assert auth.resolve_authconfig(
self.auth_config, 'does.not.exist'
) is None
def test_resolve_registry_and_auth_library_image(self):
image = 'image'
assert auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'] == 'indexuser'
def test_resolve_registry_and_auth_hub_image(self):
image = 'username/image'
assert auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'] == 'indexuser'
def test_resolve_registry_and_auth_explicit_hub(self):
image = 'docker.io/username/image'
assert auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'] == 'indexuser'
def test_resolve_registry_and_auth_explicit_legacy_hub(self):
image = 'index.docker.io/username/image'
assert auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'] == 'indexuser'
def test_resolve_registry_and_auth_private_registry(self):
image = 'my.registry.net/image'
assert auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'] == 'privateuser'
def test_resolve_registry_and_auth_unauthenticated_registry(self):
image = 'other.registry.net/image'
assert auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
) is None
def test_resolve_auth_with_empty_credstore_and_auth_dict(self):
auth_config = auth.AuthConfig({
'auths': auth.parse_auth({
'https://index.docker.io/v1/': self.index_config,
}),
'credsStore': 'blackbox'
})
with mock.patch(
'docker.auth.AuthConfig._resolve_authconfig_credstore'
) as m:
m.return_value = None
assert 'indexuser' == auth.resolve_authconfig(
auth_config, None
)['username']
| ResolveAuthTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/requirements.py | {
"start": 789,
"end": 57326
} | class ____(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def create_table_as(self):
"""target platform supports CREATE TABLE AS SELECT."""
return exclusions.closed()
@property
def create_temp_table_as(self):
"""target platform supports CREATE TEMPORARY TABLE AS SELECT."""
return exclusions.closed()
@property
def table_ddl_if_exists(self):
"""target platform supports IF NOT EXISTS / IF EXISTS for tables."""
return exclusions.closed()
@property
def index_ddl_if_exists(self):
"""target platform supports IF NOT EXISTS / IF EXISTS for indexes."""
return exclusions.closed()
@property
def uuid_data_type(self):
"""Return databases that support the UUID datatype."""
return exclusions.closed()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def foreign_keys_reflect_as_index(self):
"""Target database creates an index that's reflected for
foreign keys."""
return exclusions.closed()
@property
def unique_index_reflect_as_unique_constraints(self):
"""Target database reflects unique indexes as unique constrains."""
return exclusions.closed()
@property
def unique_constraints_reflect_as_index(self):
"""Target database reflects unique constraints as indexes."""
return exclusions.closed()
@property
def table_value_constructor(self):
"""Database / dialect supports a query like:
.. sourcecode:: sql
SELECT * FROM VALUES ( (c1, c2), (c1, c2), ...)
AS some_table(col1, col2)
SQLAlchemy generates this with the :func:`_sql.values` function.
"""
return exclusions.closed()
@property
def standard_cursor_sql(self):
"""Target database passes SQL-92 style statements to cursor.execute()
when a statement like select() or insert() is run.
A very small portion of dialect-level tests will ensure that certain
conditions are present in SQL strings, and these tests use very basic
SQL that will work on any SQL-like platform in order to assert results.
It's normally a given for any pep-249 DBAPI that a statement like
"SELECT id, name FROM table WHERE some_table.id=5" will work.
However, there are dialects that don't actually produce SQL Strings
and instead may work with symbolic objects instead, or dialects that
aren't working with SQL, so for those this requirement can be marked
as excluded.
"""
return exclusions.open()
@property
def on_update_cascade(self):
"""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled
or self.deferrable_fks.enabled
)
@property
def queue_pool(self):
"""target database is using QueuePool"""
def go(config):
return isinstance(config.db.pool, QueuePool)
return exclusions.only_if(go)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def implicitly_named_constraints(self):
"""target database must apply names to unnamed constraints."""
return exclusions.open()
@property
def unusual_column_name_characters(self):
"""target database allows column names that have unusual characters
in them, such as dots, spaces, slashes, or percent signs.
The column names are as always in such a case quoted, however the
DB still needs to support those characters in the name somehow.
"""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a
SELECT.
"""
return exclusions.open()
@property
def bound_limit_offset(self):
"""target database can render LIMIT and/or OFFSET using a bound
parameter
"""
return exclusions.open()
@property
def sql_expression_limit_offset(self):
"""target database can render LIMIT and/or OFFSET with a complete
SQL expression, such as one that uses the addition operator.
parameter
"""
return exclusions.open()
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite.
"""
return exclusions.open()
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullable_booleans(self):
"""Target database allows boolean columns to store NULL."""
return exclusions.open()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column
expressions without being in the context of a typed column.
"""
return exclusions.open()
@property
def standalone_null_binds_whereclause(self):
"""target database/driver supports bound parameters with NULL in the
WHERE clause, in situations where it has to be typed.
"""
return exclusions.open()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def window_range(self):
"""Target backend supports RANGE in window functions with int frames"""
return exclusions.closed()
@property
def window_range_numeric(self):
"""Target backend supports numeric values in RANGE"""
return exclusions.closed()
@property
def window_range_non_numeric(self):
"""Target backend supports non-numeric values in RANGE"""
return exclusions.closed()
@property
def ctes(self):
"""Target database supports CTEs"""
return exclusions.closed()
@property
def ctes_with_update_delete(self):
"""target database supports CTES that ride on top of a normal UPDATE
or DELETE statement which refers to the CTE in a correlated subquery.
"""
return exclusions.closed()
@property
def ctes_with_values(self):
"""target database supports CTES that ride on top of a VALUES
clause."""
return exclusions.closed()
@property
def ctes_on_dml(self):
"""target database supports CTES which consist of INSERT, UPDATE
or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)"""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def group_by_complex_expression(self):
"""target platform supports SQL expressions in GROUP BY
e.g.
SELECT x + y AS somelabel FROM table GROUP BY x + y
"""
return exclusions.open()
@property
def sane_rowcount(self):
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_sane_rowcount,
"driver doesn't support 'sane' rowcount",
)
@property
def sane_multi_rowcount(self):
return exclusions.fails_if(
lambda config: not config.db.dialect.supports_sane_multi_rowcount,
"driver %(driver)s %(doesnt_support)s 'sane' multi row count",
)
@property
def sane_rowcount_w_returning(self):
return exclusions.fails_if(
lambda config: not (
config.db.dialect.supports_sane_rowcount_returning
),
"driver doesn't support 'sane' rowcount when returning is on",
)
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda config: config.db.dialect.supports_empty_insert
or config.db.dialect.supports_default_values
or config.db.dialect.supports_default_metavalue,
"empty inserts not supported",
)
@property
def empty_inserts_executemany(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent, within executemany()"""
return self.empty_inserts
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def delete_returning(self):
"""target platform supports DELETE ... RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.delete_returning,
"%(database)s %(does_support)s 'DELETE ... RETURNING'",
)
@property
def insert_returning(self):
"""target platform supports INSERT ... RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.insert_returning,
"%(database)s %(does_support)s 'INSERT ... RETURNING'",
)
@property
def update_returning(self):
"""target platform supports UPDATE ... RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.update_returning,
"%(database)s %(does_support)s 'UPDATE ... RETURNING'",
)
@property
def insert_executemany_returning(self):
"""target platform supports RETURNING when INSERT is used with
executemany(), e.g. multiple parameter sets, indicating
as many rows come back as do parameter sets were passed.
"""
return exclusions.only_if(
lambda config: config.db.dialect.insert_executemany_returning,
"%(database)s %(does_support)s 'RETURNING of "
"multiple rows with INSERT executemany'",
)
@property
def insertmanyvalues(self):
return exclusions.only_if(
lambda config: config.db.dialect.supports_multivalues_insert
and config.db.dialect.insert_returning
and config.db.dialect.use_insertmanyvalues,
"%(database)s %(does_support)s 'insertmanyvalues functionality",
)
@property
def tuple_in(self):
"""Target platform supports the syntax
"(x, y) IN ((x1, y1), (x2, y2), ...)"
"""
return exclusions.closed()
@property
def tuple_in_w_empty(self):
"""Target platform tuple IN w/ empty set"""
return self.tuple_in
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names.",
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts.",
)
@property
def implements_get_lastrowid(self):
"""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def arraysize(self):
"""dialect includes the required pep-249 attribute
``cursor.arraysize``"""
return exclusions.open()
@property
def emulated_lastrowid(self):
"""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def emulated_lastrowid_even_with_sequences(self):
"""target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes, even if the table has a
Sequence on it.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
"""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return only_on(lambda config: config.db.dialect.supports_schemas)
@property
def cross_schema_fk_reflection(self):
"""target system must support reflection of inter-schema
foreign keys"""
return exclusions.closed()
@property
def foreign_key_constraint_name_reflection(self):
"""Target supports reflection of FOREIGN KEY constraints and
will return the name of the constraint that was used in the
"CONSTRAINT <name> FOREIGN KEY" DDL.
MySQL prior to version 8 and MariaDB prior to version 10.5
don't support this.
"""
return exclusions.closed()
@property
def implicit_default_schema(self):
"""target system has a strong concept of 'default' schema that can
be referred to implicitly.
basically, PostgreSQL.
"""
return exclusions.closed()
@property
def default_schema_name_switch(self):
"""target dialect implements provisioning module including
set_default_schema_on_connection"""
return exclusions.closed()
@property
def server_side_cursors(self):
"""Target dialect must support server side cursors."""
return exclusions.only_if(
[lambda config: config.db.dialect.supports_server_side_cursors],
"no server side cursors support",
)
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if(
[lambda config: config.db.dialect.supports_sequences],
"no sequence support",
)
@property
def no_sequences(self):
"""the opposite of "sequences", DB does not support sequences at
all."""
return exclusions.NotPredicate(self.sequences)
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if(
[
lambda config: config.db.dialect.supports_sequences
and config.db.dialect.sequences_optional
],
"no sequence support, or sequences not optional",
)
@property
def supports_lastrowid(self):
"""target database / driver supports cursor.lastrowid as a means
of retrieving the last inserted primary key value.
note that if the target DB supports sequences also, this is still
assumed to work. This is a new use case brought on by MariaDB 10.3.
"""
return exclusions.only_if(
[lambda config: config.db.dialect.postfetch_lastrowid]
)
@property
def no_lastrowid_support(self):
"""the opposite of supports_lastrowid"""
return exclusions.only_if(
[lambda config: not config.db.dialect.postfetch_lastrowid]
)
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
"""target database has general support for table reflection"""
return exclusions.open()
@property
def reflect_tables_no_columns(self):
"""target database supports creation and reflection of tables with no
columns, or at least tables that seem to have no columns."""
return exclusions.closed()
@property
def temp_table_comment_reflection(self):
"""indicates if database supports comments on temp tables and
the dialect can reflect them"""
return exclusions.closed()
@property
def comment_reflection(self):
"""Indicates if the database support table comment reflection"""
return exclusions.closed()
@property
def comment_reflection_full_unicode(self):
"""Indicates if the database support table comment reflection in the
full unicode range, including emoji etc.
"""
return exclusions.closed()
@property
def constraint_comment_reflection(self):
"""indicates if the database support comments on constraints
and their reflection"""
return exclusions.closed()
@property
def column_collation_reflection(self):
"""Indicates if the database support column collation reflection.
This requirement also uses ``get_order_by_collation`` to get
an available collation.
"""
return exclusions.closed()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW
definition."""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def schema_create_delete(self):
"""target database supports schema create and dropped with
'CREATE SCHEMA' and 'DROP SCHEMA'"""
return exclusions.closed()
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_option_reflection_ondelete(self):
return exclusions.closed()
@property
def fk_constraint_option_reflection_ondelete_restrict(self):
return exclusions.closed()
@property
def fk_constraint_option_reflection_ondelete_noaction(self):
return exclusions.closed()
@property
def foreign_key_constraint_option_reflection_onupdate(self):
return exclusions.closed()
@property
def fk_constraint_option_reflection_onupdate_restrict(self):
return exclusions.closed()
@property
def temp_table_reflection(self):
return exclusions.open()
@property
def temp_table_reflect_indexes(self):
return self.temp_table_reflection
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return exclusions.closed()
@property
def has_temp_table(self):
"""target dialect supports checking a single temp table name"""
return exclusions.closed()
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return exclusions.open()
@property
def temporary_views(self):
"""target database supports temporary views"""
return exclusions.closed()
@property
def create_or_replace_view(self):
"""target database supports CREATE OR REPLACE VIEW"""
return exclusions.closed()
@property
def index_reflection(self):
return exclusions.open()
@property
def index_reflects_included_columns(self):
return exclusions.closed()
@property
def indexes_with_ascdesc(self):
"""target database supports CREATE INDEX with per-column ASC/DESC."""
return exclusions.open()
@property
def reflect_indexes_with_ascdesc(self):
"""target database supports reflecting INDEX with per-column
ASC/DESC."""
return exclusions.open()
@property
def reflect_indexes_with_ascdesc_as_expression(self):
"""target database supports reflecting INDEX with per-column
ASC/DESC but reflects them as expressions (like oracle)."""
return exclusions.closed()
@property
def indexes_check_column_order(self):
"""target database supports CREATE INDEX with column order check."""
return exclusions.closed()
@property
def indexes_with_expressions(self):
"""target database supports CREATE INDEX against SQL expressions."""
return exclusions.closed()
@property
def reflect_indexes_with_expressions(self):
"""target database supports reflection of indexes with
SQL expressions."""
return exclusions.closed()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def inline_check_constraint_reflection(self):
"""target dialect supports reflection of inline check constraints"""
return exclusions.closed()
@property
def check_constraint_reflection(self):
"""target dialect supports reflection of check constraints"""
return exclusions.closed()
@property
def duplicate_key_raises_integrity_error(self):
"""target dialect raises IntegrityError when reporting an INSERT
with a primary key violation. (hint: it should)
"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def nvarchar_types(self):
"""target database supports NVARCHAR and NCHAR as an actual datatype"""
return exclusions.closed()
@property
def unicode_data_no_special_types(self):
"""Target database/dialect can receive / deliver / compare data with
non-ASCII characters in plain VARCHAR, TEXT columns, without the need
for special "national" datatypes like NVARCHAR or similar.
"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol
names.
"""
return exclusions.closed()
@property
def symbol_names_w_double_quote(self):
"""Target driver can create tables with a name like 'some " table'"""
return exclusions.open()
@property
def datetime_interval(self):
"""target dialect supports rendering of a datetime.timedelta as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_timezone(self):
"""target dialect supports representation of Python
datetime.datetime() with tzinfo with DateTime(timezone=True)."""
return exclusions.closed()
@property
def time_timezone(self):
"""target dialect supports representation of Python
datetime.time() with tzinfo with Time(timezone=True)."""
return exclusions.closed()
@property
def date_implicit_bound(self):
"""target dialect when given a date object will bind it such
that the database server knows the object is a date, and not
a plain string.
"""
return exclusions.open()
@property
def time_implicit_bound(self):
"""target dialect when given a time object will bind it such
that the database server knows the object is a time, and not
a plain string.
"""
return exclusions.open()
@property
def datetime_implicit_bound(self):
"""target dialect when given a datetime object will bind it such
that the database server knows the object is a datetime, and not
a plain string.
"""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def timestamp_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects but only
if TIMESTAMP is used."""
return exclusions.closed()
@property
def timestamp_microseconds_implicit_bound(self):
"""target dialect when given a datetime object which also includes
a microseconds portion when using the TIMESTAMP data type
will bind it such that the database server knows
the object is a datetime with microseconds, and not a plain string.
"""
return self.timestamp_microseconds
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like:
.. sourcecode:: sql
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def autocommit(self):
"""target dialect supports 'AUTOCOMMIT' as an isolation_level"""
return exclusions.closed()
@property
def skip_autocommit_rollback(self):
"""target dialect supports the detect_autocommit_setting() method and
uses the default implementation of do_rollback()"""
return exclusions.closed()
@property
def isolation_level(self):
"""target dialect supports general isolation level settings.
Note that this requirement, when enabled, also requires that
the get_isolation_levels() method be implemented.
"""
return exclusions.closed()
def get_isolation_levels(self, config):
"""Return a structure of supported isolation levels for the current
testing dialect.
The structure indicates to the testing suite what the expected
"default" isolation should be, as well as the other values that
are accepted. The dictionary has two keys, "default" and "supported".
The "supported" key refers to a list of all supported levels and
it should include AUTOCOMMIT if the dialect supports it.
If the :meth:`.DefaultRequirements.isolation_level` requirement is
not open, then this method has no return value.
E.g.::
>>> testing.requirements.get_isolation_levels()
{
"default": "READ_COMMITTED",
"supported": [
"SERIALIZABLE", "READ UNCOMMITTED",
"READ COMMITTED", "REPEATABLE READ",
"AUTOCOMMIT"
]
}
"""
with config.db.connect() as conn:
try:
supported = conn.dialect.get_isolation_level_values(
conn.connection.dbapi_connection
)
except NotImplementedError:
return None
else:
return {
"default": conn.dialect.default_isolation_level,
"supported": supported,
}
@property
def get_isolation_level_values(self):
"""target dialect supports the
:meth:`_engine.Dialect.get_isolation_level_values`
method added in SQLAlchemy 2.0.
"""
def go(config):
with config.db.connect() as conn:
try:
conn.dialect.get_isolation_level_values(
conn.connection.dbapi_connection
)
except NotImplementedError:
return False
else:
return True
return exclusions.only_if(go)
@property
def dialect_level_isolation_level_param(self):
"""test that the dialect allows the 'isolation_level' argument
to be handled by DefaultDialect"""
def go(config):
try:
e = create_engine(
config.db.url, isolation_level="READ COMMITTED"
)
except:
return False
else:
return (
e.dialect._on_connect_isolation_level == "READ COMMITTED"
)
return exclusions.only_if(go)
@property
def array_type(self):
"""Target platform implements a native ARRAY type"""
return exclusions.closed()
@property
def json_type(self):
"""target platform implements a native JSON type."""
return exclusions.closed()
@property
def json_array_indexes(self):
"""target platform supports numeric array indexes
within a JSON structure"""
return self.json_type
@property
def json_index_supplementary_unicode_element(self):
return exclusions.open()
@property
def legacy_unconditional_json_extract(self):
"""Backend has a JSON_EXTRACT or similar function that returns a
valid JSON string in all cases.
Used to test a legacy feature and is not needed.
"""
return exclusions.closed()
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.open()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def cast_precision_numerics_many_significant_digits(self):
"""same as precision_numerics_many_significant_digits but within the
context of a CAST statement (hello MySQL)
"""
return self.precision_numerics_many_significant_digits
@property
def server_defaults(self):
"""Target backend supports server side defaults for columns"""
return exclusions.closed()
@property
def expression_server_defaults(self):
"""Target backend supports server side defaults with SQL expressions
for columns"""
return exclusions.closed()
@property
def implicit_decimal_binds(self):
"""target backend will return a selected Decimal as a Decimal, not
a string.
e.g.::
expr = decimal.Decimal("15.7563")
value = e.scalar(select(literal(expr)))
assert value == expr
See :ticket:`4036`
"""
return exclusions.open()
@property
def numeric_received_as_decimal_untyped(self):
"""target backend will return result columns that are explicitly
against NUMERIC or similar precision-numeric datatypes (not including
FLOAT or INT types) as Python Decimal objects, and not as floats
or ints, including when no SQLAlchemy-side typing information is
associated with the statement (e.g. such as a raw SQL string).
This should be enabled if either the DBAPI itself returns Decimal
objects, or if the dialect has set up DBAPI-specific return type
handlers such that Decimal objects come back automatically.
"""
return exclusions.open()
@property
def nested_aggregates(self):
"""target database can select an aggregate from a subquery that's
also using an aggregate
"""
return exclusions.open()
@property
def aggregate_order_by(self):
"""target database can use ORDER BY or equivalent in an aggregate
function, and dialect supports aggregate_order_by().
"""
return exclusions.closed()
@property
def recursive_fk_cascade(self):
"""target database must support ON DELETE CASCADE on a self-referential
foreign key
"""
return exclusions.open()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def infinity_floats(self):
"""The Float type can persist and load float('inf'), float('-inf')."""
return exclusions.closed()
@property
def float_or_double_precision_behaves_generically(self):
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def literal_float_coercion(self):
"""target backend will return the exact float value 15.7563
with only four significant digits from this statement:
SELECT :param
where :param is the Python float 15.7563
i.e. it does not return 15.75629997253418
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
@property
def float_is_numeric(self):
"""target backend uses Numeric for Float/Dual"""
return exclusions.open()
@property
def text_type(self):
"""Target database must support an unbounded Text() "
"type such as TEXT or CLOB"""
return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def expressions_against_unbounded_text(self):
"""target database supports use of an unbounded textual field in a
WHERE clause."""
return exclusions.open()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return exclusions.open()
@property
def savepoints(self):
"""Target database must support savepoints."""
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return exclusions.closed()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def delete_from(self):
"""Target must support DELETE FROM..FROM or DELETE..USING syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE (or DELETE) where the same table is
present in a subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as:
.. sourcecode:: sql
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def percent_schema_names(self):
"""target backend supports weird identifiers with percent signs
in them, e.g. 'some % column'.
this is a very weird use case but often has problems because of
DBAPIs that use python formatting. It's not a critical use
case either.
"""
return exclusions.closed()
@property
def order_by_col_from_union(self):
"""target database supports ordering by a column from a SELECT
inside of a UNION
E.g.:
.. sourcecode:: sql
(SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
"""
return exclusions.open()
@property
def order_by_label_with_expression(self):
"""target backend supports ORDER BY a column label within an
expression.
Basically this:
.. sourcecode:: sql
select data as foo from test order by foo || 'bar'
Lots of databases including PostgreSQL don't support this,
so this is off by default.
"""
return exclusions.closed()
@property
def order_by_collation(self):
def check(config):
try:
self.get_order_by_collation(config)
return False
except NotImplementedError:
return True
return exclusions.skip_if(check)
def get_order_by_collation(self, config):
raise NotImplementedError()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at
all.
"""
return exclusions.open()
@property
def graceful_disconnects(self):
"""Target driver must raise a DBAPI-level exception, such as
InterfaceError, when the underlying connection has been closed
and the execute() method is called.
"""
return exclusions.open()
@property
def independent_connections(self):
"""
Target must support simultaneous, independent database connections.
"""
return exclusions.open()
@property
def independent_readonly_connections(self):
"""
Target must support simultaneous, independent database connections
that will be used in a readonly fashion.
"""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
@property
def ad_hoc_engines(self):
"""Test environment must allow ad-hoc engine/connection creation.
No longer used in any tests; is a no-op
"""
return exclusions.open()
@property
def no_windows(self):
return exclusions.skip_if(self._running_on_windows())
def _running_on_windows(self):
return exclusions.LambdaPredicate(
lambda: platform.system() == "Windows",
description="running on Windows",
)
@property
def only_linux(self):
return exclusions.only_if(self._running_on_linux())
def _running_on_linux(self):
return exclusions.LambdaPredicate(
lambda: platform.system() == "Linux",
description="running on Linux",
)
@property
def timing_intensive(self):
from . import config
return config.add_to_marker.timing_intensive
@property
def posix(self):
return exclusions.skip_if(lambda: os.name != "posix")
@property
def memory_intensive(self):
from . import config
return config.add_to_marker.memory_intensive
@property
def threading_with_mock(self):
"""Mark tests that use threading and mock at the same time - stability
issues have been observed with coverage
"""
return exclusions.skip_if(
lambda config: config.options.has_coverage,
"Stability issues with coverage",
)
@property
def sqlalchemy2_stubs(self):
def check(config):
try:
__import__("sqlalchemy-stubs.ext.mypy")
except ImportError:
return False
else:
return True
return exclusions.only_if(check)
@property
def no_sqlalchemy2_stubs(self):
def check(config):
try:
__import__("sqlalchemy-stubs.ext.mypy")
except ImportError:
return False
else:
return True
return exclusions.skip_if(check)
@property
def up_to_date_typealias_type(self):
# this checks a particular quirk found in typing_extensions <=4.12.0
# using older python versions like 3.10 or 3.9, we use TypeAliasType
# from typing_extensions which does not provide for sufficient
# introspection prior to 4.13.0
def check(config):
import typing
import typing_extensions
TypeAliasType = getattr(
typing, "TypeAliasType", typing_extensions.TypeAliasType
)
TV = typing.TypeVar("TV")
TA_generic = TypeAliasType( # type: ignore
"TA_generic", typing.List[TV], type_params=(TV,)
)
return hasattr(TA_generic[int], "__value__")
return exclusions.only_if(check)
@property
def python311(self):
return exclusions.only_if(
lambda: util.py311, "Python 3.11 or above required"
)
@property
def python312(self):
return exclusions.only_if(
lambda: util.py312, "Python 3.12 or above required"
)
@property
def python314(self):
return exclusions.only_if(
lambda: util.py314, "Python 3.14 or above required"
)
@property
def fail_python314b1(self):
return exclusions.fails_if(
lambda: util.compat.py314b1, "Fails as of python 3.14.0b1"
)
@property
def not_python314(self):
"""This requirement is interim to assist with backporting of
issue #12405.
SQLAlchemy 2.0 still includes the ``await_fallback()`` method that
makes use of ``asyncio.get_event_loop_policy()``. This is removed
in SQLAlchemy 2.1.
"""
return exclusions.skip_if(
lambda: util.py314, "Python 3.14 or above not supported"
)
@property
def cpython(self):
return exclusions.only_if(
lambda: util.cpython, "cPython interpreter needed"
)
@property
def gil_enabled(self):
return exclusions.only_if(
lambda: not util.freethreading, "GIL-enabled build needed"
)
@property
def is64bit(self):
return exclusions.only_if(lambda: util.is64bit, "64bit required")
@property
def patch_library(self):
def check_lib():
try:
__import__("patch")
except ImportError:
return False
else:
return True
return exclusions.only_if(check_lib, "patch library needed")
@property
def predictable_gc(self):
"""target platform must remove all cycles unconditionally when
gc.collect() is called, as well as clean out unreferenced subclasses.
"""
return self.cpython + self.gil_enabled
@property
def no_coverage(self):
"""Test should be skipped if coverage is enabled.
This is to block tests that exercise libraries that seem to be
sensitive to coverage, such as PostgreSQL notice logging.
"""
return exclusions.skip_if(
lambda config: config.options.has_coverage,
"Issues observed when coverage is enabled",
)
def _has_mysql_on_windows(self, config):
return False
def _has_mysql_fully_case_sensitive(self, config):
return False
@property
def sqlite(self):
return exclusions.skip_if(lambda: not self._has_sqlite())
@property
def cextensions(self):
return exclusions.skip_if(
lambda: not util.has_compiled_ext(),
"Cython extensions not installed",
)
def _has_sqlite(self):
from sqlalchemy import create_engine
try:
create_engine("sqlite://")
return True
except ImportError:
return False
@property
def async_dialect(self):
"""dialect makes use of await_() to invoke operations on the
DBAPI."""
return exclusions.closed()
@property
def asyncio(self):
return self.greenlet
@property
def no_greenlet(self):
def go(config):
try:
import greenlet # noqa: F401
except ImportError:
return True
else:
return False
return exclusions.only_if(go)
@property
def greenlet(self):
def go(config):
if not _test_asyncio.ENABLE_ASYNCIO:
return False
try:
import greenlet # noqa: F401
except ImportError:
return False
else:
return True
return exclusions.only_if(go)
@property
def computed_columns(self):
"Supports computed columns"
return exclusions.closed()
@property
def computed_columns_stored(self):
"Supports computed columns with `persisted=True`"
return exclusions.closed()
@property
def computed_columns_virtual(self):
"Supports computed columns with `persisted=False`"
return exclusions.closed()
@property
def computed_columns_default_persisted(self):
"""If the default persistence is virtual or stored when `persisted`
is omitted"""
return exclusions.closed()
@property
def computed_columns_reflect_persisted(self):
"""If persistence information is returned by the reflection of
computed columns"""
return exclusions.closed()
@property
def supports_distinct_on(self):
"""If a backend supports the DISTINCT ON in a select"""
return exclusions.closed()
@property
def supports_is_distinct_from(self):
"""Supports some form of "x IS [NOT] DISTINCT FROM y" construct.
Different dialects will implement their own flavour, e.g.,
sqlite will emit "x IS NOT y" instead of "x IS DISTINCT FROM y".
.. seealso::
:meth:`.ColumnOperators.is_distinct_from`
"""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_is_distinct_from,
"driver doesn't support an IS DISTINCT FROM construct",
)
@property
def identity_columns(self):
"""If a backend supports GENERATED { ALWAYS | BY DEFAULT }
AS IDENTITY"""
return exclusions.closed()
@property
def identity_columns_standard(self):
"""If a backend supports GENERATED { ALWAYS | BY DEFAULT }
AS IDENTITY with a standard syntax.
This is mainly to exclude MSSql.
"""
return exclusions.closed()
@property
def regexp_match(self):
"""backend supports the regexp_match operator."""
return exclusions.closed()
@property
def regexp_replace(self):
"""backend supports the regexp_replace operator."""
return exclusions.closed()
@property
def fetch_first(self):
"""backend supports the fetch first clause."""
return exclusions.closed()
@property
def fetch_percent(self):
"""backend supports the fetch first clause with percent."""
return exclusions.closed()
@property
def fetch_ties(self):
"""backend supports the fetch first clause with ties."""
return exclusions.closed()
@property
def fetch_no_order_by(self):
"""backend supports the fetch first without order by"""
return exclusions.closed()
@property
def fetch_offset_with_options(self):
"""backend supports the offset when using fetch first with percent
or ties. basically this is "not mssql"
"""
return exclusions.closed()
@property
def fetch_expression(self):
"""backend supports fetch / offset with expression in them, like
SELECT * FROM some_table
OFFSET 1 + 1 ROWS FETCH FIRST 1 + 1 ROWS ONLY
"""
return exclusions.closed()
@property
def autoincrement_without_sequence(self):
"""If autoincrement=True on a column does not require an explicit
sequence. This should be false only for oracle.
"""
return exclusions.open()
@property
def generic_classes(self):
"If X[Y] can be implemented with ``__class_getitem__``. py3.7+"
return exclusions.open()
@property
def json_deserializer_binary(self):
"indicates if the json_deserializer function is called with bytes"
return exclusions.closed()
@property
def reflect_table_options(self):
"""Target database must support reflecting table_options."""
return exclusions.closed()
@property
def materialized_views(self):
"""Target database must support MATERIALIZED VIEWs."""
return exclusions.closed()
@property
def materialized_views_reflect_pk(self):
"""Target database reflect MATERIALIZED VIEWs pks."""
return exclusions.closed()
@property
def supports_bitwise_or(self):
"""Target database supports bitwise or"""
return exclusions.closed()
@property
def supports_bitwise_and(self):
"""Target database supports bitwise and"""
return exclusions.closed()
@property
def supports_bitwise_not(self):
"""Target database supports bitwise not"""
return exclusions.closed()
@property
def supports_bitwise_xor(self):
"""Target database supports bitwise xor"""
return exclusions.closed()
@property
def supports_bitwise_shift(self):
"""Target database supports bitwise left or right shift"""
return exclusions.closed()
@property
def like_escapes(self):
"""Target backend supports custom ESCAPE characters
with LIKE comparisons"""
return exclusions.open()
| SuiteRequirements |
python | pypa__warehouse | tests/unit/api/test_simple.py | {
"start": 2592,
"end": 6739
} | class ____:
@pytest.mark.parametrize(
("content_type", "renderer_override"),
CONTENT_TYPE_PARAMS,
)
def test_no_results_no_serial(self, db_request, content_type, renderer_override):
db_request.accept = content_type
assert simple.simple_index(db_request) == {
"meta": {"_last-serial": 0, "api-version": API_VERSION},
"projects": [],
}
assert db_request.response.headers["X-PyPI-Last-Serial"] == "0"
assert db_request.response.content_type == content_type
_assert_has_cors_headers(db_request.response.headers)
if renderer_override is not None:
assert db_request.override_renderer == renderer_override
@pytest.mark.parametrize(
("content_type", "renderer_override"),
CONTENT_TYPE_PARAMS,
)
def test_no_results_with_serial(self, db_request, content_type, renderer_override):
db_request.accept = content_type
user = UserFactory.create()
je = JournalEntryFactory.create(submitted_by=user)
assert simple.simple_index(db_request) == {
"meta": {"_last-serial": je.id, "api-version": API_VERSION},
"projects": [],
}
assert db_request.response.headers["X-PyPI-Last-Serial"] == str(je.id)
assert db_request.response.content_type == content_type
_assert_has_cors_headers(db_request.response.headers)
if renderer_override is not None:
assert db_request.override_renderer == renderer_override
@pytest.mark.parametrize(
("content_type", "renderer_override"),
CONTENT_TYPE_PARAMS,
)
def test_with_results_no_serial(self, db_request, content_type, renderer_override):
db_request.accept = content_type
projects = [(x.name, x.normalized_name) for x in ProjectFactory.create_batch(3)]
assert simple.simple_index(db_request) == {
"meta": {"_last-serial": 0, "api-version": API_VERSION},
"projects": [
{"name": x[0], "_last-serial": 0}
for x in sorted(projects, key=lambda x: x[1])
],
}
assert db_request.response.headers["X-PyPI-Last-Serial"] == "0"
assert db_request.response.content_type == content_type
_assert_has_cors_headers(db_request.response.headers)
if renderer_override is not None:
assert db_request.override_renderer == renderer_override
@pytest.mark.parametrize(
("content_type", "renderer_override"),
CONTENT_TYPE_PARAMS,
)
def test_with_results_with_serial(
self, db_request, content_type, renderer_override
):
db_request.accept = content_type
projects = [(x.name, x.normalized_name) for x in ProjectFactory.create_batch(3)]
user = UserFactory.create()
je = JournalEntryFactory.create(submitted_by=user)
assert simple.simple_index(db_request) == {
"meta": {"_last-serial": je.id, "api-version": API_VERSION},
"projects": [
{"name": x[0], "_last-serial": 0}
for x in sorted(projects, key=lambda x: x[1])
],
}
assert db_request.response.headers["X-PyPI-Last-Serial"] == str(je.id)
assert db_request.response.content_type == content_type
_assert_has_cors_headers(db_request.response.headers)
if renderer_override is not None:
assert db_request.override_renderer == renderer_override
def test_quarantined_project_omitted_from_index(self, db_request):
db_request.accept = "text/html"
ProjectFactory.create(name="foo")
ProjectFactory.create(name="bar", lifecycle_status="quarantine-enter")
assert simple.simple_index(db_request) == {
"meta": {"_last-serial": 0, "api-version": API_VERSION},
"projects": [{"name": "foo", "_last-serial": 0}],
}
assert db_request.response.headers["X-PyPI-Last-Serial"] == "0"
assert db_request.response.content_type == "text/html"
_assert_has_cors_headers(db_request.response.headers)
| TestSimpleIndex |
python | falconry__falcon | falcon/bench/nuts/nuts/controllers/root.py | {
"start": 472,
"end": 611
} | class ____:
@expose()
def _lookup(self, account_id, *remainder):
return TestController(account_id), remainder
| HelloController |
python | huggingface__transformers | tests/models/camembert/test_modeling_camembert.py | {
"start": 958,
"end": 2905
} | class ____(unittest.TestCase):
@slow
def test_output_embeds_base_model(self):
model = CamembertModel.from_pretrained("almanach/camembert-base", attn_implementation="eager")
model.to(torch_device)
input_ids = torch.tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
device=torch_device,
dtype=torch.long,
) # J'aime le camembert !
with torch.no_grad():
output = model(input_ids)["last_hidden_state"]
expected_shape = torch.Size((1, 10, 768))
self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
device=torch_device,
dtype=torch.float,
)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_output_embeds_base_model_sdpa(self):
input_ids = torch.tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
device=torch_device,
dtype=torch.long,
) # J'aime le camembert !
expected_slice = torch.tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
device=torch_device,
dtype=torch.float,
)
model = CamembertModel.from_pretrained("almanach/camembert-base", attn_implementation="sdpa").to(torch_device)
with torch.no_grad():
output = model(input_ids)["last_hidden_state"].detach()
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
| CamembertModelIntegrationTest |
python | vyperlang__vyper | vyper/venom/check_venom.py | {
"start": 845,
"end": 1231
} | class ____(VenomError):
message: str = "function has inconsistent return arity"
def __init__(self, function: IRFunction, arities: set[int]):
self.function = function
self.arities = arities
def __str__(self):
return (
f"function {self.function.name} has inconsistent 'ret' arities: {sorted(self.arities)}"
)
| InconsistentReturnArity |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 11458,
"end": 11598
} | class ____(sgqlc.types.Scalar):
"""
See source code for more info.
"""
__schema__ = graphql_schema
ID = sgqlc.types.ID
| HTML |
python | pytorch__pytorch | torch/utils/benchmark/utils/timer.py | {
"start": 720,
"end": 2142
} | class ____:
def __init__(
self,
stmt: str,
setup: str,
global_setup: str,
timer: Callable[[], float],
globals: dict[str, Any],
) -> None:
if timer is not timeit.default_timer:
raise NotImplementedError(
"PyTorch was built with accelerators and an accelerator is present; however "
"Timer does not yet support accelerator measurements. If your "
"code is CPU only, pass `timer=timeit.default_timer` to the "
"Timer's constructor to indicate this. (Note that this will "
"produce incorrect results if an accelerator is in fact used, as "
"Timer will not synchronize the accelerator.)"
)
if globals:
raise ValueError("C++ timing does not support globals.")
self._stmt: str = textwrap.dedent(stmt)
self._setup: str = textwrap.dedent(setup)
self._global_setup: str = textwrap.dedent(global_setup)
self._timeit_module: TimeitModuleType | None = None
def timeit(self, number: int) -> float:
if self._timeit_module is None:
self._timeit_module = cpp_jit.compile_timeit_template(
stmt=self._stmt,
setup=self._setup,
global_setup=self._global_setup,
)
return self._timeit_module.timeit(number)
| CPPTimer |
python | pypa__pip | src/pip/_vendor/urllib3/exceptions.py | {
"start": 3509,
"end": 3633
} | class ____(PoolError):
"""Raised when a request enters a pool after the pool has been closed."""
pass
| ClosedPoolError |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/ray.py | {
"start": 3306,
"end": 9798
} | class ____(RayBaseOperator):
"""
Create a Ray cluster on the Vertex AI.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param head_node_type: The head node resource. Resources.node_count must be 1. If not set, default
value of Resources() class will be used.
:param python_version: Required. Python version for the ray cluster.
:param ray_version: Required. Ray version for the ray cluster.
Currently only 3 version are available: 2.9.3, 2.33, 2.42. For more information please refer to
https://github.com/googleapis/python-aiplatform/blob/main/setup.py#L101
:param network: Virtual private cloud (VPC) network. For Ray Client, VPC peering is required to
connect to the Ray Cluster managed in the Vertex API service. For Ray Job API, VPC network is not
required because Ray Cluster connection can be accessed through dashboard address.
:param service_account: Service account to be used for running Ray programs on the cluster.
:param cluster_name: This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`.
The first character cannot be a number or hyphen.
:param worker_node_types: The list of Resources of the worker nodes. The same Resources object should
not appear multiple times in the list.
:param custom_images: The NodeImages which specifies head node and worker nodes images. All the
workers will share the same image. If each Resource has a specific custom image, use
`Resources.custom_image` for head/worker_node_type(s). Note that configuring
`Resources.custom_image` will override `custom_images` here. Allowlist only.
:param enable_metrics_collection: Enable Ray metrics collection for visualization.
:param enable_logging: Enable exporting Ray logs to Cloud Logging.
:param psc_interface_config: PSC-I config.
:param reserved_ip_ranges: A list of names for the reserved IP ranges under the VPC network that can
be used for this cluster. If set, we will deploy the cluster within the provided IP ranges.
Otherwise, the cluster is deployed to any IP ranges under the provided VPC network.
Example: ["vertex-ai-ip-range"].
:param labels: The labels with user-defined metadata to organize Ray cluster.
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are allowed.
See https://goo.gl/xmQnxf for more information and examples of labels.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"head_node_type", "worker_node_types"} | set(RayBaseOperator.template_fields)
)
operator_extra_links = (VertexAIRayClusterLink(),)
def __init__(
self,
python_version: str,
ray_version: Literal["2.9.3", "2.33", "2.42"],
head_node_type: resources.Resources = resources.Resources(),
network: str | None = None,
service_account: str | None = None,
cluster_name: str | None = None,
worker_node_types: list[resources.Resources] | None = None,
custom_images: resources.NodeImages | None = None,
enable_metrics_collection: bool = True,
enable_logging: bool = True,
psc_interface_config: resources.PscIConfig | None = None,
reserved_ip_ranges: list[str] | None = None,
labels: dict[str, str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.head_node_type = head_node_type
self.python_version = python_version
self.ray_version = ray_version
self.network = network
self.service_account = service_account
self.cluster_name = cluster_name
self.worker_node_types = worker_node_types
self.custom_images = custom_images
self.enable_metrics_collection = enable_metrics_collection
self.enable_logging = enable_logging
self.psc_interface_config = psc_interface_config
self.reserved_ip_ranges = reserved_ip_ranges
self.labels = labels
def execute(self, context: Context):
self.log.info("Creating a Ray cluster.")
try:
cluster_path = self.hook.create_ray_cluster(
project_id=self.project_id,
location=self.location,
head_node_type=self.head_node_type,
python_version=self.python_version,
ray_version=self.ray_version,
network=self.network,
service_account=self.service_account,
cluster_name=self.cluster_name,
worker_node_types=self.worker_node_types,
custom_images=self.custom_images,
enable_metrics_collection=self.enable_metrics_collection,
enable_logging=self.enable_logging,
psc_interface_config=self.psc_interface_config,
reserved_ip_ranges=self.reserved_ip_ranges,
labels=self.labels,
)
cluster_id = self.hook.extract_cluster_id(cluster_path)
context["ti"].xcom_push(
key="cluster_id",
value=cluster_id,
)
VertexAIRayClusterLink.persist(
context=context, location=self.location, cluster_id=cluster_id, project_id=self.project_id
)
self.log.info("Ray cluster was created.")
except Exception as error:
raise AirflowException(error)
return cluster_path
| CreateRayClusterOperator |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qactivation_test.py | {
"start": 1695,
"end": 2467
} | class ____(op_bench.TorchBenchmarkBase):
r"""Base class for all the activations."""
def _setup(self, dims, contig, dtype):
# Input
f_input = (torch.rand(*dims) - 0.5) * 256
self.scale = 1.0
self.zero_point = 0
# Quantize the tensor
q_input = torch.quantize_per_tensor(
f_input, scale=self.scale, zero_point=self.zero_point, dtype=dtype
)
if not contig:
# Make non-contiguous
new_shape = list(range(q_input.ndim))[::-1]
q_input = q_input.permute(new_shape)
self.inputs = {"q_input": q_input}
def init(self, dims, contig, inplace, dtype, op_func):
self._setup(dims, contig, dtype)
self.qop = op_func
| QActivationBenchmarkBase |
python | tensorflow__tensorflow | tensorflow/python/training/checkpoint_utils_test.py | {
"start": 2867,
"end": 17155
} | class ____(test.TestCase):
def testNoCheckpoints(self):
checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), [])
def testNoTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var5"), [])
def testGetTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), v1)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var2"), v2)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var3"), v3)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "useful_scope/var4"), v4)
def testGetAllVariables(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_create_checkpoints(session, checkpoint_dir)
self.assertEqual(
checkpoint_utils.list_variables(checkpoint_dir),
[("useful_scope/var4", [9, 9]), ("var1", [1, 10]), ("var2", [10, 10]),
("var3", [100, 100])])
def testFSPath(self):
checkpoint_dir = pathlib.Path(self.get_temp_dir())
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir) # pylint: disable=unused-variable
reader = checkpoint_utils.load_checkpoint(checkpoint_dir)
self.assertAllEqual(reader.get_tensor("var1"), v1)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), v1)
self.assertEqual(
checkpoint_utils.list_variables(checkpoint_dir),
[("useful_scope/var4", [9, 9]), ("var1", [1, 10]), ("var2", [10, 10]),
("var3", [100, 100])])
def testInitFromCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("my1", [1, 10])
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable("my2", [10, 10])
with variable_scope.variable_scope("other_useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
my3 = variable_scope.get_variable("my3", [100, 100])
my3b = variable_scope.get_variable("my3b", [100, 100])
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var1": "some_scope/my1",
"useful_scope/": "some_scope/some_other_scope/other_useful_scope/",
})
checkpoint_utils.init_from_checkpoint(checkpoint_dir, [
("var2", "some_scope/some_other_scope/my2"),
("var3", my3),
("var3", my3b),
])
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my3b.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
# Check that tensors are not explicitly in the graph.
self.assertLess(len(str(session.graph.as_graph_def())), 32000)
def testInitialValueComesFromCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope(
"some_scope", initializer=init_ops.zeros_initializer()):
my1 = variable_scope.get_variable("my1", [1, 10])
before = my1.initialized_value()
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {"var1": my1})
after = my1.initialized_value()
self.assertAllEqual(session.run(before), [[0.0] * 10])
self.assertAllEqual(session.run(after), v1)
session.run(variables.global_variables_initializer())
self.assertAllEqual(session.run(my1), v1)
self.assertAllEqual(session.run(my1.initialized_value()), v1)
self.assertAllClose(session.run(before), v1)
self.assertAllClose(session.run(after), v1)
with self.assertRaises(AssertionError):
self.assertAllClose(v1, [[0.0] * 10])
def testInitWithScopeDoesNotCaptureSuffixes(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, v4 = _create_checkpoints(session, checkpoint_dir)
with ops.Graph().as_default() as g:
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
with variable_scope.variable_scope("useful_scope_1"):
my5_init = [[1.0, 2.0], [3.0, 4.0]]
my5 = variable_scope.get_variable("var5", initializer=my5_init)
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope/": "useful_scope/"})
with self.session(graph=g) as session:
session.run(variables.global_variables_initializer())
self.assertAllEqual(my4.eval(session), v4)
self.assertAllEqual(my5.eval(session), my5_init)
def testRestoreRunsOnSameDevice(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_create_checkpoints(session, checkpoint_dir)
with ops.Graph().as_default():
with ops.device("/job:ps"):
with variable_scope.variable_scope("useful_scope"):
variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope/": "useful_scope/"})
def testInitFromRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "some_scope/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitToRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitFromPartitionVar(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1 = _create_partition_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.zeros_initializer(),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
# Create another variable with different partitions than the variable in
# the checkpoint.
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.zeros_initializer(),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=16 << 10))
my2_var_list = my2._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"scope/var1": "some_scope/my1",
"scope/": "some_other_scope/"})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
my2_values = session.run(my2_var_list)
# Verify we created different number of partitions.
self.assertNotEqual(len(my2_values), len(v1))
# Verify the values were correctly initialized inspite of different
# partitions.
full_my2_values = np.concatenate(my2_values, axis=0)
full_v1_values = np.concatenate(v1, axis=0)
self.assertAllEqual(full_my2_values, full_v1_values)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"scope/var1": my1_var_list,})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
def testInitFromCheckpointMissing(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
_ = variable_scope.get_variable("my1", [10, 10])
_ = variable_scope.get_variable(
"my2", [1, 10],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer())
# No directory.
with self.assertRaises(errors_impl.OpError):
checkpoint_utils.init_from_checkpoint("no_dir",
{"var1": "some_scope/my1"})
# No variable in checkpoint.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"no_var": "some_scope/my1"})
# No variable in the graph.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var3": "some_scope/no_var"})
# Shape mismatch.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var1": "some_scope/my1"})
# Variable 'my1' and 'my2' are missing in given checkpoint scope.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(
checkpoint_dir, {"useful_scope/": "some_scope/"})
# Mapping is not to scope name.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope": "some_scope/"})
def testNoAdditionalReadOpsForResourceVariables(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
my1 = resource_variable_ops.ResourceVariable([[0.0] * 10], name="my1")
with ops.name_scope("init_from_checkpoint"):
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {"var1": my1})
# Basic sanity checks:
session.run(variables.global_variables_initializer())
self.assertAllEqual(session.run(my1), v1)
ops_in_init_from_checkpoint_scope = [
op for op in g.get_operations()
if (op.name.startswith("init_from_checkpoint/") and
not op.name.startswith("init_from_checkpoint/checkpoint_initializer"
) and
op.type != "AssignVariableOp" and
op.type != "Identity")
]
self.assertEqual(ops_in_init_from_checkpoint_scope, [])
@test_util.run_all_in_graph_and_eager_modes
| CheckpointsTest |
python | ApeWorX__ape | src/ape/types/events.py | {
"start": 944,
"end": 2851
} | class ____(BaseModel):
addresses: list[AddressType] = []
events: list[EventABI] = []
topic_filter: TopicFilter = []
start_block: int = 0
stop_block: Optional[int] = None # Use block height
selectors: dict[str, EventABI] = {}
@model_validator(mode="before")
@classmethod
def compute_selectors(cls, values):
values["selectors"] = {
encode_hex(keccak(text=event.selector)): event for event in values.get("events") or []
}
return values
@field_validator("start_block", mode="before")
@classmethod
def validate_start_block(cls, value):
return value or 0
@field_validator("addresses", "events", "topic_filter", mode="before")
@classmethod
def _convert_none_to_empty_list(cls, value):
return value or []
@field_validator("selectors", mode="before")
@classmethod
def _convert_none_to_dict(cls, value):
return value or {}
def model_dump(self, *args, **kwargs):
return FilterParams(
address=self.addresses,
fromBlock=to_hex(self.start_block),
toBlock=to_hex(self.stop_block or self.start_block),
topics=self.topic_filter, # type: ignore
)
@classmethod
def from_event(
cls,
event: Union[EventABI, "ContractEvent"],
search_topics: Optional[dict[str, Any]] = None,
addresses: Optional[list[AddressType]] = None,
start_block=None,
stop_block=None,
):
"""
Construct a log filter from an event topic query.
"""
abi = getattr(event, "abi", event)
topic_filter = encode_topics(abi, search_topics or {})
return cls(
addresses=addresses or [],
events=[abi],
topic_filter=topic_filter,
start_block=start_block,
stop_block=stop_block,
)
| LogFilter |
python | cloudpipe__cloudpickle | tests/mock_local_folder/mod.py | {
"start": 476,
"end": 607
} | class ____:
def method(self):
return "hello from a class importable locally"
LocalT = typing.TypeVar("LocalT")
| LocalClass |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/inspection_inspect.py | {
"start": 658,
"end": 1334
} | class ____(BaseNoMeta):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str]
assert_type(A.__mapper__, Mapper[Any])
assert_type(B.__mapper__, Mapper[Any])
a1 = A(data="d")
b1 = B(data="d")
e = create_engine("sqlite://")
insp_a1 = inspect(a1)
t: bool = insp_a1.transient
assert_type(insp_a1, InstanceState[A])
assert_type(inspect(b1), InstanceState[B])
m: Mapper[A] = inspect(A)
assert_type(inspect(A), Mapper[A])
assert_type(inspect(B), Mapper[B])
tables: List[str] = inspect(e).get_table_names()
i: Inspector = inspect(e)
assert_type(inspect(e), Inspector)
with e.connect() as conn:
inspect(conn).get_table_names()
| B |
python | ray-project__ray | python/ray/llm/_internal/common/observability/telemetry_utils.py | {
"start": 123,
"end": 1156
} | class ____:
"""Execute a function exactly once and block all callers until the function returns
Same as golang's `sync.Once <https://pkg.go.dev/sync#Once>`_
Took this directly from OpenTelemetry's Python SDK:
Ref: https://github.com/open-telemetry/opentelemetry-python/blob
/c6fab7d4c339dc5bf9eb9ef2723caad09d69bfca/opentelemetry-api/src/opentelemetry
/util/_once.py
"""
def __init__(self) -> None:
self._lock = Lock()
self._done = False
def do_once(self, func: Callable[[], None]) -> bool:
"""Execute ``func`` if it hasn't been executed or return.
Will block until ``func`` has been called by one thread.
Returns:
Whether or not ``func`` was executed in this call
"""
# fast path, try to avoid locking
if self._done:
return False
with self._lock:
if not self._done:
func()
self._done = True
return True
return False
| Once |
python | bokeh__bokeh | src/bokeh/document/locking.py | {
"start": 1560,
"end": 3453
} | class ____(Protocol[F]):
__call__: F
nolock: Literal[True]
def without_document_lock(func: F) -> NoLockCallback[F]:
''' Wrap a callback function to execute without first obtaining the
document lock.
Args:
func (callable) : The function to wrap
Returns:
callable : a function wrapped to execute without a |Document| lock.
While inside an unlocked callback, it is completely *unsafe* to modify
``curdoc()``. The value of ``curdoc()`` inside the callback will be a
specially wrapped version of |Document| that only allows safe operations,
which are:
* :func:`~bokeh.document.Document.add_next_tick_callback`
* :func:`~bokeh.document.Document.remove_next_tick_callback`
Only these may be used safely without taking the document lock. To make
other changes to the document, you must add a next tick callback and make
your changes to ``curdoc()`` from that second callback.
Attempts to otherwise access or change the Document will result in an
exception being raised.
``func`` can be a synchronous function, an async function, or a function
decorated with ``asyncio.coroutine``. The returned function will be an
async function if ``func`` is any of the latter two.
'''
if asyncio.iscoroutinefunction(func):
@wraps(func)
async def _wrapper(*args: Any, **kw: Any) -> None:
await func(*args, **kw)
else:
@wraps(func)
def _wrapper(*args: Any, **kw: Any) -> None:
func(*args, **kw)
wrapper = cast(NoLockCallback[F], _wrapper)
wrapper.nolock = True
return wrapper
UNSAFE_DOC_ATTR_USAGE_MSG = (
"Only 'add_next_tick_callback' may be used safely without taking the document lock; "
"to make other changes to the document, add a next tick callback and make your changes "
"from that callback."
)
| NoLockCallback |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core_tests/utils_tests/test_naming.py | {
"start": 2999,
"end": 3999
} | class ____:
"""Test how component names are processed in the scaffolding pipeline."""
def test_component_name_to_module_name_conversion(self):
"""Test the full pipeline from component name to module file name."""
# This simulates what happens in _parse_component_name
component_names = [
"ACMEDatabricksJobComponent",
"HTTPSConnection",
"XMLParser",
"SimpleComponent",
"DatabaseConnection",
]
for name in component_names:
module_name = snakecase(name)
# Verify it creates valid Python module names
assert module_name.isidentifier() or "_" in module_name
# Verify no double underscores (which can be problematic)
assert "__" not in module_name
# Verify it doesn't start or end with underscore
assert not module_name.startswith("_")
assert not module_name.endswith("_")
| TestComponentNamingIntegration |
python | pytorch__pytorch | test/distributed/_tools/test_sac_ilp.py | {
"start": 8425,
"end": 10224
} | class ____(TestCase):
# tests are adapted from tests in xformers
# https://github.com/facebookresearch/xformers/blob/c6c0ac31f1b08542a0bc27278c6ed10f825f6963/tests/test_checkpoint.py#L222
def setUp(self):
super().setUp()
data = [
("aten.copy_", 5, 0),
("aten.add", 5, 100),
("aten.div", 8, 100),
("aten.mm", 15, 120),
("aten.native_dropout", 15, 0),
("aten.linear", 9, 100),
("aten.t", 1, 0),
("aten.relu_", 5, 0),
]
self.sac_stats = SACStats(
func_names=[x[0] for x in data],
runtimes=[x[1] for x in data],
memory=[x[2] for x in data],
view_like_ops=[6],
rand_ops=[4],
saved_autograd_ops=[], # not needed for SAC decisions
inplace_ops=[(0, 0), (7, 5)],
force_store_random=False,
)
@skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/115653")
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_get_optimial_checkpointing_policy_per_module(self):
for memory_budget, optimal_soln in [
(0, [1, 0, 0, 0, 1, 0, 0, 0]),
(100 / 420, [1, 0, 0, 0, 1, 1, 0, 1]),
(120 / 420, [1, 0, 0, 1, 1, 0, 0, 0]),
(200 / 420, [1, 0, 1, 0, 1, 1, 0, 1]),
(220 / 420, [1, 0, 0, 1, 1, 1, 0, 1]),
(320 / 420, [1, 0, 1, 1, 1, 1, 0, 1]),
(420 / 420, [1, 1, 1, 1, 1, 1, 0, 1]),
]:
soln = get_optimal_checkpointing_policy_per_module(
sac_stats=self.sac_stats, memory_budget=memory_budget
)
self.assertEqual(optimal_soln, soln)
if __name__ == "__main__":
run_tests()
| TestOptimalCheckpointingPolicy |
python | rapidsai__cudf | python/cudf/cudf/core/series.py | {
"start": 8702,
"end": 14236
} | class ____(_FrameIndexer):
"""
Label-based selection
"""
@_performance_tracking
def __getitem__(self, arg: Any) -> ScalarLike | DataFrameOrSeries:
if not isinstance(self._frame.index, cudf.MultiIndex):
indexing_spec = indexing_utils.parse_row_loc_indexer(
indexing_utils.destructure_series_loc_indexer(
arg, self._frame
),
self._frame.index,
)
return self._frame._getitem_preprocessed(indexing_spec)
if isinstance(arg, pd.MultiIndex):
arg = cudf.from_pandas(arg)
if isinstance(self._frame.index, cudf.MultiIndex) and not isinstance(
arg, cudf.MultiIndex
):
if is_scalar(arg):
row_arg = (arg,)
else:
row_arg = arg
result = self._frame.index._get_row_major(self._frame, row_arg)
if (
isinstance(arg, tuple)
and len(arg) == self._frame.index.nlevels
and not any(isinstance(x, slice) for x in arg)
):
result = result.iloc[0]
return result
try:
arg = self._loc_to_iloc(arg)
except (TypeError, KeyError, IndexError, ValueError) as err:
raise KeyError(arg) from err
return self._frame.iloc[arg]
@_performance_tracking
def __setitem__(self, key, value):
try:
key = self._loc_to_iloc(key)
except KeyError as e:
if (
is_scalar(key)
and not isinstance(self._frame.index, cudf.MultiIndex)
and is_scalar(value)
):
self.append_new_row(key, value, column=True)
return
else:
raise e
if isinstance(value, (pd.Series, cudf.Series)):
value = cudf.Series(value)
value = value._align_to_index(self._frame.index, how="right")
self._frame.iloc[key] = value
def _loc_to_iloc(self, arg):
if isinstance(arg, tuple) and arg and isinstance(arg[0], slice):
if len(arg) > 1:
raise IndexError("Too many Indexers")
arg = arg[0]
if _is_scalar_or_zero_d_array(arg):
index_dtype = self._frame.index.dtype
if isinstance(index_dtype, cudf.IntervalDtype) and not isinstance(
arg, pd.Interval
):
raise NotImplementedError(
"Interval indexing is not supported."
)
if not is_dtype_obj_numeric(
index_dtype, include_decimal=False
) and not (
isinstance(index_dtype, CategoricalDtype)
and index_dtype.categories.dtype.kind in "iu"
):
# TODO: switch to cudf.utils.dtypes.is_integer(arg)
if is_integer(arg):
# Do not remove until pandas 3.0 support is added.
assert PANDAS_LT_300, (
"Need to drop after pandas-3.0 support is added."
)
warn_msg = (
"Series.__getitem__ treating keys as positions is deprecated. "
"In a future version, integer keys will always be treated "
"as labels (consistent with DataFrame behavior). To access "
"a value by position, use `ser.iloc[pos]`"
)
warnings.warn(warn_msg, FutureWarning)
return arg
try:
if isinstance(self._frame.index, RangeIndex):
indices = self._frame.index._indices_of(arg)
else:
indices = self._frame.index._column.indices_of(arg)
if (n := len(indices)) == 0:
raise KeyError("Label scalar is out of bounds")
elif n == 1:
return indices.element_indexing(0)
else:
return indices
except (TypeError, KeyError, IndexError, ValueError):
raise KeyError("Label scalar is out of bounds")
elif isinstance(arg, slice):
indexer = indexing_utils.find_label_range_or_mask(
arg, self._frame.index
)
if isinstance(indexer, indexing_utils.EmptyIndexer):
return slice(0, 0, 1)
elif isinstance(indexer, indexing_utils.SliceIndexer):
return indexer.key
else:
return indexer.key.column
elif isinstance(arg, (cudf.MultiIndex, pd.MultiIndex)):
if isinstance(arg, pd.MultiIndex):
arg = cudf.MultiIndex(
levels=arg.levels, codes=arg.codes, names=arg.names
)
return _indices_from_labels(self._frame, arg)
else:
col = as_column(arg)
if col.dtype.kind == "b":
return Series._from_column(col)
else:
indices = _indices_from_labels(
self._frame, Index._from_column(col)
)
if indices.null_count > 0:
missing = (
indices[indices.isnull()].index.to_pandas().tolist()
)
raise KeyError(f"{missing} not in the index.")
return indices
| _SeriesLocIndexer |
python | Textualize__textual | src/textual/_compositor.py | {
"start": 1299,
"end": 1724
} | class ____(NamedTuple):
"""The result of a reflow operation. Describes the chances to widgets."""
hidden: set[Widget] # Widgets that are hidden
shown: set[Widget] # Widgets that are shown
resized: set[Widget] # Widgets that have been resized
# Maps a widget on to its geometry (information that describes its position in the composition)
CompositorMap: TypeAlias = "dict[Widget, MapGeometry]"
| ReflowResult |
python | matplotlib__matplotlib | galleries/examples/specialty_plots/skewt.py | {
"start": 3438,
"end": 10143
} | class ____(Axes):
# The projection must specify a name. This will be used be the
# user to select the projection, i.e. ``subplot(projection='skewx')``.
name = 'skewx'
def _init_axis(self):
# Taken from Axes and modified to use our modified X-axis
self.xaxis = SkewXAxis(self)
self.spines.top.register_axis(self.xaxis)
self.spines.bottom.register_axis(self.xaxis)
self.yaxis = maxis.YAxis(self)
self.spines.left.register_axis(self.yaxis)
self.spines.right.register_axis(self.yaxis)
def _gen_axes_spines(self):
spines = {'top': SkewSpine.linear_spine(self, 'top'),
'bottom': mspines.Spine.linear_spine(self, 'bottom'),
'left': mspines.Spine.linear_spine(self, 'left'),
'right': mspines.Spine.linear_spine(self, 'right')}
return spines
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
rot = 30
# Get the standard transform setup from the Axes base class
super()._set_lim_and_transforms()
# Need to put the skew in the middle, after the scale and limits,
# but before the transAxes. This way, the skew is done in Axes
# coordinates thus performing the transform around the proper origin
# We keep the pre-transAxes transform around for other users, like the
# spines for finding bounds
self.transDataToAxes = (
self.transScale
+ self.transLimits
+ transforms.Affine2D().skew_deg(rot, 0)
)
# Create the full transform from Data to Pixels
self.transData = self.transDataToAxes + self.transAxes
# Blended transforms like this need to have the skewing applied using
# both axes, in axes coords like before.
self._xaxis_transform = (
transforms.blended_transform_factory(
self.transScale + self.transLimits,
transforms.IdentityTransform())
+ transforms.Affine2D().skew_deg(rot, 0)
+ self.transAxes
)
@property
def lower_xlim(self):
return self.axes.viewLim.intervalx
@property
def upper_xlim(self):
pts = [[0., 1.], [1., 1.]]
return self.transDataToAxes.inverted().transform(pts)[:, 0]
# Now register the projection with matplotlib so the user can select it.
register_projection(SkewXAxes)
if __name__ == '__main__':
# Now make a simple example using the custom projection.
from io import StringIO
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator, NullFormatter, ScalarFormatter
# Some example data.
data_txt = '''
978.0 345 7.8 0.8
971.0 404 7.2 0.2
946.7 610 5.2 -1.8
944.0 634 5.0 -2.0
925.0 798 3.4 -2.6
911.8 914 2.4 -2.7
906.0 966 2.0 -2.7
877.9 1219 0.4 -3.2
850.0 1478 -1.3 -3.7
841.0 1563 -1.9 -3.8
823.0 1736 1.4 -0.7
813.6 1829 4.5 1.2
809.0 1875 6.0 2.2
798.0 1988 7.4 -0.6
791.0 2061 7.6 -1.4
783.9 2134 7.0 -1.7
755.1 2438 4.8 -3.1
727.3 2743 2.5 -4.4
700.5 3048 0.2 -5.8
700.0 3054 0.2 -5.8
698.0 3077 0.0 -6.0
687.0 3204 -0.1 -7.1
648.9 3658 -3.2 -10.9
631.0 3881 -4.7 -12.7
600.7 4267 -6.4 -16.7
592.0 4381 -6.9 -17.9
577.6 4572 -8.1 -19.6
555.3 4877 -10.0 -22.3
536.0 5151 -11.7 -24.7
533.8 5182 -11.9 -25.0
500.0 5680 -15.9 -29.9
472.3 6096 -19.7 -33.4
453.0 6401 -22.4 -36.0
400.0 7310 -30.7 -43.7
399.7 7315 -30.8 -43.8
387.0 7543 -33.1 -46.1
382.7 7620 -33.8 -46.8
342.0 8398 -40.5 -53.5
320.4 8839 -43.7 -56.7
318.0 8890 -44.1 -57.1
310.0 9060 -44.7 -58.7
306.1 9144 -43.9 -57.9
305.0 9169 -43.7 -57.7
300.0 9280 -43.5 -57.5
292.0 9462 -43.7 -58.7
276.0 9838 -47.1 -62.1
264.0 10132 -47.5 -62.5
251.0 10464 -49.7 -64.7
250.0 10490 -49.7 -64.7
247.0 10569 -48.7 -63.7
244.0 10649 -48.9 -63.9
243.3 10668 -48.9 -63.9
220.0 11327 -50.3 -65.3
212.0 11569 -50.5 -65.5
210.0 11631 -49.7 -64.7
200.0 11950 -49.9 -64.9
194.0 12149 -49.9 -64.9
183.0 12529 -51.3 -66.3
164.0 13233 -55.3 -68.3
152.0 13716 -56.5 -69.5
150.0 13800 -57.1 -70.1
136.0 14414 -60.5 -72.5
132.0 14600 -60.1 -72.1
131.4 14630 -60.2 -72.2
128.0 14792 -60.9 -72.9
125.0 14939 -60.1 -72.1
119.0 15240 -62.2 -73.8
112.0 15616 -64.9 -75.9
108.0 15838 -64.1 -75.1
107.8 15850 -64.1 -75.1
105.0 16010 -64.7 -75.7
103.0 16128 -62.9 -73.9
100.0 16310 -62.5 -73.5
'''
# Parse the data
sound_data = StringIO(data_txt)
p, h, T, Td = np.loadtxt(sound_data, unpack=True)
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(6.5875, 6.2125))
ax = fig.add_subplot(projection='skewx')
plt.grid(True)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
ax.semilogy(T, p, color='C3')
ax.semilogy(Td, p, color='C2')
# An example of a slanted line at constant X
l = ax.axvline(0, color='C0')
# Disables the log-formatting that comes with semilogy
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.yaxis.set_minor_formatter(NullFormatter())
ax.set_yticks(np.linspace(100, 1000, 10))
ax.set_ylim(1050, 100)
ax.xaxis.set_major_locator(MultipleLocator(10))
ax.set_xlim(-50, 50)
plt.show()
# %%
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.transforms`
# - `matplotlib.spines`
# - `matplotlib.spines.Spine`
# - `matplotlib.spines.Spine.register_axis`
# - `matplotlib.projections`
# - `matplotlib.projections.register_projection`
| SkewXAxes |
python | pydantic__pydantic | tests/test_forward_ref.py | {
"start": 16375,
"end": 16411
} | class ____(BaseModel):
y: str
| User |
python | huggingface__transformers | tests/models/patchtst/test_modeling_patchtst.py | {
"start": 1589,
"end": 5104
} | class ____:
def __init__(
self,
parent,
batch_size=13,
prediction_length=7,
context_length=14,
patch_length=5,
patch_stride=5,
num_input_channels=1,
num_time_features=1,
is_training=True,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
distil=False,
seed=42,
num_targets=2,
mask_type="random",
random_mask_ratio=0,
):
self.parent = parent
self.batch_size = batch_size
self.prediction_length = prediction_length
self.context_length = context_length
self.patch_length = patch_length
self.patch_stride = patch_stride
self.num_input_channels = num_input_channels
self.num_time_features = num_time_features
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.mask_type = mask_type
self.random_mask_ratio = random_mask_ratio
self.seed = seed
self.num_targets = num_targets
self.distil = distil
self.num_patches = (max(self.context_length, self.patch_length) - self.patch_length) // self.patch_stride + 1
# define seq_length so that it can pass the test_attention_outputs
self.seq_length = self.num_patches
def get_config(self):
return PatchTSTConfig(
prediction_length=self.prediction_length,
patch_length=self.patch_length,
patch_stride=self.patch_stride,
num_input_channels=self.num_input_channels,
d_model=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
context_length=self.context_length,
activation_function=self.hidden_act,
seed=self.seed,
num_targets=self.num_targets,
mask_type=self.mask_type,
random_mask_ratio=self.random_mask_ratio,
)
def prepare_patchtst_inputs_dict(self, config):
_past_length = config.context_length
# bs, num_input_channels, num_patch, patch_len
# [bs x seq_len x num_input_channels]
past_values = floats_tensor([self.batch_size, _past_length, self.num_input_channels])
future_values = floats_tensor([self.batch_size, config.prediction_length, self.num_input_channels])
inputs_dict = {
"past_values": past_values,
"future_values": future_values,
}
return inputs_dict
def prepare_config_and_inputs(self):
config = self.get_config()
inputs_dict = self.prepare_patchtst_inputs_dict(config)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
@require_torch
| PatchTSTModelTester |
python | streamlit__streamlit | lib/streamlit/runtime/uploaded_file_manager.py | {
"start": 2842,
"end": 4935
class UploadedFileManager(CacheStatsProvider, Protocol):
    """UploadedFileManager protocol, that should be implemented by the concrete
    uploaded file managers.

    It is responsible for:
    - retrieving files by session_id and file_id for st.file_uploader and
      st.camera_input
    - cleaning up uploaded files associated with a session on session end

    It should be created during Runtime initialization.

    Optionally UploadedFileManager could be responsible for issuing URLs which will be
    used by the frontend to upload files to.
    """

    @abstractmethod
    def get_files(
        self, session_id: str, file_ids: Sequence[str]
    ) -> list[UploadedFileRec]:
        """Return a list of UploadedFileRec for a given sequence of file_ids.

        Parameters
        ----------
        session_id
            The ID of the session that owns the files.
        file_ids
            The sequence of ids associated with files to retrieve.

        Returns
        -------
        List[UploadedFileRec]
            A list of UploadedFileRec instances, each instance contains information
            about an uploaded file.
        """
        raise NotImplementedError

    @abstractmethod
    def remove_session_files(self, session_id: str) -> None:
        """Remove all files associated with a given session."""
        raise NotImplementedError

    def get_upload_urls(
        self, session_id: str, file_names: Sequence[str]
    ) -> list[UploadFileUrlInfo]:
        """Return a list of UploadFileUrlInfo for a given sequence of file_names.

        Optional to implement; issuing of URLs could be done by another service.

        Parameters
        ----------
        session_id
            The ID of the session that requests URLs.
        file_names
            The sequence of file names for which URLs are requested.

        Returns
        -------
        List[UploadFileUrlInfo]
            A list of UploadFileUrlInfo instances, each instance contains information
            about uploaded file URLs.
        """
        raise NotImplementedError
# --- chunk boundary (dataset residue; preserved for provenance) ---
# Previous chunk's masked class name: UploadedFileManager.
# Next chunk: getsentry__sentry, src/sentry/search/events/builder/metrics.py (chars 2507-67828).
class MetricsQueryBuilder(BaseQueryBuilder):
    """Query builder for transaction metrics (name restored from the masked
    declaration; the error message in ``get_snql_query`` confirms it)."""

    # Metrics queries always need an organization-level condition.
    requires_organization_condition = True
    duration_fields = {"transaction.duration"}
    organization_column: str = "organization_id"
    column_remapping = {
        # This MetricsQueryBuilder is only used for transaction metrics.
        # So `message` is mapped to `transaction` but subclasses of this
        # should be mindful of this and override this value appropriately.
        #
        # Note: This really shouldn't be in the parent class at all, and
        # should live strictly in child classes.
        "message": "transaction",
    }
    default_metric_tags = constants.DEFAULT_METRIC_TAGS
    def __init__(
        self,
        *args: Any,
        # Datasets are currently a bit confusing; Dataset.Metrics is actually release health/sessions
        # Dataset.PerformanceMetrics is MEP. TODO: rename Dataset.Metrics to Dataset.ReleaseMetrics or similar
        dataset: Dataset | None = None,
        granularity: int | None = None,
        # Alerts queries do not contain a start and end time, so we need to accept a time_range_window in order to calculate functions such as spm/epm/eps
        time_range_window: int | None = None,
        config: QueryBuilderConfig | None = None,
        **kwargs: Any,
    ):
        """Initialize per-metric-type accumulators, then defer to the base builder.

        Raises:
            InvalidSearchQuery: if no organization id is available after base init.
        """
        if config is None:
            config = QueryBuilderConfig()
        kwargs["config"] = config
        # Resolved aggregate functions are bucketed by metric type so each type
        # can later be queried against its own entity.
        self.distributions: list[CurriedFunction] = []
        self.sets: list[CurriedFunction] = []
        self.counters: list[CurriedFunction] = []
        self.gauges: list[CurriedFunction] = []
        self.percentiles: list[CurriedFunction] = []
        # only used for metrics_layer right now
        self.metrics_layer_functions: list[CurriedFunction] = []
        self.metric_ids: set[int] = set()
        # Memoization caches for indexer lookups and expensive properties.
        self._indexer_cache: dict[str, int | None] = {}
        self._use_default_tags: bool | None = None
        self._has_nullable: bool = False
        self._is_spans_metrics_query_cache: bool | None = None
        self._is_unsupported_metrics_layer_query_cache: bool | None = None
        # always true if this is being called
        config.has_metrics = True
        assert dataset is None or dataset in [Dataset.PerformanceMetrics, Dataset.Metrics]
        # Optional manual overrides consumed by resolve_granularity /
        # resolve_time_range_window.
        if granularity is not None:
            self._granularity = granularity
        if time_range_window is not None:
            self._time_range_window = time_range_window
        super().__init__(
            # TODO: defaulting to Metrics for now so I don't have to update incidents tests. Should be
            # PerformanceMetrics
            Dataset.Metrics if dataset is None else dataset,
            *args,
            **kwargs,
        )
        if self.organization_id is None:
            raise InvalidSearchQuery("Organization id required to create a metrics query")
        sentry_sdk.set_tag("on_demand_metrics.type", config.on_demand_metrics_type)
        sentry_sdk.set_tag("on_demand_metrics.enabled", config.on_demand_metrics_enabled)
def load_config(self) -> DatasetConfig:
if hasattr(self, "config_class") and self.config_class is not None:
return super().load_config()
if self.dataset in [Dataset.Metrics, Dataset.PerformanceMetrics]:
if self.use_metrics_layer:
return MetricsLayerDatasetConfig(self)
else:
return MetricsDatasetConfig(self)
else:
raise NotImplementedError(f"Data Set configuration not found for {self.dataset}.")
    @property
    def use_default_tags(self) -> bool:
        """Whether tag keys are restricted to the static default tag allowlist.

        Span-metric queries never use the allowlist; otherwise the answer is
        feature-flag driven and memoized in ``self._use_default_tags``.
        """
        if self.is_spans_metrics_query:
            return False
        if self._use_default_tags is None:
            if self.params.organization is not None:
                self._use_default_tags = features.has(
                    "organizations:mep-use-default-tags", self.params.organization, actor=None
                )
            else:
                # No organization in params: fail closed (no allowlist).
                self._use_default_tags = False
        return self._use_default_tags
def are_columns_resolved(self) -> bool:
# If we have an on demand spec, we want to mark the columns as resolved, since we are not running the
# `resolve_query` method.
if self.use_on_demand:
return True
# If we are using the metrics layer, we consider columns to be resolved if they are of type `Function` or
# `AlisedExpression`. The reason for why we have to check for `AlisedExpression` is because some derived metrics
# are passed as aliased expressions to the MQB query transformer.
if self.use_metrics_layer:
first_column = self.columns[0]
return bool(self.columns) and (
isinstance(first_column, Function) or isinstance(first_column, AliasedExpression)
)
return super().are_columns_resolved()
    def _is_on_demand_extraction_disabled(self, query_hash: str) -> bool:
        """Return True when any dashboard-widget row matching *query_hash* has
        on-demand extraction disabled, reporting the occurrence to Sentry."""
        spec_version = OnDemandMetricSpecVersioning.get_query_spec_version(self.organization_id)
        on_demand_entries = DashboardWidgetQueryOnDemand.objects.filter(
            spec_hashes__contains=[query_hash],
            spec_version=spec_version.version,
            dashboard_widget_query__widget__dashboard__organization_id=self.organization_id,
        )
        if any(not entry.extraction_enabled() for entry in on_demand_entries):
            # Surface the disabled rows for debugging without failing the query.
            with sentry_sdk.isolation_scope() as scope:
                scope.set_extra("entries", on_demand_entries)
                scope.set_extra("hash", query_hash)
                sentry_sdk.capture_message(
                    "extraction disabled for one of the matching on-demand rows"
                )
            return True
        return False
    def _get_on_demand_metric_spec(self, field: str) -> OnDemandMetricSpec | None:
        """Fetch the on-demand metric spec for *field*, or None when on-demand
        querying does not apply (or spec fetching fails — best effort)."""
        if not field:
            return None
        groupby_columns = self._get_group_bys()
        if not should_use_on_demand_metrics_for_querying(
            Organization.objects.get_from_cache(id=self.organization_id),
            dataset=self.dataset,
            aggregate=field,
            query=self.query,
            groupbys=groupby_columns,
        ):
            return None
        try:
            environment = None
            if self.params.environments:
                environment = self.params.environments[0].name
            if not self.builder_config.on_demand_metrics_type:
                raise InvalidSearchQuery(
                    "Must include on demand metrics type when querying on demand"
                )
            metric_spec = fetch_on_demand_metric_spec(
                self.organization_id,
                field=field,
                query=self.query,
                environment=environment,
                groupbys=groupby_columns,
                spec_type=self.builder_config.on_demand_metrics_type,
            )
            if self._is_on_demand_extraction_disabled(metric_spec.query_hash):
                return None
            return metric_spec
        except Exception as e:
            # Deliberately broad: a failed spec fetch degrades to "no on-demand"
            # rather than failing the whole query; the error is still reported.
            sentry_sdk.capture_exception(e)
            return None
def _get_group_bys(self) -> list[str]:
return [c for c in self.selected_columns if not fields.is_function(c)]
def _get_aggregates(self) -> list[str]:
return [c for c in self.selected_columns if fields.is_function(c)]
@cached_property
def use_on_demand(self) -> bool:
return bool(self._on_demand_metric_spec_map)
    @cached_property
    def _on_demand_metric_spec_map(self) -> dict[str, OnDemandMetricSpec]:
        """Map of selected aggregate column -> its OnDemandMetricSpec.

        Empty when on-demand metrics are disabled for this builder, or when no
        selected aggregate yields a spec.
        """
        if not self.builder_config.on_demand_metrics_enabled:
            return {}
        spec_map = {}
        for col in self.selected_columns:
            spec = self._get_on_demand_metric_spec(col)
            # Only aggregate (function) columns get specs; plain fields are group-bys.
            if fields.is_function(col) and spec:
                spec_map[col] = spec
        return spec_map
    def convert_spec_to_metric_field(self, spec: OnDemandMetricSpec) -> MetricField:
        """Translate an on-demand spec into a MetricField, choosing the alias
        convention the concrete builder subclass expects."""
        if isinstance(self, (TopMetricsQueryBuilder, TimeseriesMetricQueryBuilder)):
            alias = get_function_alias(spec.field) or "count"
        elif isinstance(self, AlertMetricsQueryBuilder):
            # Alerts address the metric directly by its MRI.
            alias = spec.mri
        else:
            alias = get_function_alias(spec.field) or spec.mri
        return MetricField(spec.op, spec.mri, alias=alias)
    def _get_metrics_query_from_on_demand_spec(
        self,
        spec: OnDemandMetricSpec,
        require_time_range: bool = True,
        groupby: Sequence[MetricGroupByField] | None = None,
        orderby: Sequence[MetricOrderByField] | None = None,
        # Where normally isn't accepted for on-demand since it should only encoded into the metric
        # but in the case of top events, etc. there is need for another where condition dynamically for top N groups.
        additional_where: Sequence[Condition] | None = None,
    ) -> DeprecatingMetricsQuery:
        """Build a metrics-layer query for an on-demand spec.

        Limit/series/interval behavior depends on the concrete builder subclass
        (top-N, timeseries, alert, or plain table query).

        Raises:
            InvalidSearchQuery: when organization/columns/time-range requirements
                are not met.
        """
        if self.params.organization is None:
            raise InvalidSearchQuery("An on demand metrics query requires an organization")
        if len(self.selected_columns) == 0:
            raise InvalidSearchQuery(
                "An on demand metrics query requires at least one selected column"
            )
        max_limit = None
        if isinstance(self, TopMetricsQueryBuilder):
            limit = self.limit or Limit(1)
            # Top N events passes a limit of 10000 by default. That's also the upper bound for metrics layer, so
            # we need to reduce the interval.
            intervals_len = get_num_intervals(
                start=self.start,
                end=self.end,
                granularity=self.granularity,
                interval=self.interval,
            )
            if intervals_len > 0:
                limit = Limit(int(limit.limit / intervals_len))
            max_limit = 10_000
            include_series = True
            interval = self.interval
        elif isinstance(self, TimeseriesMetricQueryBuilder):
            limit = Limit(1)
            include_series = True
            interval = self.interval
        elif isinstance(self, AlertMetricsQueryBuilder):
            limit = self.limit or Limit(1)
            include_series = False
            interval = None
        else:
            limit = self.limit or Limit(1)
            include_series = False
            interval = None
        # Since the query builder is very convoluted, we first try to get the start and end from the validated
        # parameters but in case it's none it can be that the `skip_time_conditions` was True, thus in that case we
        # try to see if start and end were supplied directly in the constructor.
        start = self.start or self.params.start
        end = self.end or self.params.end
        # The time range can be required or not, since the query generated by the builder can either be used to execute
        # the query on its own (requiring a time range) or it can be used to get the snql code necessary to create a
        # query subscription from the outside.
        if require_time_range and (start is None or end is None):
            raise InvalidSearchQuery(
                "The on demand metric query requires a time range to be executed"
            )
        # The on-demand metric is selected by its query hash; the original
        # filter conditions are baked into the extracted metric itself.
        where = [
            Condition(
                lhs=Column(QUERY_HASH_KEY),
                op=Op.EQ,
                rhs=spec.query_hash,
            ),
        ]
        if spec.spec_type == MetricSpecType.DYNAMIC_QUERY and spec.environment:
            where.append(Condition(lhs=Column("environment"), op=Op.EQ, rhs=spec.environment))
        if additional_where:
            where.extend(additional_where)
        return DeprecatingMetricsQuery(
            select=[self.convert_spec_to_metric_field(spec)],
            where=where,
            limit=limit,
            max_limit=max_limit,
            offset=self.offset,
            granularity=self.granularity,
            interval=interval,
            is_alerts_query=True,
            org_id=self.params.organization.id,
            project_ids=[p.id for p in self.params.projects],
            include_series=include_series,
            orderby=orderby,
            groupby=groupby,
            start=start,
            end=end,
            skip_orderby_validation=True,
        )
def validate_aggregate_arguments(self) -> None:
if not self.use_metrics_layer:
super().validate_aggregate_arguments()
    @property
    def is_spans_metrics_query(self) -> bool:
        """This property is used to determine if a query is using at least one of the fields in the spans namespace.

        The result is memoized in ``self._is_spans_metrics_query_cache``.
        """
        if self._is_spans_metrics_query_cache is not None:
            return self._is_spans_metrics_query_cache
        # Check filter tags first, then selected functions/arguments.
        if self.query is not None:
            tags = parse_query(
                self.params.projects, self.query, self.params.user, self.params.environments
            )["tags"]
            for tag in tags:
                if tag in constants.SPANS_METRICS_TAGS:
                    self._is_spans_metrics_query_cache = True
                    return True
        for column in self.selected_columns:
            # Not using parse_function since it checks against function_converter
            # which is not loaded yet and we also do not need it
            match = fields.is_function(column)
            func = match.group("function") if match else None
            if func in constants.SPANS_METRICS_FUNCTIONS:
                self._is_spans_metrics_query_cache = True
                return True
            argument = match.group("columns") if match else None
            # Arguments that only exist in the span metrics mapping (or are raw
            # span MRIs) also mark the query as a spans query.
            if (
                argument in constants.SPAN_METRICS_MAP.keys() - constants.METRICS_MAP.keys()
                or argument in constants.SPAN_METRICS_MAP.values()
            ):
                self._is_spans_metrics_query_cache = True
                return True
        self._is_spans_metrics_query_cache = False
        return False
    @property
    def is_unsupported_metrics_layer_query(self) -> bool:
        """Some fields and functions cannot be translated to metrics layer queries.

        This property is used to determine if a query is using at least one of these fields or functions, and if so, we must not use the metrics layer.
        The result is memoized in ``self._is_unsupported_metrics_layer_query_cache``.
        """
        if self._is_unsupported_metrics_layer_query_cache is not None:
            return self._is_unsupported_metrics_layer_query_cache
        # Span metric queries are categorically unsupported by the metrics layer.
        if self.is_spans_metrics_query:
            self._is_unsupported_metrics_layer_query_cache = True
            return True
        for column in self.selected_columns:
            # Not using parse_function since it checks against function_converter
            # which is not loaded yet and we also do not need it
            match = fields.is_function(column)
            func = match.group("function") if match else None
            if func in constants.METRICS_LAYER_UNSUPPORTED_TRANSACTION_METRICS_FUNCTIONS:
                self._is_unsupported_metrics_layer_query_cache = True
                return True
        self._is_unsupported_metrics_layer_query_cache = False
        return False
    @property
    def is_performance(self) -> bool:
        """True when querying the generic (performance/MEP) metrics dataset."""
        return self.dataset is Dataset.PerformanceMetrics
@property
def use_case_id(self) -> UseCaseID:
if self.spans_metrics_builder or self.is_spans_metrics_query:
return UseCaseID.SPANS
elif self.is_performance:
return UseCaseID.TRANSACTIONS
else:
return UseCaseID.SESSIONS
    @property
    def use_metrics_layer(self) -> bool:
        """Whether this query should be routed through the metrics layer."""
        # We want to use the metrics layer only for normal metrics, since span metrics are currently
        # NOT supported.
        if (
            self.builder_config.insights_metrics_override_metric_layer
            and self.is_unsupported_metrics_layer_query
        ):
            return False
        return self.builder_config.use_metrics_layer and not self.spans_metrics_builder
    def resolve_query(
        self,
        query: str | None = None,
        selected_columns: list[str] | None = None,
        groupby_columns: list[str] | None = None,
        equations: list[str] | None = None,
        orderby: list[str] | None = None,
    ) -> None:
        """Resolve the full query (time range, granularity, conditions, columns).

        On-demand queries skip condition/column resolution: the spec encodes the
        filter, but columns are still "resolved" for their alias side effects.
        """
        # Resolutions that we always must perform, irrespectively of on demand.
        with sentry_sdk.start_span(op="QueryBuilder", name="resolve_time_conditions"):
            # Has to be done early, since other conditions depend on start and end
            self.resolve_time_conditions()
        with sentry_sdk.start_span(op="QueryBuilder", name="resolve_granularity"):
            # Needs to happen before params and after time conditions since granularity can change start&end
            self.granularity = self.resolve_granularity()
            if self.start is not None:
                self.start = adjust_datetime_to_granularity(
                    self.start, self.granularity.granularity
                )
        # Resolutions that we will perform only in case the query is not on demand. The reasoning for this is that
        # for building an on demand query we only require a time interval and granularity. All the other fields are
        # automatically computed given the OnDemandMetricSpec.
        if not self.use_on_demand:
            with sentry_sdk.start_span(op="QueryBuilder", name="resolve_conditions"):
                self.where, self.having = self.resolve_conditions(query)
            with sentry_sdk.start_span(op="QueryBuilder", name="resolve_params"):
                # params depends on parse_query, and conditions being resolved first since there may be projects
                # in conditions
                self.where += self.resolve_params()
            with sentry_sdk.start_span(op="QueryBuilder", name="resolve_columns"):
                self.columns = self.resolve_select(selected_columns, equations)
            with sentry_sdk.start_span(op="QueryBuilder", name="resolve_orderby"):
                self.orderby = self.resolve_orderby(orderby)
            with sentry_sdk.start_span(op="QueryBuilder", name="resolve_groupby"):
                self.groupby = self.resolve_groupby(groupby_columns)
        else:
            # On demand still needs to call resolve since resolving columns has a side_effect
            # of adding their alias to the function_alias_map, which is required to convert snuba
            # aliases back to their original functions.
            for column in selected_columns:
                try:
                    self.resolve_select([column], [])
                except (IncompatibleMetricsQuery, InvalidSearchQuery):
                    # This may fail for some columns like apdex but it will still enter into the field_alias_map
                    pass
        if len(self.metric_ids) > 0 and not self.use_metrics_layer:
            self.where.append(
                # Metric id is intentionally sorted, so we create consistent queries here both for testing & caching.
                Condition(Column("metric_id"), Op.IN, sorted(self.metric_ids))
            )
    def resolve_column_name(self, col: str) -> str:
        """Map an unresolved sentry field name to its snuba column name.

        Raises:
            InvalidSearchQuery: if the field is an unknown tag.
        """
        if col.startswith("tags["):
            tag_match = constants.TAG_KEY_RE.search(col)
            col = tag_match.group("tag") if tag_match else col
        # on-demand metrics require metrics layer behavior
        if self.use_metrics_layer or self.use_on_demand:
            if col in ["project_id", "timestamp"]:
                return col
            # TODO: update resolve params so this isn't needed
            if col == "organization_id":
                return "org_id"
            if col == "transaction":
                self.has_transaction = True
            return f"tags[{col}]"
        if col in DATASETS[self.dataset]:
            return str(DATASETS[self.dataset][col])
        tag_id = self.resolve_tag_key(col)
        if tag_id is None:
            raise InvalidSearchQuery(f"Unknown field: {col}")
        # Generic (performance) metrics store raw tag values; release health
        # stores indexed values.
        if self.is_performance:
            return f"tags_raw[{tag_id}]"
        else:
            return f"tags[{tag_id}]"
def column(self, name: str) -> Column:
"""Given an unresolved sentry name and return a snql column.
:param name: The unresolved sentry name.
"""
missing_column = IncompatibleMetricsQuery(f"Column {name} was not found in metrics indexer")
try:
return super().column(name)
except InvalidSearchQuery:
raise missing_column
def aliased_column(self, name: str) -> SelectType:
missing_column = IncompatibleMetricsQuery(f"Column {name} was not found in metrics indexer")
try:
return super().aliased_column(name)
except InvalidSearchQuery:
raise missing_column
    def resolve_time_range_window(self) -> int:
        """Return the fixed time window used by alert-style queries.

        NOTE(review): assumes ``_time_range_window`` was set in ``__init__``
        (it is only assigned when ``time_range_window`` is passed); if it was
        never set this raises AttributeError — confirm callers only use this
        on alert builders constructed with a window.
        """
        start = self.start or self.params.start
        end = self.end or self.params.end
        # A fixed window and an explicit time range are mutually exclusive.
        if self._time_range_window is not None and (start is not None or end is not None):
            raise InvalidSearchQuery("time_range_window can't be set when start or end is set")
        return self._time_range_window
    def resolve_granularity(self) -> Granularity:
        """Granularity impacts metric queries even when they aren't timeseries because the data needs to be
        pre-aggregated

        Granularity is determined by checking the alignment of our start & end timestamps with the timestamps in
        snuba. eg. we can only use the daily granularity if the query starts and ends at midnight
        Seconds are ignored under the assumption that there currently isn't a valid use case to have
        to-the-second accurate information

        We also allow some flexibility on the granularity used the larger the duration of the query since the hypothesis
        is that users won't be able to notice the loss of accuracy regardless. With that in mind:
        - If duration is between 12 hours to 3d we allow 15 minutes on the hour boundaries for hourly granularity
        - if duration is between 3d to 30d we allow 30 minutes on the day boundaries for daily granularities
          and will fallback to hourly granularity
        - If the duration is over 30d we always use the daily granularities

        In special cases granularity can be set manually bypassing the granularity calculation below.
        """
        # Manual override set via the constructor's `granularity` argument.
        if hasattr(self, "_granularity") and getattr(self, "_granularity") is not None:
            return Granularity(self._granularity)
        if self.end is None or self.start is None:
            raise ValueError("skip_time_conditions must be False when calling this method")
        granularity = optimal_granularity_for_date_range(self.start, self.end)
        return Granularity(granularity)
    def resolve_split_granularity(self) -> tuple[list[Condition], Granularity | None]:
        """This only is applicable to table queries, we can use multiple granularities across the time period, which
        should improve performance

        Returns either ([], granularity) when no split applies, or
        (conditions, None) where the conditions select per-bucket granularities
        and the query-level granularity is cleared.
        """
        if self.end is None or self.start is None:
            raise ValueError("skip_time_conditions must be False when calling this method")
        # Only split granularity when granularity is 1h or 1m
        # This is cause if its 1d we're already as efficient as possible, but we could add 1d in the future if there are
        # accuracy issues
        if self.granularity.granularity == 86400:
            return [], self.granularity
        granularity = self.granularity.granularity
        # The split replaces the single query-level granularity with per-bucket
        # granularity conditions below.
        self.granularity = None
        if granularity == constants.METRICS_GRANULARITY_MAPPING["1m"]:
            rounding_function = remove_minutes
            base_granularity = constants.METRICS_GRANULARITY_MAPPING["1m"]
            core_granularity = constants.METRICS_GRANULARITY_MAPPING["1h"]
        elif granularity == constants.METRICS_GRANULARITY_MAPPING["1h"]:
            rounding_function = remove_hours
            base_granularity = constants.METRICS_GRANULARITY_MAPPING["1h"]
            core_granularity = constants.METRICS_GRANULARITY_MAPPING["1d"]
        else:
            return [], Granularity(granularity)
        if rounding_function(self.start, False) > rounding_function(self.end):
            return [], Granularity(granularity)
        timestamp = self.column("timestamp")
        # NB: `granularity` is rebound here from the int to the snql Column.
        granularity = Column("granularity")
        return [
            Or(
                [
                    # Grab the buckets that the core_granularity won't be able to capture at the original granularity
                    And(
                        [
                            Or(
                                [
                                    # We won't grab outside the queries timewindow because there's still a toplevel
                                    # filter
                                    Condition(timestamp, Op.GTE, rounding_function(self.end)),
                                    Condition(
                                        timestamp, Op.LT, rounding_function(self.start, False)
                                    ),
                                ]
                            ),
                            Condition(granularity, Op.EQ, base_granularity),
                        ]
                    ),
                    # Grab the buckets that can use the core_granularity
                    And(
                        [
                            Condition(timestamp, Op.GTE, rounding_function(self.start, False)),
                            # This op is LT not LTE, here's an example why; a query is from 11:45 to 15:45
                            # if an event happened at 15:02, its caught by the above condition in the 1min bucket at
                            # 15:02, but its also caught at the 1hr bucket at 15:00
                            Condition(timestamp, Op.LT, rounding_function(self.end)),
                            Condition(granularity, Op.EQ, core_granularity),
                        ]
                    ),
                ]
            )
        ], None
    def resolve_having(self, parsed_terms: ParsedTerms) -> list[WhereType]:
        """Resolve HAVING conditions, rejecting aggregate filters when metric
        aggregate conditions are not allowed for this builder.

        Raises:
            IncompatibleMetricsQuery: if aggregate conditions are disallowed
                but present in the filter.
        """
        if not self.builder_config.allow_metric_aggregates:
            # Regardless of use_aggregate_conditions, check if any having_conditions exist
            use_aggregate_conditions = self.builder_config.use_aggregate_conditions
            self.builder_config.use_aggregate_conditions = True
            having_conditions = super().resolve_having(parsed_terms)
            # Restore the caller's setting before deciding what to return.
            self.builder_config.use_aggregate_conditions = use_aggregate_conditions
            if len(having_conditions) > 0:
                raise IncompatibleMetricsQuery(
                    "Aggregate conditions were disabled, but included in filter"
                )
            # Don't resolve having conditions again if we don't have to
            if self.builder_config.use_aggregate_conditions:
                return having_conditions
            else:
                return []
        return super().resolve_having(parsed_terms)
def resolve_limit(self, limit: int | None) -> Limit:
"""Impose a max limit, since we may need to create a large condition based on the group by values when the query
is run"""
if limit is not None and limit > constants.METRICS_MAX_LIMIT:
raise IncompatibleMetricsQuery(
f"Can't have a limit larger than {constants.METRICS_MAX_LIMIT}"
)
elif limit is None:
return Limit(constants.METRICS_MAX_LIMIT)
else:
return Limit(limit)
    def resolve_snql_function(
        self,
        snql_function: fields.MetricsFunction,
        arguments: Mapping[str, NormalizedArg],
        alias: str,
        resolve_only: bool,
    ) -> SelectType | None:
        """Resolve a metrics function, bucketing it by metric type
        (distribution/set/counter/gauge/percentile/metrics-layer) so each
        bucket can be queried against its own entity.

        Returns None when no resolver matches the function/metric prefix.
        """
        prefix = self._get_metric_prefix(snql_function, arguments.get("column"))
        # If the metric_id is 0 that means this is a function that won't return but we don't want to error the query
        nullable = arguments.get("metric_id") == 0
        if nullable:
            self._has_nullable = True
        if snql_function.snql_distribution is not None and (prefix is None or prefix == "d"):
            resolved_function = snql_function.snql_distribution(arguments, alias)
            if not resolve_only:
                if not nullable:
                    if snql_function.is_percentile:
                        self.percentiles.append(resolved_function)
                    else:
                        self.distributions.append(resolved_function)
                # Still add to aggregates so groupby is correct
                self.aggregates.append(resolved_function)
            return resolved_function
        if snql_function.snql_set is not None and (prefix is None or prefix == "s"):
            resolved_function = snql_function.snql_set(arguments, alias)
            if not resolve_only:
                if not nullable:
                    self.sets.append(resolved_function)
                # Still add to aggregates so groupby is correct
                self.aggregates.append(resolved_function)
            return resolved_function
        if snql_function.snql_counter is not None and (prefix is None or prefix == "c"):
            resolved_function = snql_function.snql_counter(arguments, alias)
            if not resolve_only:
                if not nullable:
                    self.counters.append(resolved_function)
                # Still add to aggregates so groupby is correct
                self.aggregates.append(resolved_function)
            return resolved_function
        if snql_function.snql_gauge is not None and (prefix is None or prefix == "g"):
            resolved_function = snql_function.snql_gauge(arguments, alias)
            if not resolve_only:
                if not nullable:
                    self.gauges.append(resolved_function)
                # Still add to aggregates so groupby is correct
                self.aggregates.append(resolved_function)
            return resolved_function
        if snql_function.snql_metric_layer is not None:
            resolved_function = snql_function.snql_metric_layer(arguments, alias)
            if not resolve_only:
                self.aggregates.append(resolved_function)
                if not nullable:
                    if snql_function.is_percentile:
                        self.percentiles.append(resolved_function)
                    else:
                        self.metrics_layer_functions.append(resolved_function)
            return resolved_function
        return None
def resolve_metric_index(self, value: str) -> int | None:
"""Layer on top of the metric indexer so we'll only hit it at most once per value"""
if value not in self._indexer_cache:
result = indexer.resolve(
self.use_case_id,
self.organization_id,
value,
)
self._indexer_cache[value] = result
return self._indexer_cache[value]
def resolve_tag_value(self, value: str) -> int | str | None:
# We only use the indexer for alerts queries
if self.is_performance or self.use_metrics_layer:
return value
return self.resolve_metric_index(value)
    def resolve_tag_key(self, value: str) -> int | str | None:
        """Resolve a tag key through the indexer, optionally enforcing the
        default-tag allowlist (see ``use_default_tags``).

        Raises:
            IncompatibleMetricsQuery: if the allowlist is active and the key
                is not on it.
        """
        # some tag keys needs to be remapped to a different column name
        # prior to resolving it via the indexer
        value = self.column_remapping.get(value, value)
        if self.use_default_tags:
            if value in self.default_metric_tags:
                return self.resolve_metric_index(value)
            else:
                raise IncompatibleMetricsQuery(f"{value} is not a tag in the metrics dataset")
        else:
            return self.resolve_metric_index(value)
    def default_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
        """Convert a parsed search filter into a snql WHERE condition.

        Handles existence checks (``has:``/``!has:``), tag value resolution,
        timestamp range validation, and wildcard matching. Returns None when
        the filter is a no-op (e.g. ``!has:`` on a known metric field).
        """
        name = self.column_remapping.get(search_filter.key.name, search_filter.key.name)
        operator = search_filter.operator
        value = search_filter.value.value
        # Handle checks for existence
        if search_filter.operator in ("=", "!=") and search_filter.value.value == "":
            if name in constants.METRICS_MAP:
                if search_filter.operator == "!=":
                    return None
                else:
                    raise IncompatibleMetricsQuery("!has isn't compatible with metrics queries")
            else:
                # `= ""` means "does not have this tag"; `!= ""` means "has it".
                return Condition(
                    Function("has", [Column("tags.key"), self.resolve_metric_index(name)]),
                    Op.EQ if search_filter.operator == "!=" else Op.NEQ,
                    1,
                )
        if name in ["organization_id", "org_id"]:
            raise IncompatibleMetricsQuery(f"{name} isn't compatible with metrics queries")
        lhs = self.resolve_column(name)
        # If this is an aliasedexpression, we don't need the alias here, just the expression
        if isinstance(lhs, AliasedExpression):
            lhs = lhs.exp
        # resolve_column will try to resolve this name with indexer, and if its a tag the Column will be tags[1]
        is_tag = isinstance(lhs, Column) and lhs.subscriptable in ["tags", "tags_raw"]
        if is_tag:
            if isinstance(value, list):
                resolved_value = []
                for item in value:
                    resolved_item = self.resolve_tag_value(item)
                    if (
                        resolved_item is None
                        and not self.builder_config.skip_field_validation_for_entity_subscription_deletion
                    ):
                        raise IncompatibleMetricsQuery(f"{name} value {item} in filter not found")
                    resolved_value.append(resolved_item)
                value = resolved_value
            else:
                resolved_item = self.resolve_tag_value(value)
                if (
                    resolved_item is None
                    and not self.builder_config.skip_field_validation_for_entity_subscription_deletion
                ):
                    raise IncompatibleMetricsQuery(f"{name} value {value} in filter not found")
                value = resolved_item
        # timestamp{,.to_{hour,day}} need a datetime string
        # last_seen needs an integer
        if isinstance(value, datetime) and name not in constants.TIMESTAMP_FIELDS:
            value = int(value.timestamp()) * 1000
        if name in constants.TIMESTAMP_FIELDS:
            # `and` binds tighter than `or`, so this rejects a lower bound
            # before the range start, or an upper bound past the range end.
            if (
                operator in ["<", "<="]
                and value < self.start
                or operator in [">", ">="]
                and value > self.end
            ):
                raise InvalidSearchQuery(
                    "Filter on timestamp is outside of the selected date range."
                )
        if search_filter.value.is_wildcard():
            # Wildcard filters become case-insensitive regex matches.
            return Condition(
                Function("match", [lhs, f"(?i){value}"]),
                Op(search_filter.operator),
                1,
            )
        return Condition(lhs, Op(search_filter.operator), value)
def _resolve_environment_filter_value(self, value: str) -> int | str:
value_id: int | str | None = self.resolve_tag_value(f"{value}")
if value_id is None:
raise IncompatibleMetricsQuery(f"Environment: {value} was not found")
return value_id
    def _environment_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
        """All of this is copied from the parent class except for the addition of `resolve_value`

        Going to live with the duplicated code since this will go away anyways once we move to the metric layer
        """
        # conditions added to env_conditions can be OR'ed
        env_conditions = []
        value = search_filter.value.value
        values_set = set(value if isinstance(value, (list, tuple)) else [value])
        # sorted for consistency
        sorted_values = sorted(f"{value}" for value in values_set)
        values = []
        for value in sorted_values:
            if value:
                values.append(self._resolve_environment_filter_value(value))
            else:
                # An empty environment means "no environment set".
                values.append("")
        values.sort()
        environment = self.column("environment")
        if len(values) == 1:
            operator = Op.EQ if search_filter.operator in constants.EQUALITY_OPERATORS else Op.NEQ
            env_conditions.append(Condition(environment, operator, values.pop()))
        elif values:
            operator = (
                Op.IN if search_filter.operator in constants.EQUALITY_OPERATORS else Op.NOT_IN
            )
            env_conditions.append(Condition(environment, operator, values))
        if len(env_conditions) > 1:
            return Or(conditions=env_conditions)
        else:
            return env_conditions[0]
    def get_metrics_layer_snql_query(
        self,
        query_framework: QueryFramework | None = None,
        extra_conditions: list[Condition] | None = None,
    ) -> Query:
        """
        This method returns the metrics layer snql of the query being fed into the transformer and then into the metrics
        layer.

        The snql query returned by this method is a dialect of snql only understood by the "mqb_query_transformer".
        This dialect has the same syntax as snql but has slightly different semantics and more operations.

        This dialect should NEVER be used outside of the transformer as it will create problems if parsed by the
        snuba SDK.

        Raises:
            Exception: when neither the metrics layer nor on-demand metrics
                are enabled for this builder.
        """
        if not self.use_metrics_layer and not self.builder_config.on_demand_metrics_enabled:
            # The reasoning for this error is because if "use_metrics_layer" is false, the MQB will not generate the
            # snql dialect explained below as there is not need for that because it will directly generate normal snql
            # that can be returned via the "get_snql_query" method.
            raise Exception("Cannot get metrics layer snql query when use_metrics_layer is false")
        self.validate_having_clause()
        prefix = "generic_" if self.dataset is Dataset.PerformanceMetrics else ""
        return Query(
            match=Entity(f"{prefix}metrics_distributions", sample=self.sample_rate),
            # Metrics doesn't support columns in the select, and instead expects them in the groupby
            select=(self.aggregates if query_framework is None else query_framework.functions)
            + [
                # Team key transaction is a special case sigh
                col
                for col in self.columns
                if isinstance(col, Function) and col.function == "team_key_transaction"
            ],
            array_join=self.array_join,
            where=self.where + (extra_conditions if extra_conditions else []),
            having=self.having if query_framework is None else query_framework.having,
            groupby=self.groupby,
            orderby=self.orderby if query_framework is None else query_framework.orderby,
            limit=self.limit,
            offset=self.offset,
            limitby=self.limitby,
            granularity=self.granularity,
        )
def get_snql_query(self) -> Request:
    """
    Build the normal, directly-executable snql request for this query.

    Raises NotImplementedError when the metrics layer is enabled (the snql
    built internally is then the transformer-only dialect and must not be
    handed to callers) or when functions span more than one metric entity
    (that requires multiple queries), and IncompatibleMetricsQuery when no
    function is selected at all.
    """
    if self.use_metrics_layer:
        # With "use_metrics_layer" enabled the internal snql is a variant only
        # the "mqb_query_transformer" understands (it builds a MetricsQuery
        # from that dialect), so we refuse to expose it here.
        raise NotImplementedError("Cannot get snql query when use_metrics_layer is true")
    self.validate_having_clause()

    # Orderbys may be split across the possible metric tables; pull out the
    # primary entity's framework.
    primary, frameworks = self._create_query_framework()
    main_framework = frameworks.pop(primary)
    if not main_framework.functions:
        raise IncompatibleMetricsQuery("Need at least one function")
    if any(other.functions for other in frameworks.values()):
        # More than 1 dataset means multiple queries so we can't return them here
        raise NotImplementedError(
            "get_snql_query cannot be implemented for MetricsQueryBuilder"
        )

    selected = [
        column
        for column in self.columns
        if column in main_framework.functions or column not in self.aggregates
    ]
    return Request(
        dataset=self.dataset.value,
        app_id="default",
        query=Query(
            match=main_framework.entity,
            select=selected,
            array_join=self.array_join,
            where=self.where,
            having=main_framework.having,
            groupby=self.groupby,
            orderby=main_framework.orderby,
            limit=self.limit,
            offset=self.offset,
            limitby=self.limitby,
            granularity=self.granularity,
        ),
        flags=Flags(turbo=self.turbo),
        tenant_ids=self.tenant_ids,
    )
def _get_base_query_framework(self) -> dict[str, QueryFramework]:
    """
    Return the per-entity QueryFramework table for this dataset.

    Each entry starts with empty orderby/having lists and the functions the
    builder has resolved for that metric type; the entity name is prefixed
    with "generic_" for the performance-metrics dataset.
    """
    prefix = "generic_" if self.dataset is Dataset.PerformanceMetrics else ""
    # (framework name, resolved functions, entity suffix) — insertion order
    # matters because callers pick the first framework with functions.
    specs = [
        ("distribution", self.distributions, "distributions"),
        ("counter", self.counters, "counters"),
        ("set", self.sets, "sets"),
        ("gauge", self.gauges, "gauges"),
        ("metrics_layer", self.metrics_layer_functions, "distributions"),
        # Percentiles are a part of distributions but they're expensive, treat
        # them as their own entity so we'll run a query with the cheap
        # distributions first then only get page_size quantiles
        ("percentiles", self.percentiles, "distributions"),
    ]
    return {
        name: QueryFramework(
            orderby=[],
            having=[],
            functions=functions,
            entity=Entity(f"{prefix}metrics_{suffix}", sample=self.sample_rate),
        )
        for name, functions, suffix in specs
    }
def _create_query_framework(self) -> tuple[str, dict[str, QueryFramework]]:
    """
    Distribute orderbys/havings across the per-entity frameworks and pick the
    primary entity.

    Returns the primary framework name and the full framework table. Raises
    IncompatibleMetricsQuery when an orderby or having spans more than one
    entity, when a having targets a non-primary entity, or when no framework
    has any functions at all.
    """
    query_framework = self._get_base_query_framework()
    primary = None
    # if orderby spans more than one table, the query isn't possible with metrics
    for orderby in self.orderby:
        for entity, framework in query_framework.items():
            # Metrics layer can't have aliases in the functions for some reason
            if self.use_metrics_layer:
                framework_functions = [
                    function.exp if isinstance(function, AliasedExpression) else function
                    for function in framework.functions
                ]
            else:
                framework_functions = framework.functions
            if orderby.exp in framework_functions:
                framework.orderby.append(orderby)
                # The orderby's entity becomes primary; a second, different
                # entity owning an orderby is an error.
                if primary not in [None, entity]:
                    raise IncompatibleMetricsQuery("Can't order across tables")
                primary = entity
                break
        else:
            # An orderby that isn't on a function add it to all of them
            for framework in query_framework.values():
                framework.orderby.append(orderby)
    # All aggregate having conditions must live on a single entity.
    having_entity: str | None = None
    for condition in self.flattened_having:
        for entity, framework in query_framework.items():
            if condition.lhs in framework.functions:
                if having_entity is None:
                    having_entity = entity
                elif having_entity != entity:
                    raise IncompatibleMetricsQuery(
                        "Can only have aggregate conditions on one entity"
                    )
                break
    if primary is not None and having_entity is not None and having_entity != primary:
        raise IncompatibleMetricsQuery(
            "Can't use a having condition on non primary distribution"
        )
    # Pick one arbitrarily, there's no orderby on functions
    if primary is None:
        if having_entity is not None:
            primary = having_entity
        else:
            # First framework that has any functions wins (dict insertion order).
            for entity, framework in query_framework.items():
                if len(framework.functions) > 0:
                    primary = entity
                    break
            else:
                raise IncompatibleMetricsQuery("Need at least one function")
    query_framework[primary].having = self.having
    return primary, query_framework
def convert_metric_layer_result(self, metrics_data_list: Any) -> Any:
    """
    Flatten metric-layer results back into the standard
    ``{"data": [...], "meta": [...]}`` shape.

    Meta entries are de-duplicated by name across the individual results;
    each group's "by" and "totals" dicts are merged into a single row, with
    None/missing meta columns backfilled via get_default_value and any totals
    key absent from a row filled with 0.0.
    """
    seen_meta_names: set = set()
    seen_total_keys: set = set()
    with sentry_sdk.start_span(op="metric_layer", name="transform_results"):
        converted: Any = {
            "data": [],
            "meta": [],
        }
        for metrics_data in metrics_data_list:
            # Collect each meta entry once, keyed by column name.
            for meta in metrics_data["meta"]:
                if meta["name"] not in seen_meta_names:
                    seen_meta_names.add(meta["name"])
                    converted["meta"].append(meta)
            for group in metrics_data["groups"]:
                row = group["by"]
                row.update(group["totals"])
                seen_total_keys.update(group["totals"].keys())
                converted["data"].append(row)
                # Backfill a sensible default for any known column that is
                # missing or None in this row.
                for meta in converted["meta"]:
                    if row.get(meta["name"]) is None:
                        row[meta["name"]] = self.get_default_value(meta["type"])
        for row in converted["data"]:
            for total_key in seen_total_keys:
                if total_key not in row:
                    row[total_key] = 0.0  # TODO: Check if these are all Float64
    return converted
def use_case_id_from_metrics_query(self, metrics_query: DeprecatingMetricsQuery) -> UseCaseID:
    """
    Infer the single use case id of a metrics-layer ``MetricsQuery``.

    The metrics layer expects the use case to be supplied from the outside,
    so it is derived here from the MRIs of the selected fields. Raises
    IncompatibleMetricsQuery when no use case can be inferred or when the
    selected metrics span more than one use case. This overrides the
    ``use_case_id()`` method used for non-metrics-layer queries.
    """
    inferred = {
        extract_use_case_id(mri=field.metric_mri)
        for field in metrics_query.select
        if field.metric_mri
    }
    if not inferred:
        raise IncompatibleMetricsQuery(
            "Unable to infer the use case id from the supplied metrics."
        )
    if len(inferred) > 1:
        raise IncompatibleMetricsQuery(
            "You can only query metrics belonging to the same use case id."
        )
    return inferred.pop()
def resolve_ondemand_orderby(self) -> Any:
    """
    Resolve the raw orderby into MetricOrderByField entries for on-demand.

    On-demand resolves its orderby differently from other builders: the
    functions live in ``self._on_demand_metric_spec_map``, so they are looked
    up there and converted back into metric fields. Because both the QB and
    the MetricLayer historically validate that an orderby appears in the
    selected fields, the resulting MetricsQuery is built with
    ``skip_orderby_validation``. Only a single orderby field is supported.
    """
    raw = self.raw_orderby
    if not raw:
        return []
    if isinstance(raw, str):
        raw = [raw]
    # Ordering by multiple fields would require knowing which table each
    # orderby targets; the UI only allows one anyway, so keep it simple.
    if len(raw) > 1:
        raise IncompatibleMetricsQuery("Can't orderby more than one field")
    resolved = []
    for entry in raw:
        is_descending = entry.startswith("-")
        bare_orderby = entry.lstrip("-")
        if bare_orderby not in self._on_demand_metric_spec_map:
            raise IncompatibleMetricsQuery(
                f"Cannot orderby {bare_orderby}, likely because its a tag"
            )
        spec = self._on_demand_metric_spec_map[bare_orderby]
        resolved.append(
            MetricOrderByField(
                field=self.convert_spec_to_metric_field(spec),
                direction=Direction.DESC if is_descending else Direction.ASC,
            )
        )
    return resolved
def run_query(
    self, referrer: str, use_cache: bool = False, query_source: QuerySource | None = None
) -> Any:
    """
    Execute the built query and return merged results as
    ``{"data": [...], "meta": [...]}``.

    Two paths:
    - metrics layer / on-demand: the MQB snql dialect is transformed into
      MetricsQuery objects and run via ``get_series``.
    - plain snql: one raw query per metric entity, where the first
      ("primary") query's groupby values constrain the follow-up
      ("secondary") queries so all entities report on the same groups.
    Rows from all queries are merged per groupby key; missing columns are
    backfilled with type-appropriate defaults.
    """
    groupbys = self.groupby
    if not groupbys and self.use_on_demand:
        # Need this otherwise top_events returns only 1 item
        groupbys = [self.resolve_column(col) for col in self._get_group_bys()]
        # Later the query is made by passing these columns to metrics layer so we can just have the aliases be the
        # raw groupbys
        groupby_aliases = self._get_group_bys()
    else:
        groupby_aliases = [
            (
                groupby.alias
                if isinstance(groupby, (AliasedExpression, CurriedFunction))
                else groupby.name
            )
            for groupby in groupbys
            if not (
                isinstance(groupby, CurriedFunction)
                and groupby.function == "team_key_transaction"
            )
        ]
    # The typing for these are weak (all using Any) since the results from snuba can contain an assortment of types
    value_map: dict[str, Any] = defaultdict(dict)
    groupby_values: list[Any] = []
    meta_dict = {}
    result: Any = {
        "data": None,
        "meta": [],
    }
    # Check if we need to make multiple queries
    if not self.use_on_demand:
        primary, query_framework = self._create_query_framework()
    else:
        # On-demand always runs through a single metrics-layer pseudo-entity.
        primary = "metrics_layer"
        query_framework = {
            primary: QueryFramework(
                orderby=[],
                having=[],
                functions=self.metrics_layer_functions,
                entity=Entity("generic_metrics_distributions", sample=self.sample_rate),
            )
        }
    self.tenant_ids = self.tenant_ids or dict()
    self.tenant_ids["use_case_id"] = self.use_case_id.value
    if self.use_metrics_layer or self.use_on_demand:
        # NOTE(review): imported inside the function, presumably to avoid an
        # import cycle — confirm against the module import graph.
        from sentry.snuba.metrics.datasource import get_series
        from sentry.snuba.metrics.mqb_query_transformer import (
            transform_mqb_query_to_metrics_query,
        )

        for query_details in [query_framework.pop(primary), *query_framework.values()]:
            if len(query_details.functions) == 0 and not self.use_on_demand:
                continue
            if groupby_values:
                # Constrain this follow-up query to the groupby values the
                # first query already selected.
                extra_conditions = [
                    Condition(
                        # Tuples are allowed to have multiple types in clickhouse
                        Function(
                            "tuple",
                            [
                                (
                                    groupby.exp
                                    if isinstance(groupby, AliasedExpression)
                                    else groupby
                                )
                                for groupby in self.groupby
                                if not (
                                    isinstance(groupby, CurriedFunction)
                                    and groupby.function == "team_key_transaction"
                                )
                            ],
                        ),
                        Op.IN,
                        Function("tuple", groupby_values),
                    )
                ]
            else:
                extra_conditions = None
            try:
                metrics_queries = []
                with sentry_sdk.start_span(op="metric_layer", name="transform_query"):
                    if self.use_on_demand:
                        # One MetricsQuery per on-demand aggregate; all are run
                        # and merged below.
                        aggregates = self._get_aggregates()
                        group_bys = self._get_group_bys()
                        for agg in aggregates:
                            spec = self._on_demand_metric_spec_map[agg]
                            metrics_queries.append(
                                self._get_metrics_query_from_on_demand_spec(
                                    spec=spec,
                                    require_time_range=True,
                                    groupby=[MetricGroupByField(field=c) for c in group_bys],
                                    orderby=self.resolve_ondemand_orderby(),
                                )
                            )
                    else:
                        metrics_queries.append(
                            transform_mqb_query_to_metrics_query(
                                self.get_metrics_layer_snql_query(
                                    query_details, extra_conditions
                                ),
                                isinstance(self, AlertMetricsQueryBuilder),
                            )
                        )
                metrics_data = []
                for metrics_query in metrics_queries:
                    with sentry_sdk.start_span(op="metric_layer", name="run_query"):
                        metrics_data.append(
                            get_series(
                                projects=self.params.projects,
                                metrics_query=metrics_query,
                                use_case_id=self.use_case_id_from_metrics_query(metrics_query),
                                include_meta=True,
                                tenant_ids=self.tenant_ids,
                            )
                        )
            except Exception as err:
                # Any metrics-layer failure is surfaced as an incompatible query.
                raise IncompatibleMetricsQuery(err)
            with sentry_sdk.start_span(op="metric_layer", name="transform_results"):
                metric_layer_result = self.convert_metric_layer_result(metrics_data)
                for row in metric_layer_result["data"]:
                    # Arrays in clickhouse cannot contain multiple types, and since groupby values
                    # can contain any type, we must use tuples instead
                    groupby_key = tuple(row[key] for key in groupby_aliases)
                    value_map_key = ",".join(str(value) for value in groupby_key)
                    # First time we're seeing this value, add it to the values we're going to filter by
                    if value_map_key not in value_map and groupby_key:
                        groupby_values.append(groupby_key)
                    value_map[value_map_key].update(row)
                for meta in metric_layer_result["meta"]:
                    meta_dict[meta["name"]] = meta["type"]
    else:
        self.validate_having_clause()
        # TODO: this should happen regardless of whether the metrics_layer is being used
        granularity_condition, new_granularity = self.resolve_split_granularity()
        self.granularity = new_granularity
        self.where += granularity_condition
        # We need to run the same logic on all 3 queries, since the `primary` query could come back with no results. The
        # goal is to get n=limit results from one query, then use those n results to create a condition for the
        # remaining queries. This is so that we can respect function orderbys from the first query, but also so we don't
        # get 50 different results from each entity
        for query_details in [query_framework.pop(primary), *query_framework.values()]:
            # Only run the query if there's at least one function, can't query without metrics
            if len(query_details.functions) == 0:
                continue
            select = [
                column
                for column in self.columns
                if column in query_details.functions or column not in self.aggregates
            ]
            if groupby_values:
                # We already got the groupby values we want, add them to the conditions to limit our results so we
                # can get the aggregates for the same values
                where = self.where + [
                    Condition(
                        # Tuples are allowed to have multiple types in clickhouse
                        Function(
                            "tuple",
                            [
                                (
                                    groupby.exp
                                    if isinstance(groupby, AliasedExpression)
                                    else groupby
                                )
                                for groupby in self.groupby
                            ],
                        ),
                        Op.IN,
                        Function("tuple", groupby_values),
                    )
                ]
                # Because we've added a condition for each groupby value we don't want an offset here
                offset = Offset(0)
                referrer_suffix = "secondary"
            else:
                # We don't have our groupby values yet, this means this is the query where we're getting them
                where = self.where
                offset = self.offset
                referrer_suffix = "primary"
            query = Query(
                match=query_details.entity,
                select=select,
                array_join=self.array_join,
                where=where,
                having=query_details.having,
                groupby=self.groupby,
                orderby=query_details.orderby,
                limit=self.limit,
                offset=offset,
                limitby=self.limitby,
                granularity=self.granularity,
            )
            request = Request(
                dataset=self.dataset.value,
                app_id="default",
                query=query,
                flags=Flags(turbo=self.turbo),
                tenant_ids=self.tenant_ids,
            )
            current_result = raw_snql_query(
                request=request,
                referrer=f"{referrer}.{referrer_suffix}",
                query_source=query_source,
                use_cache=use_cache,
            )
            for meta in current_result["meta"]:
                meta_dict[meta["name"]] = meta["type"]
            for row in current_result["data"]:
                # Arrays in clickhouse cannot contain multiple types, and since groupby values
                # can contain any type, we must use tuples instead
                groupby_key = tuple()
                value_map_strings = []
                for key in groupby_aliases:
                    value = row[key]
                    if meta_dict.get(key) == "DateTime":
                        # Normalize DateTime strings so the same timestamp always
                        # produces the same groupby key.
                        value = datetime.fromisoformat(value).replace(tzinfo=None)
                        groupby_key += (str(value),)
                    else:
                        groupby_key += (value,)
                    value_map_strings.append(str(value))
                value_map_key = ",".join(value_map_strings)
                # First time we're seeing this value, add it to the values we're going to filter by
                if value_map_key not in value_map and groupby_key:
                    groupby_values.append(groupby_key)
                value_map[value_map_key].update(row)
    result["data"] = list(value_map.values())
    result["meta"] = [{"name": key, "type": value} for key, value in meta_dict.items()]
    # Nullable columns won't be in the meta
    if self._has_nullable:
        for function in self.aggregates:
            if function.alias not in [meta["name"] for meta in result["meta"]]:
                result["meta"].append({"name": function.alias, "type": "Nullable"})
    # Data might be missing for fields after merging the requests, eg a transaction with no users
    for row in result["data"]:
        for meta in result["meta"]:
            if meta["name"] not in row:
                row[meta["name"]] = self.get_default_value(meta["type"])
    return result
@staticmethod
def get_default_value(meta_type: str) -> Any:
"""Given a meta type return the expected default type
for example with a UInt64 (like a count_unique) return 0
"""
if (
meta_type.startswith("Int")
or meta_type.startswith("UInt")
or meta_type.startswith("Float")
):
return 0
else:
return None
def _get_metric_prefix(
    self, snql_function: fields.MetricsFunction, column: str | None
) -> str | None:
    """
    Determine the metric-type prefix ("d"/"s"/"c"/"g") for ``column``.

    Returns None (caller falls back to default behaviour) when there is no
    column, the metrics layer is in use, the use case isn't spans or
    transactions, the column isn't a metric, or the prefix has no mapping.
    Raises IncompatibleMetricsQuery when the prefix is known but the given
    function has no implementation for that metric type.
    """
    if (
        column is None
        or self.use_metrics_layer
        or self.use_case_id
        not in {
            UseCaseID.SPANS,
            UseCaseID.TRANSACTIONS,
        }
    ):
        return None
    # MRI type prefix -> the snql implementation of this function for that type
    # (a value may be None when the function doesn't support that type).
    prefix_to_function_map = {
        "d": snql_function.snql_distribution,
        "s": snql_function.snql_set,
        "c": snql_function.snql_counter,
        "g": snql_function.snql_gauge,
    }
    metrics_map = {
        UseCaseID.SPANS: constants.SPAN_METRICS_MAP,
        UseCaseID.TRANSACTIONS: constants.METRICS_MAP,
    }
    primary_metric = metrics_map[self.use_case_id].get(column, column)
    # Custom measurements are prefixed with "measurements." and always map to distributions
    prefix = "d" if primary_metric.startswith("measurements.") else primary_metric.split(":")[0]
    # Return early and allow default behaviour if the column isn't
    # a metric (in the case of any() functions) or if the prefix
    # doesn't have a function mapping defined
    # NOTE: `and` binds tighter than `or` here, i.e. this reads as
    # (not mri AND not measurement) OR (prefix unmapped).
    if (
        not is_mri(primary_metric)
        and not primary_metric.startswith("measurements.")
        or prefix not in prefix_to_function_map
    ):
        return None
    # The prefix exists in the map but the function has no implementation for
    # that metric type (the mapped value is None).
    if prefix_to_function_map.get(prefix) is None:
        raise IncompatibleMetricsQuery(
            "The functions provided do not match the requested metric type"
        )
    return prefix
| MetricsQueryBuilder |
python | getsentry__sentry | src/sentry/notifications/notification_action/action_validation.py | {
"start": 4840,
"end": 5012
} | class ____(TicketingActionValidatorHandler):
provider = Action.Type.AZURE_DEVOPS
@action_validator_registry.register(Action.Type.GITHUB)
| AzureDevOpsActionValidatorHandler |
python | huggingface__transformers | src/transformers/models/cohere2/modular_cohere2.py | {
"start": 15775,
"end": 15856
} | class ____(CoherePreTrainedModel):
config: Cohere2Config
| Cohere2PreTrainedModel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol11.py | {
"start": 380,
"end": 466
} | class ____(Generic[_TBase1]):
def __iter__(self):
return self
| SourceProvider |
python | sqlalchemy__sqlalchemy | test/sql/test_returning.py | {
"start": 9388,
"end": 17798
} | class ____(fixtures.TablesTest, AssertsExecutionResults):
__requires__ = ("insert_returning",)
__sparse_driver_backend__ = True
run_create_tables = "each"
@classmethod
def define_tables(cls, metadata):
class GoofyType(TypeDecorator):
impl = String
cache_ok = True
def process_bind_param(self, value, dialect):
if value is None:
return None
return "FOO" + value
def process_result_value(self, value, dialect):
if value is None:
return None
return value + "BAR"
cls.GoofyType = GoofyType
Table(
"returning_tbl",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("persons", Integer),
Column("full", Boolean),
Column("goofy", GoofyType(50)),
Column("strval", String(50)),
)
def test_column_targeting(self, connection):
table = self.tables.returning_tbl
result = connection.execute(
table.insert().returning(table.c.id, table.c.full),
{"persons": 1, "full": False},
)
row = result.first()._mapping
assert row[table.c.id] == row["id"] == 1
assert row[table.c.full] == row["full"]
assert row["full"] is False
result = connection.execute(
table.insert()
.values(persons=5, full=True, goofy="somegoofy")
.returning(table.c.persons, table.c.full, table.c.goofy)
)
row = result.first()._mapping
assert row[table.c.persons] == row["persons"] == 5
assert row[table.c.full] == row["full"]
eq_(row[table.c.goofy], row["goofy"])
eq_(row["goofy"], "FOOsomegoofyBAR")
def test_labeling(self, connection):
table = self.tables.returning_tbl
result = connection.execute(
table.insert()
.values(persons=6)
.returning(table.c.persons.label("lala"))
)
row = result.first()._mapping
assert row["lala"] == 6
def test_anon_expressions(self, connection):
table = self.tables.returning_tbl
GoofyType = self.GoofyType
result = connection.execute(
table.insert()
.values(goofy="someOTHERgoofy")
.returning(func.lower(table.c.goofy, type_=GoofyType))
)
row = result.first()
eq_(row[0], "foosomeothergoofyBAR")
result = connection.execute(
table.insert().values(persons=12).returning(table.c.persons + 18)
)
row = result.first()
eq_(row[0], 30)
@testing.combinations(
(lambda table: (table.c.strval + "hi",), ("str1hi",)),
(
lambda table: (
table.c.persons,
table.c.full,
table.c.strval + "hi",
),
(
5,
False,
"str1hi",
),
),
(
lambda table: (
table.c.persons,
table.c.strval + "hi",
table.c.full,
),
(5, "str1hi", False),
),
(
lambda table: (
table.c.strval + "hi",
table.c.persons,
table.c.full,
),
("str1hi", 5, False),
),
argnames="testcase, expected_row",
)
def test_insert_returning_w_expression(
self, connection, testcase, expected_row
):
table = self.tables.returning_tbl
exprs = testing.resolve_lambda(testcase, table=table)
result = connection.execute(
table.insert().returning(*exprs),
{"persons": 5, "full": False, "strval": "str1"},
)
eq_(result.fetchall(), [expected_row])
result2 = connection.execute(
select(table.c.id, table.c.strval).order_by(table.c.id)
)
eq_(result2.fetchall(), [(1, "str1")])
def test_insert_explicit_pk_col(self, connection):
table = self.tables.returning_tbl
result = connection.execute(
table.insert().returning(table.c.id, table.c.strval),
{"id": 1, "strval": "str1"},
)
eq_(
result.fetchall(),
[
(
1,
"str1",
)
],
)
def test_insert_returning_w_type_coerce_expression(self, connection):
table = self.tables.returning_tbl
result = connection.execute(
table.insert().returning(type_coerce(table.c.goofy, String)),
{"persons": 5, "goofy": "somegoofy"},
)
eq_(result.fetchall(), [("FOOsomegoofy",)])
result2 = connection.execute(
select(table.c.id, table.c.goofy).order_by(table.c.id)
)
eq_(result2.fetchall(), [(1, "FOOsomegoofyBAR")])
def test_no_ipk_on_returning(self, connection, close_result_when_finished):
table = self.tables.returning_tbl
result = connection.execute(
table.insert().returning(table.c.id), {"persons": 1, "full": False}
)
close_result_when_finished(result)
assert_raises_message(
sa_exc.InvalidRequestError,
r"Can't call inserted_primary_key when returning\(\) is used.",
getattr,
result,
"inserted_primary_key",
)
def test_insert_returning(self, connection):
table = self.tables.returning_tbl
result = connection.execute(
table.insert().returning(table.c.id), {"persons": 1, "full": False}
)
eq_(result.fetchall(), [(1,)])
@testing.requires.multivalues_inserts
def test_multivalues_insert_returning(self, connection):
table = self.tables.returning_tbl
ins = (
table.insert()
.returning(table.c.id, table.c.persons)
.values(
[
{"persons": 1, "full": False},
{"persons": 2, "full": True},
{"persons": 3, "full": False},
]
)
)
result = connection.execute(ins)
eq_(result.fetchall(), [(1, 1), (2, 2), (3, 3)])
@testing.fixture
def column_expression_fixture(self, metadata, connection):
class MyString(TypeDecorator):
cache_ok = True
impl = String(50)
def column_expression(self, column):
return func.lower(column)
t1 = Table(
"some_table",
metadata,
Column("name", String(50)),
Column("value", MyString(50)),
)
metadata.create_all(connection)
return t1
@testing.combinations("columns", "table", argnames="use_columns")
def test_plain_returning_column_expression(
self, column_expression_fixture, use_columns, connection
):
"""test #8770"""
table1 = column_expression_fixture
if use_columns == "columns":
stmt = (
insert(table1)
.values(name="n1", value="ValUE1")
.returning(table1)
)
else:
stmt = (
insert(table1)
.values(name="n1", value="ValUE1")
.returning(table1.c.name, table1.c.value)
)
result = connection.execute(stmt)
row = result.first()
eq_(row._mapping["name"], "n1")
eq_(row._mapping["value"], "value1")
@testing.fails_on_everything_except(
"postgresql", "mariadb>=10.5", "sqlite>=3.34"
)
def test_literal_returning(self, connection):
if testing.against("mariadb"):
quote = "`"
else:
quote = '"'
if testing.against("postgresql"):
literal_true = "true"
else:
literal_true = "1"
result4 = connection.exec_driver_sql(
"insert into returning_tbl (id, persons, %sfull%s) "
"values (5, 10, %s) returning persons"
% (quote, quote, literal_true)
)
eq_([dict(row._mapping) for row in result4], [{"persons": 10}])
| InsertReturningTest |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_subgraph.py | {
"start": 29766,
"end": 30758
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[8]", L_y_: "f32[8]"):
l_x_ = L_x_
l_y_ = L_y_
subgraph_0 = self.subgraph_0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_); subgraph_0 = l_x_ = None
a: "f32[8]" = invoke_subgraph[0]; invoke_subgraph = None
subgraph_1 = self.subgraph_0
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_1, 'subgraph_0', a, l_y_); subgraph_1 = a = l_y_ = None
getitem_1: "f32[8]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
return (getitem_1,)
class subgraph_0(torch.nn.Module):
def forward(self, l_x_: "f32[8]", l_y_: "f32[8]"):
mul: "f32[8]" = torch.mul(l_x_, l_y_); l_x_ = l_y_ = None
return (mul,)
""",
)
self.assertExpectedInline(
normalize_gm(backend.fw_graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | plotly__plotly.py | plotly/graph_objs/scatter3d/marker/_line.py | {
"start": 233,
"end": 20147
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d.marker"
_path_str = "scatter3d.marker.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorscale",
"colorsrc",
"reversescale",
"width",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color` is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be equidistant
to this point. Has an effect only if in `marker.line.color` is
set to a numerical array. Value should have the same units as
in `marker.line.color`. Has no effect when `marker.line.cauto`
is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the marker.line color. It accepts either a specific color
or an array of numbers that are mapped to the colorscale
relative to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to scatter3d.marker.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in
`marker.line.color` is set to a numerical array. The colorscale
must be an array containing arrays mapping a normalized value
to an rgb, rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and highest (1) values
are required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the colorscale in
color space, use `marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.line.color` is set to a numerical array. If true,
`marker.line.cmin` will correspond to the last color in the
array and `marker.line.cmax` will correspond to the first
color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color` is set to a numerical array. In
case `colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an effect
only if in `marker.line.color` is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are mapped
to the colorscale relative to the max and min values of
the array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color` is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorscale=None,
colorsrc=None,
reversescale=None,
width=None,
**kwargs,
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.marker.Line`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color` is set to a numerical array. In
case `colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an effect
only if in `marker.line.color` is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are mapped
to the colorscale relative to the max and min values of
the array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color` is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.marker.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("color", arg, color)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorscale", arg, colorscale)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("reversescale", arg, reversescale)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | PyCQA__pylint | tests/functional/f/function_redefined.py | {
"start": 2025,
"end": 2664
} | class ____:
"""ABC"""
# We actually *redefine* these attributes, but these shouldn't
# be considered actual redefinitions. Issue #2451
@property
def __module__(self):
return "actual.module"
@property
def __doc__(self):
return "Docstring"
# Do not emit the error for conditional definitions
def func(callback1=None, callback2=None):
if not callback1:
def callback1():
return 42
if callback2 is None:
def callback2():
return 24
return callback1(), callback2()
do_something: Callable[[], int]
def do_something() -> int:
return 1
| ObjectProxy |
python | walkccc__LeetCode | solutions/912. Sort an Array/912-3.py | {
"start": 0,
"end": 772
} | class ____:
def sortArray(self, nums: list[int]) -> list[int]:
self._quickSort(nums, 0, len(nums) - 1)
return nums
def _quickSort(self, nums: list[int], l: int, r: int) -> None:
if l >= r:
return
def partition(nums: list[int], l: int, r: int) -> int:
randIndex = random.randint(0, r - l) + l
nums[randIndex], nums[r] = nums[r], nums[randIndex]
pivot = nums[r]
nextSwapped = l
for i in range(l, r):
if nums[i] <= pivot:
nums[nextSwapped], nums[i] = nums[i], nums[nextSwapped]
nextSwapped += 1
nums[nextSwapped], nums[r] = nums[r], nums[nextSwapped]
return nextSwapped
m = partition(nums, l, r)
self._quickSort(nums, l, m - 1)
self._quickSort(nums, m + 1, r)
| Solution |
python | Netflix__metaflow | metaflow/plugins/airflow/airflow_utils.py | {
"start": 4182,
"end": 6460
} | class ____:
# run_id_creator is added via the `user_defined_filters`
RUN_ID = "%s-{{ [run_id, dag_run.dag_id] | run_id_creator }}" % RUN_ID_PREFIX
PARAMETERS = "{{ params | json_dump }}"
STEPNAME = "{{ ti.task_id }}"
# AIRFLOW_MACROS.TASK_ID will work for linear/branched workflows.
# ti.task_id is the stepname in metaflow code.
# AIRFLOW_MACROS.TASK_ID uses a jinja filter called `task_id_creator` which helps
# concatenate the string using a `/`. Since run-id will keep changing and stepname will be
# the same task id will change. Since airflow doesn't encourage dynamic rewriting of dags
# we can rename steps in a foreach with indexes (eg. `stepname-$index`) to create those steps.
# Hence : `foreach`s will require some special form of plumbing.
# https://stackoverflow.com/questions/62962386/can-an-airflow-task-dynamically-generate-a-dag-at-runtime
TASK_ID = (
"%s-{{ [run_id, ti.task_id, dag_run.dag_id] | task_id_creator }}"
% RUN_ID_PREFIX
)
FOREACH_TASK_ID = (
"%s-{{ [run_id, ti.task_id, dag_run.dag_id, ti.map_index] | task_id_creator }}"
% RUN_ID_PREFIX
)
# Airflow run_ids are of the form : "manual__2022-03-15T01:26:41.186781+00:00"
# Such run-ids break the `metaflow.util.decompress_list`; this is why we hash the runid
# We do `echo -n` because it emits line breaks, and we don't want to consider that, since we want same hash value
# when retrieved in python.
RUN_ID_SHELL = (
"%s-$(echo -n {{ run_id }}-{{ dag_run.dag_id }} | md5sum | awk '{print $1}' | awk '{print substr ($0, 0, %s)}')"
% (RUN_ID_PREFIX, str(RUN_HASH_ID_LEN))
)
ATTEMPT = "{{ task_instance.try_number - 1 }}"
AIRFLOW_RUN_ID = "{{ run_id }}"
AIRFLOW_JOB_ID = "{{ ti.job_id }}"
FOREACH_SPLIT_INDEX = "{{ ti.map_index }}"
@classmethod
def create_task_id(cls, is_foreach):
if is_foreach:
return cls.FOREACH_TASK_ID
else:
return cls.TASK_ID
@classmethod
def pathspec(cls, flowname, is_foreach=False):
return "%s/%s/%s/%s" % (
flowname,
cls.RUN_ID,
cls.STEPNAME,
cls.create_task_id(is_foreach),
)
| AIRFLOW_MACROS |
python | google__jax | tests/lax_control_flow_test.py | {
"start": 6146,
"end": 119108
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
lax_control_flow._initial_style_open_jaxpr.cache_clear()
lax_control_flow._initial_style_jaxpr.cache_clear()
lax_control_flow.common._dedup_consts.cache_clear()
lax_control_flow.common._pad_constvars.cache_clear()
def testCallableErrors(self):
not_callable = 42
with self.assertRaisesRegex(TypeError, "lax.fori_loop.*callable.*"):
lax.fori_loop(0, 1, not_callable, 0)
with self.assertRaisesRegex(TypeError, "lax.while_loop.*callable.*"):
lax.while_loop(not_callable, not_callable, 0)
with self.assertRaisesRegex(TypeError, "lax.switch:.*callable.*"):
lax.switch(0, [not_callable])
with self.assertRaisesRegex(TypeError, "lax.cond.*callable.*"):
lax.cond(0, not_callable, not_callable)
with self.assertRaisesRegex(TypeError, "lax.scan.*callable.*"):
lax.scan(not_callable, 0, 1)
with self.assertRaisesRegex(TypeError, "lax.associative_scan.*callable.*"):
lax.associative_scan(not_callable, 0)
def testWhileWithTuple(self):
limit = 10
def loop_cond(state):
pos, _ = state
return lax.lt(pos, limit)
def loop_body(state):
pos, count = state
return (lax.add(pos, 1), lax.add(count, 1))
def loop(init):
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = jax.jit(loop)
self.assertEqual(loop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(3), limit - 3)
def testWhileWithManyArgs(self):
nargs = 256
def loop_cond(state):
return lax.lt(state[0], 2)
def loop_body(state):
return tuple(lax.add(s, 1) for s in state)
_ = lax.while_loop(loop_cond, loop_body, (0,) * nargs)
def testNestedWhile(self):
def outer_loop(num): # pylint: disable=missing-docstring
def cond_fun(state):
num, i, _ = state
return lax.lt(i, num)
def body_fun(state):
num, i, count = state
return (num, lax.add(i, 1), inner_loop(i, count))
init_val = (num, 0, 0)
_, i, count = lax.while_loop(cond_fun, body_fun, init_val)
return (i, count)
def inner_loop(i, count): # pylint: disable=missing-docstring
def cond_fun(state):
i, j, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, count = state
return (i, lax.add(j, 1), lax.add(count, 1))
init_val = (i, 0, count)
_, _, count = lax.while_loop(cond_fun, body_fun, init_val)
return count
cloop = jax.jit(outer_loop)
self.assertEqual(outer_loop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(2), (2, 3))
self.assertEqual(cloop(4), (4, 10))
def testWhileWithClosure(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
return (lax.add(pos, 1), lax.add(count, inc))
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = jax.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileWithClosureJit(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
f = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc))
return jax.jit(f)(pos, inc)
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = jax.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileTypeErrors(self):
"""Test typing error messages for while."""
tuple_treedef = jax.tree.structure((1., 1.))
leaf_treedef = jax.tree.structure(0.)
with self.assertRaisesRegex(
TypeError,
re.escape(f"cond_fun must return a boolean scalar, but got pytree {tuple_treedef}.")):
lax.while_loop(lambda c: (1., 1.), lambda c: c, 0.)
with self.assertRaisesRegex(
TypeError,
re.escape("cond_fun must return a boolean scalar, but got output type(s) [ShapedArray(float32[])].")):
lax.while_loop(lambda c: np.float32(1.), lambda c: c, np.float32(0.))
with self.assertRaisesRegex(
TypeError,
re.escape("while_loop body function carry input and carry output must "
"have the same pytree structure, but they differ:\n\n"
"The input carry c is a")):
lax.while_loop(lambda c: True, lambda c: (1., 1.), 0.)
with self.assertRaisesRegex(
TypeError,
r"The input carry component c\[1\] has type float32\[\] but the "
r"corresponding output carry component has type bool\[\], so the "
"dtypes do not match."):
lax.while_loop(lambda c: True, lambda c: (True, True),
(np.bool_(True), np.float32(0.)))
def testWhileLoopCustomPytreeDiffAuxData(self):
class Node:
def __init__(self, x, y):
self.x = x
self.y = y
tree_util.register_pytree_with_keys(
Node,
lambda o: ((("x", o.x), ("y", o.y)), 'with_keys'), # flatten_with_keys
lambda _, xy: Node(xy[0], xy[1]), # unflatten (no key involved)
lambda o: ((o.x, o.y), 'without_keys'), # flatten
)
lax.while_loop(lambda o: o.x > 0., lambda c: Node(0., 0.), Node(1., 1.))
def testNestedWhileWithDynamicUpdateSlice(self):
num = 5
def update_entry(arr, val, i, j):
val = lax.reshape(val, [1, 1])
return lax.dynamic_update_slice(arr, val, (i, j))
def outer_loop(arr): # pylint: disable=missing-docstring
def cond_fun(state):
i, num, _, _ = state
return lax.lt(i, num)
def body_fun(state):
i, num, arr, out = state
return (lax.add(i, 1), num, arr, inner_loop(i, arr, out))
out = np.zeros(arr.shape, dtype=arr.dtype)
init_val = (0, num, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
def inner_loop(i, arr, out): # pylint: disable=missing-docstring
def cond_fun(state):
i, j, _, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, arr, out = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
arr_i_j = lax.dynamic_index_in_dim(arr_i, j, 0, False)
out = update_entry(out, arr_i_j, i, j)
return (i, lax.add(j, 1), arr, out)
init_val = (i, 0, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
cloop = jax.jit(outer_loop)
arr = self.rng().randn(5, 5)
self.assertAllClose(outer_loop(arr), np.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), np.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), np.tril(arr), check_dtypes=False)
def testLoopWithConjunctionCondition(self):
def sum_first_n(arr, num): # pylint: disable=missing-docstring
def cond_fun(state):
arr, num, i, _ = state
return lax.bitwise_and(lax.lt(i, num), lax.lt(i, arr.shape[0]))
def body_fun(state):
arr, num, i, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, num, i + 1, total + arr_i)
init_val = (arr, num, 0, 0.)
_, _, _, total = lax.while_loop(cond_fun, body_fun, init_val)
return total
cfun = jax.jit(sum_first_n)
x = self.rng().randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testWhileLoopBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < 3, lambda x: x + 2, x)
ans = jax.vmap(fun)(np.array([0, 1, 2, 3]))
expected = np.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = jax.jit(fun)
ans = jax.vmap(fun)(np.array([0, 1, 2, 3]))
expected = np.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopAxisIndexBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < lax.axis_index('i'), lambda x: x + 2, x)
ans = jax.vmap(fun, axis_name='i')(np.array([0, 0, 0, 0], dtype='int32'))
expected = np.array([0, 2, 2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = jax.jit(fun)
ans = jax.vmap(fun, axis_name='i')(np.array([0, 0, 0, 0], dtype='int32'))
expected = np.array([0, 2, 2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
ans = jax.vmap(lambda _, x: fun(x), axis_name='i', in_axes=(0, None))(
np.array([0, 0, 0, 0]), 0)
expected = np.array([0, 2, 2, 4], dtype='int32')
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopBatchedWithConstBody(self):
def f(x):
def body_fn(_): return jnp.asarray(0., dtype=jnp.float32)
def cond_fn(_): return jnp.logical_not(False) == False
return jax.lax.while_loop(cond_fn, body_fn, x)
x = jnp.arange(5, dtype=jnp.float32)
self.assertAllClose(jax.vmap(f)(x), x)
def testWhileLoopCondConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < y, lambda x: x + 2, x)
ans = jax.vmap(fun, in_axes=(None, 0))(0, np.array([2, 3]))
expected = np.array([2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopBodyConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < 3, lambda x: x + y, x)
ans = jax.vmap(fun, in_axes=(None, 0))(0, jnp.array([2, 3]))
expected = np.array([4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopTupleBatched(self):
def cond_fun(loop_carry):
x, y = loop_carry
return x + y < 5
def body_fun(loop_carry):
x, y = loop_carry
x = x + 1
return x, y
def fun(x, y):
return lax.while_loop(cond_fun, body_fun, (x, y))
ans = jax.vmap(fun)(np.array([0, 0]), np.array([1, 2]))
expected = (np.array([4, 3]), np.array([1, 2]))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_issue_3204(self):
# Error during XLA code generation for vmap of nested loops
def test(a, b):
val = 0
i = 0
j = 0
condfun_1 = lambda inp: inp[1] < a + 1
condfun_2 = lambda inp: inp[2] < b + 1
def bodyfun_1(inp):
val, i, j = inp
j = 0
def bodyfun_2(inp):
val, i, j = inp
val += i + j
j += 1
return (val, i, j)
result = lax.while_loop(condfun_2, bodyfun_2, (val, i, j))
val = result[0]
i += 1
return (val, i, j)
result = lax.while_loop(condfun_1, bodyfun_1, (val, i, j))
return result[0]
arr = np.arange(5)
vmap_test = jax.vmap(test, (0, 0))
vmap_test(arr, arr)
def testForiLoopErrors(self):
"""Test typing error messages for fori_loop."""
with self.assertRaisesRegex(
TypeError, "arguments to fori_loop must have equal types"):
lax.fori_loop(np.int16(0), jnp.int32(10), (lambda i, c: c), jnp.float32(7))
def testForiLoopScalarLimits(self):
"""Test that scalar limits passed to fori_loop do not cause typing errors."""
body = lambda i, c: c + 1
init = jnp.float32(10)
result = lax.fori_loop(np.int16(0), 10, body, init)
self.assertEqual(result, init + 10)
result = lax.fori_loop(0, np.int16(10), body, init)
self.assertEqual(result, init + 10)
def test_fori_loop_supports_unrolling(self):
"""Test that we can unroll static fori_loops."""
body = lambda i, c: c + 1
init = jnp.float32(10)
result = lax.fori_loop(np.int16(0), 10, body, init,
unroll=3)
self.assertEqual(result, init + 10)
result = lax.fori_loop(0, np.int16(10), body, init,
unroll=2)
self.assertEqual(result, init + 10)
def test_fori_loop_supports_unrolling_with_bool(self):
"""Test that we can unroll static fori_loops."""
body = lambda i, c: c + 1
init = jnp.float32(10)
result = lax.fori_loop(np.int16(0), 10, body, init,
unroll=True)
self.assertEqual(result, init + 10)
result = lax.fori_loop(0, np.int16(10), body, init,
unroll=False)
self.assertEqual(result, init + 10)
def test_fori_loop_with_dynamic_indices_cannot_unroll(self):
"""Test that we can't unroll dynamic fori_loops."""
body = lambda i, c: c + 1
init = jnp.float32(10)
@jax.jit
def f(upper):
return lax.fori_loop(np.int16(0), upper, body, init,
unroll=3)
with self.assertRaisesRegex(ValueError, "Can only use `unroll`"):
f(10)
@parameterized.named_parameters(
{
"testcase_name": f"_{jit=}_{upper=}_{unroll=}",
"jit": jit,
"upper": upper,
"unroll": unroll,
}
for jit in (False, True)
for upper in (0, -1)
for unroll in (False, True)
)
def test_fori_loop_returns_init_with_nonpositive_length(
self, jit, upper, unroll
):
"""Test that `length <= 0` behaves like Python `range`."""
fori_loop_with_static_upper_and_lower = partial(
lax.fori_loop, 0, upper, lambda i, c: c + 1, unroll=unroll
)
if jit:
fori_loop_with_static_upper_and_lower = jax.jit(
fori_loop_with_static_upper_and_lower
)
init = jnp.float32(10)
self.assertEqual(fori_loop_with_static_upper_and_lower(init), init)
def testForiLoopBatched(self):
def body_fun(i, loop_carry):
x, y = loop_carry
x = x + 1
y = y + 2
return x, y
def fun(x):
return lax.fori_loop(0, 10, body_fun, (x, 0))
ans = jax.vmap(fun)(np.array([0, 1]))
expected = (np.array([10, 11]), np.array([20, 20]))
self.assertAllClose(ans, expected, check_dtypes=False)
def testForiLoopBatchedIssue1190(self):
cond_fun = lambda carry: carry[0] < 4
body_fun = lambda carry: (carry[0] + 1, carry[1] + 1)
f = lambda x: lax.while_loop(cond_fun, body_fun, (0, x))
jaxpr = jax.make_jaxpr(jax.vmap(f))(jnp.arange(3))
eqn = jaxpr.jaxpr.eqns[0]
self.assertIs(eqn.primitive, lax.while_p)
self.assertEqual(eqn.params['cond_jaxpr'].in_avals[0].shape, ())
def testForiLoopBasic(self):
def body_fun(i, tot):
return lax.add(tot, i)
def count(num):
return lax.fori_loop(0, num, body_fun, 0)
self.assertEqual(count(2), 1)
self.assertEqual(count(3), 3)
self.assertEqual(count(4), 6)
for args_maker in [lambda: [2], lambda: [3], lambda: [4]]:
self._CompileAndCheck(count, args_maker)
def testForiLoopClosure(self):
def count(num):
def body_fun(i, tot):
return lax.add(num, lax.add(tot, i))
return lax.fori_loop(0, num, body_fun, 0)
cfun = jax.jit(count)
self.assertEqual(count(2), 1 + 2**2)
self.assertEqual(count(2), cfun(2))
self.assertEqual(count(3), 3 + 3**2)
self.assertEqual(count(3), cfun(3))
self.assertEqual(count(4), 6 + 4**2)
self.assertEqual(count(4), cfun(4))
def testForiLoopTupleState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i))
init_val = (arr, arr.dtype.type(0))
_, total = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun,
init_val)
return total
cfun = jax.jit(sum_first_n)
x = self.rng().randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopDictState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state['arr'], state['total']
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return {'arr': arr, 'total': lax.add(total, arr_i)}
init_val = {'arr': arr, 'total': arr.dtype.type(0)}
out_val = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return out_val['total']
cfun = jax.jit(sum_first_n)
x = self.rng().randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopEmptyTupleInState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total, _ = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i), ())
init_val = (arr, arr.dtype.type(0), ())
_, tot, _ = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return tot
cfun = jax.jit(sum_first_n)
x = self.rng().randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopIssue8152(self):
y = lax.fori_loop(lower=0, upper=0, body_fun=lambda x, i: x + i, init_val=1.)
self.assertAllClose(y, 1., check_dtypes=False)
# trivial fori_loop should work - even when jit is disabled
with jax.disable_jit():
y = lax.fori_loop(lower=0, upper=0, body_fun=lambda x, i: x + i, init_val=1.)
self.assertAllClose(y, 1., check_dtypes=False)
# scan with length 0 should work with jit, but raise an error without
def should_raise_wo_jit():
carry, out = lax.scan(lambda c, x: (c + x, x), 0., np.array([]))
return carry
self.assertAllClose(should_raise_wo_jit(), 0., check_dtypes=False)
with jax.disable_jit():
self.assertRaises(ValueError, should_raise_wo_jit)
def testCond(self):
def fun(x):
if x < 3:
return (x, x)
else:
y = lax.mul(2, x)
return y, lax.mul(2, y)
@jax.jit
def cfun(x):
def false_fun(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
return lax.cond(lax.lt(x, 3), lambda x: (x, x), false_fun, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(0), (0, 0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(1), (1, 1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(2), (2, 2))
self.assertEqual(fun(3), cfun(3))
self.assertEqual(fun(3), (6, 12))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(fun(4), (8, 16))
def testCondPredIsNone(self):
# see https://github.com/jax-ml/jax/issues/11574
def f(pred, x):
return lax.cond(pred, lambda x: x + 1, lambda x: x + 2, x)
self.assertRaisesRegex(TypeError, "cond predicate is None",
lambda: f(None, 1.))
self.assertRaisesRegex(TypeError, "cond predicate is None",
lambda: jax.jit(f)(None, 1.))
def testCondTwoOperands(self):
# see https://github.com/jax-ml/jax/issues/8469
add, mul = lax.add, lax.mul
def fun(x):
return add(x, x) if x == 0 else mul(x, x)
def cfun(x):
return lax.cond(x == 0, add, mul, x, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
cfun = jax.jit(cfun)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
def testCondThreeOperands(self):
add = lambda x, y, z: x + y + z
mul = lambda x, y, z: x * y * z
def fun(x):
return add(x, x, x) if x == 0 else mul(x, x, x)
def cfun(x):
return lax.cond(x == 0, add, mul, x, x, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
cfun = jax.jit(cfun)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
def testCondCallableOperands(self):
# see https://github.com/jax-ml/jax/issues/16413
@tree_util.register_pytree_node_class
class Foo:
def __init__(self, x):
self.x = x
def __call__(self, *xs):
assert False
return xs
def tree_flatten(self):
return (self.x,), None
@classmethod
def tree_unflatten(cls, _, xs):
return cls(*xs)
f_00 = lambda a, b: a + b
f_01 = lambda a, b: a + b.x
f_10 = lambda a, b: a.x + b
f_11 = lambda a, b: a.x + b.x
# these don't raise
a = lax.cond(True, f_00, f_00, 3, 4)
b = lax.cond(True, f_01, f_01, 3, Foo(4))
c = lax.cond(True, f_10, f_10, Foo(3), 4)
d = lax.cond(True, f_11, f_11, Foo(3), Foo(4))
self.assertEqual(a, b)
self.assertEqual(a, c)
self.assertEqual(a, d)
def testSwitch(self):
def branch(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
branches = [lambda x: (x, x),
branch,
lambda x: (x, -x)]
def fun(x):
if x <= 0:
return branches[0](x)
elif x == 1:
return branches[1](x)
else:
return branches[2](x)
def cfun(x):
return lax.switch(x, branches, x)
self.assertEqual(fun(-1), cfun(-1))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(3), cfun(3))
cfun = jax.jit(cfun)
self.assertEqual(fun(-1), cfun(-1))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(3), cfun(3))
  def testSwitchMultiOperands(self):
    """lax.switch forwards multiple operands to the selected branch."""
    branches = [lax.add, lax.mul]
    def fun(x):
      # Reference: negative indices clamp to branch 0.
      i = 0 if x <= 0 else 1
      return branches[i](x, x)
    def cfun(x):
      return lax.switch(x, branches, x, x)
    self.assertEqual(fun(-1), cfun(-1))
    self.assertEqual(fun(0), cfun(0))
    self.assertEqual(fun(1), cfun(1))
    self.assertEqual(fun(2), cfun(2))
    cfun = jax.jit(cfun)
    self.assertEqual(fun(-1), cfun(-1))
    self.assertEqual(fun(0), cfun(0))
    self.assertEqual(fun(1), cfun(1))
    self.assertEqual(fun(2), cfun(2))
  def testSwitchResidualsMerge(self):
    """Residuals saved by switch branches for the backward pass are merged.

    Branches whose residuals overlap should share residual slots in the
    forward cond's outputs (and the backward cond's inputs); only a branch
    that genuinely needs more residuals should add a slot.
    """
    def get_conds(fun):
      # Collect the 'cond' eqns from the jaxpr of grad(fun); the first is the
      # forward switch, the second the backward switch.
      jaxpr = jax.make_jaxpr(jax.grad(fun))(0., 0)
      return [eqn for eqn in jaxpr.jaxpr.eqns if eqn.primitive.name == 'cond']
    def branch_invars_len(cond_eqn):
      lens = [len(jaxpr.jaxpr.invars) for jaxpr in cond_eqn.params['branches']]
      assert len(set(lens)) == 1
      return lens[0]
    def branch_outvars_len(cond_eqn):
      lens = [len(jaxpr.jaxpr.outvars) for jaxpr in cond_eqn.params['branches']]
      assert len(set(lens)) == 1
      return lens[0]
    branches1 = [
        lambda x: jnp.sin(x),
        lambda x: jnp.cos(x)]   # branch residuals overlap, should be reused
    branches2 = branches1 + [
        lambda x: jnp.sinh(x)]  # another overlapping residual, expect reuse
    branches3 = branches2 + [
        lambda x: jnp.sin(x) + jnp.cos(x)]  # requires one more residual slot
    def fun1(x, i):
      return lax.switch(i + 1, branches1, x)
    def fun2(x, i):
      return lax.switch(i + 1, branches2, x)
    def fun3(x, i):
      return lax.switch(i + 1, branches3, x)
    fwd1, bwd1 = get_conds(fun1)
    fwd2, bwd2 = get_conds(fun2)
    fwd3, bwd3 = get_conds(fun3)
    fwd1_num_out = branch_outvars_len(fwd1)
    fwd2_num_out = branch_outvars_len(fwd2)
    fwd3_num_out = branch_outvars_len(fwd3)
    assert fwd1_num_out == fwd2_num_out
    assert fwd3_num_out == fwd2_num_out + 1
    bwd1_num_in = branch_invars_len(bwd1)
    bwd2_num_in = branch_invars_len(bwd2)
    bwd3_num_in = branch_invars_len(bwd3)
    assert bwd1_num_in == bwd2_num_in
    assert bwd3_num_in == bwd2_num_in + 1
  def testOneBranchSwitch(self):
    """A single-branch switch always runs that branch, for any index."""
    branch = lambda x: -x
    f = lambda i, x: lax.switch(i, [branch], x)
    x = 7.
    # Eager, jit-compiled, and static-index jit all behave identically.
    self.assertEqual(f(-1, x), branch(x))
    self.assertEqual(f(0, x), branch(x))
    self.assertEqual(f(1, x), branch(x))
    cf = jax.jit(f)
    self.assertEqual(cf(-1, x), branch(x))
    self.assertEqual(cf(0, x), branch(x))
    self.assertEqual(cf(1, x), branch(x))
    cf = jax.jit(f, static_argnums=0)
    self.assertEqual(cf(-1, x), branch(x))
    self.assertEqual(cf(0, x), branch(x))
    self.assertEqual(cf(1, x), branch(x))
def testIssue1379(self):
def fun(pred):
return lax.cond(pred, lambda x: (True, x), lambda x: (False, x), pred)
@jax.jit
def cfun(pred):
return fun(pred)
self.assertEqual(fun(0), cfun(0), (False,0))
self.assertEqual(fun(0.), cfun(0.), (False,0.))
self.assertEqual(fun(1), cfun(1), (True,1))
self.assertEqual(fun(1.), cfun(1.), (True,1.))
# test that proper errors are raised for wrong types
for pred in ["abc", [], [1,2]]:
for f in [fun, cfun]:
self.assertRaises(TypeError, f, pred)
  @parameterized.named_parameters(
      {"testcase_name": f"_{name}", "cond": cond}
      for cond, name in COND_IMPLS)
  def testNestedCond(self, cond):
    """A cond nested inside another cond's branch matches Python if/else."""
    def fun(x):
      if x < 2:
        return lax.mul(2, x)
      else:
        if x < 5:
          return lax.mul(3, x)
        else:
          return lax.mul(4, x)
    @jax.jit
    def cfun(x):
      # NOTE: uses the old-style cond signature (operand before branch fn),
      # which the parameterized `cond` implementations accept.
      return cond(
          lax.lt(x, 2),
          lambda x: lax.mul(2, x),
          lambda x: cond(lax.lt(x, 5),
                         x, lambda x: lax.mul(3, x),
                         4, lambda y: lax.mul(y, x)),
          x)
    self.assertEqual(cfun(1), 2)
    self.assertEqual(cfun(3), 9)
    self.assertEqual(cfun(6), 24)
    self.assertEqual(cfun(1), fun(1))
    self.assertEqual(cfun(3), fun(3))
    self.assertEqual(cfun(6), fun(6))
  def testCondTypeErrors(self):
    """Test typing error messages for cond.

    Covers: non-numeric predicate, non-scalar predicates, mismatched branch
    pytree structures, and mismatched branch output shapes.
    """
    with self.assertRaisesRegex(TypeError,
        re.escape("Pred type must be either boolean or number, got <function")):
      lax.cond(lambda x: True, lambda top: 2., lambda fop: 3., 1.)
    with self.assertRaisesRegex(TypeError,
        re.escape("Pred must be a scalar, got foo of type <class 'str'>")):
      lax.cond("foo", lambda top: 2., lambda fop: 3., 1.)
    with self.assertRaisesRegex(TypeError,
        re.escape("Pred must be a scalar, got (1.0, 1.0) of type <class 'tuple'>")):
      lax.cond((1., 1.), lambda top: 2., lambda fop: 3., 1.)
    with self.assertRaisesRegex(
        TypeError,
        re.compile(
            r"cond branch outputs must have the same pytree structure, but they"
            r" differ:.*true_fun output at path \['a'\] is a pytree leaf but"
            r" false_fun output at path \['a'\] is a <class 'tuple'>",
            re.DOTALL)):
      lax.cond(True, lambda top: dict(a=2.), lambda fop: dict(a=(3., 3.)), 1.)
    with self.assertRaisesRegex(
        TypeError,
        re.compile(
            r"cond branches must have equal output types but they differ.*The"
            r" output of true_fun has type float32\[1\] but the corresponding"
            r" output of false_fun has type float32\[\], so the shapes do not"
            r" match",
            re.DOTALL)):
      lax.cond(True,
               lambda top: jnp.array([1.], jnp.float32),
               lambda fop: jnp.float32(1.),
               1.)
  def testSwitchErrors(self):
    """Test typing error messages for switch.

    Covers: non-integer index, non-scalar index, empty branch list, branch
    pytree mismatch, and branch output-type mismatch.
    """
    with self.assertRaisesRegex(TypeError,
        re.escape("Index type must be an integer, got <function")):
      lax.switch(lambda x: True, [lambda _: 2., lambda _: 3.], 1.)
    with self.assertRaisesRegex(TypeError,
        re.escape("Index type must be an integer, got foo.")):
      lax.switch("foo", [lambda _: 2., lambda _: 3.], 1.)
    with self.assertRaisesRegex(TypeError,
        re.escape("Branch index must be scalar, got (1.0, 1.0) of shape (2,).")):
      lax.switch((1., 1.), [lambda _: 2., lambda _: 3.], 1.)
    with self.assertRaisesRegex(ValueError,
        re.escape("Empty branch sequence")):
      lax.switch(0, [], 1.)
    with self.assertRaisesRegex(
        TypeError,
        re.compile(
            "switch branch outputs must have the same pytree structure, but"
            r" they differ.*branch 0 output at path \['a'\] is a pytree leaf"
            r" but branch1 output at path \['a'\] is a <class 'tuple'>, so"
            r" their"
            " Python types differ.",
            re.DOTALL)):
      lax.switch(1, [lambda _: dict(a=2.), lambda _: dict(a=(3., 3.))], 1.)
    with self.assertRaisesRegex(
        TypeError,
        re.compile(
            "switch branches must have equal output types but they differ.*The"
            r" output of branch 0 at path \['a'\] has type float32\[1\] but the"
            r" corresponding output of branch1 has type float32\[\], so the"
            " shapes do not match",
            re.DOTALL)):
      lax.switch(1, [lambda _: dict(a=jnp.array([1.], jnp.float32)),
                     lambda _: dict(a=jnp.float32(1.))],
                 1.)
  def testCondOneBranchConstant(self):
    """One branch returning a closed-over constant still works under cond."""
    def fun(x):
      if x < 3:
        return 5.
      else:
        return x
    @jax.jit
    def cfun(x):
      return lax.cond(lax.lt(x, 3), lambda x: 5, lambda x: x, x)
    self.assertEqual(fun(0), cfun(0))
    self.assertEqual(cfun(0), 5)
    self.assertEqual(fun(4), cfun(4))
    self.assertEqual(cfun(4), 4)
  def testCondOneBranchConstantTuple(self):
    """Branches returning tuples mixing constants and operands work."""
    def fun(x):
      if x < 3:
        return (1., 2., 3.)
      else:
        return (x, 2., 4.)
    @jax.jit
    def cfun(x):
      return lax.cond(lax.lt(x, 3),
                      lambda x: (1, 2., 3.),
                      lambda x: (x, 2., 4.),
                      x)
    self.assertEqual(fun(0), cfun(0))
    self.assertEqual(cfun(0), (1, 2., 3.))
    self.assertEqual(fun(4), cfun(4))
    self.assertEqual(cfun(4), (4, 2., 4.))
  def testCondBatched(self):
    """vmap of cond stays a cond for unbatched predicates, else becomes select.

    When the predicate is not mapped over, the whole batch takes one branch,
    so the jaxpr should keep a cond (no 'select'); when the predicate is
    batched, cond must lower to a per-element select.
    """
    def fun(x, y, z):
      pred = lax.lt(x, 3)
      true_fun = lambda y: y
      false_fun = lambda z: lax.neg(z)
      return lax.cond(pred, y, true_fun, z, false_fun)
    # these cases stay as cond
    x = jnp.array(2)
    y = jnp.array([1, 2])
    z = jnp.array([3, 4])
    ans = jax.vmap(fun, (None, 0, 0))(x, y, z)
    jaxpr = jax.make_jaxpr(jax.vmap(fun, (None, 0, 0)))(x, y, z)
    expected = np.array([1, 2])
    self.assertAllClose(ans, expected, check_dtypes=False)
    assert "select" not in str(jaxpr)
    x = jnp.array(4)
    ans = jax.vmap(fun, (None, 0, 0))(x, y, z)
    jaxpr = jax.make_jaxpr(jax.vmap(fun, (None, 0, 0)))(x, y, z)
    expected = np.array([-3, -4])
    self.assertAllClose(ans, expected, check_dtypes=False)
    assert "select" not in str(jaxpr)
    fun = jax.jit(fun)
    ans = jax.vmap(fun, (None, 0, 0))(x, y, z)
    expected = np.array([-3, -4])
    self.assertAllClose(ans, expected, check_dtypes=False)
    z = jnp.array(5)
    ans = jax.vmap(fun, (None, 0, None))(x, y, z)
    jaxpr = jax.make_jaxpr(jax.vmap(fun, (None, 0, None)))(x, y, z)
    expected = np.array([-5, -5])
    self.assertAllClose(ans, expected, check_dtypes=False)
    assert "select" not in str(jaxpr)
    # these cases become select
    x = jnp.array([2, 4])
    ans = jax.vmap(fun, (0, 0, None))(x, y, z)
    jaxpr = jax.make_jaxpr(jax.vmap(fun, (0, 0, None)))(x, y, z)
    expected = np.array([1, -5])
    self.assertAllClose(ans, expected, check_dtypes=False)
    assert "select" in str(jaxpr)
    z = jnp.array([3, 4])
    ans = jax.vmap(fun)(x, y, z)
    jaxpr = jax.make_jaxpr(jax.vmap(fun))(x, y, z)
    expected = np.array([1, -4])
    self.assertAllClose(ans, expected, check_dtypes=False)
    assert "select" in str(jaxpr)
  def testSwitchBatched(self):
    """vmap of switch keeps cond for unbatched indices, else lowers to select."""
    def fun(index, x, y, z):
      branches = [lambda xyz: xyz[0],
                  lambda xyz: lax.neg(xyz[1]),
                  lambda xyz: lax.sign(xyz[2])]
      return lax.switch(index, branches, (x, y, z))
    # these cases stay as cond
    x = jnp.array(0)
    y = jnp.array([1, 2])
    z = jnp.array([3, 4])
    w = jnp.array(9)
    ans = jax.vmap(fun, (None, 0, 0, None))(x, y, z, w)
    jaxpr = jax.make_jaxpr(jax.vmap(fun, (None, 0, 0, None)))(x, y, z, w)
    expected = np.array([1, 2])
    self.assertAllClose(ans, expected, check_dtypes=False)
    assert "select" not in str(jaxpr)
    x = jnp.array(1)
    ans = jax.vmap(fun, (None, 0, 0, None))(x, y, z, w)
    jaxpr = jax.make_jaxpr(jax.vmap(fun, (None, 0, 0, None)))(x, y, z, w)
    expected = np.array([-3, -4])
    self.assertAllClose(ans, expected, check_dtypes=False)
    assert "select" not in str(jaxpr)
    fun = jax.jit(fun)
    ans = jax.vmap(fun, (None, 0, 0, None))(x, y, z, w)
    expected = np.array([-3, -4])
    self.assertAllClose(ans, expected, check_dtypes=False)
    z = jnp.array(5)
    ans = jax.vmap(fun, (None, 0, None, None))(x, y, z, w)
    jaxpr = jax.make_jaxpr(jax.vmap(fun, (None, 0, None, None)))(x, y, z, w)
    expected = np.array([-5, -5])
    self.assertAllClose(ans, expected, check_dtypes=False)
    assert "select" not in str(jaxpr)
    # these cases become select
    x = jnp.array([0, 1])
    ans = jax.vmap(fun, (0, 0, None, None))(x, y, z, w)
    jaxpr = jax.make_jaxpr(jax.vmap(fun, (0, 0, None, None)))(x, y, z, w)
    expected = np.array([1, -5])
    self.assertAllClose(ans, expected, check_dtypes=False)
    assert "select" in str(jaxpr)
    z = jnp.array([3, 4])
    w = jnp.array([9, 9])
    ans = jax.vmap(fun)(x, y, z, w)
    jaxpr = jax.make_jaxpr(jax.vmap(fun))(x, y, z, w)
    expected = np.array([1, -4])
    self.assertAllClose(ans, expected, check_dtypes=False)
    assert "select" in str(jaxpr)
  def testCondJVP(self):
    """Forward-mode differentiation through cond matches the Python reference."""
    def fun_ref(x):
      if x < 3:
        return (x, x)
      else:
        y = 2 * x
        return y, 2 * y
    def fun(x):
      def false_fun(x):
        y = 2 * x
        return y, 2 * y
      return lax.cond(x < 3, lambda x: (x, x), false_fun, x)
    # One point in each branch; check JVP values and 2nd-order fwd grads.
    x = 3.14
    ans = jax.jvp(fun, (x,), (x,))
    expected = jax.jvp(fun_ref, (x,), (x,))
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
    x = 2.72
    ans = jax.jvp(fun, (x,), (x,))
    expected = jax.jvp(fun_ref, (x,), (x,))
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
  def testSwitchJVP(self):
    """Forward-mode differentiation through switch, one point per branch region."""
    def branch(x):
      y = 2 * x
      return y, 2 * y
    branches = [lambda x: (x, x),
                branch,
                lambda x: (x, -x)]
    def fun_ref(x):
      # Reference mirrors switch's index clamping via floor division.
      idx = x // 1
      if idx <= 0:
        return branches[0](x)
      elif idx == 1:
        return branches[1](x)
      else:
        return branches[2](x)
    def fun(x):
      idx = lax.convert_element_type(x // 1, np.int32)
      return lax.switch(idx, branches, x)
    for x in [-0.7, 0.7, 1.7, 2.7, 3.7]:
      ans = jax.jvp(fun, (x,), (x,))
      expected = jax.jvp(fun_ref, (x,), (x,))
      self.assertAllClose(ans, expected, check_dtypes=False)
      jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
  @parameterized.named_parameters(
      {"testcase_name": f"_{name}", "cond": cond}
      for cond, name in COND_IMPLS)
  def testCondJVP2(self, cond):
    """JVP through cond when one branch ignores its operand (None operand)."""
    def fun_ref(x):
      if x < 3:
        return 2.
      else:
        return 2. * x
    def fun(x):
      # Old-style signature: (pred, true_operand, true_fun, false_operand,
      # false_fun); the true branch gets no data (None).
      return cond(x < 3, None, lambda _: 2., x, lambda x: 2. * x)
    x = 3.14
    ans = jax.jvp(fun, (x,), (x,))
    expected = jax.jvp(fun_ref, (x,), (x,))
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
    x = 2.72
    ans = jax.jvp(fun, (x,), (x,))
    expected = jax.jvp(fun_ref, (x,), (x,))
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
  def testCondGrad(self):
    """Reverse-mode grad through a jitted cond matches the Python reference."""
    def f_ref(x):
      return 3. * x if x < 2 else jnp.sin(x)
    @jax.jit
    def f(x):
      return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
    x = 2.14
    ans = jax.grad(f)(x)
    expected = jax.grad(f_ref)(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
    x = 1.72
    ans = jax.grad(f)(x)
    expected = jax.grad(f_ref)(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
  def testCondGradVmapNan(self):
    """grad(vmap(cond)) must not leak NaN from the untaken sqrt branch at 0."""
    eps = 1e-3
    def safe1(x):
      # Guards sqrt: below eps the constant branch is taken, so the sqrt
      # branch's infinite derivative at 0 must not pollute the gradient.
      return lax.cond(x < eps, lambda _: eps, lambda _: jnp.sqrt(x), ())
    out = jax.grad(lambda x: jax.vmap(safe1)(x).sum())(np.zeros(10))
    self.assertFalse(np.isnan(out).any())
  def testSwitchGrad(self):
    """Reverse-mode grad through a jitted switch, one point per branch region."""
    branches = [lambda x: 3. * x,
                lambda x: jnp.sin(x),
                lambda x: -x]
    def f_ref(x):
      # Reference mirrors switch's index clamping.
      idx = x // 1
      if idx <= 0:
        return branches[0](x)
      elif idx == 1:
        return branches[1](x)
      else:
        return branches[2](x)
    @jax.jit
    def f(x):
      idx = lax.convert_element_type(x // 1, np.int32)
      return lax.switch(idx, branches, x)
    for x in [-0.7, 0.7, 1.7, 2.7, 3.7]:
      ans = jax.grad(f)(x)
      expected = jax.grad(f_ref)(x)
      self.assertAllClose(ans, expected, check_dtypes=False)
      jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
  @parameterized.parameters(itertools.product(range(4), repeat=3))
  @jtu.run_on_devices("cpu")
  def testSwitchGradWithForwarding(self, seed, num_input_fwd, num_output_fwd):
    """Gradients through switch when branches forward some inputs to outputs.

    Randomly permutes inputs/outputs and forwards a varying number of inputs
    unchanged, exercising cond's input/output forwarding optimizations.
    """
    num_args = 3
    num_branches = 4
    rng = np.random.RandomState(seed)
    in_perm = rng.permutation(num_args)
    out_perm = rng.permutation(num_args)
    def branch(s, inputs):
      inputs = [inputs[i] for i in in_perm]
      # First `num_input_fwd` inputs pass through untouched; the rest are
      # transformed (scaled exp or sin depending on num_output_fwd).
      outputs = inputs[:num_input_fwd] + [
          s * jnp.exp(inputs[i]) if i < num_output_fwd else jnp.sin(inputs[i])
          for i in range(num_args - num_input_fwd)]
      return [outputs[i] for i in out_perm]
    branches = [partial(branch, i) for i in range(num_branches)]
    @jax.jit
    def f_(idx, inputs):
      idx = lax.convert_element_type(idx // 1, np.int32)
      return lax.switch(idx, branches, inputs)
    for idx in range(num_branches):
      f = partial(f_, idx)
      jtu.check_grads(f, (jnp.arange(float(num_args)),),
                      order=1, modes=['fwd', 'rev'], atol=1e-2, rtol=1e-2)
  def testSwitchGradWithWeakTypeMismatch(self):  # issue #4696, PR #4896
    """grad through switch when branches disagree on weak typing of outputs."""
    dtype = dtypes.default_float_dtype()
    dtype = jnp.float32 if dtype == jnp.float32 else jnp.float64
    branches = [
        lambda x: x,             # This preserves the weak type of x.
        lambda x: x + dtype(1),  # This strips the weak type of x.
    ]
    def f_ref(x):
      i = x.astype(jnp.int32)
      return branches[i](x)
    def f(x):
      return lax.switch(x.astype(jnp.int32), branches, x)
    for x in [0., 1.]:
      ans = jax.grad(f)(x)
      expected = jax.grad(f_ref)(x)
      self.assertAllClose(ans, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"_{name}", "cond": cond}
      for cond, name in COND_IMPLS)
  def testCondGrad2(self, cond=cond_with_new_checkpoint):
    # NOTE(review): the `cond=` default is redundant given the parameterized
    # decorator above supplies `cond` for every case — presumably a leftover;
    # harmless, so left as-is.
    """Reverse-mode grad through cond with vector operands."""
    def f_ref(x):
      z = jnp.array([1., 2.], x.dtype) * x if x[0] < 2 else jnp.sin(x)
      return z.sum()
    def _f(x):
      return cond(
          x[0] < 2,
          lambda x: jnp.array([1., 2.], x.dtype) * x,
          lambda x: jnp.sin(x),
          x)
    f = lambda x: jax.jit(_f)(x).sum()
    x = 2.14 * jnp.ones(2)
    ans = jax.grad(f)(x)
    expected = jax.grad(f_ref)(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
    x = 1.72 * jnp.ones(2)
    ans = jax.grad(f)(x)
    expected = jax.grad(f_ref)(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"],
                    rtol={jnp.float32: 1e-2, jnp.float64: 2e-3})
  @parameterized.named_parameters(
      {"testcase_name": f"_{name}", "cond": cond}
      for cond, name in COND_IMPLS)
  def testCondGrad3(self, cond):
    """Reverse-mode grad through cond where one branch has a None operand."""
    def fun_ref(x):
      if x < 3:
        return 2.
      else:
        return 2. * x
    def fun(x):
      return cond(x < 3, None, lambda _: 2., x, lambda x: 2. * x)
    x = 3.14
    ans = jax.grad(fun)(x)
    expected = jax.grad(fun_ref)(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(fun, (x,), order=2, modes=["fwd", "rev"])
    x = 2.72
    ans = jax.grad(fun)(x)
    expected = jax.grad(fun_ref)(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(fun, (x,), order=2, modes=["fwd", "rev"])
  @parameterized.named_parameters(
      {"testcase_name": f"_{name}", "cond": cond}
      for cond, name in COND_IMPLS)
  def testCondGrad4(self, cond):
    """Grad w.r.t. a value closed over by one cond branch (second arg y)."""
    if cond is cond_with_new_checkpoint and jtu.test_device_matches(['tpu']):
      raise unittest.SkipTest("tpu bug")  # TODO(parkers): tpu bug exhibited here
    def fun_ref(x, y):
      if x < 3:
        return 2. * jnp.sin(y)
      else:
        return 2. * jnp.cos(x)
    @jax.jit
    def fun(x, y):
      # NOTE: y enters the true branch only via closure, not as an operand.
      return cond(
          x < 3,
          None, lambda _: 2. * jnp.sin(y),
          x, lambda x: 2. * x)
    y = 5.8
    x = 3.14
    ans = jax.grad(fun, 1)(x, y)
    expected = jax.grad(fun_ref, 1)(x, y)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(fun, (x, y), order=2, modes=["fwd", "rev"])
    x = 2.72
    ans = jax.grad(fun, 1)(x, y)
    expected = jax.grad(fun_ref, 1)(x, y)
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(fun, (x, y), order=2, modes=["fwd", "rev"])
  def testCondLinearize(self):
    """jax.linearize through cond: primal and tangent values on both branches."""
    def f(x):
      return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
    y, f_lin = jax.linearize(f, 1.)
    self.assertAllClose(y, 3., check_dtypes=False)
    self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
    y, f_lin = jax.linearize(f, 4.)
    self.assertAllClose(y, jnp.sin(4.), check_dtypes=False)
    self.assertAllClose(f_lin(2.), jnp.cos(4.) * 2., check_dtypes=False)
  def testSwitchLinearize(self):
    """jax.linearize through switch: check primal/tangent in each branch region."""
    branches = [lambda x: 3. * x,
                lambda x: jnp.sin(x),
                lambda x: -x]
    def f(x):
      idx = lax.convert_element_type(x // 1, np.int32)
      return lax.switch(idx, branches, x)
    # branch 0
    y, f_lin = jax.linearize(f, -1.)
    self.assertAllClose(y, -3., check_dtypes=False)
    self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
    y, f_lin = jax.linearize(f, 0.)
    self.assertAllClose(y, 0., check_dtypes=False)
    self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
    # branch 1
    y, f_lin = jax.linearize(f, 1.)
    self.assertAllClose(y, jnp.sin(1.), check_dtypes=False)
    self.assertAllClose(f_lin(2.), jnp.cos(1.) * 2., check_dtypes=False)
    # branch 2
    y, f_lin = jax.linearize(f, 2.)
    self.assertAllClose(y, -2., check_dtypes=False)
    self.assertAllClose(f_lin(2.), -2., check_dtypes=False)
    y, f_lin = jax.linearize(f, 3.)
    self.assertAllClose(y, -3., check_dtypes=False)
    self.assertAllClose(f_lin(2.), -2., check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"_{name}", "cond": cond}
      for cond, name in COND_IMPLS)
  def testCondLinearize2(self, cond):
    """jax.linearize through cond with vector operands, eager and under jit."""
    def f_ref(x):
      z = jnp.array([1., 2.], x.dtype) * x if x[0] < 2 else jnp.cos(jnp.sin(x))
      return z.sum()
    def f(x):
      return cond(
          x[0] < 2,
          lambda x: jnp.array([1., 2.], x.dtype) * x,
          lambda x: jnp.cos(jnp.sin(x)),
          x).sum()
    x = 2.14 * jnp.ones(2)
    y, f_lin = jax.linearize(f, x)
    y_ref, f_lin_ref = jax.linearize(f_ref, x)
    self.assertAllClose(y, y_ref, check_dtypes=False)
    self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
    x = -2.14 * jnp.ones(2)
    y, f_lin = jax.linearize(f, x)
    y_ref, f_lin_ref = jax.linearize(f_ref, x)
    self.assertAllClose(y, y_ref, check_dtypes=False)
    self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
    f = jax.jit(f)
    x = 2.14 * jnp.ones(2)
    y, f_lin = jax.linearize(f, x)
    y_ref, f_lin_ref = jax.linearize(f_ref, x)
    self.assertAllClose(y, y_ref, check_dtypes=False)
    self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
def testCondJit(self):
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
y = jax.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = jax.jit(f)(4.)
expected = f(4.)
self.assertAllClose(y, expected, check_dtypes=False)
  def testSwitchJit(self):
    """A jitted switch agrees with eager execution across all branch regions."""
    branches = [lambda x: 3. * x,
                lambda x: jnp.sin(x),
                lambda x: -x]
    def f(x):
      idx = lax.convert_element_type(x // 1, np.int32)
      return lax.switch(idx, branches, x)
    for x in [-1., 0., 1., 2., 3.]:
      y = jax.jit(f)(x)
      expected = f(x)
      self.assertAllClose(y, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"_{name}", "cond": cond}
      for cond, name in COND_IMPLS)
  def testCondJitDisabled(self, cond):
    """cond works under jax.disable_jit(), both eager and through jax.jit."""
    def f_ref(x):
      return 3. * x if x < 2 else jnp.sin(x)
    def f(x):
      return cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
    with jax.disable_jit():
      y = f(1.)
      expected = f_ref(1.)
      self.assertAllClose(y, expected, check_dtypes=False)
    with jax.disable_jit():
      # jit becomes a no-op here but must still produce the same value.
      y = jax.jit(f)(1.)
      expected = f(1.)
      self.assertAllClose(y, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"_{name}", "cond": cond}
      for cond, name in COND_IMPLS)
  def testCondWithConsts(self, cond):
    """Branches closing over numpy constants are handled correctly."""
    def f(x):
      return cond(x < 2,
                  lambda x: np.array([1., 2.]) * x,
                  lambda x: np.array([3., 4.]) * jnp.sin(x),
                  x)
    def f_ref(x):
      if x < 2:
        return np.array([1., 2.]) * x
      else:
        return np.array([3., 4.]) * np.sin(x)
    y = f(1.)
    expected = f_ref(1.)
    self.assertAllClose(y, expected, check_dtypes=False)
    y = f(4.)
    expected = f_ref(4.)
    self.assertAllClose(y, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"_{name}", "cond": cond}
      for cond, name in COND_IMPLS)
  def testCondJitWithConsts(self, cond):
    """Jitted cond with branches closing over numpy constants matches eager."""
    def f(x):
      return cond(x < 2,
                  lambda x: np.array([1., 2.]) * x,
                  lambda x: np.array([3., 4.]) * jnp.sin(x),
                  x)
    y = jax.jit(f)(1.)
    expected = f(1.)
    self.assertAllClose(y, expected, check_dtypes=False)
    y = jax.jit(f)(4.)
    expected = f(4.)
    self.assertAllClose(y, expected, check_dtypes=False)
  @parameterized.named_parameters(
      {"testcase_name": f"_{name}", "cond": cond}
      for cond, name in COND_IMPLS)
  def testCondVmapGrad(self, cond):
    """vmap(grad(cond)) agrees with the jnp.where formulation."""
    # https://github.com/jax-ml/jax/issues/2264
    def f_1(x): return x ** 2
    def f_2(x): return x ** 3
    def f(x): return cond(x > 0, f_1, f_2, x)
    def g(x): return jnp.where(x > 0, f_1(x), f_2(x))
    x = jnp.linspace(-1, 1, 20)
    ans = jax.vmap(jax.grad(f))(x)
    expected = jax.vmap(jax.grad(g))(x)
    self.assertAllClose(ans, expected, check_dtypes=False)
  @jax.legacy_prng_key('allow')
  def testIssue1263(self):
    """vmap over fori_loop whose body contains a random-predicate cond.

    Regression test: just checks this composition doesn't crash.
    """
    def f(rng, x):
      cond = random.bernoulli(rng)
      return lax.cond(cond, x, lambda x: x, jnp.abs(x) - 1., lambda x: x)
    def body_fn(i, state):
      rng, x = state
      key, subkey = random.split(rng)
      return key, f(subkey, x)
    def g(rng, x):
      return lax.fori_loop(0, 10, body_fn, (rng, x))
    jax.vmap(g)(random.split(random.PRNGKey(0), 3), jnp.ones((3, 4)))
  def testIssue514(self):
    """cond with tuple operands and branches of differing structure-use."""
    # just check this doesn't crash
    lax.cond(True,
             (0, 0), lambda x: (x[0], 0),
             (1, 1), lambda x: x)
def testIssue649(self):
from jax import lax
def body(x):
a, b = x
return (7, b + 1)
def cond(x):
a, b = x
return b < 10
out = lax.while_loop(cond, body, (33, 4))
self.assertEqual(out, (7, 10))
  @parameterized.named_parameters(
      {"testcase_name": f"_{jit_scan=}_{jit_f=}_impl={scan_name}",
       "jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl,
       "impl_name": scan_name}
      for jit_scan in [False, True]
      for jit_f in [False, True]
      for scan_impl, scan_name in SCAN_IMPLS_WITH_FOR)
  def testScanImpl(self, jit_scan, jit_f, scan, impl_name):
    """Each scan implementation matches the pure-Python scan_reference."""
    rng = self.rng()
    d = rng.randn(2)
    def f(c, a):
      # Body closes over `d`; carries a (4,) vector, scans (3,) slices,
      # and emits a scalar per step.
      assert a.shape == (3,)
      assert c.shape == (4,)
      b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(d))
      c = jnp.sin(c * b)
      assert b.shape == ()
      return c, b
    if jit_f:
      f = jax.jit(f)
    if jit_scan:
      scan = jax.jit(scan, static_argnums=(0,))
    as_ = rng.randn(5, 3)
    c = rng.randn(4)
    ans = scan(f, c, as_)
    expected = scan_reference(f, c, as_)
    rtol = {np.float64: 1.4e-15}
    atol = {np.float64: 8e-15}
    if impl_name == "for":
      # The for_loop-based implementation is slightly less accurate in f32.
      rtol[np.float32] = 8e-5
      atol[np.float32] = 3e-5
    self.assertAllClose(
        ans,
        expected,
        check_dtypes=False,
        rtol=rtol,
        atol=atol)
  @parameterized.named_parameters(
      {"testcase_name": f"_{jit_scan=}_{jit_f=}_impl={scan_name}",
       "jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
      for jit_scan in [False, True]
      for jit_f in [False, True]
      for scan_impl, scan_name in SCAN_IMPLS_WITH_FOR)
  def testScanJVP(self, jit_scan, jit_f, scan):
    """JVP through each scan implementation matches the reference scan."""
    rng = self.rng()
    d = rng.randn(2)
    def f(c, a):
      assert a.shape == (3,)
      assert c.shape == (4,)
      b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(d))
      c = jnp.sin(c * b)
      assert b.shape == ()
      return c, b
    if jit_f:
      f = jax.jit(f)
    if jit_scan:
      scan = jax.jit(scan, static_argnums=(0,))
    as_ = rng.randn(5, 3)
    c = rng.randn(4)
    ans = jax.jvp(     lambda c, as_:           scan(f, c, as_), (c, as_), (c, as_))
    expected = jax.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))
    tol = {np.float64: 1e-12, np.float32: 1e-4}
    self.assertAllClose(ans, expected, check_dtypes=False, rtol=tol, atol=tol)
    jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["fwd"],
                    rtol={jnp.float32: 2e-1})
  @parameterized.named_parameters(
      {"testcase_name": f"_{jit_scan=}_{jit_f=}_impl={scan_name}",
       "jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
      for jit_scan in [False, True]
      for jit_f in [False, True]
      for scan_impl, scan_name in SCAN_IMPLS_WITH_FOR)
  def testScanLinearize(self, jit_scan, jit_f, scan):
    """jax.linearize through each scan implementation matches the reference."""
    rng = self.rng()
    d = rng.randn(2)
    def f(c, a):
      assert a.shape == (3,)
      assert c.shape == (4,)
      b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(d))
      c = jnp.sin(c * b)
      assert b.shape == ()
      return c, b
    if jit_f:
      f = jax.jit(f)
    if jit_scan:
      scan = jax.jit(scan, static_argnums=(0,))
    as_ = rng.randn(5, 3)
    c = rng.randn(4)
    if scan is scan_with_new_checkpoint2:
      # The rematerializing variant accumulates a bit more float64 error.
      atol = {}
      rtol = {np.float64: 1e-12, np.float32: 1e-4}
    else:
      atol = {np.float64: 1e-14}
      rtol = {np.float64: 1e-14, np.float32: 1e-4}
    ans = jax.linearize(lambda c, as_:                scan(f, c, as_), c, as_)[1](c, as_)
    expected = jax.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)
    self.assertAllClose(
        ans, expected, check_dtypes=False, atol=atol, rtol=rtol
    )
  @parameterized.named_parameters(
      {"testcase_name": f"_{jit_scan=}_{jit_f=}_impl={scan_name}",
       "jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
      for jit_scan in [False, True]
      for jit_f in [False, True]
      for scan_impl, scan_name in SCAN_IMPLS_WITH_FOR)
  @jtu.skip_on_flag("jax_skip_slow_tests", True)
  def testScanGrad(self, jit_scan, jit_f, scan):
    """Reverse-mode grad through each scan implementation matches reference."""
    rng = self.rng()
    d = rng.randn(2)
    def f(c, a):
      assert a.shape == (3,)
      assert c.shape == (4,)
      b = jnp.sum(jnp.sin(a)) + jnp.sum(jnp.sin(c)) + jnp.sum(jnp.sin(d))
      c = jnp.sin(c * b)
      assert b.shape == ()
      return c, b
    if scan is scan_with_new_checkpoint:
      # Tolerances tuned per implementation / precision.
      rtol = {np.float32: 5e-5, np.float64: 1e-13}
      atol = 1e-5
    else:
      rtol = {np.float32: 2e-4, np.float64: 1e-13}
      atol = {np.float32: 8e-5, np.float64: 1e-13}
    if jit_f:
      f = jax.jit(f)
    if jit_scan:
      scan = jax.jit(scan, static_argnums=(0,))
    as_ = rng.randn(5, 3)
    c = rng.randn(4)
    ans = jax.grad(lambda c, as_:                list(          scan(f, c, as_))[0].sum())(c, as_)
    expected = jax.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)
    self.assertAllClose(ans, expected, check_dtypes=False, rtol=rtol, atol=atol)
    rtol = 5e-1 if scan is not scan_with_new_checkpoint2 else 5e-2
    atol = 5e-2 if jtu.test_device_matches(["tpu"]) else 1e-3
    jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["rev"],
                    atol=atol, rtol=rtol)
  @jtu.skip_on_devices("tpu")  # TPU lacks precision for this test.
  @jtu.skip_on_flag("jax_skip_slow_tests", True)
  def testScanRnn(self):
    """End-to-end RNN built on lax.scan: loss, jvp, linearize, grad, vmap."""
    r = self.rng()
    n_in = 4
    n_hid = 2
    n_out = 1
    length = 3
    W_trans = r.randn(n_hid, n_hid + n_in).astype(jnp.float_)
    W_out = r.randn(n_out, n_hid + n_in).astype(jnp.float_)
    params = W_trans, W_out
    inputs = r.randn(length, n_in).astype(jnp.float_)
    targets = r.randn(length, n_out).astype(jnp.float_)
    def step(params, state, input):
      W_trans, W_out = params
      stacked = jnp.concatenate([state, input])
      output = jnp.tanh(jnp.dot(W_out, stacked))
      next_state = jnp.tanh(jnp.dot(W_trans, stacked))
      return next_state, output
    def rnn(params, inputs):
      init_state = jnp.zeros(n_hid)
      _, outputs = lax.scan(partial(step, params), init_state, inputs)
      return outputs
    @jax.jit
    def loss(params, inputs, targets):
      predictions = rnn(params, inputs)
      return jnp.sum((predictions - targets)**2)
    # evaluation doesn't crash
    loss(params, inputs, targets)
    # jvp evaluation doesn't crash
    jax.jvp(lambda params: loss(params, inputs, targets), (params,), (params,))
    # jvp numerical check passes
    jtu.check_grads(loss, (params, inputs, targets), order=2, modes=["fwd"],
                    rtol={np.float32: 2e-2, np.float64: 1e-6})
    # linearize works
    _, expected = jax.jvp(loss, (params, inputs, targets),
                          (params, inputs, targets))
    _, linfun = jax.linearize(loss, params, inputs, targets)
    ans = linfun(params, inputs, targets)
    self.assertAllClose(ans, expected, check_dtypes=False)
    # gradient evaluation doesn't crash
    jax.grad(loss)(params, inputs, targets)
    # gradient check passes
    jtu.check_grads(loss, (params, inputs, targets), order=2, rtol=2e-2)
    # we can vmap to batch things
    batch_size = 7
    batched_inputs = r.randn(batch_size, length, n_in).astype(jnp.float_)
    batched_targets = r.randn(batch_size, length, n_out).astype(jnp.float_)
    batched_loss = jax.vmap(lambda x, y: loss(params, x, y))
    losses = batched_loss(batched_inputs, batched_targets)
    expected = np.stack(list(map(lambda x, y: loss(params, x, y),
                                 batched_inputs, batched_targets)))
    self.assertAllClose(losses, expected, check_dtypes=False, rtol=1e-2)
  @parameterized.named_parameters(
      {"testcase_name": f"_impl={scan_name}", "scan": scan_impl}
      for scan_impl, scan_name in SCAN_IMPLS_WITH_FOR)
  def testIssue711(self, scan):
    # Tests reverse-mode differentiation through a scan for which the scanned
    # function also involves reverse-mode differentiation.
    # See https://github.com/jax-ml/jax/issues/711
    def harmonic_bond(conf, params):
      return jnp.sum(conf * params)
    def minimize_structure(test_params):
      # Gradient-descent steps implemented via scan; the body itself calls
      # jax.grad, so grad-of-scan-of-grad is exercised.
      energy_fn = partial(harmonic_bond, params=test_params)
      def apply_carry(carry, _):
        i, x = carry
        new_x = x - 0.1 * jax.grad(energy_fn)(x)
        new_carry = (i+1, new_x)
        return new_carry, _
      x0 = jnp.array([1., 2., 3.])
      carry_final, _ = scan(apply_carry, (0, x0), jnp.zeros((75, 0)))
      _, x_final = carry_final
      return x_final
    initial_params = 0.5
    minimize_structure(initial_params)  # doesn't crash
    def loss(test_params):
      x_final = minimize_structure(test_params)
      return jnp.sum(jnp.sin(1.0 - x_final))
    jax.grad(loss)(0.25)  # doesn't crash
  def testIssue744(self):
    """scan over a Python list (no leading axis) raises a clear ValueError."""
    Point = collections.namedtuple('Point', ['x', 'y'])
    p0 = Point(x=jnp.array(1), y=jnp.array(2))
    def plus_one(p, iter_idx):
      return Point(p.x+1, p.y+1), iter_idx
    self.assertRaisesRegex(
        ValueError,
        'scan got value with no leading axis to scan over.*',
        lambda: lax.scan(plus_one, p0, list(range(5))))
  def testScanBodyOutputError(self):
    """A scan body that doesn't return a (carry, output) pair raises TypeError."""
    with self.assertRaisesRegex(
        TypeError,
        re.escape("scan body output must be a pair, got float32[].")):
      lax.scan(lambda c, x: np.float32(0.), 0, jnp.arange(5.))
  def testScanMetadataError(self):
    """fori_loop error message includes mismatched pytree dict-key metadata."""
    # Regression test for https://github.com/jax-ml/jax/issues/25507
    def f(loop_i, x):
      # Returns key 'T' while the carry uses key 't' — a metadata mismatch.
      return {'T': jnp.array([0.5])}
    init_val = {'t': jnp.array([1.0])}
    msg = r".*with pytree metadata \('t',\).*with pytree metadata \('T',\)"
    with self.assertRaisesRegex(TypeError, msg):
      jax.lax.fori_loop(0, 1, f, init_val)
  def testScanBodyCarryPytreeMismatchErrors(self):
    """Error messages when a scan body's carry pytree structure changes.

    Each case checks the message pinpoints the mismatched component: tuple
    length, container type, and dict child count, at top level and nested.
    """
    with self.assertRaisesRegex(
        TypeError,
        re.escape("function carry input and carry output must have "
                  "the same pytree structure, but they differ:\n\n"
                  "The input carry c is a tuple of length 2")):
      lax.scan(lambda c, x: ((0, 0, 0), x), (1, (2, 3)), jnp.arange(5.))
    with self.assertRaisesRegex(
        TypeError,
        re.escape("function carry input and carry output must have the "
                  "same pytree structure, but they differ:\n\n"
                  "The input carry x is a tuple of length 2")):
      lax.scan(lambda x, _: ((x[0].astype('float32'),), None),
               (jnp.array(0, 'int32'),) * 2, None, length=1)
    with self.assertRaisesRegex(
        TypeError,
        re.escape("function carry input and carry output must have the "
                  "same pytree structure, but they differ:\n\n"
                  "The input carry x is a <class 'tuple'> but the corres")):
      jax.lax.scan(lambda x, _: ([x[0].astype('float32'),] * 2, None),
                   (jnp.array(0, 'int32'),) * 2, None, length=1)
    with self.assertRaisesRegex(
        TypeError,
        re.escape("function carry input and carry output must have the "
                  "same pytree structure, but they differ:\n\n"
                  "The input carry x is a <class 'dict'> with 1 child but")):
      jax.lax.scan(lambda x, _: ({'a': x['a'], 'b': x['a']}, None),
                   {'a': jnp.array(0, 'int32')}, None, length=1)
    with self.assertRaisesRegex(
        TypeError,
        re.escape("function carry input and carry output must have the "
                  "same pytree structure, but they differ:\n\n"
                  "  * the input carry component x[0] is a <class 'dict'> with "
                  "1 child but the corresponding component of the carry "
                  "output is a <class 'dict'> with 2 children")):
      jax.lax.scan(lambda x, _: (({'a': x[0]['a'], 'b': x[0]['a']},) * 2, None),
                   ({'a': jnp.array(0, 'int32')},) * 2, None, length=1)
  def testScanBodyCarryTypeMismatchErrors(self):
    # Same pytree structure but mismatched leaf avals: the error should name
    # the offending carry component and say whether dtype, shape, or both
    # disagree.
    with self.assertRaisesRegex(
        TypeError,
        re.escape("function carry input and carry output must have equal "
                  "types, but they differ:\n\n"
                  "The input carry x has type int32[] but the corresponding "
                  "output carry component has type float32[], so the dtypes do "
                  "not match"
                  )):
      jax.lax.scan(lambda x, _: (x.astype('float32'), None),
                   jnp.array(0, 'int32'), None, length=1)
    with self.assertRaisesRegex(
        TypeError,
        re.escape("function carry input and carry output must have equal "
                  "types, but they differ:\n\n"
                  "The input carry component x[1] has type int32[] but the "
                  "corresponding output carry component has type float32[], "
                  "so the dtypes do not match"
                  )):
      jax.lax.scan(lambda x, _: ((x[0], x[1].astype('float32')), None),
                   (jnp.array(0, 'int32'),) * 2, None, length=1)
    with self.assertRaisesRegex(
        TypeError,
        re.escape("function carry input and carry output must have equal "
                  "types, but they differ:\n\n"
                  " * the input carry component x[0] has type int32[] but the "
                  "corresponding output carry component has type float32[], "
                  "so the dtypes do not match;\n"
                  " * the input carry component x[1] has type int32[] but the "
                  "corresponding output carry component has type float32[1,1], "
                  "so the dtypes do not match, and the shapes do not match."
                  )):
      jax.lax.scan(lambda x, _: ((x[0].astype('float32'),
                                  x[1].astype('float32').reshape(1, 1),
                                  x[2]), None),
                   (jnp.array(0, 'int32'),) * 3, None, length=1)
  @jax.enable_checks(False)
  def testScanInvalidUnrollRaises(self):
    # A negative `unroll` value must be rejected with a ValueError.
    with self.assertRaisesRegex(ValueError, "`unroll` must be"):
      jax.lax.scan(lambda x, _: (x, x), 0, jnp.arange(5), unroll=-1)
  @parameterized.named_parameters(
      {"testcase_name": f"_{scan_name}",
       "scan": scan_impl}
      for scan_impl, scan_name in SCAN_IMPLS_WITH_FOR)
  def testScanHigherOrderDifferentiation(self, scan):
    # Second-order reverse-mode differentiation through scan, checked
    # numerically via jtu.check_grads.
    d = 0.75
    def f(c, a):
      b = jnp.sin(c * jnp.sum(jnp.cos(d * a)))
      c = 0.9 * jnp.cos(d * jnp.sum(jnp.sin(c * a)))
      return c, b
    as_ = jnp.arange(6.).reshape((3, 2))
    c = jnp.array(1, dtype=as_.dtype)
    jtu.check_grads(lambda c, as_: scan(f, c, as_), (c, as_),
                    modes=["rev"], order=2, rtol={np.float32: 6e-3})
  @parameterized.named_parameters(
      {"testcase_name": f"_{jit_scan=}_{jit_f=}_{in_axes=}_impl={scan_name}",
       "jit_scan": jit_scan, "jit_f": jit_f, "in_axes": in_axes,
       "scan": scan_impl}
      for jit_scan in [False, True]
      for jit_f in [False, True]
      for scan_impl, scan_name in SCAN_IMPLS_WITH_FOR
      for in_axes in itertools.product([None, 0, 1], [None, 0, 1, 2])
      if in_axes != (None, None))
  def testScanVmap(self, jit_scan, jit_f, in_axes, scan):
    # vmap-of-scan over every combination of carry/xs batch axes, compared
    # against the Python-loop reference implementation.
    rng = self.rng()
    d = rng.randn(2)
    def f(c, a):
      assert a.shape == (3,)
      assert c.shape == (4,)
      b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(d))
      c = jnp.sin(c * b)
      assert b.shape == ()
      return c, b
    if jit_f:
      f = jax.jit(f)
    if jit_scan:
      scan = jax.jit(scan, static_argnums=(0,))
    as_shape = [5, 3]
    c_shape = [4]
    c_bdim, as_bdim = in_axes
    # Insert the batch dimension (size 7) at the requested axis position.
    if c_bdim is not None:
      c_shape.insert(c_bdim, 7)
    if as_bdim is not None:
      as_shape.insert(as_bdim, 7)
    as_ = rng.randn(*as_shape)
    c = rng.randn(*c_shape)
    ans = jax.vmap(lambda c, as_: scan(f, c, as_), in_axes)(c, as_)
    expected = jax.vmap(lambda c, as_: scan_reference(f, c, as_), in_axes)(c, as_)
    self.assertAllClose(ans, expected, check_dtypes=False,
                        rtol=1e-5, atol=1e-5)
  def testScanVmapTuples(self):
    # vmap-of-scan with tuple-valued carry and xs, each leaf batched along a
    # different axis; compared against an explicit per-batch-slice loop.
    def f(c, a):
      a1, a2 = a
      c1, c2 = c
      b = jnp.sum(jnp.cos(a1)) * jnp.sum(c2 * a2)
      c = c1 * jnp.sin(jnp.sum(a1 * a2)), c2 * jnp.cos(jnp.sum(a1))
      return c, b
    in_axes = (0, (1, 2))
    r = self.rng()
    as_ = (r.randn(3, 7), r.randn(3, 4, 7))
    c = (r.randn(7, 2), r.randn(7))
    expected_c_out, expected_bs = [], []
    for i in range(7):
      c_out, bs = lax.scan(f, (c[0][i], c[1][i]), (as_[0][:,i], as_[1][:,:,i]))
      expected_c_out.append(c_out)
      expected_bs.append(bs)
    expected_c_out_0, expected_c_out_1 = unzip2(expected_c_out)
    expected_c_out = (jnp.stack(expected_c_out_0), jnp.stack(expected_c_out_1))
    expected_bs = jnp.stack(expected_bs)
    expected = expected_c_out, expected_bs
    ans = jax.vmap(lambda c, as_: lax.scan(f, c, as_), in_axes)(c, as_)
    self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_impl={scan_name}", "scan": scan_impl}
for scan_impl, scan_name in SCAN_IMPLS_WITH_FOR)
def testScanVmapFixpoint(self, scan):
def f(carry_init):
def scan_body(c, x):
# The carry is a 4-tuple, the last element starts batched,
# and the carry is shifted left at each iteration.
return ((c[1], c[2], c[3], 0.), None)
return scan(scan_body, (0., 1., 2., carry_init), jnp.zeros(2))
carry_init = jnp.array([3., 4., 5.])
carry_out, _ = jax.vmap(f)(carry_init)
self.assertAllClose(carry_out[3], jnp.array([0., 0., 0.]), check_dtypes=False)
self.assertAllClose(carry_out[2], jnp.array([0., 0., 0.]), check_dtypes = False)
# After two shifts, we get the carry_init
self.assertAllClose(carry_out[1], carry_init, check_dtypes=False)
self.assertAllClose(carry_out[0], jnp.array([2., 2., 2.]), check_dtypes = False)
  def testIssue757(self):
    # code from https://github.com/jax-ml/jax/issues/757
    # jit-of-jacfwd of a scan whose body itself takes a grad.
    def fn(a):
      return jnp.cos(a)
    def loop(val):
      iterations = 10
      def apply_carry(x, i):
        return jax.grad(fn, argnums=(0,))(x)[0], i
      final_val, _ = lax.scan(apply_carry, val, jnp.arange(iterations))
      return final_val
    arg = 0.5
    jax.jit(jax.jacfwd(loop, argnums=(0,)))(arg)  # doesn't crash
  def testIssue804(self):
    # https://github.com/jax-ml/jax/issues/804
    # scan of a psum under pmap: the named axis must thread correctly.
    num_devices = jax.device_count()
    f = partial(lax.scan, lambda c, x: (c + lax.psum(x, "i") , c), 0.)
    jax.pmap(f, axis_name="i")(jnp.ones((num_devices, 4)))  # doesn't crash
def testMap(self):
f = lambda x: x ** 2
xs = jnp.arange(10)
expected = xs ** 2
actual = lax.map(f, xs)
self.assertAllClose(actual, expected)
def testMapEmpty(self):
# https://github.com/jax-ml/jax/issues/2412
ans = lax.map(lambda x: x * x, jnp.array([]))
expected = jnp.array([])
self.assertAllClose(ans, expected)
  @jtu.thread_unsafe_test()  # Cache eviction means we might retrace
  def testCaching(self):
    # Tracing executes the Python callables; re-running while_loop with the
    # *same* function objects should hit the trace cache and not re-execute
    # the Python bodies (hence the asserts guarded by the flag).
    def cond(x):
      assert python_should_be_executing
      return x < 5
    def body(x):
      assert python_should_be_executing
      return x + 2
    python_should_be_executing = True
    lax.while_loop(cond, body, 0)
    python_should_be_executing = False
    lax.while_loop(cond, body, 0)
# This second caching test shows a different kind of caching that we haven't
# implemented (but could!), namely that Python functions that are distinct
# objects but are equivalent functions trigger cache hits. This kind of
# caching could be salient when using lambda functions with control flow:
#
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
#
# To get a cache hit on the second line we'd need to form a jaxpr and
# compare them for equality (including the literals on identity). We could
# implement that by adding a __hash__/__eq__ to core.Jaxpr and
# core.ClosedJaxpr (see #1221).
  @unittest.skip("not implemented")
  def testCaching2(self):
    # Would require jaxpr-equality-based caching (see the comment above):
    # the second cond/body pair are distinct objects but equivalent
    # functions, so a hypothetical cache could avoid retracing them.
    def cond(x):
      assert python_should_be_executing
      return x < 5
    def body(x):
      assert python_should_be_executing
      return x + 2
    python_should_be_executing = True
    lax.while_loop(cond, body, 0)
    def cond(x):
      assert python_should_be_executing
      return x < 5
    def body(x):
      assert python_should_be_executing
      return x + 2
    python_should_be_executing = False
    lax.while_loop(cond, body, 0)
  def test_caches_depend_on_axis_env(self):
    # https://github.com/jax-ml/jax/issues/9187
    # The trace cache must key on the axis environment: the same function
    # under different vmap axis_size values must not share a cache entry.
    scanned_f = lambda _, __: (lax.axis_size('i'), None)
    f = lambda: lax.scan(scanned_f, 0, None, length=1)[0]
    ans = jax.vmap(f, axis_name='i', axis_size=2, out_axes=None)()
    self.assertEqual(ans, 2)
    ans = jax.vmap(f, axis_name='i', axis_size=3, out_axes=None)()
    self.assertEqual(ans, 3)
def testWhileCondConstant(self):
out = lax.while_loop(lambda _: False, lambda _: (), ()) # doesn't crash
self.assertEqual(out, ())
  @parameterized.named_parameters(
      {"testcase_name": f"_{jit_loop=}_{jit_body=}_{jit_cond=}",
       "jit_loop": jit_loop, "jit_body": jit_body, "jit_cond": jit_cond}
      for jit_loop in [False, True]
      for jit_body in [False, True]
      for jit_cond in [False, True])
  def testWhileJVP(self, jit_loop=True, jit_body=False, jit_cond=True):
    # Forward-mode AD of while_loop against a Python-loop reference, under
    # every combination of jitting the loop, body, and cond.
    cond = lambda x: x[0, 2] <= 8
    body = lambda x: x * x
    if jit_cond:
      cond = jax.jit(cond)
    if jit_body:
      body = jax.jit(body)
    loop = partial(lax.while_loop, cond, body)
    if jit_loop:
      loop = jax.jit(loop)
    loop_ref = partial(while_loop_reference, cond, body)
    x = jnp.arange(9.).reshape((3, 3))
    ans = jax.jvp(loop, (x,), (x,))
    expected = jax.jvp(loop_ref, (x,), (x,))
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(loop, (x,), order=2, modes=["fwd"])
  @parameterized.named_parameters(
      {"testcase_name": f"_{jit_loop=}_{jit_body=}_{jit_cond=}_impl={while_name}",
       "jit_loop": jit_loop, "jit_body": jit_body, "jit_cond": jit_cond,
       "while_loop": while_impl}
      for jit_loop in [False, True]
      for jit_body in [False, True]
      for jit_cond in [False, True]
      for while_impl, while_name in WHILE_LOOP_IMPLS)
  def testWhileLinearize(self, while_loop, jit_loop=True, jit_body=False,
                         jit_cond=True):
    # jax.linearize of while_loop: primal and tangent must match jvp of a
    # Python-loop reference.
    cond = lambda x: x[0, 2] <= 8
    body = lambda x: x * x
    if jit_cond:
      cond = jax.jit(cond)
    if jit_body:
      body = jax.jit(body)
    loop = partial(while_loop, cond, body)
    if jit_loop:
      loop = jax.jit(loop)
    loop_ref = partial(while_loop_reference, cond, body)
    x = jnp.arange(9.).reshape((3, 3))
    y, f_lin = jax.linearize(loop, x)
    ydot = f_lin(x)
    y_expected, ydot_expected = jax.jvp(loop_ref, (x,), (x,))
    self.assertAllClose(y, y_expected, check_dtypes=False)
    self.assertAllClose(ydot, ydot_expected, check_dtypes=False)
  def testWhileJVPViaForiLoop(self):
    # fori_loop supports forward-mode AD both when the body ignores the
    # index and when it uses it.
    f = lambda x: lax.fori_loop(0, 3, lambda i, x: x * 2, x)
    self.assertAllClose(f(2.), 16., check_dtypes=False)
    self.assertAllClose(jax.jvp(f, (2.,), (1.,)), (16., 8.), check_dtypes=False)
    jtu.check_grads(f, (2.,), order=2, modes=["fwd"])
    f = lambda x: lax.fori_loop(0, 3, lambda i, x: x * (i + 1), x)
    self.assertAllClose(f(2.), 12., check_dtypes=False)
    self.assertAllClose(jax.jvp(f, (2.,), (1.,)), (12., 6.), check_dtypes=False)
    jtu.check_grads(f, (2.,), order=2, modes=["fwd"])
  def testWhileJVPWithGrowingNonzeroTangents(self):
    # The set of nonzero tangents grows across iterations (y then z become
    # tangent-dependent); the JVP fixpoint must handle that.
    rng = self.rng()
    def cond(state):
      i, x, y, z = state
      return i < 2
    def body(state):
      i, x, y, z = state
      y = x * x
      z = y * y
      return i + 1, x, y, z
    y, z = rng.randn(2), rng.randn(2)
    def loop(loop_impl, x):
      return loop_impl(cond, body, (0, x, y, z))[1]
    loop_lax = partial(loop, lax.while_loop)
    loop_ref = partial(loop, while_loop_reference)
    x = rng.randn(2)
    ans = jax.jvp(loop_lax, (x,), (x,))
    expected = jax.jvp(loop_ref, (x,), (x,))
    self.assertAllClose(ans, expected, check_dtypes=False)
    jtu.check_grads(loop_lax, (x,), order=2, modes=["fwd"])
def testStaticForiGrad(self):
func = lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x)
jax.grad(func)(1.) # doesn't crash
jax.linearize(func, 1.) # doesn't crash
  @parameterized.named_parameters(
      dict(testcase_name=f"_{loop=}", loop=loop)
      for loop in ["while", "fori_inside_cond", "fori_inside_scan"])
  def testWhileGradError(self, loop: str = "fori_inside_scan"):
    # Raise error for vjp for loops
    if loop == "while":
      func = lambda x: lax.while_loop(lambda i: i < 5., lambda i: i + 1., x)
    elif loop == "fori_inside_jit":
      # NOTE(review): this branch is unreachable — "fori_inside_jit" is not
      # in the parameterized `loop` list above; confirm whether it should be
      # added there or this branch removed.
      func = jax.jit(lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x))
    elif loop == "fori_inside_cond":
      func = lambda x: lax.cond(
          True,
          x, lambda x: lax.fori_loop(x, x + 2., lambda i, c: c * 2., x),
          1., lambda x: x)
    elif loop == "fori_inside_scan":
      func = lambda x: lax.scan(
          lambda c, x: (lax.fori_loop(x, x + 2., lambda i, c1: c1 * c, x), None),
          x, np.ones(2))[0]
    else:
      assert False
    with self.assertRaisesRegex(ValueError, "Reverse-mode differentiation does not work for lax.while_loop"):
      jax.grad(func)(1.)
    jax.linearize(func, 1.)  # Linearization works
  @jax.legacy_prng_key('allow')
  def testIssue1316(self):
    # Differentiating a scan whose carry includes a PRNG key must not crash.
    def f(carry, _):
      c, key = carry
      key, _ = random.split(key)
      return (c, key), ()
    key = random.PRNGKey(0)
    jax.grad(lambda c: lax.scan(f, (c, key), np.ones(3))[0][0])(0.)  # doesn't crash
  def testIssue1361(self):
    # grad-of-jit-of-scan with a tuple carry containing a constant leaf.
    @jax.jit
    def jit_run_scan(x):
      def fun(carry, _):
        x, _ = carry
        return (2 * x, 0.), None
      (x, _), _ = lax.scan(fun, (x, 0.), jnp.arange(3))
      return x
    jax.grad(lambda x: jit_run_scan(x))(0.)  # doesn't crash
  def testIssue810(self):
    # grad-of-scan should not materialize redundant copies of the constant
    # matrix A across scan iterations.
    def loss(A):
      def step(x, i):
        return jnp.matmul(A, x), None
      init_x = jnp.zeros(A.shape[-1:])
      last_x, _ = lax.scan(step, init_x, jnp.arange(10))
      return jnp.sum(last_x)
    A = jnp.zeros((3, 3))
    # The second DUS was unnecessarily replicating A across time.
    # We check XLA because _scan_impl is "underneath" the jaxpr language.
    s = jax.jit(jax.grad(loss)).lower(A).as_text('hlo')
    assert s.count("dynamic-update-slice(") < 2
def testScanLengthArg(self):
def arange(n):
return lax.scan(lambda c, _: (c + 1, c), 0, None, length=n)[1]
ans = arange(10)
expected = np.arange(10)
self.assertAllClose(ans, expected, check_dtypes=False)
  @ignore_jit_of_pmap_warning()
  def test_while_loop_of_pmap(self):
    # fori_loop whose body runs a pmap'd psum; compared against an
    # equivalent single-device computation.
    # Avoid accuracy issue caused by too many devices.
    DEVICE_LIMITATION = 4
    devices = jax.devices()
    count = jax.device_count()
    if jax.device_count() >= DEVICE_LIMITATION:
      devices = devices[:DEVICE_LIMITATION]
      count = DEVICE_LIMITATION
    # code from jsnoek@
    def body(i, x):
      result = jax.pmap(lambda z: lax.psum(jnp.sin(z), 'i'), devices=devices, axis_name='i')(x)
      return result + x
    f_loop = lambda x: lax.fori_loop(0, 3, body, x)  # noqa: F821
    ans = f_loop(jnp.ones(count))
    del body, f_loop
    def body2(i, x):
      result = jnp.broadcast_to(jnp.sin(x).sum(), x.shape)
      return result + x
    g_loop = lambda x: lax.fori_loop(0, 3, body2, x)
    expected = g_loop(jnp.ones(count))
    self.assertAllClose(ans, expected, check_dtypes=False)
  @ignore_jit_of_pmap_warning()
  def test_while_loop_of_pmap_error_message(self):
    # Requesting more pmap replicas than devices inside a loop should give a
    # clear error; the expected text depends on the pmap/shmap-merge config.
    def body(i, x):
      result = jax.pmap(lambda z: lax.psum(jnp.sin(z), 'i'), axis_name='i')(x)
      return result + x
    f_loop = lambda x: lax.fori_loop(0, 3, body, x)
    too_big = 2 * jax.device_count()
    if config.pmap_shmap_merge.value:
      expected_regex = re.compile(
          "cannot select an axis to squeeze out which has size not equal to "
          r"one, got shape=\(\d,\) and dimensions=\(\d,\)"
      )
    else:
      expected_regex = re.escape(
          "compiling computation `jit(scan)` that requires {} "
          "replicas, but only {} XLA devices are available."
          .format(too_big, jax.device_count()))
    self.assertRaisesRegex(
        ValueError, expected_regex,
        lambda: f_loop(jnp.ones(too_big)))
  @parameterized.named_parameters(
      {"testcase_name": f"_{scan_name}",
       "scan": scan_impl}
      for scan_impl, scan_name in SCAN_IMPLS_WITH_FOR)
  def test_scan_reverse(self, scan):
    # reverse=True scans from the end: equivalent to cumsum of the reversed
    # input, reversed back. Checked both jitted and with jit disabled.
    def cumsum(x, reverse):
      return scan(lambda c, x: (c + x, c + x), 0, x, reverse=reverse)[1]
    x = np.array([3, 1, 4, 1, 5, 9])
    self.assertAllClose(np.cumsum(x), cumsum(x, False), check_dtypes=False)
    self.assertAllClose(np.cumsum(x[::-1])[::-1], cumsum(x, True), check_dtypes=False)
    with jax.disable_jit():
      self.assertAllClose(np.cumsum(x), cumsum(x, False), check_dtypes=False)
    with jax.disable_jit():
      self.assertAllClose(np.cumsum(x[::-1])[::-1], cumsum(x, True), check_dtypes=False)
  def test_scan_unroll(self):
    # `unroll` should not change the jaxpr size (it is a lowering parameter)
    # but should grow the HLO; full unrolling removes the while loop.
    d = jnp.ones(2)
    def f(c, a):
      assert a.shape == (3,)
      assert c.shape == (4,)
      b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(d))
      c = jnp.sin(c * b)
      assert b.shape == ()
      return c, b
    xs = jnp.ones((20, 3))
    c = jnp.ones(4)
    scan = lambda c, xs: lax.scan(f, c, xs)
    scan_unrolled = lambda c, xs: lax.scan(f, c, xs, unroll=2)
    scan_fully_unrolled = lambda c, xs: lax.scan(f, c, xs, unroll=True)
    # jaxprs should be the same size
    self.assertEqual(
        len(str(jax.make_jaxpr(scan)(c, xs))),
        len(str(jax.make_jaxpr(scan_unrolled)(c, xs))))
    # but HLO should grow due to unrolling
    scan_hlo = str(jax.jit(scan).lower(c, xs).as_text("hlo"))
    scan_unrolled_hlo = str(jax.jit(scan_unrolled).lower(c, xs).as_text("hlo"))
    scan_fully_unrolled_hlo = str(
        jax.jit(scan_fully_unrolled).lower(c, xs).as_text("hlo"))
    self.assertLess(len(scan_hlo), len(scan_unrolled_hlo))
    self.assertLess(len(scan_unrolled_hlo), len(scan_fully_unrolled_hlo))
    # and the lowering should contain a while loop, unless the scan is fully
    # unrolled
    self.assertIn("while(", scan_hlo)
    self.assertIn("while(", scan_unrolled_hlo)
    self.assertNotIn("while(", scan_fully_unrolled_hlo)
def test_scan_xs_none(self):
def f(h, _):
return h + 1, None
length = 20
h, _ = lax.scan(f, 0, length=length)
self.assertEqual(h, length)
  def test_disable_jit_cond_with_vmap(self):
    # https://github.com/jax-ml/jax/issues/3093
    # vmap of cond (operand-per-branch form) must work under disable_jit.
    def fn(t):
      return lax.cond(t > 0, 0, lambda x: 0, 0, lambda x: 1)
    fn = jax.vmap(fn)
    with jax.disable_jit():
      _ = fn(jnp.array([1]))  # doesn't crash
  def test_disable_jit_while_loop_with_vmap(self):
    # https://github.com/jax-ml/jax/issues/2823
    # vmap of while_loop must work in eager (disable_jit) mode.
    def trivial_while(y):
      return lax.while_loop(lambda x: x < 10.0, lambda x: x + 1.0, y)
    with jax.disable_jit():
      jax.vmap(trivial_while)(jnp.array([3.0,4.0]))  # doesn't crash
def test_vmaps_of_while_loop(self):
# https://github.com/jax-ml/jax/issues/3164
def f(x, n): return lax.fori_loop(0, n, lambda _, x: x + 1, x)
x, n = jnp.arange(3), jnp.arange(4)
jax.vmap(jax.vmap(f, (None, 0)), (0, None))(x, n) # doesn't crash
  def test_disable_jit_while_loop_with_mutation(self):
    # https://github.com/jax-ml/jax/issues/27019
    # The body mutates a numpy carry in place (`x += 1`); eager-mode
    # while_loop must not let that aliasing change the result vs jit.
    def body_fun(carry):
      x, y = carry
      x += 1  # in-place if x is mutable
      return x, y + x
    def cond_fun(carry):
      x, _ = carry
      return x < 10
    def f():
      val = np.array(1.0)  # mutable value
      return jax.lax.while_loop(cond_fun, body_fun, (val, val))[1]
    with jax.disable_jit(False):
      result_jit = f()
    with jax.disable_jit(True):
      result_nojit = f()
    self.assertEqual(result_jit, result_nojit)
  @parameterized.named_parameters(
      {"testcase_name": f"_{shape}_{axis=}",
       "shape": shape, "axis": axis}
      for shape in [
        [0], [1], [2], [3], [5], [10], [1000],
        [2, 3], [7, 5], [5, 6, 7]
      ]
      for axis in range(-len(shape), len(shape) - 1))
  def testAssociativeScanUnstructured(self, shape, axis):
    # associative_scan with addition matches np.cumsum along `axis`,
    # including empty and multi-dimensional inputs.
    data = np.arange(np.prod(shape)).reshape(shape) + 7
    expected = np.cumsum(data, axis=axis)
    result = lax.associative_scan(operator.add, data, axis=axis)
    self.assertAllClose(result, expected, check_dtypes=False)
def testAssociativeScanUnstructured1000Reverse(self):
data = np.arange(1000) + 32
expected = np.cumsum(data[::-1])[::-1]
result = lax.associative_scan(operator.add, data, reverse=True)
self.assertAllClose(result, expected, check_dtypes=False)
  def testAssociativeScanStructured3(self):
    # Pytree (namedtuple) elements: the combine fn receives and returns the
    # structured type, and the scan applies leaf-wise.
    pair = collections.namedtuple('pair', ('first', 'second'))
    data = pair(first=np.array([0., 1., 2.]),
                second=np.array([0., 10., 20.]))
    def fn(a, b):
      return pair(first=a.first + b.first,
                  second=a.second + b.second)
    result = lax.associative_scan(fn, elems=data)
    self.assertAllClose(result.first, np.array([0., 1., 3.]),
                        check_dtypes=False)
    self.assertAllClose(result.second, np.array([0., 10., 30.]),
                        check_dtypes=False)
def testAssociativeScanOfBools(self):
x = jnp.array([False, True, True, True, False, True])
y = lax.associative_scan(lax.bitwise_xor, x)
self.assertArraysEqual(np.array([False, True, False, True, True, False]), y)
  @parameterized.named_parameters({"testcase_name": f"_{shape}", "shape": shape}
                                  for shape in [2, 43, 100])
  def testAssociativeScanSolvingRegressionTest(self, shape):
    # This test checks that the batching rule doesn't raise for a batch
    # sensitive function (solve).
    ms = np.repeat(np.eye(2).reshape(1, 2, 2), shape, axis=0)
    vs = np.ones((shape, 2))
    @jax.vmap
    def fn(a, b):
      m1, v1 = a
      m2, v2 = b
      return m1 + m2, jsp.linalg.solve(m1, v2) + jsp.linalg.solve(m2, v1)
    _ = lax.associative_scan(fn, elems=(ms, vs))
  def test_scan_typecheck_param(self):
    # check_jaxpr must reject invalid scan equation params (wrong type for
    # `reverse`, negative `num_consts`) with specific error messages.
    d = jnp.ones(2)
    def f(c, a):
      b = jnp.cos(jnp.sum(a) + jnp.sum(c) + jnp.sum(d))
      c = jnp.sin(c * b)
      return c, b
    xs = jnp.ones((5, 3))
    c = jnp.ones(4)
    scan_fun = lambda c, xs: lax.scan(f, c, xs)
    def new_jaxpr():
      jaxpr = jax.make_jaxpr(partial(scan_fun))(c, xs).jaxpr
      scan = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'scan')
      return jaxpr, scan
    jaxpr, eqn = new_jaxpr()
    eqn.params['reverse'] = 4
    self.assertRaisesRegex(
        core.JaxprTypeError,
        re.escape('invalid scan param reverse of type int, bool required: 4'),
        lambda: core.check_jaxpr(jaxpr))
    jaxpr, eqn = new_jaxpr()
    eqn.params['num_consts'] = -3
    self.assertRaisesRegex(
        core.JaxprTypeError,
        re.escape('invalid scan param num_consts of type int, '
                  'non-negative int required: -3'),
        lambda: core.check_jaxpr(jaxpr))
  def test_cond_typecheck_param(self):
    # check_jaxpr must reject a cond equation whose `branches` param is not
    # a tuple of ClosedJaxprs.
    def new_jaxpr():
      jaxpr = jax.make_jaxpr(
          lambda x: lax.switch(0, [jnp.sin, jnp.cos], x))(1.).jaxpr
      cond = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'cond')
      return jaxpr, cond
    jaxpr, eqn = new_jaxpr()
    eqn.params['branches'] = (4, 2)
    self.assertRaisesRegex(
        core.JaxprTypeError,
        re.escape('invalid cond param branches of type tuple, '
                  'tuple of ClosedJaxpr required: (4, 2)'),
        lambda: core.check_jaxpr(jaxpr))
  def test_cond_transformation_rule_with_consts(self):
    # https://github.com/jax-ml/jax/pull/9731
    # A custom_jvp rule that closes over a constant, used inside a cond
    # branch, must transform correctly under jvp.
    @jax.custom_jvp
    def f(x):
      return x
    @f.defjvp
    def f_jvp(primals, tangents):
      (x,), (xdot,) = primals, tangents
      const = np.arange(3, dtype=x.dtype)
      return x * const, xdot * const
    g = lambda x: jax.lax.cond(True, f, lambda x: x, x)
    x = np.arange(3, dtype='float32')
    jax.jvp(g, (x,), (x,))  # doesn't crash
  @jtu.thread_unsafe_test()
  def test_cond_excessive_compilation(self):
    # Regression test for https://github.com/jax-ml/jax/issues/14058
    # Repeated cond calls with the same branch functions must reuse cached
    # lowerings rather than recompiling per call.
    def f(x):
      return x + 1
    def g(x):
      return x + 2
    with jtu.count_jit_and_pmap_lowerings() as count:
      for x in range(10):
        lax.cond(x, f, g, x)
    # Should observe a maximum of 4 compiles: convert_element_type, f, g, cond
    # In #14058, this was observed to be 31 compiles.
    self.assertLess(count(), 5)
  @parameterized.named_parameters(
      {"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
      for dtype in jtu.dtypes.all_integer)
  def test_scan_init_weak_type(self, dtype):
    # A weakly-typed Python-scalar init carry must promote to the scanned
    # dtype rather than error in the carry fixpoint.
    def func(carry, x):
      return carry + x, x
    init_weak = 0  # Python scalars are weakly-typed.
    x = jnp.ones(5, dtype=dtype)
    carry, result = lax.scan(func, init_weak, x)
    self.assertEqual(carry, x.sum(dtype=carry.dtype))
    self.assertArraysEqual(result, x)
  @parameterized.named_parameters(
      {"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
      for dtype in jtu.dtypes.all_integer)
  def test_while_loop_init_weak_type(self, dtype):
    # This tests whether lax.while_loop can properly handle weakly-typed
    # initial values.
    def cond_fun(val):
      return val < 2
    def body_fun(val):
      return val + increment
    increment = jnp.array(1, dtype=dtype)
    init_weak = 0  # Python scalars are weakly-typed.
    result = lax.while_loop(cond_fun, body_fun, init_weak)
    self.assertArraysEqual(result, jnp.full_like(increment, 2))
  @parameterized.named_parameters(
      {"testcase_name": f"{suffix}", "remat": remat}
      for suffix, remat in [
          ('', None),
          ('new_remat', new_checkpoint),
        ])
  def test_scan_vjp_forwards_extensive_residuals(self, remat):
    # https://github.com/jax-ml/jax/issues/4510
    # The extensive (per-iteration) residual of the scan VJP should be the
    # original input array itself, forwarded rather than copied.
    def cumprod(x):
      s = jnp.ones((2, 32), jnp.float32)
      return lax.scan(lambda s, x: (x*s, s), s, x)
    if remat is not None:
      cumprod = remat(cumprod)
    rng = self.rng()
    x = jnp.asarray(rng.randn(32, 2, 32).astype('float32'))
    _, vjp_fun = jax.vjp(cumprod, x)
    # TODO(mattjj): should we re-enable this check? The constants are now
    # inlined in the Jaxprs, not easy to find them.
    # ==> Yes, we don't want to change autodiff const behavior. We must make
    # these tessts pass under use_simplified_jaxpr_constants.
    if not config.use_simplified_jaxpr_constants.value:
      if config.vjp3.value:
        ext_res, = vjp_fun.args_res
      else:
        *_, ext_res = vjp_fun.args[0].args[0]
      self.assertIs(ext_res, x)
    if remat is not None:
      # TODO(mattjj): make the numpy.ndarray test pass w/ remat
      raise unittest.SkipTest("new-remat-of-scan doesn't convert numpy.ndarray")
    x = rng.randn(32, 2, 32).astype('float32')  # numpy.ndarray, not Array
    _, vjp_fun = jax.vjp(cumprod, x)
    if not config.use_simplified_jaxpr_constants.value:
      if config.vjp3.value:
        ext_res, *_ = vjp_fun.opaque_residuals
      else:
        *_, ext_res = vjp_fun.args[0].args[0]
      self.assertIsInstance(ext_res, jax.Array)
  def test_scan_vmap_collectives(self):
    # A collective (psum) over the vmap axis inside a scan body must resolve
    # against the vmap axis_name.
    def scan_f(state, x):
      s = lax.psum(state, 'i') * x
      return state, s
    def scan(state, xs):
      return lax.scan(scan_f, state, xs)
    scan_v = jax.vmap(scan, in_axes=0, out_axes=0, axis_name='i')
    self.assertAllClose(
        scan_v(jnp.ones([1]), jnp.arange(5.).reshape((1, 5))),
        (jnp.array([1.]), jnp.array([[0., 1., 2., 3., 4.]])), check_dtypes=False)
  def test_xla_cpu_gpu_loop_cond_bug(self):
    # https://github.com/jax-ml/jax/issues/5900
    # Derivative through a scan-emulated while loop (cond inside scan);
    # exercised an XLA CPU/GPU miscompile of the loop condition.
    def deriv(f):
      return lambda x, *args: jax.linearize(lambda x: f(x, *args), x)[1](1.0)
    def _while_loop(cond_fun, body_fun, init_val, max_iter):
      def _iter(val):
        next_val = body_fun(val)
        next_cond = True
        return next_val, next_cond
      def _fun(tup, _):
        val, cond = tup
        # Step only while `cond` holds; otherwise pass the value through.
        return jax.lax.cond(cond, _iter, lambda x: (x, False), val), _
      init = (init_val, cond_fun(init_val))
      return jax.lax.scan(_fun, init, None, length=max_iter)[0][0]
    def my_pow(x, y):
      def body_fun(val):
        return val * x
      def cond_fun(val):
        return True
      return _while_loop(cond_fun, body_fun, 1.0, y)
    self.assertAllClose(deriv(my_pow)(3.0, 1), 1.0, check_dtypes=False)
  def test_while_loop_fixed_point_with_batched_pred_and_consts(self):
    # vmap of while_loop where the predicate is batched and the body closes
    # over a constant batched along a non-leading axis.
    def f(i, x):
      def cond(carry):
        i, x = carry
        return i < 5
      def body(carry):
        i, z = carry
        # Close over const with batch dim = 1
        return i + 1, z + x
      return lax.while_loop(cond, body, (i, jnp.ones(3)))[1]
    jax.vmap(f, in_axes=(0, 1))(jnp.arange(4), jnp.ones((3, 4)))
  def test_cond_ad_batched_unit(self):
    # see issue #9985
    # vmap-of-jacrev through nested conds with unit-typed intermediates.
    def cond_id(x):
      return lax.cond(x < 0., lambda x: x, lambda x: x, x)
    jax.vmap(jax.jacrev(lambda x: cond_id(cond_id(x))))(jnp.ones(1))
  @parameterized.named_parameters(
      {"testcase_name": f"impl={scan_name}", "scan": scan_impl}
      for scan_impl, scan_name in SCAN_IMPLS_WITH_FOR)
  def test_scan_hoisting_consts(self, scan):
    # grad through a scan whose body closes over several constants (used
    # directly and through elementwise ops) must hoist them correctly.
    A = jnp.arange(4.).reshape(2, 2)
    B = jnp.arange(4.).reshape(2, 2) + 1.
    def f(x):
      def body(c, _):
        c1, c2, c3 = c
        return (jnp.dot(A, c1), jnp.dot(B, c2), jnp.dot(jnp.sin(B), c3)), None
      init_carry = (x * jnp.ones(2), x * jnp.ones(2), x * jnp.ones(2))
      (c1, c2, c3), _ = scan(body, init_carry, None, length=3)
      return jnp.sum(c1) + jnp.sum(c2) + jnp.sum(c3)
    jax.grad(f)(1.)  # doesn't crash
  def test_custom_jvp_tangent_cond_transpose(self):
    # https://github.com/jax-ml/jax/issues/14026
    # Transposing a switch in the tangent computation of a custom_jvp must
    # give the same gradient as the equivalent arithmetic mask.
    def mask_fun(arr, choice):
      out = (1 - choice) * arr.sum() + choice * (1 - arr.sum())
      return out
    def switch_fun(arr, choice):
      choice = jnp.floor(choice).astype(jnp.int32)
      out = jax.lax.switch(choice, [lambda x: x.sum(), lambda x: 1 - x.sum()], arr)
      return out
    test_arr = jnp.arange(3.)
    test_val = 0.
    expected1 = jax.grad(mask_fun)(test_arr, test_val)
    expected2 = jax.grad(switch_fun)(test_arr, test_val)
    def good_switchfun_jvp(primals, tangents):
      arr, choice = primals
      arr_dot, choice_dot = tangents
      return switch_fun(arr, choice), mask_fun(arr_dot, choice)
    def bad_switchfun_jvp(primals, tangents):
      arr, choice = primals
      arr_dot, choice_dot = tangents
      # "bad" uses the (non-linear-looking) switch in the tangent rule; its
      # transpose must still agree with the mask formulation.
      return switch_fun(arr, choice), switch_fun(arr_dot, choice)
    good_custom_switchfun = jax.custom_jvp(switch_fun)
    good_custom_switchfun.defjvp(good_switchfun_jvp)
    expected3 = jax.grad(good_custom_switchfun)(test_arr, test_val)
    bad_custom_switchfun = jax.custom_jvp(switch_fun)
    bad_custom_switchfun.defjvp(bad_switchfun_jvp)
    actual = jax.grad(bad_custom_switchfun)(test_arr, test_val)
    self.assertAllClose(expected1, expected2)
    self.assertAllClose(expected2, expected3)
    self.assertAllClose(expected3, actual)
  def test_platform_dependent(self):
    # platform_dependent selects the branch matching the runtime platform,
    # falling back to `default` when no explicit entry matches.
    def f(x):
      return lax.platform_dependent(x, cpu=jnp.sin, default=jnp.cos)
    x = np.arange(3, dtype=np.float32)
    res = f(x)
    self.assertAllClose(
        res,
        np.sin(x) if jtu.device_under_test() == "cpu" else np.cos(x))
  def test_platform_dependent_no_args(self):
    # Branches may be zero-argument closures instead of taking operands.
    def f(x):
      return lax.platform_dependent(cpu=lambda: jnp.sin(x),
                                    default=lambda: jnp.cos(x))
    x = np.arange(3, dtype=np.float32)
    res = f(x)
    self.assertAllClose(
        res,
        np.sin(x) if jtu.device_under_test() == "cpu" else np.cos(x))
  def test_platform_dependent_lowering(self):
    # Lowering prunes branches for other platforms: only the selected
    # branch's ops appear in the StableHLO.
    def f(x):
      return lax.platform_dependent(x, cpu=jnp.sin, default=jnp.cos)
    x = np.arange(3, dtype=np.float32)
    lowered = jax.jit(f).lower(x)
    stablehlo = lowered.as_text()
    # The StableHLO contains only the branch we need
    if jtu.device_under_test() == "cpu":
      self.assertIn("stablehlo.sine", stablehlo)
      self.assertNotIn("stablehlo.cosine", stablehlo)
    else:
      self.assertNotIn("stablehlo.sine", stablehlo)
      self.assertIn("stablehlo.cosine", stablehlo)
def test_platform_dependent_with_non_existent_custom_call(self):
if not jtu.test_device_matches(["cpu"]):
self.skipTest("Only for CPU")
def f(x):
# One use with the bad custom call on a different platform branch
x1 = lax.platform_dependent(x,
cpu=jnp.sin,
other=prim_non_existent_custom_call.bind)
# and with the bad custom call in the default branch
x2 = lax.platform_dependent(x,
cpu=jnp.sin,
default=prim_non_existent_custom_call.bind)
# and one use where the current platform is the default
x3 = lax.platform_dependent(x,
other=prim_non_existent_custom_call.bind,
default=jnp.sin)
return x1 + x2 + x3
x = np.arange(3, dtype=np.float32)
hlo = str(jax.jit(f).lower(x).compiler_ir())
self.assertNotIn(prim_non_existent_custom_call.name, hlo)
res_eager = f(x)
self.assertAllClose(res_eager, 3. * np.sin(x))
res_jit = jax.jit(f)(x)
self.assertAllClose(res_jit, 3 * np.sin(x))
res_vmap = jax.vmap(f)(x)
self.assertAllClose(res_vmap, 3. * np.sin(x))
_, res_jvp = jax.jvp(f, (x,), (np.full(x.shape, .1, dtype=x.dtype),))
self.assertAllClose(res_jvp, .3 * np.cos(x))
res_grad = jax.grad(f)(1.)
self.assertAllClose(res_grad, 3. * np.cos(1.))
  def test_platform_dependent_with_primitive_with_lowering_error(self):
    # The branch for the non-selected platform contains a primitive whose
    # lowering raises; only the selected platform's branch may be lowered.
    if not jtu.test_device_matches(["cpu", "tpu"]):
      self.skipTest("Only for CPU and TPU")
    def f(x):
      return lax.platform_dependent(
          x,
          # Check that we only lower on the intended platform
          cpu=lambda x: prim_with_lowering_error.bind(x, only_on="cpu"),
          tpu=lambda x: prim_with_lowering_error.bind(x, only_on="tpu"))
    self.assertAllClose(np.sin(1.), f(1.))  # Eager
    self.assertAllClose(np.sin(1.), jax.jit(f)(1.))
    self.assertAllClose(np.sin(1.), lax.cond(True, f, lambda x: x, 1.))
    self.assertAllClose(1., lax.cond(False, f, lambda x: x, 1.))
    self.assertAllClose((0., np.sin(np.arange(8.))),
                        lax.scan(lambda carry, x: (carry, f(x)),
                                 0., np.arange(8.)))
    self.assertAllClose(np.sin(np.arange(8.)), jax.vmap(f)(np.arange(8.)))
  def test_platform_dependent_multiple_identical_branches(self):
    # Two platforms mapping to the same function should lower to a single
    # copy of the shared branch, not duplicates.
    x = np.arange(3, dtype=np.float32)
    def f(x):
      return lax.platform_dependent(
          x,
          cpu=jnp.sin,
          tpu=jnp.sin,
          default=lambda x: x)
    res = f(x)
    on_cpu_tpu = jtu.device_under_test() in ["cpu", "tpu"]
    self.assertAllClose(
        res,
        np.sin(x) if on_cpu_tpu else x)
    stablehlo = jax.jit(f).lower(x).as_text()
    sines = re.findall(r"stablehlo.sine", stablehlo)
    self.assertEqual(1 if on_cpu_tpu else 0, len(sines))
  def test_platform_dependent_no_default(self):
    # Without a `default`, running on a platform with no matching branch
    # raises NotImplementedError.
    ctx = contextlib.ExitStack()
    if jtu.device_under_test() != "tpu":
      ctx.enter_context(
          self.assertRaisesRegex(NotImplementedError,
                                 "translation rule .* not found for platform"))
    with ctx:
      lax.platform_dependent(
          3.,
          tpu=lambda x: x + 2.)
  def test_platform_dependent_batched(self):
    # vmap of platform_dependent still prunes the non-selected branch from
    # the lowered HLO.
    def f(x):
      return lax.platform_dependent(x, cpu=jnp.sin, default=jnp.cos)
    xs = np.arange(3, dtype=np.float32)
    self.assertAllClose(
        jax.vmap(f)(xs),
        np.sin(xs) if jtu.device_under_test() == "cpu" else np.cos(xs))
    # We can still fold the un-needed branch
    hlo = jax.jit(jax.vmap(f)).lower(xs).as_text('hlo')
    expect_a_sine = (jtu.device_under_test() == "cpu")
    self.assertEqual(expect_a_sine, " sine(" in hlo)
    self.assertEqual(not expect_a_sine, " cosine(" in hlo)
  def test_platform_dependent_grad(self):
    # For a function "lax.dot(x, x)", we choose two branches with very different
    # implementations (a dot and a scan), and therefore different residuals,
    # so that we can verify whether the residuals are as we expect (we don't
    # get residuals from a different platform.
    x = np.arange(8, dtype=np.float32)
    def f_impl_dot(x):  # x: f32[8]
      return jnp.dot(x, x)
    def f_impl_scan(x):
      def scan_body(carry, x_i):
        return (carry + x_i * x_i, None)
      return lax.scan(scan_body, np.float32(0.), x)[0]
    def f(x):
      return jnp.sin(lax.platform_dependent(x,
                                            cpu=f_impl_dot,
                                            default=f_impl_scan))
    self.assertAllClose(
        jax.grad(f)(x),
        jax.grad(lambda x: jnp.sin(f_impl_dot(x)))(x))
    # Check that we do not have contamination of computations across platforms
    hlo = jax.jit(jax.grad(f)).lower(x).as_text('hlo')
    expect_a_dot = (jtu.device_under_test() == "cpu")
    self.assertEqual(expect_a_dot, " dot(" in hlo)
    self.assertEqual(not expect_a_dot, " while(" in hlo)
def test_issue_29329(self):
def outer_fn(x):
def inner_fn(x):
return jax.jit(
lambda x: lax.platform_dependent(x,
default=jnp.sin,
other=jnp.cos))(x)
_, lin_fn = jax.linearize(inner_fn, x)
def with_transpose(x):
grad = jax.linear_transpose(lin_fn, x)(x)
del grad
return x
return jax.lax.cond(x[0][0] > 0., with_transpose, lambda x: x, x)
jax.vmap(outer_fn)(jnp.ones((5, 10, 10)))
def test_scan_lowering_doesnt_introduce_singleton(self):
b = 4
i = 2
def scan(y):
def body(carry, x):
return carry, jnp.dot(x, x)
return jax.lax.scan(body, 1.0, y, unroll=False)
fn = jax.jit(scan)
init = np.array(np.arange(b * i * i), dtype=np.float32).reshape((b, i, i))
hlo_text = fn.lower(init).as_text('hlo')
self.assertNotIn('4,1,2,2', hlo_text)
def test_scan_length_concrete_error(self):
f = jax.jit(lambda n, x: jax.lax.scan(lambda c, z: (c, z), x, (), n))
with self.assertRaisesRegex(
core.ConcretizationTypeError,
"The `length` argument to `scan` expects a concrete `int` value.*"):
f(3, 1.)
def test_scan_unroll_concrete_error(self):
f = jax.jit(lambda n, x: jax.lax.scan(
lambda c, z: (c, z), x, (), 10, unroll=n))
msg = ("The `unroll` argument to `scan` expects a concrete `int` or "
"`bool` value.*")
with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
f(3, 1.)
with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
f(True, 1.)
def test_cond_vmap_forwarding_doesnt_promote(self):
def f(x, y):
x, y = jax.lax.cond(
x < 3,
lambda x, y: (x * 2, y),
lambda x, y: (x * 3, y),
x, y
)
return x, y
x = jnp.arange(3)
y = jnp.array(3.)
x2, y2 = jax.vmap(f, in_axes=(0, None), out_axes=(0, None))(x, y) # don't crash
assert x is not x2
assert y is y2
def test_cond_casting(self):
x = 1.0
identity = lambda x: x
y = lax.cond(True, identity, identity, x)
self.assertEqual(y, x)
self.assertIsInstance(y, jax.Array)
@jtu.thread_unsafe_test() # live_arrays count isn't thread-safe
def test_cond_memory_leak(self):
# https://github.com/jax-ml/jax/issues/12719
def leak():
data = jax.device_put(np.zeros((1024), dtype=np.float32) + 1)
def g():
return jax.lax.cond(
True,
jax.jit(lambda: data[0]), # noqa: F821
lambda: data[1], # noqa: F821
)
# _ = g() # TODO(necula): enable this, requires fixing leaks in the
# caching of dispatch.xla_primitive_callable.
jg = jax.jit(g)
_ = jg().block_until_ready()
jg.clear_cache()
del g, jg, data, _
gc.collect()
nbufs = lambda: len(jax.live_arrays())
gc.collect()
base = nbufs()
leak()
# You would hope for exact equality here, but you cannot entirely trust
# gc.collect() to collect everything immediately under a free threaded
# build.
self.assertGreaterEqual(base, nbufs())
leak()
self.assertGreaterEqual(base, nbufs())
leak()
self.assertGreaterEqual(base, nbufs())
def test_grad_remat_while_fixpoint(self):
@jax.remat
def f(x, y):
def cond(_):
return False
def body(c):
x, y = c
return (y, x)
x, y = jax.lax.while_loop(cond, body, (x, y))
return x + y
jax.linearize(f, 1., 2.) # don't crash
def test_while_readonly_carry_optimization(self):
# https://github.com/google/flax/issues/4700
def foo(w, x, c_max):
def while_cond(val):
c, x, w = val
return c < c_max
def while_body(val):
c, x, w = val
return c + 1, x @ w, w
_, x, w = jax.lax.while_loop(while_cond, while_body, (0, x, w))
return w, x
w = jnp.ones((2, 2))
xs = jnp.ones((4, 2))
c_maxs = jnp.arange(4)
w_, _ = jax.vmap(foo, in_axes=(None, 0, 0), out_axes=(None, 0)
)(w, xs, c_maxs) # doesn't crash
self.assertAllClose(w, w_, check_dtypes=False)
@parameterized.parameters(itertools.product(range(3), repeat=5))
@jtu.run_on_devices("cpu")
def test_while_constification_correctness(
self,
seed,
num_body_consts,
num_inplace_fwds_cond_uses,
num_inplace_fwds_cond_doesnt_use,
num_noninplace_fwds):
num_fwds = (num_inplace_fwds_cond_uses + num_inplace_fwds_cond_doesnt_use +
num_noninplace_fwds)
num_carry = num_fwds + 4
rng = np.random.RandomState(seed)
perm = rng.permutation(num_carry)
iperm = np.argsort(perm)
body_consts = [rng.randn(3) for _ in range(num_body_consts)]
init_vals = list(rng.uniform(size=num_carry))
def cond_fun(c):
i, c = c
c = [c[i] for i in iperm]
c, _ = split_list(c, [num_inplace_fwds_cond_uses])
return (i < 2) + (0. * jnp.array(sum(c))).astype(bool)
def body_fun(c):
i, c = c
c = [c[i] for i in iperm]
inplace_fwds, noninplace_fwds, dont_fwd = split_list(
c, [num_inplace_fwds_cond_uses + num_inplace_fwds_cond_doesnt_use,
num_noninplace_fwds])
dont_fwd = [jnp.sin(x) * sum(jnp.sum(c) for c in body_consts)
for x in dont_fwd]
new_c_perm = [*inplace_fwds, *dont_fwd, *noninplace_fwds]
new_c = [new_c_perm[i] for i in perm]
return (i + 1, new_c)
i, outs = jax.lax.while_loop(cond_fun, body_fun, (0, init_vals))
self.assertEqual(i, 2)
_, outs_ref = body_fun(body_fun((0, init_vals)))
self.assertAllClose(outs, outs_ref, check_dtypes=False)
def test_while_constification_correctness_manually(self):
# regression test for a particular index-offset logic bug
def cond_fun(c):
# cond doesn't use first or third element of the carry
_, i, _ = c
return i == 0
def body_fun(c):
# two body consts
for _ in range(2): jnp.sin(np.zeros(3))
# first element of the carry is forwarded to third element of the carry
return 0., 1., c[0]
outs = jax.lax.while_loop(cond_fun, body_fun, (5., 0., 3.14))
self.assertAllClose(outs, (0., 1., 5.))
def test_scan_readonly_carry_optimization(self):
# https://github.com/google/flax/issues/4709
def f(x, y):
def g(_, y):
y, _ = jax.lax.scan(lambda y, _: (y, None), y, None, length=1)
return y
return jax.lax.cond(x < 0, g, g, x, y)
xs = jnp.arange(3.)
y = 3.
jax.vmap(f, (0, None), None)(xs, y) # don't crash
@parameterized.parameters(itertools.product(range(3), repeat=4))
@jtu.run_on_devices("cpu")
def test_scan_constification_correctness(
self,
seed,
num_body_consts,
num_inplace_fwds,
num_noninplace_fwds):
num_fwds = num_inplace_fwds + num_noninplace_fwds
num_carry = num_fwds + 4
num_xs = 2
num_ys = 3
rng = np.random.RandomState(seed)
perm = rng.permutation(num_carry)
iperm = np.argsort(perm)
body_consts = [rng.randn(3) for _ in range(num_body_consts)]
init_vals = list(rng.uniform(size=num_carry))
def body_fun(c, _):
c = [c[i] for i in iperm]
inplace_fwds, noninplace_fwds, dont_fwd = split_list(
c, [num_inplace_fwds, num_noninplace_fwds])
dont_fwd = [jnp.sin(x) * sum(jnp.sum(c) for c in body_consts)
for x in dont_fwd]
new_c_perm = [*inplace_fwds, *dont_fwd, *noninplace_fwds]
new_c = [new_c_perm[i] for i in perm]
return new_c, [0 for _ in range(num_ys)]
xs = [jnp.arange(2.) for _ in range(num_xs)]
outs = jax.lax.scan(body_fun, init_vals, xs)[0]
outs_ref = body_fun(body_fun(init_vals, [x[0] for x in xs])[0], [x[1] for x in xs])[0]
self.assertAllClose(outs, outs_ref, check_dtypes=False)
@parameterized.parameters(itertools.product(range(3), repeat=4))
@jtu.run_on_devices("cpu")
def test_scan_forwarding_correctness(
self,
seed,
num_body_consts,
num_const_fwds,
num_input_fwds):
num_carry = num_const_fwds + 4
num_xs = num_input_fwds + 2
num_ys = num_xs + 1
rng = np.random.RandomState(seed)
carry_perm = rng.permutation(num_carry)
carry_iperm = np.argsort(carry_perm)
xs_perm = rng.permutation(num_xs)
ys_perm = rng.permutation(num_ys)
f = np.arange(num_xs)
f = [f[i] if idx < num_input_fwds else None for idx, i in enumerate(xs_perm)]
f += [None]
in_fwd = [f[i] for i in ys_perm]
body_consts = [rng.randn(3) for _ in range(num_body_consts)]
init_vals = list(rng.uniform(size=num_carry))
def body_fun(c, x):
c = [c[i] for i in carry_iperm]
carry_fwds, carry_dont_fwd = split_list(c, [num_const_fwds])
carry_dont_fwd = [jnp.sin(x) * sum(jnp.sum(c) for c in body_consts)
for x in carry_dont_fwd]
new_c_perm = [*carry_fwds, *carry_dont_fwd]
new_c = [new_c_perm[i] for i in carry_perm]
x = [x[i] for i in xs_perm]
x_fwd, x_dont_fwd = split_list(x, [num_input_fwds])
x_dont_fwd = [jnp.cos(x) * sum(jnp.sum(c) for c in body_consts)
for x in x_dont_fwd]
y = [*x_fwd, *x_dont_fwd, 0]
y = [y[i] for i in ys_perm]
return new_c, y
xs = list(rng.uniform(size=(num_xs, 2)))
final, outs = jax.lax.scan(body_fun, init_vals, xs)
for f, y in zip(in_fwd, outs):
if f is not None:
self.assertAllClose(y, xs[f])
final_ref = body_fun(body_fun(init_vals, [x[0] for x in xs])[0], [x[1] for x in xs])[0]
self.assertAllClose(final, final_ref, check_dtypes=False)
def test_scan_diff_of_print(self):
# ref: https://github.com/jax-ml/jax/issues/28738
def f(c, _):
jax.debug.print("c = {c}", c=c, ordered=True)
return c + 1, None
def g(x):
return jax.lax.scan(f, x, length=2)[0]
jaxpr = jax.make_jaxpr(jax.value_and_grad(g))(1.0)
eqn_jaxpr = jaxpr.eqns[0].params["jaxpr"]
self.assertIn("debug_print", [e.primitive.name for e in eqn_jaxpr.eqns])
def test_scan_input_to_output_forwarding(self):
def f(c, x):
return c + 1, x
def g(x):
return jax.lax.scan(f, 0, x)
jaxpr = jax.make_jaxpr(g)(jnp.arange(3.))
self.assertLen(jaxpr.eqns[0].params["jaxpr"].jaxpr.outvars, 1)
@jtu.sample_product(
seed=range(6),
num_rule_consts=range(6),
num_const_fwds=range(6),
num_carry_fwds=range(6),
num_input_fwds=range(6),
)
@jtu.run_on_devices("cpu")
def test_scan_vjp_forwarding_correctness(
self,
seed,
num_rule_consts,
num_const_fwds,
num_carry_fwds,
num_input_fwds):
# Unlike test_scan_forwarding_correctness, which tests forwarding in the
# scan traceable, this test covers forwarding logic related to residuals in
# the scan partial eval / vjp rule. So 'forwards' refer to residuals that
# will be forwarded.
# We use a custom_jvp where the jvp rule introduces consts to populate
# jaxpr.consts in _scan_partial_eval's input.
@jax.custom_jvp
def foo(x):
return 3. * x
@foo.defjvp
def foo_jvp(primals, tangents):
(x,), (x_dot,) = primals, tangents
if num_rule_consts:
coeff = sum([jnp.array(np.ones(3) / num_rule_consts) for _ in range(num_rule_consts)]) # noqa: C419
else:
coeff = 1.
return foo(x), jnp.prod(coeff) * x_dot
num_const = num_const_fwds + 2
num_carry = num_carry_fwds + 4
num_xs = num_input_fwds + 2
num_ys = num_xs + 1
rng = np.random.RandomState(seed)
carry_perm = rng.permutation(num_carry)
carry_iperm = np.argsort(carry_perm)
xs_perm = rng.permutation(num_xs)
ys_perm = rng.permutation(num_ys)
f = np.arange(num_xs)
f = [f[i] if idx < num_input_fwds else None for idx, i in enumerate(xs_perm)]
f += [None]
in_fwd = [f[i] for i in ys_perm]
body_consts = [jnp.array(rng.randn(3)) for _ in range(num_const)]
init_vals = list(map(jnp.array, rng.uniform(size=(num_carry, 3))))
def body_fun(c, x):
c = [c[i] for i in carry_iperm]
const_fwds, const_dont_fwd = split_list(body_consts, [num_const_fwds])
z = sum(const_dont_fwd)
carry_fwds, carry_dont_fwd = split_list(c, [num_const_fwds])
carry_fwds = [math.prod([x, x, *const_fwds, z]) for x in carry_fwds]
carry_dont_fwd = [jnp.sin(x) * sum(jnp.sum(c) for c in body_consts)
for x in carry_dont_fwd]
new_c_perm = [*carry_fwds, *carry_dont_fwd]
new_c = [new_c_perm[i] for i in carry_perm]
new_c = [foo(new_c[0]), *new_c[1:]]
x = [x[i] for i in xs_perm]
x_fwd, x_dont_fwd = split_list(x, [num_input_fwds])
x_fwd = [x * x for x in x_fwd]
x_dont_fwd = [jnp.cos(x) * sum(jnp.sum(c) for c in body_consts)
for x in x_dont_fwd]
y = [*x_fwd, *x_dont_fwd, 0]
y = [y[i] for i in ys_perm]
return new_c, y
xs = list(map(jnp.array, rng.uniform(size=(num_xs, 2))))
(final, outs), vjp = jax.vjp(partial(jax.lax.scan, body_fun), init_vals, xs)
init_vals_bar, xs_bar = vjp((final, outs))
with jax.disable_jit():
(final_ref, outs_ref), vjp = jax.vjp(partial(jax.lax.scan, body_fun), init_vals, xs)
init_vals_bar_ref, xs_bar_ref = vjp((final, outs))
self.assertAllClose(final, final_ref, check_dtypes=False, rtol=1e-5)
self.assertAllClose(outs, outs_ref, check_dtypes=False)
self.assertAllClose(xs_bar, xs_bar_ref, check_dtypes=False)
def test_scan_fixpoint_instantiate(self):
def f(x):
c, () = jax.lax.scan(lambda c, _: ((0., 0.), ()), (x, 0.), (), length=5)
return sum(c)
jax.grad(f)(1.) # doesn't crash
def test_cond_basic_vjp3(self):
def f(x):
return jax.lax.cond(True, jnp.sin, lambda x: x, x)
_, f_vjp = vjp3(f, 1.)
g, = f_vjp(1.0)
self.assertAllClose(g, jnp.cos(1.), check_dtypes=False)
def h(x):
return jax.lax.cond(True, jnp.sin, lambda x: 1., x)
_, h_vjp = vjp3(h, 1.)
g, = h_vjp(1.0)
self.assertAllClose(g, jnp.cos(1.), check_dtypes=False)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| LaxControlFlowTest |
python | tornadoweb__tornado | tornado/template.py | {
"start": 23599,
"end": 23854
} | class ____(_Node):
def __init__(self, statement: str, line: int) -> None:
self.statement = statement
self.line = line
def generate(self, writer: "_CodeWriter") -> None:
writer.write_line(self.statement, self.line)
| _Statement |
python | astropy__astropy | astropy/modeling/tests/test_parameters.py | {
"start": 4680,
"end": 4730
} | class ____(M1):
m2c = Parameter(default=11.0)
| M2 |
python | automl__auto-sklearn | autosklearn/pipeline/components/data_preprocessing/rescaling/standardize.py | {
"start": 512,
"end": 2168
} | class ____(Rescaling, AutoSklearnPreprocessingAlgorithm):
def __init__(
self, random_state: Optional[Union[int, np.random.RandomState]] = None
) -> None:
from sklearn.preprocessing import StandardScaler
self.preprocessor = StandardScaler(copy=False)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
"shortname": "StandardScaler",
"name": "StandardScaler",
"handles_missing_values": False,
"handles_nominal_values": False,
"handles_numerical_features": True,
"prefers_data_scaled": False,
"prefers_data_normalized": False,
"handles_regression": True,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": True,
"is_deterministic": True,
# TODO find out if this is right!
"handles_sparse": True,
"handles_dense": True,
"input": (SPARSE, DENSE, UNSIGNED_DATA),
"output": (INPUT,),
"preferred_dtype": None,
}
def fit(
self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE] = None
) -> "AutoSklearnPreprocessingAlgorithm":
if self.preprocessor is None:
raise NotFittedError()
if sparse.isspmatrix(X):
self.preprocessor.set_params(with_mean=False)
return super(StandardScalerComponent, self).fit(X, y)
| StandardScalerComponent |
python | getsentry__sentry | src/sentry/grouping/variants.py | {
"start": 8963,
"end": 9303
} | class ____(TypedDict, total=False):
system: ComponentVariant
app: ComponentVariant
custom_fingerprint: CustomFingerprintVariant
built_in_fingerprint: CustomFingerprintVariant
checksum: ChecksumVariant
hashed_checksum: HashedChecksumVariant
default: ComponentVariant
fallback: FallbackVariant
| VariantsByDescriptor |
python | weaviate__weaviate-python-client | weaviate/collections/classes/tenants.py | {
"start": 5271,
"end": 6695
} | class ____(BaseModel):
"""Tenant class used to describe a tenant to create in Weaviate.
Attributes:
name: the name of the tenant.
activity_status: TenantCreateActivityStatus, default: "HOT"
"""
model_config = ConfigDict(populate_by_name=True)
name: str
activityStatusInternal: TenantCreateActivityStatus = Field(
default=TenantCreateActivityStatus.ACTIVE,
alias="activity_status",
exclude=True,
)
activityStatus: _TenantActivistatusServerValues = Field(
init_var=False, default=_TenantActivistatusServerValues.HOT
)
@property
def activity_status(self) -> TenantCreateActivityStatus:
"""Getter for the activity status of the tenant."""
return self.activityStatusInternal
def model_post_init(self, __context: Any) -> None: # noqa: D102
if self.activityStatusInternal == TenantCreateActivityStatus.HOT:
_Warnings.deprecated_tenant_type("HOT", "ACTIVE")
self.activityStatusInternal = TenantCreateActivityStatus.ACTIVE
elif self.activityStatusInternal == TenantCreateActivityStatus.COLD:
_Warnings.deprecated_tenant_type("COLD", "INACTIVE")
self.activityStatusInternal = TenantCreateActivityStatus.INACTIVE
self.activityStatus = _TenantActivistatusServerValues.from_string(
self.activityStatusInternal.value
)
| TenantCreate |
python | huggingface__transformers | src/transformers/models/distilbert/tokenization_distilbert.py | {
"start": 796,
"end": 1116
} | class ____(BertTokenizer):
model_input_names = ["input_ids", "attention_mask"]
# DistilBertTokenizerFast is an alias for DistilBertTokenizer (since BertTokenizer is already a fast tokenizer)
DistilBertTokenizerFast = DistilBertTokenizer
__all__ = ["DistilBertTokenizer", "DistilBertTokenizerFast"]
| DistilBertTokenizer |
python | scipy__scipy | scipy/interpolate/_cubic.py | {
"start": 22329,
"end": 40394
} | class ____(CubicHermiteSpline):
"""Piecewise cubic interpolator to fit values (C2 smooth).
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-D array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
* 'periodic': The interpolated functions is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
* 'clamped': The first derivative at curves ends are zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
* 'natural': The second derivative at curve ends are zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple ``(order, deriv_values)`` allowing to specify arbitrary
derivatives at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding ``axis`` dimension. For example, if
`y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), ``extrapolate`` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same ``x`` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding ``axis``.
For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same axis which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
See Also
--------
Akima1DInterpolator : Akima 1D interpolator.
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
Notes
-----
Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
former controls only construction of a spline, and the latter only
evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
When 'not-a-knot' boundary conditions is applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
You can see that the spline continuity property holds for the first and
second derivatives and violates only for the third derivative.
>>> import numpy as np
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> fig, ax = plt.subplots(figsize=(6.5, 4))
>>> ax.plot(x, y, 'o', label='data')
>>> ax.plot(xs, np.sin(xs), label='true')
>>> ax.plot(xs, cs(xs), label="S")
>>> ax.plot(xs, cs(xs, 1), label="S'")
>>> ax.plot(xs, cs(xs, 2), label="S''")
>>> ax.plot(xs, cs(xs, 3), label="S'''")
>>> ax.set_xlim(-0.5, 9.5)
>>> ax.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> fig, ax = plt.subplots(figsize=(6.5, 4))
>>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> ax.plot(np.cos(xs), np.sin(xs), label='true')
>>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> ax.axes.set_aspect('equal')
>>> ax.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
interval 0 <= x<= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
xp = array_namespace(x, y)
x, dx, y, axis, _ = prepare_input(x, y, axis, xp=np_compat)
n = len(x)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
if y.size == 0:
# bail out early for zero-sized arrays
s = np.zeros_like(y)
else:
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the
# same way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as the both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
m = b.shape[0]
s = solve(A, b.reshape(m, -1), overwrite_a=True, overwrite_b=True,
check_finite=False).reshape(b.shape)
elif n == 3 and bc[0] == 'periodic':
# In case when number of points is 3 we compute the derivatives
# manually
t = (slope / dxr).sum(0) / (1. / dxr).sum(0)
s = np.broadcast_to(t, (n,) + y.shape[1:])
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the
# linear system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
# for more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-3]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
m = b1.shape[0]
s1 = solve_banded((1, 1), Ac, b1.reshape(m, -1), overwrite_ab=False,
overwrite_b=False, check_finite=False)
s1 = s1.reshape(b1.shape)
m = b2.shape[0]
s2 = solve_banded((1, 1), Ac, b2.reshape(m, -1), overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = s2.reshape(b2.shape)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
m = b.shape[0]
s = solve_banded((1, 1), A, b.reshape(m, -1), overwrite_ab=True,
overwrite_b=True, check_finite=False)
s = s.reshape(b.shape)
x, y, s = map(xp.asarray, (x, y, s))
super().__init__(x, y, s, axis=0, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
y casted to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, str):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
f"The first and last `y` point along axis {axis} must "
"be identical (within machine precision) when "
"bc_type='periodic'.")
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, str):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError(f"bc_type={bc} is not allowed.")
else:
try:
deriv_order, deriv_value = bc
except Exception as e:
raise ValueError(
"A specified derivative value must be "
"given in the form (order, value)."
) from e
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
f"`deriv_value` shape {deriv_value.shape} is not "
f"the expected one {expected_deriv_shape}."
)
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
| CubicSpline |
python | matplotlib__matplotlib | lib/mpl_toolkits/mplot3d/art3d.py | {
"start": 17214,
"end": 19691
} | class ____(Patch):
"""
3D patch object.
"""
def __init__(self, *args, zs=(), zdir='z', axlim_clip=False, **kwargs):
"""
Parameters
----------
verts :
zs : float
The location along the *zdir* axis in 3D space to position the
patch.
zdir : {'x', 'y', 'z'}
Plane to plot patch orthogonal to. Default: 'z'.
See `.get_dir_vector` for a description of the values.
axlim_clip : bool, default: False
Whether to hide patches with a vertex outside the axes view limits.
.. versionadded:: 3.10
"""
super().__init__(*args, **kwargs)
self.set_3d_properties(zs, zdir, axlim_clip)
def set_3d_properties(self, verts, zs=0, zdir='z', axlim_clip=False):
"""
Set the *z* position and direction of the patch.
Parameters
----------
verts :
zs : float
The location along the *zdir* axis in 3D space to position the
patch.
zdir : {'x', 'y', 'z'}
Plane to plot patch orthogonal to. Default: 'z'.
See `.get_dir_vector` for a description of the values.
axlim_clip : bool, default: False
Whether to hide patches with a vertex outside the axes view limits.
.. versionadded:: 3.10
"""
zs = np.broadcast_to(zs, len(verts))
self._segment3d = [juggle_axes(x, y, z, zdir)
for ((x, y), z) in zip(verts, zs)]
self._axlim_clip = axlim_clip
def get_path(self):
# docstring inherited
# self._path2d is not initialized until do_3d_projection
if not hasattr(self, '_path2d'):
self.axes.M = self.axes.get_proj()
self.do_3d_projection()
return self._path2d
def do_3d_projection(self):
s = self._segment3d
if self._axlim_clip:
mask = _viewlim_mask(*zip(*s), self.axes)
xs, ys, zs = np.ma.array(zip(*s),
dtype=float, mask=mask).filled(np.nan)
else:
xs, ys, zs = zip(*s)
vxs, vys, vzs, vis = proj3d._proj_transform_clip(xs, ys, zs,
self.axes.M,
self.axes._focal_length)
self._path2d = mpath.Path(np.ma.column_stack([vxs, vys]))
return min(vzs)
| Patch3D |
python | pytorch__pytorch | torch/_higher_order_ops/flat_apply.py | {
"start": 1941,
"end": 4379
} | class ____(HigherOrderOperator):
def __init__(self) -> None:
super().__init__("flat_apply")
def __call__(self, func, in_spec, *flat_args, **_unused):
"""
Functions that take in non-graphable types cannot directly be put into FX graph.
Given func(*args, **kwargs), if all of the non-graphable types are pytrees,
then we're able to store a call to flat_apply(func, in_spec, *flat_args) in the FX graph.
The semantics of flat_apply(func, in_spec, *flat_args) are roughly equivalent to:
>>> def flat_apply_impl(func, in_spec, *flat_args):
>>> args, kwargs = pytree.tree_unflatten(flat_args, in_spec)
>>> output = func(*args, **kwargs)
>>> return output
flat_apply supports the following two cases:
- an input type is a container type (e.g. of tensors) registered as a pytree.
We'll tree_flatten the input type and store the spec.
- an input type is a constant type (i.e. torch.compile will specialize on it)
registered with pytree.register_constant. The constant type goes directly
into the spec.
"""
assert isinstance(func, _op_types) or pytree._is_constant_holder(func)
assert len(_unused) == 0
return impl(func, in_spec, *flat_args)
def impl(func, in_spec, *flat_args):
if not isinstance(func, _op_types):
# assume _ConstantFunction
func = pytree._retrieve_constant(func)
assert isinstance(func, _ConstantFunction)
args, kwargs = from_graphable(flat_args, in_spec)
out = func(*args, **kwargs)
# Right now, all outputs must either be graphable or lists/tuples of graphables.
#
# TODO: The following can be updated to support non-graphable outputs and pytrees.
# For non-graphable constant outputs: the assumption would be that they are constant
# (every time the function runs those MUST be the same)
# For pytree outputs:
# I'm not sure if we need to return (flat_output, spec) or just (flat_output,):
# in the latter case the tracers need to carry out the output specs
# (they need to know how to reconstruct the object from just the flat_output).
def is_valid_output(x):
if isinstance(x, (tuple, list)):
return all(map(is_valid_output, x))
return is_graphable(x)
assert is_valid_output(out)
return out
flat_apply = FlatApply()
| FlatApply |
python | modin-project__modin | modin/tests/pandas/native_df_interoperability/test_compiler_caster.py | {
"start": 8226,
"end": 8420
} | class ____(NativeQueryCompiler):
_MAX_SIZE_THIS_ENGINE_CAN_HANDLE = BIG_DATA_CLOUD_MIN_NUM_ROWS
def __init__(self, pandas_frame):
super().__init__(pandas_frame)
| BaseTestAutoMover |
python | pypa__virtualenv | src/virtualenv/create/via_global_ref/builtin/cpython/cpython3.py | {
"start": 496,
"end": 579
} | class ____(CPython, Python3Supports, abc.ABC):
"""CPython 3 or later."""
| CPython3 |
python | hynek__structlog | src/structlog/_output.py | {
"start": 708,
"end": 2939
} | class ____:
"""
Print events into a file.
Args:
file: File to print to. (default: `sys.stdout`)
>>> from structlog import PrintLogger
>>> PrintLogger().info("hello")
hello
Useful if you follow `current logging best practices
<logging-best-practices>`.
Also very useful for testing and examples since `logging` is finicky in
doctests.
.. versionchanged:: 22.1.0
The implementation has been switched to use `print` for better
monkeypatchability.
"""
def __init__(self, file: TextIO | None = None):
self._file = file or stdout
self._lock = _get_lock_for_file(self._file)
def __getstate__(self) -> str:
"""
Our __getattr__ magic makes this necessary.
"""
if self._file is stdout:
return "stdout"
if self._file is stderr:
return "stderr"
raise PicklingError(
"Only PrintLoggers to sys.stdout and sys.stderr can be pickled."
)
def __setstate__(self, state: Any) -> None:
"""
Our __getattr__ magic makes this necessary.
"""
if state == "stdout":
self._file = stdout
else:
self._file = stderr
self._lock = _get_lock_for_file(self._file)
def __deepcopy__(self, memodict: dict[str, object]) -> PrintLogger:
"""
Create a new PrintLogger with the same attributes. Similar to pickling.
"""
if self._file not in (stdout, stderr):
raise copy.error(
"Only PrintLoggers to sys.stdout and sys.stderr "
"can be deepcopied."
)
newself = self.__class__(self._file)
newself._lock = _get_lock_for_file(newself._file)
return newself
def __repr__(self) -> str:
return f"<PrintLogger(file={self._file!r})>"
def msg(self, message: str) -> None:
"""
Print *message*.
"""
f = self._file if self._file is not stdout else None
with self._lock:
print(message, file=f, flush=True)
log = debug = info = warn = warning = msg
fatal = failure = err = error = critical = exception = msg
| PrintLogger |
python | pytorch__pytorch | functorch/dim/_wrap.py | {
"start": 572,
"end": 8286
} | class ____:
"""
This class wraps PyTorch operations to support first-class dimensions.
"""
def __init__(
self, orig: Callable, wrapper_implementation: Callable, dim_name: str = "dim"
):
self.orig = orig
self.wrapper_implementation = wrapper_implementation
self.name = getattr(orig, "__name__", "")
self.doc = getattr(orig, "__doc__", None)
self.dim_name = dim_name
self.is_pointwise = False
self.dim_offset = 0
self.keepdim_offset = 1
self.single_dim = False
self.reduce = True
# Update docstring if we have a dim_name
if self.doc and self.dim_name:
self.doc = f"{self.doc}\nArgument '{self.dim_name}' can be either an integer or a torchdim.Dim object.\n"
def function(self) -> Callable:
"""Create a wrapped function that calls our wrapper implementation."""
def wrapped_func(*args: Any, **kwargs: Any) -> Any:
return self.wrapper_implementation(self, *args, **kwargs)
# Copy metadata using functools.update_wrapper for just __name__ and __doc__
functools.update_wrapper(
wrapped_func, self.orig, assigned=("__name__",), updated=()
)
wrapped_func.__doc__ = self.doc
return wrapped_func
def _wrap_dim(dim: Any, ndim: int, keepdim: bool = False) -> DimEntry:
"""Convert single dimension specification to DimEntry object."""
from . import Dim
if isinstance(dim, Dim):
if keepdim:
raise ValueError("cannot preserve first-class dimensions with keepdim=True")
return DimEntry(dim)
elif isinstance(dim, int):
i = dim
while i >= 0:
i -= ndim
return DimEntry(i)
else:
return DimEntry()
def _wrap_dims(dim: Any, ndim: int, keepdim: bool = False) -> list[DimEntry]:
"""Convert dimension specification to list of DimEntry objects."""
de = _wrap_dim(dim, ndim, keepdim)
result = []
if not de.is_none():
result.append(de)
else:
for d in dim:
result.append(_wrap_dim(d, ndim, keepdim))
return result
def patched_dim_method(wrapper: WrappedOperator, *args: Any, **kwargs: Any) -> Any:
"""
This is the core method that handles dimension-aware operations.
"""
if not args:
raise ValueError("Expected at least one argument (self)")
# Get dimension argument
dim_arg = kwargs.get(wrapper.dim_name)
if dim_arg is None and wrapper.dim_offset < len(args):
# Try to get dim from positional args (accounting for self at index 0)
dim_idx = wrapper.dim_offset + 1
if dim_idx < len(args):
dim_arg = args[dim_idx]
# If no dimension argument provided, fall back to standard functorch handling
if dim_arg is None:
info = TensorInfo.create(args[0], ensure_batched=True, ensure_present=False)
if not info:
return wrapper.orig(*args, **kwargs)
with EnableAllLayers(info.levels) as guard:
assert info.batchedtensor is not None
guard.inplace_update_layers(info.batchedtensor, info.levels)
new_args = list(args)
new_args[0] = handle_from_tensor(info.batchedtensor)
result = wrapper.orig(*new_args, **kwargs)
return guard.from_batched(result, info.has_device)
# Handle dimension-aware operation
info = TensorInfo.create(args[0])
if not info:
return wrapper.orig(*args, **kwargs)
# Check for keepdim parameter
keepdim = False
if wrapper.reduce:
keepdim_arg = kwargs.get("keepdim")
if keepdim_arg is None and wrapper.keepdim_offset < len(args):
keepdim_idx = wrapper.keepdim_offset + 1
if keepdim_idx < len(args):
keepdim_arg = args[keepdim_idx]
if keepdim_arg is not None:
keepdim = bool(keepdim_arg)
# Wrap dimensions
ndim = info.ndim()
dims = _wrap_dims(dim_arg, ndim, keepdim)
# Convert dimensions to indices and validate
dim_indices: list[int] = []
seen = [False] * len(info.levels)
for d in dims:
midx = None
for i, level in enumerate(info.levels):
if level == d:
midx = i
break
if midx is None:
# Try to match by position/name more flexibly
for i, level in enumerate(info.levels):
if hasattr(level, "matches") and level.matches(d):
midx = i
break
if midx is None:
level_strs = [str(level) for level in info.levels]
raise ValueError(
f"Tensor with dimensions {level_strs} does not contain {d}"
)
seen[midx] = True
dim_indices.append(midx)
# Determine new levels after reduction
new_levels = []
if wrapper.reduce and not keepdim:
for i, level in enumerate(info.levels):
if not seen[i]:
new_levels.append(level)
else:
new_levels = info.levels[:]
# Create dimension indices for the original function
if len(dim_indices) == 1:
py_indices: Any = dim_indices[0]
else:
py_indices = tuple(dim_indices)
# Update arguments
new_args = list(args)
new_kwargs = kwargs.copy()
assert info.tensor is not None
new_args[0] = handle_from_tensor(info.tensor)
# Update dimension argument
if wrapper.dim_name in new_kwargs:
new_kwargs[wrapper.dim_name] = py_indices
else:
dim_idx = wrapper.dim_offset + 1
if dim_idx < len(new_args):
new_args = list(new_args)
new_args[dim_idx] = py_indices
# Call original function
result = wrapper.orig(*new_args, **new_kwargs)
# Wrap results
def wrap_result(obj: Any) -> Any:
if isinstance(obj, torch.Tensor):
from . import Tensor
return Tensor.from_positional(obj, new_levels, info.has_device)
return obj
return tree_map(wrap_result, result)
def _wrap(
orig: Callable,
dim_offset: Optional[int] = None,
keepdim_offset: Optional[int] = None,
dim_name: Optional[str] = None,
single_dim: Optional[bool] = None,
reduce: Optional[bool] = None,
) -> Callable:
"""
Wrap a PyTorch function to support first-class dimensions.
Args:
orig: Original function to wrap
dim_offset: Offset for dimension argument (default: 0)
keepdim_offset: Offset for keepdim argument (default: 1)
dim_name: Name of dimension parameter (default: "dim")
single_dim: Whether function takes single dimension (default: False)
reduce: Whether function reduces dimensions (default: True)
"""
dim_name = dim_name or "dim"
wrapper = WrappedOperator(orig, patched_dim_method, dim_name)
if dim_offset is not None:
wrapper.dim_offset = dim_offset
if keepdim_offset is not None:
wrapper.keepdim_offset = keepdim_offset
if single_dim is not None:
wrapper.single_dim = single_dim
if reduce is not None:
wrapper.reduce = reduce
return wrapper.function()
def call_torch_function(
wrapper: WrappedOperator,
func: Callable,
types: tuple,
args: tuple = (),
kwargs: Optional[dict] = None,
) -> Any:
"""
Handle __torch_function__ calls for wrapped operators.
"""
if kwargs is None:
kwargs = {}
# Import here to avoid circular imports
from . import _Tensor
# Use the torch function mechanism from _Tensor
return _Tensor.__torch_function__(func, types, args, kwargs)
| WrappedOperator |
python | catalyst-team__catalyst | catalyst/callbacks/metric.py | {
"start": 5005,
"end": 6998
} | class ____(_MetricCallback):
"""BatchMetricCallback implements batch-based metrics update
and computation over loader
Args:
metric: metric to calculate in callback
input_key: keys of tensors that should be used as inputs in metric calculation
target_key: keys of tensors that should be used as targets in metric calculation
log_on_batch: boolean flag to log computed metrics every batch
"""
def __init__(
self,
metric: ICallbackBatchMetric,
input_key: Union[str, Iterable[str], Dict[str, str]],
target_key: Union[str, Iterable[str], Dict[str, str]],
log_on_batch: bool = True,
) -> None:
"""Init BatchMetricCallback"""
super().__init__(metric=metric, input_key=input_key, target_key=target_key)
assert isinstance(metric, ICallbackBatchMetric)
self.log_on_batch = log_on_batch
self._metric_update_method = self.metric.update_key_value
def on_loader_start(self, runner: "IRunner") -> None:
"""On loader start action: reset metric values
Args:
runner: current runner
"""
self.metric.reset()
def on_batch_end(self, runner: "IRunner") -> None:
"""On batch end action: update metric with new batch data
and log it's value if necessary
Args:
runner: current runner
"""
metrics_inputs = self._get_inputs(runner=runner)
metrics = self._update_metric(metrics_inputs)
if self.log_on_batch:
runner.batch_metrics.update(metrics)
def on_loader_end(self, runner: "IRunner") -> None:
"""On loader end action: compute metric values
and update runner's loader metrics with it
Args:
runner: current runner
"""
metrics = self.metric.compute_key_value()
metrics = runner.engine.mean_reduce_ddp_metrics(metrics)
runner.loader_metrics.update(metrics)
| BatchMetricCallback |
python | mlflow__mlflow | mlflow/bedrock/stream.py | {
"start": 6177,
"end": 7875
} | class ____:
"""A helper class to accumulate the chunks of a streaming Converse API response."""
def __init__(self):
self._role = "assistant"
self._text_content_buffer = ""
self._tool_use = {}
self._response = {}
def process_event(self, event_name: str, event_attr: dict[str, Any]):
if event_name == "messageStart":
self._role = event_attr["role"]
elif event_name == "contentBlockStart":
# ContentBlockStart event is only used for tool usage. It carries the tool id
# and the name, but not the input arguments.
self._tool_use = {
# In streaming, input is always string
"input": "",
**event_attr["start"]["toolUse"],
}
elif event_name == "contentBlockDelta":
delta = event_attr["delta"]
if text := delta.get("text"):
self._text_content_buffer += text
if tool_use := delta.get("toolUse"):
self._tool_use["input"] += tool_use["input"]
elif event_name == "contentBlockStop":
pass
elif event_name in {"messageStop", "metadata"}:
self._response.update(event_attr)
else:
_logger.debug(f"Unknown event, skipping: {event_name}")
def build(self) -> dict[str, Any]:
message = {
"role": self._role,
"content": [{"text": self._text_content_buffer}],
}
if self._tool_use:
message["content"].append({"toolUse": self._tool_use})
self._response.update({"output": {"message": message}})
return self._response
| _ConverseMessageBuilder |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_pools.py | {
"start": 3971,
"end": 5867
} | class ____(TestPoolsEndpoint):
def test_get_should_respond_200(self, test_client, session):
self.create_pools()
response = test_client.get(f"/pools/{POOL1_NAME}")
assert response.status_code == 200
assert response.json() == {
"deferred_slots": 0,
"description": None,
"include_deferred": True,
"name": "pool1",
"occupied_slots": 0,
"open_slots": 3,
"queued_slots": 0,
"running_slots": 0,
"scheduled_slots": 0,
"slots": 3,
}
def test_get_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(f"/pools/{POOL1_NAME}")
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get(f"/pools/{POOL1_NAME}")
assert response.status_code == 403
def test_get_should_respond_404(self, test_client):
response = test_client.get(f"/pools/{POOL1_NAME}")
assert response.status_code == 404
body = response.json()
assert f"The Pool with name: `{POOL1_NAME}` was not found" == body["detail"]
def test_get_pool3_should_respond_200(self, test_client, session):
"""Test getting POOL3 with forward slash in name"""
self.create_pools()
response = test_client.get(f"/pools/{POOL3_NAME}")
assert response.status_code == 200
assert response.json() == {
"deferred_slots": 0,
"description": "Some Description",
"include_deferred": False,
"name": "pool3/with_slashes",
"occupied_slots": 0,
"open_slots": 5,
"queued_slots": 0,
"running_slots": 0,
"scheduled_slots": 0,
"slots": 5,
}
| TestGetPool |
python | walkccc__LeetCode | solutions/2305. Fair Distribution of Cookies/2305.py | {
"start": 0,
"end": 411
} | class ____:
def distributeCookies(self, cookies: list[int], k: int) -> int:
ans = math.inf
def dfs(s: int, children: list[int]) -> None:
nonlocal ans
if s == len(cookies):
ans = min(ans, max(children))
return
for i in range(k):
children[i] += cookies[s]
dfs(s + 1, children)
children[i] -= cookies[s]
dfs(0, [0] * k)
return ans
| Solution |
python | great-expectations__great_expectations | great_expectations/render/renderer_configuration.py | {
"start": 2416,
"end": 4144
} | class ____(BaseModel):
"""
_RendererValueBase is the base for renderer classes that need to override the default pydantic dict behavior.
""" # noqa: E501 # FIXME CoP
class Config:
validate_assignment = True
arbitrary_types_allowed = True
@override
def dict( # noqa: PLR0913 # FIXME CoP
self,
include: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None,
exclude: Optional[Union[AbstractSetIntStr, MappingIntStrAny]] = None,
by_alias: bool = True,
skip_defaults: Optional[bool] = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = True,
) -> DictStrAny:
"""
Override BaseModel dict to make the defaults:
- by_alias=True because we have an existing attribute named schema, and schema is already a Pydantic
BaseModel attribute.
- exclude_none=True to ensure that None values aren't included in the json dict.
In practice this means the renderer implementer doesn't need to use .dict(by_alias=True, exclude_none=True)
everywhere.
""" # noqa: E501 # FIXME CoP
return super().dict(
include=include,
exclude=exclude,
by_alias=by_alias,
skip_defaults=skip_defaults,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
RendererParams = TypeVar("RendererParams", bound=_RendererValueBase)
RendererValueTypes: TypeAlias = Union[RendererValueType, List[RendererValueType]]
AddParamArgs: TypeAlias = Tuple[Tuple[str, RendererValueTypes], ...]
| _RendererValueBase |
python | tiangolo__fastapi | tests/test_jsonable_encoder.py | {
"start": 1159,
"end": 1369
} | class ____(BaseModel):
role: Optional[RoleEnum] = None
if PYDANTIC_V2:
model_config = {"use_enum_values": True}
else:
class Config:
use_enum_values = True
| ModelWithConfig |
python | scrapy__scrapy | tests/pipelines.py | {
"start": 173,
"end": 265
} | class ____:
def process_item(self, item):
1 / 0
| ProcessWithZeroDivisionErrorPipeline |
python | huggingface__transformers | src/transformers/models/olmo2/modeling_olmo2.py | {
"start": 15925,
"end": 19052
} | class ____(Olmo2PreTrainedModel):
def __init__(self, config: Olmo2Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Olmo2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Olmo2RotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
| Olmo2Model |
python | sqlalchemy__sqlalchemy | test/engine/test_parseconnect.py | {
"start": 29332,
"end": 30840
} | class ____(fixtures.TestBase):
@fixture
def mock_create(self):
with patch(
"sqlalchemy.engine.create.create_engine",
) as p:
yield p
def test_url_only(self, mock_create):
create_pool_from_url("sqlite://")
mock_create.assert_called_once_with("sqlite://", _initialize=False)
def test_pool_args(self, mock_create):
create_pool_from_url(
"sqlite://",
logging_name="foo",
echo=True,
timeout=42,
recycle=22,
reset_on_return=True,
pre_ping=True,
use_lifo=True,
foo=99,
)
mock_create.assert_called_once_with(
"sqlite://",
pool_logging_name="foo",
echo_pool=True,
pool_timeout=42,
pool_recycle=22,
pool_reset_on_return=True,
pool_pre_ping=True,
pool_use_lifo=True,
foo=99,
_initialize=False,
)
def test_pool_creation(self):
pp = create_pool_from_url("sqlite://")
engine_pool = create_engine("sqlite://").pool
eq_(pp.__class__, engine_pool.__class__)
pp = create_pool_from_url("sqlite://", pre_ping=True)
is_true(pp._pre_ping)
is_false(isinstance(pp, NullPool))
def test_pool_creation_custom_class(self):
pp = create_pool_from_url("sqlite://", poolclass=NullPool)
is_true(isinstance(pp, NullPool))
| CreatePoolTest |
python | walkccc__LeetCode | solutions/2154. Keep Multiplying Found Values by Two/2155-2.py | {
"start": 0,
"end": 242
} | class ____:
def findFinalValue(self, nums: list[int], original: int) -> int:
seen = [False] * 1001
for num in nums:
seen[num] = True
while original < 1001 and seen[original]:
original *= 2
return original
| Solution |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/pygments/lexers/python.py | {
"start": 18271,
"end": 28811
} | class ____(RegexLexer):
"""
For Python 2.x source code.
.. versionchanged:: 2.5
This class has been renamed from ``PythonLexer``. ``PythonLexer`` now
refers to the Python 3 variant. File name patterns like ``*.py`` have
been moved to Python 3 as well.
"""
name = 'Python 2.x'
url = 'https://www.python.org'
aliases = ['python2', 'py2']
filenames = [] # now taken over by PythonLexer (3.x)
mimetypes = ['text/x-python2', 'application/x-python2']
version_added = ''
def innerstring_rules(ttype):
return [
# the old style '%s' % (...) string formatting
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"%\n]+', ttype),
(r'[\'"\\]', ttype),
# unhandled string formatting sign
(r'%', ttype),
# newlines are an error (use "nl" state)
]
tokens = {
'root': [
(r'\n', Whitespace),
(r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
bygroups(Whitespace, String.Affix, String.Doc)),
(r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
bygroups(Whitespace, String.Affix, String.Doc)),
(r'[^\S\n]+', Text),
(r'\A#!.+$', Comment.Hashbang),
(r'#.*$', Comment.Single),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
include('keywords'),
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'fromimport'),
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'import'),
include('builtins'),
include('magicfuncs'),
include('magicvars'),
include('backtick'),
('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
bygroups(String.Affix, String.Double), 'tdqs'),
("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
bygroups(String.Affix, String.Single), 'tsqs'),
('([rR]|[uUbB][rR]|[rR][uUbB])(")',
bygroups(String.Affix, String.Double), 'dqs'),
("([rR]|[uUbB][rR]|[rR][uUbB])(')",
bygroups(String.Affix, String.Single), 'sqs'),
('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
combined('stringescape', 'tdqs')),
("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
combined('stringescape', 'tsqs')),
('([uUbB]?)(")', bygroups(String.Affix, String.Double),
combined('stringescape', 'dqs')),
("([uUbB]?)(')", bygroups(String.Affix, String.Single),
combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(words((
'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
'print', 'raise', 'return', 'try', 'while', 'yield',
'yield from', 'as', 'with'), suffix=r'\b'),
Keyword),
],
'builtins': [
(words((
'__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
'unichr', 'unicode', 'vars', 'xrange', 'zip'),
prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls'
r')\b', Name.Builtin.Pseudo),
(words((
'ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
'MemoryError', 'NameError',
'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning',
'PendingDeprecationWarning', 'ReferenceError',
'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration',
'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning',
'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
Name.Exception),
],
'magicfuncs': [
(words((
'__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
'__complex__', '__contains__', '__del__', '__delattr__', '__delete__',
'__delitem__', '__delslice__', '__div__', '__divmod__', '__enter__',
'__eq__', '__exit__', '__float__', '__floordiv__', '__ge__', '__get__',
'__getattr__', '__getattribute__', '__getitem__', '__getslice__', '__gt__',
'__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__',
'__ilshift__', '__imod__', '__imul__', '__index__', '__init__',
'__instancecheck__', '__int__', '__invert__', '__iop__', '__ior__',
'__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__',
'__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__',
'__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__',
'__nonzero__', '__oct__', '__op__', '__or__', '__pos__', '__pow__',
'__radd__', '__rand__', '__rcmp__', '__rdiv__', '__rdivmod__', '__repr__',
'__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__',
'__rop__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__',
'__setslice__', '__str__', '__sub__', '__subclasscheck__', '__truediv__',
'__unicode__', '__xor__'), suffix=r'\b'),
Name.Function.Magic),
],
'magicvars': [
(words((
'__bases__', '__class__', '__closure__', '__code__', '__defaults__',
'__dict__', '__doc__', '__file__', '__func__', '__globals__',
'__metaclass__', '__module__', '__mro__', '__name__', '__self__',
'__slots__', '__weakref__'),
suffix=r'\b'),
Name.Variable.Magic),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@[\w.]+', Name.Decorator),
(r'[a-zA-Z_]\w*', Name),
],
'funcname': [
include('magicfuncs'),
(r'[a-zA-Z_]\w*', Name.Function, '#pop'),
default('#pop'),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'(?:[ \t]|\\\n)+', Text),
(r'as\b', Keyword.Namespace),
(r',', Operator),
(r'[a-zA-Z_][\w.]*', Name.Namespace),
default('#pop') # all else: go back
],
'fromimport': [
(r'(?:[ \t]|\\\n)+', Text),
(r'import\b', Keyword.Namespace, '#pop'),
# if None occurs here, it's "raise x from None", since None can
# never be a module name
(r'None\b', Name.Builtin.Pseudo, '#pop'),
# sadly, in "raise x from y" y will be highlighted as namespace too
(r'[a-zA-Z_.][\w.]*', Name.Namespace),
# anything else here also means "raise x from y" and is therefore
# not an error
default('#pop'),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings-single': innerstring_rules(String.Single),
'strings-double': innerstring_rules(String.Double),
'dqs': [
(r'"', String.Double, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include('strings-double')
],
'sqs': [
(r"'", String.Single, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include('strings-single')
],
'tdqs': [
(r'"""', String.Double, '#pop'),
include('strings-double'),
(r'\n', String.Double)
],
'tsqs': [
(r"'''", String.Single, '#pop'),
include('strings-single'),
(r'\n', String.Single)
],
}
def analyse_text(text):
return shebang_matches(text, r'pythonw?2(\.\d)?')
| Python2Lexer |
python | streamlit__streamlit | lib/tests/streamlit/config_util_test.py | {
"start": 10665,
"end": 53628
} | class ____(unittest.TestCase):
"""Test theme inheritance utility functions."""
def setUp(self):
self.config_template = CONFIG_OPTIONS_TEMPLATE
def _get_expected_theme_options_count(self, section: str = "theme") -> int:
"""
Get the expected count of theme options by directly counting from config template.
This uses the config template as the source of truth, ensuring the test
stays in sync with the actual theme options defined in the system.
"""
# Count theme options directly from the config template
# (not using the function under test to avoid circular logic)
theme_option_count = 0
for option_key in self.config_template:
if option_key.startswith(section):
parts = option_key.split(".")
# In the case we are counting theme options (e.g. "theme.primaryColor"):
if section == "theme" and len(parts) == 2:
theme_option_count += 1
# In the case we are counting sidebar options (e.g. "theme.sidebar.primaryColor"):
if section == "theme.sidebar" and len(parts) == 3:
theme_option_count += 1
return theme_option_count
# Tests for theme.base support functions
@parameterized.expand(
[
("#ffffff", "theme.primaryColor"),
("#000", "theme.primaryColor"),
("#FF0000", "theme.primaryColor"),
("#ffff", "theme.primaryColor"), # 4-digit hex with alpha
("#ffffffff", "theme.primaryColor"), # 8-digit hex with alpha
("rgb(255, 0, 0)", "theme.primaryColor"),
("rgba(255, 0, 0, 0.5)", "theme.primaryColor"),
(" #ff0000 ", "theme.primaryColor"), # Test trimming
]
)
def test_check_color_value_valid_single_colors(self, color: str, option_name: str):
"""Test _check_color_value with various valid single color formats."""
# Should return None and not log any warnings
with patch("streamlit.config_util._get_logger") as mock_get_logger:
mock_logger = mock_get_logger.return_value
result = config_util._check_color_value(color, option_name)
assert result is None
mock_logger.warning.assert_not_called()
@parameterized.expand(
[
(["#ff0000", "#00ff00", "#0000ff"], "theme.chartCategoricalColors"),
(
["rgb(255,0,0)", "rgba(0,255,0,0.5)", "#fff"],
"theme.chartSequentialColors",
),
(["#ffffff"], "theme.chartCategoricalColors"), # Single item array
]
)
def test_check_color_value_valid_color_arrays(
self, colors: list[str], option_name: str
):
"""Test _check_color_value with valid color arrays."""
# Should return None and not log any warnings
with patch("streamlit.config_util._get_logger") as mock_get_logger:
mock_logger = mock_get_logger.return_value
result = config_util._check_color_value(colors, option_name)
assert result is None
mock_logger.warning.assert_not_called()
@parameterized.expand(
[
("#invalid", "theme.primaryColor"),
("#ff", "theme.primaryColor"),
("#12345", "theme.primaryColor"), # Wrong length
("not-a-color", "theme.primaryColor"),
]
)
def test_check_color_value_invalid_single_colors_logs_warning(
self, color: str, option_name: str
):
"""Test _check_color_value logs warnings for invalid single colors but doesn't raise error."""
with patch("streamlit.config_util._get_logger") as mock_get_logger:
mock_logger = mock_get_logger.return_value
result = config_util._check_color_value(color, option_name)
# Should return None but log a warning
assert result is None
mock_logger.warning.assert_called_once()
# Check warning message contains expected content
warning_args = mock_logger.warning.call_args[0]
assert option_name in warning_args[1]
assert color in warning_args[2]
def test_check_color_value_invalid_colors_in_array_logs_warnings(self):
"""Test _check_color_value logs warnings for invalid colors in arrays."""
colors = ["#ff0000", "#invalid", "#00ff00", "not-a-color"]
with patch("streamlit.config_util._get_logger") as mock_get_logger:
mock_logger = mock_get_logger.return_value
result = config_util._check_color_value(
colors, "theme.chartCategoricalColors"
)
# Should return None but log warnings for invalid colors
assert result is None
# Should log 2 warnings (for "#invalid" and "not-a-color")
assert mock_logger.warning.call_count == 2
@parameterized.expand(
[
("", "theme.primaryColor", "cannot be empty"),
(123, "theme.primaryColor", "must be a string"),
(None, "theme.primaryColor", "must be a string"),
([], "theme.chartCategoricalColors", "cannot be an empty array"),
(["", "#ff0000"], "theme.chartCategoricalColors", "cannot be empty"),
([123, "#ff0000"], "theme.chartCategoricalColors", "must be a string"),
]
)
def test_check_color_value_type_and_empty_errors(
self, value, option_name: str, expected_error: str
):
"""Test _check_color_value raises exceptions for type errors and empty values."""
with pytest.raises(StreamlitInvalidThemeOptionError) as cm:
config_util._check_color_value(value, option_name)
assert expected_error in str(cm.value)
def test_iter_theme_config_options(self):
"""Test _iterate_theme_config_options extracts theme options correctly."""
mock_config_options = {
"theme.primaryColor": ConfigOption(
"theme.primaryColor", description="", default_val="#ff0000"
),
"theme.backgroundColor": ConfigOption(
"theme.backgroundColor", description="", default_val=None
), # Should be excluded
"server.port": ConfigOption(
"server.port", description="", default_val=8501
), # Should be excluded
"theme.font": ConfigOption(
"theme.font", description="", default_val="serif"
),
"theme.sidebar.primaryColor": ConfigOption(
"theme.sidebar.primaryColor", description="", default_val="#00ff00"
),
}
# Set values for some options
mock_config_options["theme.primaryColor"].set_value("#ff0000", "test")
mock_config_options["theme.font"].set_value("serif", "test")
mock_config_options["theme.sidebar.primaryColor"].set_value("#00ff00", "test")
result = list(config_util._iterate_theme_config_options(mock_config_options))
# Should only include theme options with non-None values (returns full key names)
expected_keys = {
"theme.primaryColor",
"theme.font",
"theme.sidebar.primaryColor",
}
actual_keys = {key for key, value in result}
assert actual_keys == expected_keys
# Check values
result_dict = dict(result)
assert result_dict["theme.primaryColor"] == "#ff0000"
assert result_dict["theme.font"] == "serif"
assert result_dict["theme.sidebar.primaryColor"] == "#00ff00"
def test_extract_current_theme_config(self):
"""Test _extract_current_theme_config extracts theme config correctly."""
mock_config_options = {
"theme.primaryColor": ConfigOption(
"theme.primaryColor", description="", default_val=None
),
"theme.backgroundColor": ConfigOption(
"theme.backgroundColor", description="", default_val=None
),
"theme.sidebar.primaryColor": ConfigOption(
"theme.sidebar.primaryColor", description="", default_val=None
),
"server.port": ConfigOption(
"server.port", description="", default_val=8501
), # Should be excluded
}
# Set values for theme options
mock_config_options["theme.primaryColor"].set_value("#ff0000", "test")
mock_config_options["theme.backgroundColor"].set_value("#000000", "test")
mock_config_options["theme.sidebar.primaryColor"].set_value("#00ff00", "test")
mock_config_options["server.port"].set_value(9000, "test") # Non-theme option
result = config_util._extract_current_theme_config(mock_config_options)
expected = {
"primaryColor": "#ff0000",
"backgroundColor": "#000000",
"sidebar": {"primaryColor": "#00ff00"},
}
assert result == expected
def test_extract_current_theme_config_complex_nesting(self):
"""Test _extract_current_theme_config with section and subsection nesting."""
mock_config_options = {
"theme.light.primaryColor": ConfigOption(
"theme.light.primaryColor", description="", default_val=None
),
"theme.dark.primaryColor": ConfigOption(
"theme.dark.primaryColor", description="", default_val=None
),
"theme.sidebar.primaryColor": ConfigOption(
"theme.sidebar.primaryColor", description="", default_val=None
),
"theme.light.sidebar.primaryColor": ConfigOption(
"theme.light.sidebar.primaryColor", description="", default_val=None
),
"theme.dark.sidebar.primaryColor": ConfigOption(
"theme.dark.sidebar.primaryColor", description="", default_val=None
),
}
mock_config_options["theme.light.primaryColor"].set_value("#0000ff", "test")
mock_config_options["theme.dark.primaryColor"].set_value("#ffff00", "test")
mock_config_options["theme.sidebar.primaryColor"].set_value("#00ff00", "test")
mock_config_options["theme.light.sidebar.primaryColor"].set_value(
"#ff0000", "test"
)
mock_config_options["theme.dark.sidebar.primaryColor"].set_value(
"#00ff00", "test"
)
result = config_util._extract_current_theme_config(mock_config_options)
expected = {
"light": {
"primaryColor": "#0000ff",
"sidebar": {"primaryColor": "#ff0000"},
},
"dark": {
"primaryColor": "#ffff00",
"sidebar": {"primaryColor": "#00ff00"},
},
"sidebar": {
"primaryColor": "#00ff00",
},
}
assert result == expected
def test_extract_current_theme_config_no_theme_options(self):
"""Test _extract_current_theme_config with no theme options set."""
mock_config_options = {
"server.port": ConfigOption(
"server.port", description="", default_val=8501
),
"theme.primaryColor": ConfigOption(
"theme.primaryColor", description="", default_val=None
), # None value
}
mock_config_options["server.port"].set_value(9000, "test")
# Don't set theme.primaryColor value (should remain None)
result = config_util._extract_current_theme_config(mock_config_options)
assert result == {}
def test_get_valid_theme_options(self):
"""Test that _get_valid_theme_options extracts all valid theme options."""
main_options, _ = config_util._get_valid_theme_options(self.config_template)
# Test subset of expected core theme options
expected_options = {
"base",
"baseFontWeight",
"baseRadius",
"primaryColor",
"textColor",
"font",
"headingFontSizes",
"borderColor",
"linkUnderline",
"chartCategoricalColors",
"violetColor",
"violetBackgroundColor",
"violetTextColor",
}
assert expected_options.issubset(main_options)
# Does not include section names
assert "sidebar" not in main_options
# Test that all options are strings
for option in main_options:
assert isinstance(option, str)
def test_get_valid_theme_options_main_section(self):
"""Test _get_valid_theme_options for main section specifically."""
main_options, _ = config_util._get_valid_theme_options(self.config_template)
# These should be in main section
expected_main_only = {
"base",
"baseFontWeight",
"baseRadius",
"primaryColor",
"textColor",
"font",
"headingFontSizes",
"borderColor",
"linkUnderline",
"chartSequentialColors",
"blueColor",
"blueBackgroundColor",
"blueTextColor",
}
assert expected_main_only.issubset(main_options)
# Test that we get the expected number of theme options
# Use the config template as source of truth for expected theme options count
expected_count = self._get_expected_theme_options_count()
assert len(main_options) == expected_count, (
f"Expected {expected_count} main theme options based on config template, "
f"but got {len(main_options)}"
)
def test_get_valid_theme_options_section(self):
"""Test _get_valid_theme_options for sections (sidebar, light, dark) specifically."""
_, section_options = config_util._get_valid_theme_options(self.config_template)
# These are some options that should be in sidebar section
expected_sidebar = {
# The only config with base in the name that should be in sidebar
"baseRadius",
"primaryColor",
"textColor",
"font",
"headingFontSizes",
"borderColor",
"linkUnderline",
"codeFontWeight",
"greenColor",
"greenBackgroundColor",
"greenTextColor",
}
assert expected_sidebar.issubset(section_options)
# These are all the options that should NOT be in sidebar section
main_only_options = {
"base",
"baseFontSize",
"baseFontWeight",
"fontFaces",
"showSidebarBorder",
"chartCategoricalColors",
"chartSequentialColors",
}
assert main_only_options.isdisjoint(section_options)
# Test that we get the expected number of theme.sidebar options
expected_count = self._get_expected_theme_options_count(section="theme.sidebar")
assert len(section_options) == expected_count, (
f"Expected {expected_count} theme.sidebar options based on config template, "
f"but got {len(section_options)}"
)
def test_validate_theme_file_content_with_valid_content(self):
"""Test validation of valid theme file content."""
theme_content = {
"theme": {
"base": "dark",
"primaryColor": "#ff0000",
"backgroundColor": "#000000",
"sidebar": {"primaryColor": "#00ff00", "backgroundColor": "#111111"},
}
}
# Should not raise any exception and return filtered theme
filtered_theme = config_util._validate_theme_file_content(
theme_content, "test_theme.toml", self.config_template
)
# Should return the same content since all options are valid
assert filtered_theme == theme_content
def test_validate_theme_file_content_invalid_option(self):
"""Test validation logs warning for invalid theme options."""
theme_content = {"theme": {"invalidOption": "value", "primaryColor": "#ff0000"}}
# Mock the logger returned by _get_logger
with patch("streamlit.config_util._get_logger") as mock_get_logger:
mock_logger = mock_get_logger.return_value
filtered_theme = config_util._validate_theme_file_content(
theme_content, "test_theme.toml", self.config_template
)
mock_logger.warning.assert_called_once()
# Check the warning call arguments
warning_call = mock_logger.warning.call_args
format_string = warning_call[0][0]
args = warning_call[0][1:]
# Verify content in the format string and args
assert "invalid theme option" in format_string
assert "test_theme.toml" in args[0] # file_path_or_url
assert "theme.invalidOption" in args[1] # full_option_name
assert "theme" in args[2] # section_name
# Verify invalid option was removed from filtered theme
assert "invalidOption" not in filtered_theme["theme"]
# Verify valid option was preserved
assert filtered_theme["theme"]["primaryColor"] == "#ff0000"
def test_validate_theme_file_content_invalid_section(self):
"""Test validation rejects invalid theme sections."""
theme_content = {
"theme": {
"primaryColor": "#ff0000",
"invalidSection": {"primaryColor": "#00ff00"},
}
}
with pytest.raises(StreamlitInvalidThemeSectionError) as cm:
config_util._validate_theme_file_content(
theme_content, "test_theme.toml", self.config_template
)
assert "Invalid theme section" in str(cm.value)
assert "invalidSection" in str(cm.value)
def test_validate_theme_file_content_invalid_section_option(self):
"""Test validation triggers warning for invalid section options."""
theme_content = {
"theme": {
"primaryColor": "#ff0000",
"light": {
"invalidSectionOption": "value",
},
}
}
with patch("streamlit.config_util._get_logger") as mock_get_logger:
mock_logger = mock_get_logger.return_value
filtered_theme = config_util._validate_theme_file_content(
theme_content, "test_theme.toml", self.config_template
)
mock_logger.warning.assert_called_once()
warning_call = mock_logger.warning.call_args
format_string = warning_call[0][0]
args = warning_call[0][1:]
assert "invalid theme option" in format_string
assert "test_theme.toml" in args[0] # file_path_or_url
assert "theme.light.invalidSectionOption" in args[1] # full_option_name
assert "light" in args[2] # section_name
# Verify invalid section option was removed from filtered theme
assert "invalidSectionOption" not in filtered_theme["theme"]["light"]
# Verify valid main option was preserved
assert filtered_theme["theme"]["primaryColor"] == "#ff0000"
def test_validate_theme_file_content_invalid_subsection(self):
"""Test validation rejects invalid theme subsections."""
theme_content = {
"theme": {
"primaryColor": "#ff0000",
"light": {
"primaryColor": "#00ff00",
"invalidSubsection": {"primaryColor": "#0000ff"},
},
}
}
with pytest.raises(StreamlitInvalidThemeSectionError) as cm:
config_util._validate_theme_file_content(
theme_content, "test_theme.toml", self.config_template
)
assert "Invalid theme section" in str(cm.value)
assert "light.invalidSubsection" in str(cm.value)
def test_validate_theme_file_content_invalid_subsection_option(self):
"""Test validation triggers warning for invalid subsection options."""
theme_content = {
"theme": {
"primaryColor": "#ff0000",
"dark": {
"sidebar": {
"invalidSubsectionOption": "value",
},
},
"sidebar": {
"primaryColor": "#00ff00",
},
}
}
with patch("streamlit.config_util._get_logger") as mock_get_logger:
mock_logger = mock_get_logger.return_value
filtered_theme = config_util._validate_theme_file_content(
theme_content, "test_theme.toml", self.config_template
)
mock_logger.warning.assert_called_once()
warning_call = mock_logger.warning.call_args
format_string = warning_call[0][0]
args = warning_call[0][1:]
assert "invalid theme option" in format_string
assert "test_theme.toml" in args[0] # file_path_or_url
assert (
"theme.dark.sidebar.invalidSubsectionOption" in args[1]
) # full_option_name
assert "dark.sidebar" in args[2] # section_name
# Verify invalid subsection option was removed from filtered theme
assert (
"invalidSubsectionOption" not in filtered_theme["theme"]["dark"]["sidebar"]
)
# Verify valid main option was preserved
assert filtered_theme["theme"]["primaryColor"] == "#ff0000"
def test_validate_theme_file_content_invalid_sidebar_option(self):
"""Test validation rejects invalid sidebar options."""
theme_content = {
"theme": {
"primaryColor": "#ff0000",
"sidebar": {"invalidSidebarOption": "value"},
}
}
# Mock the logger returned by _get_logger
with patch("streamlit.config_util._get_logger") as mock_get_logger:
mock_logger = mock_get_logger.return_value
filtered_theme = config_util._validate_theme_file_content(
theme_content, "test_theme.toml", self.config_template
)
mock_logger.warning.assert_called_once()
warning_call = mock_logger.warning.call_args
format_string = warning_call[0][0]
args = warning_call[0][1:]
assert "invalid theme option" in format_string
assert "test_theme.toml" in args[0] # file_path_or_url
assert "theme.sidebar.invalidSidebarOption" in args[1] # full_option_name
assert "sidebar" in args[2] # section_name
# Verify invalid sidebar option was removed from filtered theme
assert "invalidSidebarOption" not in filtered_theme["theme"]["sidebar"]
# Verify valid main option was preserved
assert filtered_theme["theme"]["primaryColor"] == "#ff0000"
def test_validate_theme_file_content_invalid_main_option_in_sidebar(self):
"""Test validation rejects main-theme-only options in sidebar."""
# Test each main-only option individually
main_only_options = {
"base": "#ffffff",
"baseFontSize": "16px",
"baseFontWeight": "bold",
"fontFaces": "Arial, sans-serif",
"showSidebarBorder": True,
"chartCategoricalColors": ["#ff0000", "#00ff00", "#0000ff"],
"chartSequentialColors": ["#ff0000", "#00ff00", "#0000ff"],
}
for main_only_option, option_value in main_only_options.items():
theme_content = {
"theme": {
"primaryColor": "#ff0000",
"sidebar": {main_only_option: option_value},
}
}
with patch("streamlit.config_util._get_logger") as mock_get_logger:
mock_logger = mock_get_logger.return_value
filtered_theme = config_util._validate_theme_file_content(
theme_content, "test_theme.toml", self.config_template
)
mock_logger.warning.assert_called_once()
# Check the warning call arguments
warning_call = mock_logger.warning.call_args
format_string = warning_call[0][0]
args = warning_call[0][1:]
# Verify content in the format string and args
assert "invalid theme option" in format_string
assert "test_theme.toml" in args[0] # file_path_or_url
assert f"theme.sidebar.{main_only_option}" in args[1] # full_option_name
assert "sidebar" in args[2] # section_name
# Verify invalid sidebar option was removed from filtered theme
assert main_only_option not in filtered_theme["theme"]["sidebar"]
# Verify valid main option was preserved
assert filtered_theme["theme"]["primaryColor"] == "#ff0000"
def test_load_theme_file_missing_toml(self):
"""Test _load_theme_file when toml module is missing."""
# Mock the import toml statement to raise ImportError
with patch.dict("sys.modules", {"toml": None}):
with pytest.raises(StreamlitAPIException) as cm:
config_util._load_theme_file("theme.toml", self.config_template)
assert "toml' package is required" in str(cm.value)
def test_load_theme_file_local_success(self):
"""Test loading theme file from local path successfully."""
theme_toml = """
[theme]
base = "light"
primaryColor = "#0066cc"
backgroundColor = "#ffffff"
"""
with tempfile.NamedTemporaryFile(mode="w", suffix=".toml", delete=False) as f:
f.write(theme_toml)
temp_path = f.name
try:
result = config_util._load_theme_file(temp_path, self.config_template)
assert result["theme"]["base"] == "light"
assert result["theme"]["primaryColor"] == "#0066cc"
assert result["theme"]["backgroundColor"] == "#ffffff"
finally:
os.unlink(temp_path)
@patch("streamlit.config_util.url_util.is_url")
@patch("streamlit.config_util.urllib.request.urlopen")
def test_load_theme_file_url_success(self, mock_urlopen, mock_is_url):
"""Test loading theme file from URL successfully."""
mock_is_url.return_value = True
theme_toml = """
[theme]
base = "dark"
primaryColor = "#ff0000"
"""
mock_response = MagicMock()
mock_response.read.return_value = theme_toml.encode("utf-8")
mock_response.__enter__.return_value = mock_response
mock_urlopen.return_value = mock_response
result = config_util._load_theme_file(
"https://example.com/theme.toml", self.config_template
)
assert result["theme"]["base"] == "dark"
assert result["theme"]["primaryColor"] == "#ff0000"
@patch("streamlit.config_util.os.path.exists")
def test_load_theme_file_missing_file(self, mock_exists):
"""Test _load_theme_file with missing local file."""
mock_exists.return_value = False
with pytest.raises(FileNotFoundError) as cm:
config_util._load_theme_file("missing_theme.toml", self.config_template)
assert "Theme file not found" in str(cm.value)
@patch("streamlit.config_util.url_util.is_url")
@patch("streamlit.config_util.urllib.request.urlopen")
def test_load_theme_file_url_error(self, mock_urlopen, mock_is_url):
"""Test loading theme file from URL with network error."""
mock_is_url.return_value = True
import urllib.error
mock_urlopen.side_effect = urllib.error.URLError("Network error")
with pytest.raises(StreamlitAPIException) as cm:
config_util._load_theme_file(
"https://example.com/theme.toml", self.config_template
)
assert "Could not load theme file from URL" in str(cm.value)
def test_load_theme_file_missing_theme_section(self):
"""Test loading theme file without [theme] section."""
content_toml = """
[server]
port = 8501
"""
with tempfile.NamedTemporaryFile(mode="w", suffix=".toml", delete=False) as f:
f.write(content_toml)
temp_path = f.name
try:
with pytest.raises(StreamlitAPIException) as cm:
config_util._load_theme_file(temp_path, self.config_template)
assert "must contain a [theme] section" in str(cm.value)
finally:
os.unlink(temp_path)
def test_load_theme_file_invalid_toml(self):
"""Test loading theme file with invalid TOML syntax."""
invalid_toml = """
[theme
base = "dark"
primaryColor = "#ff0000"
"""
with tempfile.NamedTemporaryFile(mode="w", suffix=".toml", delete=False) as f:
f.write(invalid_toml)
temp_path = f.name
try:
with pytest.raises(StreamlitAPIException) as cm:
config_util._load_theme_file(temp_path, self.config_template)
assert "Error loading theme file" in str(cm.value)
finally:
os.unlink(temp_path)
def test_load_theme_file_too_large_raises_error(self):
"""Test loading theme file that exceeds size limit raises error."""
# Create a theme file larger than 1MB
large_content = (
"""
[theme]
base = "light"
primaryColor = "#ff0000"
# """
+ "A" * (config_util._MAX_THEME_FILE_SIZE_BYTES + 1000)
)
with tempfile.NamedTemporaryFile(mode="w", suffix=".toml", delete=False) as f:
f.write(large_content)
temp_path = f.name
try:
with pytest.raises(StreamlitAPIException) as cm:
config_util._load_theme_file(temp_path, self.config_template)
error_msg = str(cm.value)
assert "too large" in error_msg
assert "1MB" in error_msg
assert "configuration options" in error_msg
finally:
os.unlink(temp_path)
def test_apply_theme_inheritance_basic(self):
"""Test basic theme inheritance merging."""
base_theme = {
"theme": {
"base": "dark",
"primaryColor": "#ff0000",
"backgroundColor": "#000000",
}
}
override_theme = {
"theme": {
"primaryColor": "#00ff00", # Override
"textColor": "#ffffff", # New option
}
}
result = config_util._apply_theme_inheritance(base_theme, override_theme)
# Base value should remain
assert result["theme"]["base"] == "dark"
assert result["theme"]["backgroundColor"] == "#000000"
# Override values should take precedence
assert result["theme"]["primaryColor"] == "#00ff00"
assert result["theme"]["textColor"] == "#ffffff"
def test_apply_theme_inheritance_nested(self):
"""Test theme inheritance with nested sections."""
base_theme = {
"theme": {
"primaryColor": "#ff0000",
"sidebar": {"primaryColor": "#ff4444", "backgroundColor": "#222222"},
}
}
override_theme = {
"theme": {
"sidebar": {
"primaryColor": "#00ff00" # Override sidebar primary
}
}
}
result = config_util._apply_theme_inheritance(base_theme, override_theme)
# Main theme unchanged
assert result["theme"]["primaryColor"] == "#ff0000"
# Sidebar primary overridden
assert result["theme"]["sidebar"]["primaryColor"] == "#00ff00"
# Sidebar background preserved
assert result["theme"]["sidebar"]["backgroundColor"] == "#222222"
def test_apply_theme_inheritance_new_section(self):
"""Test theme inheritance adds new sections."""
base_theme = {"theme": {"primaryColor": "#ff0000"}}
override_theme = {"theme": {"sidebar": {"primaryColor": "#00ff00"}}}
result = config_util._apply_theme_inheritance(base_theme, override_theme)
assert result["theme"]["primaryColor"] == "#ff0000"
assert result["theme"]["sidebar"]["primaryColor"] == "#00ff00"
def test_process_theme_inheritance_builtin_base(self):
"""Test process_theme_inheritance with builtin base ('light' or 'dark')."""
base_option = ConfigOption("theme.base", description="", default_val="dark")
base_option.set_value("dark", "test")
config_options = {"theme.base": base_option}
set_option_mock = MagicMock()
# Should return early since 'dark' is a builtin theme
config_util.process_theme_inheritance(
config_options, self.config_template, set_option_mock
)
set_option_mock.assert_not_called()
def test_process_theme_inheritance_no_base(self):
"""Test process_theme_inheritance when no base is set."""
config_options = {
"theme.primaryColor": ConfigOption(
"theme.primaryColor", description="", default_val=None
)
}
set_option_mock = MagicMock()
# Should return early without doing anything
config_util.process_theme_inheritance(
config_options, self.config_template, set_option_mock
)
set_option_mock.assert_not_called()
@patch("streamlit.config_util._load_theme_file")
def test_process_theme_inheritance_successful_merge(self, mock_load_theme):
"""Test successful theme inheritance processing."""
base_option = ConfigOption("theme.base", description="", default_val=None)
base_option.set_value("custom_theme.toml", "test")
primary_option = ConfigOption(
"theme.primaryColor", description="", default_val=None
)
primary_option.set_value("#override", "config.toml")
config_options = {
"theme.base": base_option,
"theme.primaryColor": primary_option,
}
# Mock loaded theme file
mock_load_theme.return_value = {
"theme": {
"base": "dark",
"primaryColor": "#base_color",
"backgroundColor": "#from_theme_file",
}
}
set_option_calls = []
def mock_set_option(key, value, source):
set_option_calls.append((key, value, source))
config_util.process_theme_inheritance(
config_options, self.config_template, mock_set_option
)
# Verify that theme options were set correctly
set_calls_dict = {call[0]: call[1] for call in set_option_calls}
# Base should be set from theme file
assert set_calls_dict.get("theme.base") == "dark"
# Background should come from theme file
assert set_calls_dict.get("theme.backgroundColor") == "#from_theme_file"
# Primary color should be the merged result (config override wins)
assert set_calls_dict.get("theme.primaryColor") == "#override"
@patch("streamlit.config_util._load_theme_file")
def test_process_theme_inheritance_successful_complex_merge(self, mock_load_theme):
"""Test successful theme inheritance processing with a complex merge."""
base_option = ConfigOption("theme.base", description="", default_val=None)
base_option.set_value("custom_theme.toml", "test")
primary_option = ConfigOption(
"theme.primaryColor", description="", default_val=None
)
primary_option.set_value("#override", "config.toml")
light_option = ConfigOption(
"theme.light.linkColor", description="", default_val=None
)
light_option.set_value("#light_link_override", "config.toml")
sidebar_option = ConfigOption(
"theme.sidebar.primaryColor", description="", default_val=None
)
sidebar_option.set_value("#sidebar_override", "config.toml")
sidebar_light_option = ConfigOption(
"theme.light.sidebar.borderColor", description="", default_val=None
)
sidebar_light_option.set_value("#sidebar_light_override", "config.toml")
config_options = {
"theme.base": base_option,
"theme.primaryColor": primary_option,
"theme.light.linkColor": light_option,
"theme.sidebar.primaryColor": sidebar_option,
"theme.light.sidebar.borderColor": sidebar_light_option,
}
# Mock loaded theme file
mock_load_theme.return_value = {
"theme": {
"base": "dark",
"primaryColor": "#base_color",
"backgroundColor": "#from_theme_file",
"light": {
"primaryColor": "#light_primary_color",
"linkColor": "#light_link_color",
"sidebar": {
"borderColor": "#light_sidebar_border_color",
},
},
"dark": {
"primaryColor": "#dark_primary_color",
"linkColor": "#dark_link_color",
"sidebar": {
"borderColor": "#dark_sidebar_border_color",
},
},
"sidebar": {
"primaryColor": "#sidebar_base_color",
},
}
}
set_option_calls = []
def mock_set_option(key, value, source):
set_option_calls.append((key, value, source))
config_util.process_theme_inheritance(
config_options, self.config_template, mock_set_option
)
# Verify that theme options were set correctly
set_calls_dict = {call[0]: call[1] for call in set_option_calls}
# Base should be set from theme file
assert set_calls_dict.get("theme.base") == "dark"
# Theme and sidebar primary colors should be the merged result (config override wins)
assert set_calls_dict.get("theme.primaryColor") == "#override"
assert set_calls_dict.get("theme.sidebar.primaryColor") == "#sidebar_override"
# Background color should come from base theme file
assert set_calls_dict.get("theme.backgroundColor") == "#from_theme_file"
# Config options should include the new section/subsections
assert set_calls_dict.get("theme.light.primaryColor") == "#light_primary_color"
assert set_calls_dict.get("theme.light.linkColor") == "#light_link_override"
assert set_calls_dict.get("theme.dark.primaryColor") == "#dark_primary_color"
assert set_calls_dict.get("theme.dark.linkColor") == "#dark_link_color"
assert (
# override from config.toml should apply in subsubsection
set_calls_dict.get("theme.light.sidebar.borderColor")
== "#sidebar_light_override"
)
assert (
set_calls_dict.get("theme.dark.sidebar.borderColor")
== "#dark_sidebar_border_color"
)
@patch("streamlit.config_util._load_theme_file")
def test_process_theme_inheritance_nested_sections(self, mock_load_theme):
"""Test process_theme_inheritance with nested sections."""
base_option = ConfigOption("theme.base", description="", default_val=None)
base_option.set_value("custom_theme.toml", "test")
primary_option = ConfigOption(
"theme.primaryColor", description="", default_val=None
)
primary_option.set_value("#override", "config.toml")
sidebar_dark_primary_option = ConfigOption(
"theme.dark.sidebar.primaryColor", description="", default_val=None
)
sidebar_dark_primary_option.set_value("#sidebar_dark_override", "test")
config_options = {
"theme.base": base_option,
"theme.primaryColor": primary_option,
"theme.dark.sidebar.primaryColor": sidebar_dark_primary_option,
}
# Mock loaded theme file
mock_load_theme.return_value = {
"theme": {
"base": "dark",
"primaryColor": "#base_color",
"light": {
"primaryColor": "#light_color",
"sidebar": {
"primaryColor": "#sidebar_light_color",
},
},
"dark": {
"primaryColor": "#dark_color",
"sidebar": {
"primaryColor": "#sidebar_dark_color",
},
},
"sidebar": {
"primaryColor": "#sidebar_color",
},
}
}
set_option_calls = []
def mock_set_option(key, value, source):
set_option_calls.append((key, value, source))
config_util.process_theme_inheritance(
config_options, self.config_template, mock_set_option
)
set_calls_dict = {call[0]: call[1] for call in set_option_calls}
assert set_calls_dict.get("theme.base") == "dark"
assert set_calls_dict.get("theme.primaryColor") == "#override"
assert set_calls_dict.get("theme.light.primaryColor") == "#light_color"
assert set_calls_dict.get("theme.dark.primaryColor") == "#dark_color"
assert (
set_calls_dict.get("theme.light.sidebar.primaryColor")
== "#sidebar_light_color"
)
assert (
set_calls_dict.get("theme.dark.sidebar.primaryColor")
== "#sidebar_dark_override"
)
@patch("streamlit.config_util._load_theme_file")
def test_process_theme_inheritance_nested_base_error(self, mock_load_theme):
"""Test process_theme_inheritance detects nested base references."""
base_option = ConfigOption("theme.base", description="", default_val=None)
base_option.set_value("custom_theme.toml", "test")
config_options = {"theme.base": base_option}
# Mock theme file with nested base reference
mock_load_theme.return_value = {
"theme": {
"base": "other_theme.toml", # Nested reference!
"primaryColor": "#ff0000",
}
}
set_option_mock = MagicMock()
with pytest.raises(StreamlitAPIException) as cm:
config_util.process_theme_inheritance(
config_options, self.config_template, set_option_mock
)
assert "cannot reference another theme file" in str(cm.value)
| ThemeInheritanceUtilTest |
python | getsentry__sentry | src/sentry/models/search_common.py | {
"start": 27,
"end": 207
} | class ____(IntEnum):
ISSUE = 0
EVENT = 1
SESSION = 2
REPLAY = 3
METRIC = 4
SPAN = 5
ERROR = 6
TRANSACTION = 7
LOG = 8
TRACEMETRIC = 9
| SearchType |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_responses/records/error_record_builder.py | {
"start": 221,
"end": 1163
} | class ____(RecordBuilder):
def __init__(
self,
template: Dict[str, Any],
id_path: Optional[Path] = None,
cursor_path: Optional[Union[FieldPath, NestedPath]] = None,
error_message_path: Optional[Path] = None,
):
super().__init__(template, id_path, cursor_path)
self._error_message_path = error_message_path
@classmethod
def non_breaking_error(cls) -> "ErrorRecordBuilder":
return cls(find_template("non_breaking_error", __file__), None, None, error_message_path=FieldPath("details"))
@classmethod
def breaking_error(cls) -> "ErrorRecordBuilder":
return cls(find_template("error", __file__), None, None, error_message_path=FieldPath("message"))
def with_error_message(self, message: str) -> "ErrorRecordBuilder":
self._set_field(self._error_message_path._path[0], self._error_message_path, message)
return self
| ErrorRecordBuilder |
python | python__mypy | mypy/typeops.py | {
"start": 43468,
"end": 49736
} | class ____(TypeTraverserVisitor):
def visit_callable_type(self, t: CallableType) -> None:
for v in t.variables:
v.id.meta_level = 0
super().visit_callable_type(t)
def custom_special_method(typ: Type, name: str, check_all: bool = False) -> bool:
"""Does this type have a custom special method such as __format__() or __eq__()?
If check_all is True ensure all items of a union have a custom method, not just some.
"""
typ = get_proper_type(typ)
if isinstance(typ, Instance):
method = typ.type.get(name)
if method and isinstance(method.node, (SYMBOL_FUNCBASE_TYPES, Decorator, Var)):
if method.node.info:
return not method.node.info.fullname.startswith(("builtins.", "typing."))
return False
if isinstance(typ, UnionType):
if check_all:
return all(custom_special_method(t, name, check_all) for t in typ.items)
return any(custom_special_method(t, name) for t in typ.items)
if isinstance(typ, TupleType):
return custom_special_method(tuple_fallback(typ), name, check_all)
if isinstance(typ, FunctionLike) and typ.is_type_obj():
# Look up __method__ on the metaclass for class objects.
return custom_special_method(typ.fallback, name, check_all)
if isinstance(typ, TypeType) and isinstance(typ.item, Instance):
if typ.item.type.metaclass_type:
# Look up __method__ on the metaclass for class objects.
return custom_special_method(typ.item.type.metaclass_type, name, check_all)
if isinstance(typ, AnyType):
# Avoid false positives in uncertain cases.
return True
# TODO: support other types (see ExpressionChecker.has_member())?
return False
def separate_union_literals(t: UnionType) -> tuple[Sequence[LiteralType], Sequence[Type]]:
"""Separate literals from other members in a union type."""
literal_items = []
union_items = []
for item in t.items:
proper = get_proper_type(item)
if isinstance(proper, LiteralType):
literal_items.append(proper)
else:
union_items.append(item)
return literal_items, union_items
def try_getting_instance_fallback(typ: Type) -> Instance | None:
"""Returns the Instance fallback for this type if one exists or None."""
typ = get_proper_type(typ)
if isinstance(typ, Instance):
return typ
elif isinstance(typ, LiteralType):
return typ.fallback
elif isinstance(typ, NoneType):
return None # Fast path for None, which is common
elif isinstance(typ, FunctionLike):
return typ.fallback
elif isinstance(typ, TupleType):
return typ.partial_fallback
elif isinstance(typ, TypedDictType):
return typ.fallback
elif isinstance(typ, TypeVarType):
return try_getting_instance_fallback(typ.upper_bound)
return None
def fixup_partial_type(typ: Type) -> Type:
"""Convert a partial type that we couldn't resolve into something concrete.
This means, for None we make it Optional[Any], and for anything else we
fill in all of the type arguments with Any.
"""
if not isinstance(typ, PartialType):
return typ
if typ.type is None:
return UnionType.make_union([AnyType(TypeOfAny.unannotated), NoneType()])
else:
return Instance(typ.type, [AnyType(TypeOfAny.unannotated)] * len(typ.type.type_vars))
def get_protocol_member(
left: Instance, member: str, class_obj: bool, is_lvalue: bool = False
) -> Type | None:
if member == "__call__" and class_obj:
# Special case: class objects always have __call__ that is just the constructor.
# TODO: this is wrong, it creates callables that are not recognized as type objects.
# Long-term, we should probably get rid of this callback argument altogether.
def named_type(fullname: str) -> Instance:
return Instance(left.type.mro[-1], [])
return type_object_type(left.type, named_type)
if member == "__call__" and left.type.is_metaclass(precise=True):
# Special case: we want to avoid falling back to metaclass __call__
# if constructor signature didn't match, this can cause many false negatives.
return None
from mypy.subtypes import find_member
subtype = find_member(member, left, left, class_obj=class_obj, is_lvalue=is_lvalue)
if isinstance(subtype, PartialType):
subtype = (
NoneType()
if subtype.type is None
else Instance(
subtype.type, [AnyType(TypeOfAny.unannotated)] * len(subtype.type.type_vars)
)
)
return subtype
def _is_disjoint_base(info: TypeInfo) -> bool:
# It either has the @disjoint_base decorator or defines nonempty __slots__.
if info.is_disjoint_base:
return True
if not info.slots:
return False
own_slots = {
slot
for slot in info.slots
if not any(
base_info.type.slots is not None and slot in base_info.type.slots
for base_info in info.bases
)
}
return bool(own_slots)
def _get_disjoint_base_of(instance: Instance) -> TypeInfo | None:
"""Returns the disjoint base of the given instance, if it exists."""
if _is_disjoint_base(instance.type):
return instance.type
for base in instance.type.mro:
if _is_disjoint_base(base):
return base
return None
def can_have_shared_disjoint_base(instances: list[Instance]) -> bool:
"""Returns whether the given instances can share a disjoint base.
This means that a child class of these classes can exist at runtime.
"""
# Ignore None disjoint bases (which are `object`).
disjoint_bases = [
base for instance in instances if (base := _get_disjoint_base_of(instance)) is not None
]
if not disjoint_bases:
# All are `object`.
return True
candidate = disjoint_bases[0]
for base in disjoint_bases[1:]:
if candidate.has_base(base.fullname):
continue
elif base.has_base(candidate.fullname):
candidate = base
else:
return False
return True
| FreezeTypeVarsVisitor |
python | run-llama__llama_index | llama-index-utils/llama-index-utils-qianfan/llama_index/utils/qianfan/client.py | {
"start": 1418,
"end": 5712
} | class ____:
"""
The access client for Baidu's Qianfan LLM Platform.
"""
def __init__(self, access_key: str, secret_key: str):
"""
Initialize a Client instance.
:param access_key: The Access Key obtained from the Security Authentication Center of Baidu Intelligent Cloud Console.
:param secret_key: The Secret Key paired with the Access Key.
"""
self._access_key = access_key
self._secret_key = secret_key
def _get_headers(self, method: str, url: str) -> Dict[str, str]:
headers = {
"Content-Type": "application/json",
}
authorization = encode_authorization(
method.upper(), url, headers, self._access_key, self._secret_key
)
headers["Authorization"] = authorization
return headers
def _preprocess(
self, method: str, url: str, params: QueryParamTypes = None, json: Any = None
) -> httpx.Request:
full_url, url_without_query, params = _rebuild_url(url, params)
if logger.level <= logging.DEBUG:
logging.debug(f"{method} {url_without_query}, request body: {json}")
headers = self._get_headers(method, full_url)
return httpx.Request(
method=method,
url=url_without_query,
params=params,
headers=headers,
json=json,
)
def _postprocess(self, r: httpx.Response) -> Dict:
if logger.level <= logging.DEBUG:
logger.debug(f"{r.request.method} {r.url} response body: {r.text}")
resp_dict = r.json()
error_code = resp_dict.get("error_code", 0)
if error_code != 0:
raise Error(error_code, resp_dict.get("error_msg"))
return resp_dict
def _postprocess_stream_part(self, line: str) -> Iterable[Dict]:
if line == "":
return
if line.startswith("{") and line.endswith("}"): # error
resp_dict = json.loads(line)
error_code = resp_dict.get("error_code", 0)
if error_code != 0:
raise Error(error_code, resp_dict.get("error_msg"))
if line.startswith("data: "):
line = line[len("data: ") :]
resp_dict = json.loads(line)
yield resp_dict
def post(self, url: str, params: QueryParamTypes = None, json: Any = None) -> Dict:
"""
Make an Request with POST Method.
"""
request = self._preprocess("POST", url=url, params=params, json=json)
with httpx.Client() as client:
r = client.send(request=request)
r.raise_for_status()
return self._postprocess(r)
async def apost(
self, url: str, params: QueryParamTypes = None, json: Any = None
) -> Dict:
"""
Make an Asynchronous Request with POST Method.
"""
response = self._preprocess("POST", url=url, params=params, json=json)
async with httpx.AsyncClient() as aclient:
r = await aclient.send(request=response)
r.raise_for_status()
return self._postprocess(r)
def post_reply_stream(
self, url: str, params: QueryParamTypes = None, json: Any = None
) -> Iterable[Dict]:
"""
Make an Request with POST Method and the response is returned in a stream.
"""
request = self._preprocess("POST", url=url, params=params, json=json)
with httpx.Client() as client:
r = client.send(request=request, stream=True)
r.raise_for_status()
for line in r.iter_lines():
yield from self._postprocess_stream_part(line)
async def apost_reply_stream(
self, url: str, params: QueryParamTypes = None, json: Any = None
) -> AsyncIterable[Dict]:
"""
Make an Asynchronous Request with POST Method and the response is returned in a stream.
"""
request = self._preprocess("POST", url=url, params=params, json=json)
async with httpx.AsyncClient() as aclient:
r = await aclient.send(request=request, stream=True)
r.raise_for_status()
async for line in r.aiter_lines():
for part in self._postprocess_stream_part(line):
yield part
| Client |
python | numpy__numpy | numpy/polynomial/tests/test_polynomial.py | {
"start": 13945,
"end": 15282
} | class ____:
def test_polyder(self):
# check exceptions
assert_raises(TypeError, poly.polyder, [0], .5)
assert_raises(ValueError, poly.polyder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0] * i + [1]
res = poly.polyder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0] * i + [1]
res = poly.polyder(poly.polyint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0] * i + [1]
res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_polyder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T
res = poly.polyder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([poly.polyder(c) for c in c2d])
res = poly.polyder(c2d, axis=1)
assert_almost_equal(res, tgt)
| TestDerivative |
python | huggingface__transformers | tests/models/longformer/test_modeling_longformer.py | {
"start": 17255,
"end": 32961
} | class ____(unittest.TestCase):
def _get_hidden_states(self):
return torch.tensor(
[
[
[
4.98332758e-01,
2.69175139e00,
-7.08081422e-03,
1.04915401e00,
-1.83476661e00,
7.67220476e-01,
2.98580543e-01,
2.84803992e-02,
],
[
-7.58357372e-01,
4.20635998e-01,
-4.04739919e-02,
1.59924145e-01,
2.05135748e00,
-1.15997978e00,
5.37166397e-01,
2.62873606e-01,
],
[
-1.69438001e00,
4.17574660e-01,
-1.49196962e00,
-1.76483717e00,
-1.94566312e-01,
-1.71183858e00,
7.72903565e-01,
-1.11557056e00,
],
[
5.44028163e-01,
2.05466114e-01,
-3.63045868e-01,
2.41865062e-01,
3.20348382e-01,
-9.05611176e-01,
-1.92690727e-01,
-1.19917547e00,
],
]
],
dtype=torch.float32,
device=torch_device,
)
def test_diagonalize(self):
hidden_states = self._get_hidden_states()
hidden_states = hidden_states.reshape((1, 8, 4)) # set seq length = 8, hidden dim = 4
chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)
window_overlap_size = chunked_hidden_states.shape[2]
self.assertTrue(window_overlap_size == 4)
padded_hidden_states = LongformerSelfAttention._pad_and_diagonalize(chunked_hidden_states)
self.assertTrue(padded_hidden_states.shape[-1] == chunked_hidden_states.shape[-1] + window_overlap_size - 1)
# first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000]
torch.testing.assert_close(
padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], rtol=1e-3, atol=1e-3
)
self.assertTrue(
torch.allclose(
padded_hidden_states[0, 0, 0, 4:],
torch.zeros((3,), device=torch_device, dtype=torch.float32),
atol=1e-3,
)
)
# last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629]
torch.testing.assert_close(
padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], rtol=1e-3, atol=1e-3
)
self.assertTrue(
torch.allclose(
padded_hidden_states[0, 0, -1, :3],
torch.zeros((3,), device=torch_device, dtype=torch.float32),
atol=1e-3,
)
)
def test_pad_and_transpose_last_two_dims(self):
hidden_states = self._get_hidden_states()
self.assertEqual(hidden_states.shape, (1, 4, 8))
padding = (0, 0, 0, 1)
padded_hidden_states = LongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, padding)
self.assertEqual(padded_hidden_states.shape, (1, 8, 5))
expected_added_dim = torch.zeros((5,), device=torch_device, dtype=torch.float32)
torch.testing.assert_close(expected_added_dim, padded_hidden_states[0, -1, :], rtol=1e-6, atol=1e-6)
torch.testing.assert_close(
hidden_states[0, -1, :], padded_hidden_states.view(1, -1)[0, 24:32], rtol=1e-6, atol=1e-6
)
def test_chunk(self):
hidden_states = self._get_hidden_states()
batch_size = 1
seq_length = 8
hidden_size = 4
hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size))
chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)
# expected slices across chunk and seq length dim
expected_slice_along_seq_length = torch.tensor(
[0.4983, -0.7584, -1.6944], device=torch_device, dtype=torch.float32
)
expected_slice_along_chunk = torch.tensor(
[0.4983, -1.8348, -0.7584, 2.0514], device=torch_device, dtype=torch.float32
)
torch.testing.assert_close(
chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, rtol=1e-3, atol=1e-3
)
torch.testing.assert_close(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, rtol=1e-3, atol=1e-3)
self.assertEqual(chunked_hidden_states.shape, (1, 3, 4, 4))
def test_mask_invalid_locations(self):
hidden_states = self._get_hidden_states()
batch_size = 1
seq_length = 8
hidden_size = 4
hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size))
chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2)
hid_states_1 = chunked_hidden_states.clone()
LongformerSelfAttention._mask_invalid_locations(hid_states_1, 1)
self.assertTrue(torch.isinf(hid_states_1).sum().item() == 8)
hid_states_2 = chunked_hidden_states.clone()
LongformerSelfAttention._mask_invalid_locations(hid_states_2, 2)
self.assertTrue(torch.isinf(hid_states_2).sum().item() == 24)
hid_states_3 = chunked_hidden_states.clone()[:, :, :, :3]
LongformerSelfAttention._mask_invalid_locations(hid_states_3, 2)
self.assertTrue(torch.isinf(hid_states_3).sum().item() == 24)
hid_states_4 = chunked_hidden_states.clone()[:, :, 2:, :]
LongformerSelfAttention._mask_invalid_locations(hid_states_4, 2)
self.assertTrue(torch.isinf(hid_states_4).sum().item() == 12)
def test_layer_local_attn(self):
model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
model.eval()
layer = model.encoder.layer[0].attention.self.to(torch_device)
hidden_states = self._get_hidden_states()
batch_size, seq_length, hidden_size = hidden_states.size()
attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)
attention_mask[:, -2:] = -10000
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
output_hidden_states = layer(
hidden_states,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
)[0]
self.assertEqual(output_hidden_states.shape, (1, 4, 8))
self.assertTrue(
torch.allclose(
output_hidden_states[0, 1],
torch.tensor(
[0.0019, 0.0122, -0.0171, -0.0256, -0.0300, 0.0173, -0.0115, 0.0048],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
def test_layer_global_attn(self):
model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
model.eval()
layer = model.encoder.layer[0].attention.self.to(torch_device)
hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0)
batch_size, seq_length, hidden_size = hidden_states.size()
attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)
# create attn mask
attention_mask[0, -2:] = 10000.0
attention_mask[0, -1:] = -10000.0
attention_mask[1, 1:] = 10000.0
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
output_hidden_states = layer(
hidden_states,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
)[0]
self.assertEqual(output_hidden_states.shape, (2, 4, 8))
self.assertTrue(
torch.allclose(
output_hidden_states[0, 2],
torch.tensor(
[-0.0651, -0.0393, 0.0309, -0.0342, -0.0066, -0.0155, -0.0209, -0.0494],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
self.assertTrue(
torch.allclose(
output_hidden_states[1, -2],
torch.tensor(
[-0.0405, -0.0384, 0.0396, -0.0374, -0.0341, 0.0136, 0.0014, -0.0571],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
def test_layer_attn_probs(self):
model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny")
model.eval()
layer = model.encoder.layer[0].attention.self.to(torch_device)
hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0)
batch_size, seq_length, hidden_size = hidden_states.size()
attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device)
# create attn mask
attention_mask[0, -2:] = 10000.0
attention_mask[0, -1:] = -10000.0
attention_mask[1, 1:] = 10000.0
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
output_hidden_states, local_attentions, global_attentions = layer(
hidden_states,
attention_mask=attention_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=True,
)
self.assertEqual(local_attentions.shape, (2, 4, 2, 8))
self.assertEqual(global_attentions.shape, (2, 2, 3, 4))
# All tokens with global attention have weight 0 in local attentions.
self.assertTrue(torch.all(local_attentions[0, 2:4, :, :] == 0))
self.assertTrue(torch.all(local_attentions[1, 1:4, :, :] == 0))
# The weight of all tokens with local attention must sum to 1.
self.assertTrue(torch.all(torch.abs(global_attentions[0, :, :2, :].sum(dim=-1) - 1) < 1e-6))
self.assertTrue(torch.all(torch.abs(global_attentions[1, :, :1, :].sum(dim=-1) - 1) < 1e-6))
self.assertTrue(
torch.allclose(
local_attentions[0, 0, 0, :],
torch.tensor(
[0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
self.assertTrue(
torch.allclose(
local_attentions[1, 0, 0, :],
torch.tensor(
[0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
# All the global attention weights must sum to 1.
self.assertTrue(torch.all(torch.abs(global_attentions.sum(dim=-1) - 1) < 1e-6))
self.assertTrue(
torch.allclose(
global_attentions[0, 0, 1, :],
torch.tensor(
[0.2500, 0.2500, 0.2500, 0.2500],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
self.assertTrue(
torch.allclose(
global_attentions[1, 0, 0, :],
torch.tensor(
[0.2497, 0.2500, 0.2499, 0.2504],
dtype=torch.float32,
device=torch_device,
),
atol=1e-3,
)
)
@slow
def test_inference_no_head(self):
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
model.to(torch_device)
# 'Hello world!'
input_ids = torch.tensor([[0, 20920, 232, 328, 1437, 2]], dtype=torch.long, device=torch_device)
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
output = model(input_ids, attention_mask=attention_mask)[0]
output_without_mask = model(input_ids)[0]
expected_output_slice = torch.tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], device=torch_device)
torch.testing.assert_close(output[0, 0, -5:], expected_output_slice, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(output_without_mask[0, 0, -5:], expected_output_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_no_head_long(self):
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
model.to(torch_device)
# 'Hello world! ' repeated 1000 times
input_ids = torch.tensor(
[[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
) # long input
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device)
global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device)
global_attention_mask[:, [1, 4, 21]] = 1 # Set global attention on a few random positions
output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0]
expected_output_sum = torch.tensor(74585.8594, device=torch_device)
expected_output_mean = torch.tensor(0.0243, device=torch_device)
torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4)
@slow
def test_inference_masked_lm_long(self):
model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096")
model.to(torch_device)
# 'Hello world! ' repeated 1000 times
input_ids = torch.tensor(
[[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
) # long input
input_ids = input_ids.to(torch_device)
loss, prediction_scores = model(input_ids, labels=input_ids).to_tuple()
expected_loss = torch.tensor(0.0074, device=torch_device)
expected_prediction_scores_sum = torch.tensor(-6.1048e08, device=torch_device)
expected_prediction_scores_mean = torch.tensor(-3.0348, device=torch_device)
torch.testing.assert_close(loss, expected_loss, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(prediction_scores.sum(), expected_prediction_scores_sum, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(prediction_scores.mean(), expected_prediction_scores_mean, rtol=1e-4, atol=1e-4)
| LongformerModelIntegrationTest |
python | lxml__lxml | src/lxml/tests/dummy_http_server.py | {
"start": 997,
"end": 1126
} | class ____(wsgiserver.WSGIServer, ThreadingMixIn):
"""A web server that starts a new thread for each request.
"""
| WebServer |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/bilstm_classifier.py | {
"start": 2651,
"end": 3350
} | class ____(nn.Module):
"""Applies word dropout to a batch of input IDs.
This is basically the same as `nn.Dropout`, but allows specifying the
value of dropped out items.
"""
dropout_rate: float
unk_idx: int
deterministic: bool | None = None
@nn.compact
def __call__(self, inputs: Array, deterministic: bool | None = None):
deterministic = nn.module.merge_param(
'deterministic', self.deterministic, deterministic)
if deterministic or self.dropout_rate == 0.:
return inputs
rng = self.make_rng('dropout')
mask = jax.random.bernoulli(rng, p=self.dropout_rate, shape=inputs.shape)
return jnp.where(mask, jnp.array([self.unk_idx]), inputs)
| WordDropout |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.