language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/llava/modeling_llava.py | {
"start": 4752,
"end": 5315
} | class ____(PreTrainedModel):
config: LlavaConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_flex_attn = True
_supports_attention_backend = True
@auto_docstring(
custom_intro="""
The Llava model which consists of a vision backbone and a language model, without a language modeling head.
"""
)
| LlavaPreTrainedModel |
python | walkccc__LeetCode | solutions/2826. Sorting Three Groups/2826.py | {
"start": 0,
"end": 345
} | class ____:
def minimumOperations(self, nums: list[int]) -> int:
# dp[i] := the longest non-decreasing subsequence so far with numbers in [1..i]
dp = [0] * 4
for num in nums:
dp[num] += 1 # Append num to the sequence so far.
dp[2] = max(dp[2], dp[1])
dp[3] = max(dp[3], dp[2])
return len(nums) - dp[3]
| Solution |
python | django__django | tests/generic_views/views.py | {
"start": 1963,
"end": 2016
} | class ____(generic.ListView):
model = Book
| BookList |
python | davidhalter__jedi | test/refactor/extract_function.py | {
"start": 2380,
"end": 2528
} | class ____:
def f(self, b, c):
#? 18 text {'new_name': 'b'}
return b | self.a
# ++++++++++++++++++++++++++++++++++++++++++++++++++
| X |
python | sphinx-doc__sphinx | sphinx/domains/cpp/__init__.py | {
"start": 31093,
"end": 32595
} | class ____(SphinxRole):
def __init__(self, asCode: bool) -> None:
super().__init__()
if asCode:
# render the expression as inline code
self.class_type = 'cpp-expr'
else:
# render the expression as inline text
self.class_type = 'cpp-texpr'
def run(self) -> tuple[list[Node], list[system_message]]:
text = self.text.replace('\n', ' ')
parser = DefinitionParser(
text, location=self.get_location(), config=self.config
)
# attempt to mimic XRefRole classes, except that...
try:
ast = parser.parse_expression()
except DefinitionError as ex:
logger.warning(
'Unparseable C++ expression: %r\n%s',
text,
ex,
location=self.get_location(),
)
# see below
node = addnodes.desc_inline('cpp', text, text, classes=[self.class_type])
return [node], []
parent_symbol = self.env.current_document.cpp_parent_symbol
if parent_symbol is None:
parent_symbol = self.env.domaindata['cpp']['root_symbol']
# ...most if not all of these classes should really apply to the individual references,
# not the container node
signode = addnodes.desc_inline('cpp', classes=[self.class_type])
ast.describe_signature(signode, 'markType', self.env, parent_symbol)
return [signode], []
| CPPExprRole |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 32971,
"end": 33423
} | class ____(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
This indicates that the user has sent too many
requests in a given amount of time ("rate limiting").
RFC 6585.4
code: 429, title: Too Many Requests
"""
code = 429
title = 'Too Many Requests'
explanation = (
'The action could not be performed because there were too '
'many requests by the client.'
)
| HTTPTooManyRequests |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_variables.py | {
"start": 2002,
"end": 3996
} | class ____:
@pytest.mark.parametrize(
("key", "value"),
[
("var1", "value"),
("var2/with_slash", "slash_value"),
],
)
def test_variable_get_from_db(self, client, session, key, value):
Variable.set(key=key, value=value, session=session)
session.commit()
response = client.get(f"/execution/variables/{key}")
assert response.status_code == 200
assert response.json() == {"key": key, "value": value}
# Remove connection
Variable.delete(key=key, session=session)
session.commit()
@mock.patch.dict(
"os.environ",
{"AIRFLOW_VAR_KEY1": "VALUE"},
)
def test_variable_get_from_env_var(self, client, session):
response = client.get("/execution/variables/key1")
assert response.status_code == 200
assert response.json() == {"key": "key1", "value": "VALUE"}
@pytest.mark.parametrize(
"key",
[
"non_existent_var",
"non/existent/slash/var",
],
)
def test_variable_get_not_found(self, client, key):
response = client.get(f"/execution/variables/{key}")
assert response.status_code == 404
assert response.json() == {
"detail": {
"message": f"Variable with key '{key}' not found",
"reason": "not_found",
}
}
@pytest.mark.usefixtures("access_denied")
def test_variable_get_access_denied(self, client, caplog):
with caplog.at_level(logging.DEBUG):
response = client.get("/execution/variables/key1")
# Assert response status code and detail for access denied
assert response.status_code == 403
assert response.json() == {
"detail": {
"reason": "access_denied",
}
}
assert any(msg.startswith("Checking read access for task instance") for msg in caplog.messages)
| TestGetVariable |
python | jazzband__django-oauth-toolkit | tests/test_ui_locales.py | {
"start": 443,
"end": 2027
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.application = Application.objects.create(
name="Test Application",
client_id="test",
redirect_uris="https://www.example.com/",
client_type=Application.CLIENT_PUBLIC,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
)
cls.trusted_application = Application.objects.create(
name="Trusted Application",
client_id="trusted",
redirect_uris="https://www.example.com/",
client_type=Application.CLIENT_PUBLIC,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
skip_authorization=True,
)
cls.user = UserModel.objects.create_user("test_user")
cls.url = reverse("oauth2_provider:authorize")
def setUp(self):
self.client.force_login(self.user)
def test_application_ui_locales_param(self):
response = self.client.get(
f"{self.url}?response_type=code&client_id=test&scope=openid&ui_locales=de",
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "oauth2_provider/authorize.html")
def test_trusted_application_ui_locales_param(self):
response = self.client.get(
f"{self.url}?response_type=code&client_id=trusted&scope=openid&ui_locales=de",
)
self.assertEqual(response.status_code, 302)
self.assertRegex(response.url, r"https://www\.example\.com/\?code=[a-zA-Z0-9]+")
| TestUILocalesParam |
python | mlflow__mlflow | mlflow/pyfunc/scoring_server/client.py | {
"start": 3190,
"end": 5432
} | class ____(BaseScoringServerClient):
def __init__(self, process):
super().__init__()
self.process = process
try:
# Use /dev/shm (memory-based filesystem) if possible to make read/write efficient.
tmpdir = tempfile.mkdtemp(dir="/dev/shm")
except Exception:
tmpdir = tempfile.mkdtemp()
self.tmpdir = Path(tmpdir)
self.output_json = self.tmpdir.joinpath("output.json")
def wait_server_ready(self, timeout=30, scoring_server_proc=None):
return_code = self.process.poll()
if return_code is not None:
raise RuntimeError(f"Server process already exit with returncode {return_code}")
def invoke(self, data, params: dict[str, Any] | None = None):
"""
Invoke inference on input data. The input data must be pandas dataframe or numpy array or
a dict of numpy arrays.
Args:
data: Model input data.
params: Additional parameters to pass to the model for inference.
Returns:
:py:class:`PredictionsResponse <mlflow.deployments.PredictionsResponse>` result.
"""
if not self.output_json.exists():
self.output_json.touch()
request_id = str(uuid.uuid4())
request = {
"id": request_id,
"data": dump_input_data(data, params=params),
"output_file": str(self.output_json),
}
self.process.stdin.write(json.dumps(request) + "\n")
self.process.stdin.flush()
begin_time = time.time()
while True:
_logger.info("Waiting for scoring to complete...")
try:
with self.output_json.open(mode="r+") as f:
resp = PredictionsResponse.from_json(f.read())
if resp.get("id") == request_id:
f.truncate(0)
return resp
except Exception as e:
_logger.debug("Exception while waiting for scoring to complete: %s", e)
if time.time() - begin_time > MLFLOW_SCORING_SERVER_REQUEST_TIMEOUT.get():
raise MlflowException("Scoring timeout")
time.sleep(1)
| StdinScoringServerClient |
python | pydantic__pydantic | tests/mypy/outputs/pyproject-plugin-strict-equality_toml/strict_equality.py | {
"start": 33,
"end": 448
} | class ____(BaseModel):
username: str
user = User(username='test')
print(user == 'test')
# MYPY: error: Non-overlapping equality check (left operand type: "User", right operand type: "Literal['test']") [comparison-overlap]
print(user.username == int('1'))
# MYPY: error: Non-overlapping equality check (left operand type: "str", right operand type: "int") [comparison-overlap]
print(user.username == 'test')
| User |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/dbt_manifest_asset_selection.py | {
"start": 832,
"end": 5072
} | class ____(AssetSelection):
"""Defines a selection of assets from a dbt manifest wrapper and a dbt selection string.
Args:
manifest (Mapping[str, Any]): The dbt manifest blob.
select (str): A dbt selection string to specify a set of dbt resources.
exclude (Optional[str]): A dbt selection string to exclude a set of dbt resources.
Examples:
.. code-block:: python
import json
from pathlib import Path
from dagster_dbt import DbtManifestAssetSelection
manifest = json.loads(Path("path/to/manifest.json").read_text())
# select the dbt assets that have the tag "foo".
my_selection = DbtManifestAssetSelection(manifest=manifest, select="tag:foo")
"""
manifest: Mapping[str, Any]
select: str
dagster_dbt_translator: DagsterDbtTranslator
exclude: str
selector: str
project: Optional[DbtProject] = None
def __eq__(self, other):
if not isinstance(other, DbtManifestAssetSelection):
return False
self_metadata = self.manifest.get("metadata")
other_metadata = other.manifest.get("metadata")
if not self_metadata or not other_metadata:
return super().__eq__(other)
# Compare metadata only since it uniquely identifies the manifest and the
# full manifest dictionary can be large
return (
self_metadata == other_metadata
and self.select == other.select
and self.dagster_dbt_translator == other.dagster_dbt_translator
and self.exclude == other.exclude
and self.selector == other.selector
)
@classmethod
def build(
cls,
manifest: DbtManifestParam,
select: str = DBT_DEFAULT_SELECT,
*,
dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,
exclude: str = DBT_DEFAULT_EXCLUDE,
selector: str = DBT_DEFAULT_SELECTOR,
project: Optional[DbtProject] = None,
):
return cls(
manifest=validate_manifest(manifest),
select=check.str_param(select, "select"),
dagster_dbt_translator=check.opt_inst_param(
dagster_dbt_translator,
"dagster_dbt_translator",
DagsterDbtTranslator,
DagsterDbtTranslator(),
),
exclude=check.str_param(exclude, "exclude"),
selector=check.str_param(selector, "selector"),
project=check.opt_inst_param(project, "project", DbtProject),
)
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool = False
) -> AbstractSet[AssetKey]:
keys = set()
for unique_id in select_unique_ids(
select=self.select,
exclude=self.exclude,
selector=self.selector,
manifest_json=self.manifest,
project=self.project,
):
dbt_resource_props = get_node(self.manifest, unique_id)
is_dbt_asset = dbt_resource_props["resource_type"] in ASSET_RESOURCE_TYPES
if is_dbt_asset and not is_non_asset_node(dbt_resource_props):
asset_key = self.dagster_dbt_translator.get_asset_spec(
self.manifest, unique_id, self.project
).key
keys.add(asset_key)
return keys
def resolve_checks_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetCheckKey]:
if not self.dagster_dbt_translator.settings.enable_asset_checks:
return set()
keys = set()
for unique_id in select_unique_ids(
select=self.select,
exclude=self.exclude,
selector=self.selector,
manifest_json=self.manifest,
project=self.project,
):
asset_check_key = get_asset_check_key_for_test(
self.manifest,
self.dagster_dbt_translator,
test_unique_id=unique_id,
project=self.project,
)
if asset_check_key:
keys.add(asset_check_key)
return keys
| DbtManifestAssetSelection |
python | huggingface__transformers | src/transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py | {
"start": 35281,
"end": 38622
} | class ____(Wav2Vec2ForSequenceClassification):
def __init__(self, config):
super().__init__(config)
def freeze_feature_encoder(self):
raise AttributeError("Not needed for Wav2Vec2Bert")
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.wav2vec2_bert.parameters():
param.requires_grad = False
def forward(
self,
input_features: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.wav2vec2_bert(
input_features,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| Wav2Vec2BertForSequenceClassification |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-dashscope/llama_index/readers/dashscope/domain/lease_domains.py | {
"start": 7074,
"end": 7952
} | class ____(DictToObject):
def __init__(self, url, method, headers) -> None:
self.url = url
self.method = method
self.headers = headers
@classmethod
def from_dict(cls, data: dict):
"""
Creates an instance of `QueryFileResult` from a dictionary.
Args:
data (dict): A dictionary containing the necessary keys and values corresponding to the class attributes.
Returns:
QueryFileResult: An instance of `QueryFileResult` populated with data from the input dictionary.
"""
default_values = {"url": "", "method": "GET", "headers": {}}
return cls(
url=data.get("url", default_values["url"]),
method=data.get("method", default_values["method"]),
headers=data.get("headers", default_values["headers"]),
)
| HttpDownloadParameter |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 37716,
"end": 37903
} | class ____(BoringModel):
def on_before_optimizer_step(self, optimizer):
if self.current_epoch == 1:
raise RuntimeError("Trouble!")
| TroubledModelOnBeforeOptimizerStep |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/vertex/_beta_messages.py | {
"start": 496,
"end": 1472
} | class ____(SyncAPIResource):
create = FirstPartyMessagesAPI.create
stream = FirstPartyMessagesAPI.stream
count_tokens = FirstPartyMessagesAPI.count_tokens
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return the
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return MessagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> MessagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return MessagesWithStreamingResponse(self)
| Messages |
python | scipy__scipy | benchmarks/benchmarks/sparse.py | {
"start": 4570,
"end": 5572
} | class ____(Benchmark):
param_names = ['sparse_type']
params = [
['spmatrix', 'sparray']
]
def setup(self, sparse_type):
coo = sparse.coo_array if sparse_type == "sparray" else sparse.coo_matrix
H1, W1 = 1, 100000
H2, W2 = W1, 1000
C1 = 10
C2 = 1000000
rng = np.random.default_rng(0)
i = rng.integers(H1, size=C1)
j = rng.integers(W1, size=C1)
data = rng.random(C1)
self.matrix1 = coo((data, (i, j)), shape=(H1, W1)).tocsr()
i = rng.integers(H2, size=C2)
j = rng.integers(W2, size=C2)
data = rng.random(C2)
self.matrix2 = coo((data, (i, j)), shape=(H2, W2)).tocsr()
def time_large(self, sparse_type):
for i in range(100):
self.matrix1 @ self.matrix2
# Retain old benchmark results (remove this if changing the benchmark)
time_large.version = (
"33aee08539377a7cb0fabaf0d9ff9d6d80079a428873f451b378c39f6ead48cb"
)
| Matmul |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_category.py | {
"start": 4723,
"end": 6061
} | class ____:
test_cases = [("ascii", ["hello", "world", "hi"]),
("unicode", ["Здравствуйте", "привет"])]
ids, cases = zip(*test_cases)
@pytest.mark.parametrize("ydata", cases, ids=ids)
def test_StrCategoryFormatter(self, ydata):
unit = cat.UnitData(ydata)
labels = cat.StrCategoryFormatter(unit._mapping)
for i, d in enumerate(ydata):
assert labels(i, i) == d
assert labels(i, None) == d
@pytest.mark.parametrize("ydata", cases, ids=ids)
@pytest.mark.parametrize("plotter", PLOT_LIST, ids=PLOT_IDS)
def test_StrCategoryFormatterPlot(self, ydata, plotter):
ax = plt.figure().subplots()
plotter(ax, range(len(ydata)), ydata)
for i, d in enumerate(ydata):
assert ax.yaxis.major.formatter(i) == d
assert ax.yaxis.major.formatter(i+1) == ""
def axis_test(axis, labels):
ticks = list(range(len(labels)))
np.testing.assert_array_equal(axis.get_majorticklocs(), ticks)
graph_labels = [axis.major.formatter(i, i) for i in ticks]
# _text also decodes bytes as utf-8.
assert graph_labels == [cat.StrCategoryFormatter._text(l) for l in labels]
assert list(axis.units._mapping.keys()) == [l for l in labels]
assert list(axis.units._mapping.values()) == ticks
| TestStrCategoryFormatter |
python | django__django | tests/forms_tests/field_tests/test_urlfield.py | {
"start": 166,
"end": 6059
} | class ____(FormFieldAssertionsMixin, SimpleTestCase):
def test_urlfield_widget(self):
f = URLField()
self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" required>')
def test_urlfield_widget_max_min_length(self):
f = URLField(min_length=15, max_length=20)
self.assertEqual("http://example.com", f.clean("http://example.com"))
self.assertWidgetRendersTo(
f,
'<input id="id_f" type="url" name="f" maxlength="20" '
'minlength="15" required>',
)
msg = "'Ensure this value has at least 15 characters (it has 12).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean("http://f.com")
msg = "'Ensure this value has at most 20 characters (it has 37).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean("http://abcdefghijklmnopqrstuvwxyz.com")
def test_urlfield_clean(self):
f = URLField(required=False)
tests = [
("http://localhost", "http://localhost"),
("http://example.com", "http://example.com"),
("http://example.com/test", "http://example.com/test"),
("http://example.com.", "http://example.com."),
("http://www.example.com", "http://www.example.com"),
("http://www.example.com:8000/test", "http://www.example.com:8000/test"),
(
"http://example.com?some_param=some_value",
"http://example.com?some_param=some_value",
),
("valid-with-hyphens.com", "https://valid-with-hyphens.com"),
("subdomain.domain.com", "https://subdomain.domain.com"),
("http://200.8.9.10", "http://200.8.9.10"),
("http://200.8.9.10:8000/test", "http://200.8.9.10:8000/test"),
("http://valid-----hyphens.com", "http://valid-----hyphens.com"),
(
"http://some.idn.xyzäöüßabc.domain.com:123/blah",
"http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah",
),
(
"www.example.com/s/http://code.djangoproject.com/ticket/13804",
"https://www.example.com/s/http://code.djangoproject.com/ticket/13804",
),
# Normalization.
("http://example.com/ ", "http://example.com/"),
# Valid IDN.
("http://עברית.idn.icann.org/", "http://עברית.idn.icann.org/"),
("http://sãopaulo.com/", "http://sãopaulo.com/"),
("http://sãopaulo.com.br/", "http://sãopaulo.com.br/"),
("http://пример.испытание/", "http://пример.испытание/"),
("http://مثال.إختبار/", "http://مثال.إختبار/"),
("http://例子.测试/", "http://例子.测试/"),
("http://例子.測試/", "http://例子.測試/"),
(
"http://उदाहरण.परीक्षा/",
"http://उदाहरण.परीक्षा/",
),
("http://例え.テスト/", "http://例え.テスト/"),
("http://مثال.آزمایشی/", "http://مثال.آزمایشی/"),
("http://실례.테스트/", "http://실례.테스트/"),
("http://العربية.idn.icann.org/", "http://العربية.idn.icann.org/"),
# IPv6.
("http://[12:34::3a53]/", "http://[12:34::3a53]/"),
("http://[a34:9238::]:8080/", "http://[a34:9238::]:8080/"),
]
for url, expected in tests:
with self.subTest(url=url):
self.assertEqual(f.clean(url), expected)
def test_urlfield_clean_invalid(self):
f = URLField()
tests = [
"foo",
"com.",
".",
"http://",
"http://example",
"http://example.",
"http://.com",
"http://invalid-.com",
"http://-invalid.com",
"http://inv-.alid-.com",
"http://inv-.-alid.com",
"[a",
"http://[a",
# Non-string.
23,
# Hangs "forever" before fixing a catastrophic backtracking,
# see #11198.
"http://%s" % ("X" * 60,),
# A second example, to make sure the problem is really addressed,
# even on domains that don't fail the domain label length check in
# the regex.
"http://%s" % ("X" * 200,),
# urlsplit() raises ValueError.
"////]@N.AN",
# Empty hostname.
"#@A.bO",
]
msg = "'Enter a valid URL.'"
for value in tests:
with self.subTest(value=value):
with self.assertRaisesMessage(ValidationError, msg):
f.clean(value)
def test_urlfield_clean_required(self):
f = URLField()
msg = "'This field is required.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(None)
with self.assertRaisesMessage(ValidationError, msg):
f.clean("")
def test_urlfield_clean_not_required(self):
f = URLField(required=False)
self.assertEqual(f.clean(None), "")
self.assertEqual(f.clean(""), "")
def test_urlfield_strip_on_none_value(self):
f = URLField(required=False, empty_value=None)
self.assertIsNone(f.clean(""))
self.assertIsNone(f.clean(None))
def test_urlfield_unable_to_set_strip_kwarg(self):
msg = "got multiple values for keyword argument 'strip'"
with self.assertRaisesMessage(TypeError, msg):
URLField(strip=False)
def test_urlfield_assume_scheme(self):
f = URLField()
self.assertEqual(f.clean("example.com"), "https://example.com")
f = URLField(assume_scheme="http")
self.assertEqual(f.clean("example.com"), "http://example.com")
f = URLField(assume_scheme="https")
self.assertEqual(f.clean("example.com"), "https://example.com")
| URLFieldTest |
python | walkccc__LeetCode | solutions/1414. Find the Minimum Number of Fibonacci Numbers Whose Sum Is K/1414-2.py | {
"start": 0,
"end": 416
} | class ____:
def findMinFibonacciNumbers(self, k: int) -> int:
ans = 0
a = 1 # F_1
b = 1 # F_2
while b <= k:
# a, b = F_{i + 1}, F_{i + 2}
# -> a, b = F_{i + 2}, F_{i + 3}
a, b = b, a + b
while a > 0:
if a <= k:
k -= a
ans += 1
# a, b = F_{i + 2}, F_{i + 3}
# -> a, b = F_{i + 1}, F_{i + 2}
a, b = b - a, a
return ans
| Solution |
python | mlflow__mlflow | mlflow/server/job_api.py | {
"start": 2357,
"end": 3182
} | class ____(BaseModel):
"""
Pydantic model for job searching response.
"""
jobs: list[Job]
@job_api_router.post("/search", response_model=SearchJobsResponse)
def search_jobs(payload: SearchJobPayload) -> SearchJobsResponse:
from mlflow.server.handlers import _get_job_store
try:
store = _get_job_store()
job_results = [
Job.from_job_entity(job)
for job in store.list_jobs(
function_fullname=payload.function_fullname,
statuses=payload.statuses,
params=payload.params,
)
]
return SearchJobsResponse(jobs=job_results)
except MlflowException as e:
raise HTTPException(
status_code=e.get_http_status_code(),
detail=e.message,
)
| SearchJobsResponse |
python | matplotlib__matplotlib | lib/mpl_toolkits/mplot3d/art3d.py | {
"start": 13850,
"end": 17214
} | class ____(LineCollection):
"""
A collection of 3D lines.
"""
def __init__(self, lines, axlim_clip=False, **kwargs):
super().__init__(lines, **kwargs)
self._axlim_clip = axlim_clip
"""
Parameters
----------
lines : list of (N, 3) array-like
A sequence ``[line0, line1, ...]`` where each line is a (N, 3)-shape
array-like containing points:: line0 = [(x0, y0, z0), (x1, y1, z1), ...]
Each line can contain a different number of points.
linewidths : float or list of float, default: :rc:`lines.linewidth`
The width of each line in points.
colors : :mpltype:`color` or list of color, default: :rc:`lines.color`
A sequence of RGBA tuples (e.g., arbitrary color strings, etc, not
allowed).
antialiaseds : bool or list of bool, default: :rc:`lines.antialiased`
Whether to use antialiasing for each line.
facecolors : :mpltype:`color` or list of :mpltype:`color`, default: 'none'
When setting *facecolors*, each line is interpreted as a boundary
for an area, implicitly closing the path from the last point to the
first point. The enclosed area is filled with *facecolor*.
In order to manually specify what should count as the "interior" of
each line, please use `.PathCollection` instead, where the
"interior" can be specified by appropriate usage of
`~.path.Path.CLOSEPOLY`.
**kwargs : Forwarded to `.Collection`.
"""
def set_sort_zpos(self, val):
"""Set the position to use for z-sorting."""
self._sort_zpos = val
self.stale = True
def set_segments(self, segments):
"""
Set 3D segments.
"""
self._segments3d = segments
super().set_segments([])
def do_3d_projection(self):
"""
Project the points according to renderer matrix.
"""
segments = np.asanyarray(self._segments3d)
mask = False
if np.ma.isMA(segments):
mask = segments.mask
if self._axlim_clip:
viewlim_mask = _viewlim_mask(segments[..., 0],
segments[..., 1],
segments[..., 2],
self.axes)
if np.any(viewlim_mask):
# broadcast mask to 3D
viewlim_mask = np.broadcast_to(viewlim_mask[..., np.newaxis],
(*viewlim_mask.shape, 3))
mask = mask | viewlim_mask
xyzs = np.ma.array(proj3d._proj_transform_vectors(segments, self.axes.M),
mask=mask)
segments_2d = xyzs[..., 0:2]
LineCollection.set_segments(self, segments_2d)
# FIXME
if len(xyzs) > 0:
minz = min(xyzs[..., 2].min(), 1e9)
else:
minz = np.nan
return minz
def line_collection_2d_to_3d(col, zs=0, zdir='z', axlim_clip=False):
"""Convert a `.LineCollection` to a `.Line3DCollection` object."""
segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
col._axlim_clip = axlim_clip
| Line3DCollection |
python | openai__openai-python | tests/test_response.py | {
"start": 3650,
"end": 8394
} | class ____(BaseModel):
foo: str
bar: int
def test_response_parse_custom_model(client: OpenAI) -> None:
response = APIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = response.parse(to=CustomModel)
assert obj.foo == "hello!"
assert obj.bar == 2
@pytest.mark.asyncio
async def test_async_response_parse_custom_model(async_client: AsyncOpenAI) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=async_client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = await response.parse(to=CustomModel)
assert obj.foo == "hello!"
assert obj.bar == 2
def test_response_basemodel_request_id(client: OpenAI) -> None:
response = APIResponse(
raw=httpx.Response(
200,
headers={"x-request-id": "my-req-id"},
content=json.dumps({"foo": "hello!", "bar": 2}),
),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = response.parse(to=CustomModel)
assert obj._request_id == "my-req-id"
assert obj.foo == "hello!"
assert obj.bar == 2
assert obj.to_dict() == {"foo": "hello!", "bar": 2}
assert "_request_id" not in rich_print_str(obj)
assert "__exclude_fields__" not in rich_print_str(obj)
@pytest.mark.asyncio
async def test_async_response_basemodel_request_id(client: OpenAI) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(
200,
headers={"x-request-id": "my-req-id"},
content=json.dumps({"foo": "hello!", "bar": 2}),
),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = await response.parse(to=CustomModel)
assert obj._request_id == "my-req-id"
assert obj.foo == "hello!"
assert obj.bar == 2
assert obj.to_dict() == {"foo": "hello!", "bar": 2}
def test_response_parse_annotated_type(client: OpenAI) -> None:
response = APIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = response.parse(
to=cast("type[CustomModel]", Annotated[CustomModel, "random metadata"]),
)
assert obj.foo == "hello!"
assert obj.bar == 2
async def test_async_response_parse_annotated_type(async_client: AsyncOpenAI) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
client=async_client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
obj = await response.parse(
to=cast("type[CustomModel]", Annotated[CustomModel, "random metadata"]),
)
assert obj.foo == "hello!"
assert obj.bar == 2
@pytest.mark.parametrize(
"content, expected",
[
("false", False),
("true", True),
("False", False),
("True", True),
("TrUe", True),
("FalSe", False),
],
)
def test_response_parse_bool(client: OpenAI, content: str, expected: bool) -> None:
response = APIResponse(
raw=httpx.Response(200, content=content),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
result = response.parse(to=bool)
assert result is expected
@pytest.mark.parametrize(
"content, expected",
[
("false", False),
("true", True),
("False", False),
("True", True),
("TrUe", True),
("FalSe", False),
],
)
async def test_async_response_parse_bool(client: AsyncOpenAI, content: str, expected: bool) -> None:
response = AsyncAPIResponse(
raw=httpx.Response(200, content=content),
client=client,
stream=False,
stream_cls=None,
cast_to=str,
options=FinalRequestOptions.construct(method="get", url="/foo"),
)
result = await response.parse(to=bool)
assert result is expected
| CustomModel |
python | python__mypy | mypy/test/meta/_pytest.py | {
"start": 226,
"end": 2276
} | class ____:
input: str
input_updated: str # any updates made by --update-data
stdout: str
stderr: str
def dedent_docstring(s: str) -> str:
return textwrap.dedent(s).lstrip()
def run_pytest_data_suite(
data_suite: str,
*,
data_file_prefix: str = "check",
pytest_node_prefix: str = "mypy/test/testcheck.py::TypeCheckSuite",
extra_args: Iterable[str],
max_attempts: int,
) -> PytestResult:
"""
Runs a suite of data test cases through pytest until either tests pass
or until a maximum number of attempts (needed for incremental tests).
:param data_suite: the actual "suite" i.e. the contents of a .test file
"""
p_test_data = Path(test_data_prefix)
p_root = p_test_data.parent.parent
p = p_test_data / f"{data_file_prefix}-meta-{uuid.uuid4()}.test"
assert not p.exists()
data_suite = dedent_docstring(data_suite)
try:
p.write_text(data_suite)
test_nodeid = f"{pytest_node_prefix}::{p.name}"
extra_args = [sys.executable, "-m", "pytest", "-n", "0", "-s", *extra_args, test_nodeid]
cmd = shlex.join(extra_args)
for i in range(max_attempts - 1, -1, -1):
print(f">> {cmd}")
proc = subprocess.run(extra_args, capture_output=True, check=False, cwd=p_root)
if proc.returncode == 0:
break
prefix = "NESTED PYTEST STDOUT"
for line in proc.stdout.decode().splitlines():
print(f"{prefix}: {line}")
prefix = " " * len(prefix)
prefix = "NESTED PYTEST STDERR"
for line in proc.stderr.decode().splitlines():
print(f"{prefix}: {line}")
prefix = " " * len(prefix)
print(f"Exit code {proc.returncode} ({i} attempts remaining)")
return PytestResult(
input=data_suite,
input_updated=p.read_text(),
stdout=proc.stdout.decode(),
stderr=proc.stderr.decode(),
)
finally:
p.unlink()
| PytestResult |
python | numpy__numpy | numpy/polynomial/tests/test_chebyshev.py | {
"start": 17356,
"end": 17937
} | class ____:
def test_100(self):
x, w = cheb.chebgauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = cheb.chebvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1 / np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.pi
assert_almost_equal(w.sum(), tgt)
| TestGauss |
python | langchain-ai__langchain | libs/langchain/langchain_classic/memory/entity.py | {
"start": 10429,
"end": 15155
} | class ____(BaseEntityStore):
"""SQLite-backed Entity store with safe query construction."""
session_id: str = "default"
table_name: str = "memory_store"
conn: Any = None
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def __init__(
self,
session_id: str = "default",
db_file: str = "entities.db",
table_name: str = "memory_store",
*args: Any,
**kwargs: Any,
):
"""Initializes the SQLiteEntityStore.
Args:
session_id: Unique identifier for the session.
db_file: Path to the SQLite database file.
table_name: Name of the table to store entities.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
"""
super().__init__(*args, **kwargs)
try:
import sqlite3
except ImportError as e:
msg = (
"Could not import sqlite3 python package. "
"Please install it with `pip install sqlite3`."
)
raise ImportError(msg) from e
# Basic validation to prevent obviously malicious table/session names
if not table_name.isidentifier() or not session_id.isidentifier():
# Since we validate here, we can safely suppress the S608 bandit warning
msg = "Table name and session ID must be valid Python identifiers."
raise ValueError(msg)
self.conn = sqlite3.connect(db_file)
self.session_id = session_id
self.table_name = table_name
self._create_table_if_not_exists()
@property
def full_table_name(self) -> str:
"""Returns the full table name with session ID."""
return f"{self.table_name}_{self.session_id}"
def _execute_query(self, query: str, params: tuple = ()) -> "sqlite3.Cursor":
"""Executes a query with proper connection handling."""
with self.conn:
return self.conn.execute(query, params)
def _create_table_if_not_exists(self) -> None:
"""Creates the entity table if it doesn't exist, using safe quoting."""
# Use standard SQL double quotes for the table name identifier
create_table_query = f"""
CREATE TABLE IF NOT EXISTS "{self.full_table_name}" (
key TEXT PRIMARY KEY,
value TEXT
)
"""
self._execute_query(create_table_query)
def get(self, key: str, default: str | None = None) -> str | None:
"""Retrieves a value, safely quoting the table name."""
# `?` placeholder is used for the value to prevent SQL injection
# Ignore S608 since we validate for malicious table/session names in `__init__`
query = f'SELECT value FROM "{self.full_table_name}" WHERE key = ?' # noqa: S608
cursor = self._execute_query(query, (key,))
result = cursor.fetchone()
return result[0] if result is not None else default
def set(self, key: str, value: str | None) -> None:
"""Inserts or replaces a value, safely quoting the table name."""
if not value:
return self.delete(key)
# Ignore S608 since we validate for malicious table/session names in `__init__`
query = (
"INSERT OR REPLACE INTO " # noqa: S608
f'"{self.full_table_name}" (key, value) VALUES (?, ?)'
)
self._execute_query(query, (key, value))
return None
def delete(self, key: str) -> None:
"""Deletes a key-value pair, safely quoting the table name."""
# Ignore S608 since we validate for malicious table/session names in `__init__`
query = f'DELETE FROM "{self.full_table_name}" WHERE key = ?' # noqa: S608
self._execute_query(query, (key,))
def exists(self, key: str) -> bool:
"""Checks for the existence of a key, safely quoting the table name."""
# Ignore S608 since we validate for malicious table/session names in `__init__`
query = f'SELECT 1 FROM "{self.full_table_name}" WHERE key = ? LIMIT 1' # noqa: S608
cursor = self._execute_query(query, (key,))
return cursor.fetchone() is not None
@override
def clear(self) -> None:
# Ignore S608 since we validate for malicious table/session names in `__init__`
query = f"""
DELETE FROM {self.full_table_name}
""" # noqa: S608
with self.conn:
self.conn.execute(query)
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
| SQLiteEntityStore |
python | readthedocs__readthedocs.org | readthedocs/api/v3/views.py | {
"start": 21364,
"end": 21641
} | class ____(APIv3Settings, RemoteQuerySetMixin, ListModelMixin, GenericViewSet):
model = RemoteOrganization
serializer_class = RemoteOrganizationSerializer
filterset_class = RemoteOrganizationFilter
permission_classes = (IsAuthenticated,)
| RemoteOrganizationViewSet |
python | pandas-dev__pandas | pandas/tests/indexes/interval/test_setops.py | {
"start": 424,
"end": 8346
} | class ____:
def test_union(self, closed, sort):
index = monotonic_index(0, 11, closed=closed)
other = monotonic_index(5, 13, closed=closed)
expected = monotonic_index(0, 13, closed=closed)
result = index[::-1].union(other, sort=sort)
if sort in (None, True):
tm.assert_index_equal(result, expected)
else:
tm.assert_index_equal(result.sort_values(), expected)
result = other[::-1].union(index, sort=sort)
if sort in (None, True):
tm.assert_index_equal(result, expected)
else:
tm.assert_index_equal(result.sort_values(), expected)
tm.assert_index_equal(index.union(index, sort=sort), index)
tm.assert_index_equal(index.union(index[:1], sort=sort), index)
def test_union_empty_result(self, closed, sort):
# GH 19101: empty result, same dtype
index = empty_index(dtype="int64", closed=closed)
result = index.union(index, sort=sort)
tm.assert_index_equal(result, index)
# GH 19101: empty result, different numeric dtypes -> common dtype is f8
other = empty_index(dtype="float64", closed=closed)
result = index.union(other, sort=sort)
expected = other
tm.assert_index_equal(result, expected)
other = index.union(index, sort=sort)
tm.assert_index_equal(result, expected)
other = empty_index(dtype="uint64", closed=closed)
result = index.union(other, sort=sort)
tm.assert_index_equal(result, expected)
result = other.union(index, sort=sort)
tm.assert_index_equal(result, expected)
def test_intersection(self, closed, sort):
index = monotonic_index(0, 11, closed=closed)
other = monotonic_index(5, 13, closed=closed)
expected = monotonic_index(5, 11, closed=closed)
result = index[::-1].intersection(other, sort=sort)
if sort in (None, True):
tm.assert_index_equal(result, expected)
else:
tm.assert_index_equal(result.sort_values(), expected)
result = other[::-1].intersection(index, sort=sort)
if sort in (None, True):
tm.assert_index_equal(result, expected)
else:
tm.assert_index_equal(result.sort_values(), expected)
tm.assert_index_equal(index.intersection(index, sort=sort), index)
# GH 26225: nested intervals
index = IntervalIndex.from_tuples([(1, 2), (1, 3), (1, 4), (0, 2)])
other = IntervalIndex.from_tuples([(1, 2), (1, 3)])
expected = IntervalIndex.from_tuples([(1, 2), (1, 3)])
result = index.intersection(other)
tm.assert_index_equal(result, expected)
# GH 26225
index = IntervalIndex.from_tuples([(0, 3), (0, 2)])
other = IntervalIndex.from_tuples([(0, 2), (1, 3)])
expected = IntervalIndex.from_tuples([(0, 2)])
result = index.intersection(other)
tm.assert_index_equal(result, expected)
# GH 26225: duplicate nan element
index = IntervalIndex([np.nan, np.nan])
other = IntervalIndex([np.nan])
expected = IntervalIndex([np.nan])
result = index.intersection(other)
tm.assert_index_equal(result, expected)
def test_intersection_empty_result(self, closed, sort):
index = monotonic_index(0, 11, closed=closed)
# GH 19101: empty result, same dtype
other = monotonic_index(300, 314, closed=closed)
expected = empty_index(dtype="int64", closed=closed)
result = index.intersection(other, sort=sort)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different numeric dtypes -> common dtype is float64
other = monotonic_index(300, 314, dtype="float64", closed=closed)
result = index.intersection(other, sort=sort)
expected = other[:0]
tm.assert_index_equal(result, expected)
other = monotonic_index(300, 314, dtype="uint64", closed=closed)
result = index.intersection(other, sort=sort)
tm.assert_index_equal(result, expected)
def test_intersection_duplicates(self):
# GH#38743
index = IntervalIndex.from_tuples([(1, 2), (1, 2), (2, 3), (3, 4)])
other = IntervalIndex.from_tuples([(1, 2), (2, 3)])
expected = IntervalIndex.from_tuples([(1, 2), (2, 3)])
result = index.intersection(other)
tm.assert_index_equal(result, expected)
def test_difference(self, closed, sort):
index = IntervalIndex.from_arrays([1, 0, 3, 2], [1, 2, 3, 4], closed=closed)
result = index.difference(index[:1], sort=sort)
expected = index[1:]
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
# GH 19101: empty result, same dtype
result = index.difference(index, sort=sort)
expected = empty_index(dtype="int64", closed=closed)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(
index.left.astype("float64"), index.right, closed=closed
)
result = index.difference(other, sort=sort)
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self, closed, sort):
index = monotonic_index(0, 11, closed=closed)
result = index[1:].symmetric_difference(index[:-1], sort=sort)
expected = IntervalIndex([index[0], index[-1]])
if sort in (None, True):
tm.assert_index_equal(result, expected)
else:
tm.assert_index_equal(result.sort_values(), expected)
# GH 19101: empty result, same dtype
result = index.symmetric_difference(index, sort=sort)
expected = empty_index(dtype="int64", closed=closed)
if sort in (None, True):
tm.assert_index_equal(result, expected)
else:
tm.assert_index_equal(result.sort_values(), expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(
index.left.astype("float64"), index.right, closed=closed
)
result = index.symmetric_difference(other, sort=sort)
expected = empty_index(dtype="float64", closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.filterwarnings("ignore:'<' not supported between:RuntimeWarning")
@pytest.mark.parametrize(
"op_name", ["union", "intersection", "difference", "symmetric_difference"]
)
def test_set_incompatible_types(self, closed, op_name, sort):
index = monotonic_index(0, 11, closed=closed)
set_op = getattr(index, op_name)
# TODO: standardize return type of non-union setops type(self vs other)
# non-IntervalIndex
if op_name == "difference":
expected = index
else:
expected = getattr(index.astype("O"), op_name)(Index([1, 2, 3]))
result = set_op(Index([1, 2, 3]), sort=sort)
tm.assert_index_equal(result, expected)
# mixed closed -> cast to object
for other_closed in {"right", "left", "both", "neither"} - {closed}:
other = monotonic_index(0, 11, closed=other_closed)
expected = getattr(index.astype(object), op_name)(other, sort=sort)
if op_name == "difference":
expected = index
result = set_op(other, sort=sort)
tm.assert_index_equal(result, expected)
# GH 19016: incompatible dtypes -> cast to object
other = interval_range(Timestamp("20180101"), periods=9, closed=closed)
expected = getattr(index.astype(object), op_name)(other, sort=sort)
if op_name == "difference":
expected = index
result = set_op(other, sort=sort)
tm.assert_index_equal(result, expected)
| TestIntervalIndex |
python | wandb__wandb | wandb/sdk/mailbox/mailbox_handle.py | {
"start": 437,
"end": 3231
} | class ____(abc.ABC, Generic[_T]):
"""A handle for waiting on a response to a request."""
def __init__(self, asyncer: asyncio_manager.AsyncioManager) -> None:
self._asyncer = asyncer
@property
def asyncer(self) -> asyncio_manager.AsyncioManager:
"""The asyncio thread to which the handle belongs.
The handle's async methods must be run using this object.
"""
return self._asyncer
def map(self, fn: Callable[[_T], _S]) -> MailboxHandle[_S]:
"""Returns a transformed handle.
Methods on the returned handle call methods on this handle, but the
response type is derived using the given function.
Args:
fn: A function to apply to this handle's result to get the new
handle's result. The function should be pure and fast.
"""
return _MailboxMappedHandle(self, fn)
@abc.abstractmethod
def abandon(self) -> None:
"""Abandon the handle, indicating it will not receive a response.
This may not happen immediately: it is possible for an existing
call to `wait_async` to complete successfully after this method returns.
"""
@abc.abstractmethod
def cancel(self, iface: interface.InterfaceBase) -> None:
"""Cancel the handle, requesting any associated work to not complete.
It is an error to call this from an async function.
This automatically abandons the handle, as a response is no longer
guaranteed.
Args:
iface: The interface on which to publish the cancel request.
"""
@abc.abstractmethod
def wait_or(self, *, timeout: float | None) -> _T:
"""Wait for a response or a timeout.
It is an error to call this from an async function.
Args:
timeout: A finite number of seconds or None to never time out.
If less than or equal to zero, times out immediately unless
the response is available.
Returns:
The response if it arrives before the timeout or has already arrived.
Raises:
TimeoutError: If the timeout is reached.
HandleAbandonedError: If the handle becomes abandoned.
"""
@abc.abstractmethod
async def wait_async(self, *, timeout: float | None) -> _T:
"""Wait for a response or timeout.
This must run in an `asyncio` event loop.
Args:
timeout: A finite number of seconds or None to never time out.
Returns:
The response if it arrives before the timeout or has already arrived.
Raises:
TimeoutError: If the timeout is reached.
HandleAbandonedError: If the handle becomes abandoned.
"""
| MailboxHandle |
python | aimacode__aima-python | gui/grid_mdp.py | {
"start": 12187,
"end": 16421
} | class ____(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
tk.Tk.wm_title(self, 'Grid MDP')
self.shared_data = {
'height': tk.IntVar(),
'width': tk.IntVar()}
self.shared_data['height'].set(1)
self.shared_data['width'].set(1)
self.container = tk.Frame(self)
self.container.pack(side='top', fill='both', expand=True)
self.container.grid_rowconfigure(0, weight=1)
self.container.grid_columnconfigure(0, weight=1)
self.frames = {}
self.menu_bar = tk.Menu(self.container)
self.file_menu = tk.Menu(self.menu_bar, tearoff=0)
self.file_menu.add_command(label='Exit', command=self.exit)
self.menu_bar.add_cascade(label='File', menu=self.file_menu)
self.edit_menu = tk.Menu(self.menu_bar, tearoff=1)
self.edit_menu.add_command(label='Reset', command=self.master_reset)
self.edit_menu.add_command(label='Initialize', command=self.initialize)
self.edit_menu.add_separator()
self.edit_menu.add_command(label='View matrix', command=self.view_matrix)
self.edit_menu.add_command(label='View terminals', command=self.view_terminals)
self.menu_bar.add_cascade(label='Edit', menu=self.edit_menu)
self.menu_bar.entryconfig('Edit', state=tk.DISABLED)
self.build_menu = tk.Menu(self.menu_bar, tearoff=1)
self.build_menu.add_command(label='Build and Run', command=self.build)
self.menu_bar.add_cascade(label='Build', menu=self.build_menu)
self.menu_bar.entryconfig('Build', state=tk.DISABLED)
tk.Tk.config(self, menu=self.menu_bar)
for F in (HomePage, BuildMDP, SolveMDP):
frame = F(self.container, self)
self.frames[F] = frame
frame.grid(row=0, column=0, sticky='nsew')
self.show_frame(HomePage)
def placeholder_function(self):
"""placeholder function"""
print('Not supported yet!')
def exit(self):
"""function to exit"""
if tkinter.messagebox.askokcancel('Exit?', 'All changes will be lost'):
quit()
def new(self):
"""function to create new GridMDP"""
self.master_reset()
build_page = self.get_page(BuildMDP)
build_page.gridmdp = None
build_page.terminals = None
build_page.buttons = None
self.show_frame(HomePage)
def get_page(self, page_class):
"""returns pages from stored frames"""
return self.frames[page_class]
def view_matrix(self):
"""prints current matrix to console"""
build_page = self.get_page(BuildMDP)
_height = self.shared_data['height'].get()
_width = self.shared_data['width'].get()
print(build_page.gridmdp)
display(build_page.gridmdp, _height, _width)
def view_terminals(self):
"""prints current terminals to console"""
build_page = self.get_page(BuildMDP)
print('Terminals', build_page.terminals)
def initialize(self):
"""calls initialize from BuildMDP"""
build_page = self.get_page(BuildMDP)
build_page.initialize()
def master_reset(self):
"""calls master_reset from BuildMDP"""
build_page = self.get_page(BuildMDP)
build_page.master_reset()
def build(self):
"""runs specified mdp solving algorithm"""
frame = SolveMDP(self.container, self)
self.frames[SolveMDP] = frame
frame.grid(row=0, column=0, sticky='nsew')
self.show_frame(SolveMDP)
build_page = self.get_page(BuildMDP)
gridmdp = build_page.gridmdp
terminals = build_page.terminals
solve_page = self.get_page(SolveMDP)
_height = self.shared_data['height'].get()
_width = self.shared_data['width'].get()
solve_page.create_graph(gridmdp, terminals, _height, _width)
def show_frame(self, controller, cb=False):
"""shows specified frame and optionally runs create_buttons"""
if cb:
build_page = self.get_page(BuildMDP)
build_page.create_buttons()
frame = self.frames[controller]
frame.tkraise()
| MDPapp |
python | getsentry__sentry | src/sentry/snuba/dataset.py | {
"start": 40,
"end": 1652
} | class ____(Enum):
Events = "events"
"The events dataset contains all ingested errors."
Transactions = "transactions"
"The transactions dataset contains all ingested transactions."
Discover = "discover"
"The discover dataset is a combination of both the events and transactions datasets."
Outcomes = "outcomes"
"""
The outcomes dataset contains materialized views of raw outcomes.
Outcomes are used to track usage of the product, i.e. how many errors has the
project ingested, etc.
"""
OutcomesRaw = "outcomes_raw"
"The raw, non materialized version of the above"
Sessions = "sessions"
"The sessions dataset is deprecated."
Metrics = "metrics"
"this 'metrics' dataset is only used for release health."
PerformanceMetrics = "generic_metrics"
"""
PerformanceMetrics contains all generic metrics platform metrics.
"""
Replays = "replays"
"Indexed data for the Session Replays feature"
Profiles = "profiles"
"Indexed data for the Profiling feature"
IssuePlatform = "search_issues"
"Issues made via the issue platform will be searchable via the IssuePlatform dataset"
Functions = "functions"
"The functions dataset is built on top of profiling and contains more granular data about profiling functions"
SpansIndexed = "spans"
"""
Contains span data which is searchable.
This is different from metrics,
indexed spans are similar to indexed transactions in the fields available to search
"""
EventsAnalyticsPlatform = "events_analytics_platform"
@unique
| Dataset |
python | scikit-image__scikit-image | tests/skimage/exposure/test_histogram_matching.py | {
"start": 678,
"end": 5157
} | class ____:
image_rgb = data.chelsea()
template_rgb = data.astronaut()
@pytest.mark.parametrize(
'image, reference, channel_axis',
[
(image_rgb, template_rgb, -1),
(image_rgb[:, :, 0], template_rgb[:, :, 0], None),
],
)
def test_match_histograms(self, image, reference, channel_axis):
"""Assert that pdf of matched image is close to the reference's pdf for
all channels and all values of matched"""
matched = exposure.match_histograms(image, reference, channel_axis=channel_axis)
matched_pdf = self._calculate_image_empirical_pdf(matched)
reference_pdf = self._calculate_image_empirical_pdf(reference)
for channel in range(len(matched_pdf)):
reference_values, reference_quantiles = reference_pdf[channel]
matched_values, matched_quantiles = matched_pdf[channel]
for i, matched_value in enumerate(matched_values):
closest_id = (np.abs(reference_values - matched_value)).argmin()
assert_almost_equal(
matched_quantiles[i], reference_quantiles[closest_id], decimal=1
)
@pytest.mark.parametrize('channel_axis', (0, 1, -1))
def test_match_histograms_channel_axis(self, channel_axis):
"""Assert that pdf of matched image is close to the reference's pdf for
all channels and all values of matched"""
image = np.moveaxis(self.image_rgb, -1, channel_axis)
reference = np.moveaxis(self.template_rgb, -1, channel_axis)
matched = exposure.match_histograms(image, reference, channel_axis=channel_axis)
assert matched.dtype == image.dtype
matched = np.moveaxis(matched, channel_axis, -1)
reference = np.moveaxis(reference, channel_axis, -1)
matched_pdf = self._calculate_image_empirical_pdf(matched)
reference_pdf = self._calculate_image_empirical_pdf(reference)
for channel in range(len(matched_pdf)):
reference_values, reference_quantiles = reference_pdf[channel]
matched_values, matched_quantiles = matched_pdf[channel]
for i, matched_value in enumerate(matched_values):
closest_id = (np.abs(reference_values - matched_value)).argmin()
assert_almost_equal(
matched_quantiles[i], reference_quantiles[closest_id], decimal=1
)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_match_histograms_float_dtype(self, dtype):
"""float16 or float32 inputs give float32 output"""
image = self.image_rgb.astype(dtype, copy=False)
reference = self.template_rgb.astype(dtype, copy=False)
matched = exposure.match_histograms(image, reference)
assert matched.dtype == _supported_float_type(dtype)
@pytest.mark.parametrize(
'image, reference',
[(image_rgb, template_rgb[:, :, 0]), (image_rgb[:, :, 0], template_rgb)],
)
def test_raises_value_error_on_channels_mismatch(self, image, reference):
with pytest.raises(ValueError):
exposure.match_histograms(image, reference)
@classmethod
def _calculate_image_empirical_pdf(cls, image):
"""Helper function for calculating empirical probability density
function of a given image for all channels"""
if image.ndim > 2:
image = image.transpose(2, 0, 1)
channels = np.array(image, copy=False, ndmin=3)
channels_pdf = []
for channel in channels:
channel_values, counts = np.unique(channel, return_counts=True)
channel_quantiles = np.cumsum(counts).astype(np.float64)
channel_quantiles /= channel_quantiles[-1]
channels_pdf.append((channel_values, channel_quantiles))
return np.asarray(channels_pdf, dtype=object)
def test_match_histograms_consistency(self):
"""ensure equivalent results for float and integer-based code paths"""
image_u8 = self.image_rgb
reference_u8 = self.template_rgb
image_f64 = self.image_rgb.astype(np.float64)
reference_f64 = self.template_rgb.astype(np.float64, copy=False)
matched_u8 = exposure.match_histograms(image_u8, reference_u8)
matched_f64 = exposure.match_histograms(image_f64, reference_f64)
assert_array_almost_equal(matched_u8.astype(np.float64), matched_f64)
| TestMatchHistogram |
python | huggingface__transformers | tests/tokenization/test_tokenization_fast.py | {
"start": 13447,
"end": 14287
} | class ____(unittest.TestCase):
def test_async_share_tokenizer(self):
# See https://github.com/huggingface/transformers/pull/12550
# and https://github.com/huggingface/tokenizers/issues/537
tokenizer = PreTrainedTokenizerFast.from_pretrained("robot-test/dummy-tokenizer-wordlevel")
text = "The Matrix is a 1999 science fiction action film."
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(self.fetch, tokenizer, text) for i in range(10)]
return_value = [future.result() for future in futures]
self.assertEqual(return_value, [[1, 10, 0, 8, 0, 18, 0, 0, 0, 2] for i in range(10)])
def fetch(self, tokenizer, text):
return tokenizer.encode(text, truncation="longest_first", padding="longest")
| ReduceMutableBorrowTests |
python | jina-ai__jina | tests/integration/dynamic_batching/test_dynamic_batching.py | {
"start": 2214,
"end": 3414
} | class ____(Executor):
@requests(on=['/foo'])
@dynamic_batching(preferred_batch_size=1)
def foo_fun(self, docs, **kwargs):
for doc in docs:
doc.text += FOO_SUCCESS_MSG
@requests(on=['/bar', '/baz'])
@dynamic_batching(preferred_batch_size=1, timeout=1)
def bar_fun(self, docs, **kwargs):
for doc in docs:
doc.text += BAR_SUCCESS_MSG
return docs
@requests(on=['/wrongtype'])
@dynamic_batching(preferred_batch_size=1, timeout=1)
def wrong_return_type_fun(self, docs, **kwargs):
return 'Fail me!'
@requests(on=['/wronglenda'])
@dynamic_batching(preferred_batch_size=1, timeout=1)
def wrong_return_lenda_fun(self, docs, **kwargs):
return DocumentArray.empty(len(docs) + 1)
@requests(on=['/wronglennone'])
@dynamic_batching(preferred_batch_size=1, timeout=1)
def wrong_return_lennone_fun(self, docs, **kwargs):
docs.append(Document())
@requests(on=['/param'])
@dynamic_batching(preferred_batch_size=1, timeout=1)
def param_fun(self, docs, parameters, **kwargs):
for doc in docs:
doc.text += str(parameters)
| PlaceholderExecutorWrongDecorator |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 849317,
"end": 850181
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"md5",
"name",
"package_version",
"sha1",
"sha256",
"size",
"updated_at",
"url",
)
md5 = sgqlc.types.Field(String, graphql_name="md5")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
package_version = sgqlc.types.Field("PackageVersion", graphql_name="packageVersion")
sha1 = sgqlc.types.Field(String, graphql_name="sha1")
sha256 = sgqlc.types.Field(String, graphql_name="sha256")
size = sgqlc.types.Field(Int, graphql_name="size")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
url = sgqlc.types.Field(URI, graphql_name="url")
| PackageFile |
python | joke2k__faker | tests/providers/test_currency.py | {
"start": 15942,
"end": 16363
} | class ____:
"""Test nl_NL currency provider"""
num_samples = 100
@classmethod
def setup_class(cls):
from faker.providers.currency.nl_NL import Provider as NlCurrencyProvider
cls.provider = NlCurrencyProvider
def test_pricetag(self, faker, num_samples):
for _ in range(num_samples):
pricetag = faker.pricetag()
assert isinstance(pricetag, str)
| TestNlNl |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/version.py | {
"start": 879,
"end": 1011
} | class ____(BaseModel):
"""Version information serializer for responses."""
version: str
git_version: str | None
| VersionInfo |
python | tiangolo__fastapi | docs_src/security/tutorial003.py | {
"start": 928,
"end": 2477
} | class ____(User):
hashed_password: str
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def fake_decode_token(token):
# This doesn't provide any security at all
# Check the next version
user = get_user(fake_users_db, token)
return user
async def get_current_user(token: str = Depends(oauth2_scheme)):
user = fake_decode_token(token)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
return user
async def get_current_active_user(current_user: User = Depends(get_current_user)):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token")
async def login(form_data: OAuth2PasswordRequestForm = Depends()):
user_dict = fake_users_db.get(form_data.username)
if not user_dict:
raise HTTPException(status_code=400, detail="Incorrect username or password")
user = UserInDB(**user_dict)
hashed_password = fake_hash_password(form_data.password)
if not hashed_password == user.hashed_password:
raise HTTPException(status_code=400, detail="Incorrect username or password")
return {"access_token": user.username, "token_type": "bearer"}
@app.get("/users/me")
async def read_users_me(current_user: User = Depends(get_current_active_user)):
return current_user
| UserInDB |
python | mahmoud__boltons | boltons/queueutils.py | {
"start": 6774,
"end": 7205
} | class ____(BasePriorityQueue):
"""A priority queue inherited from :class:`BasePriorityQueue`,
backed by a list and based on the :func:`heapq.heappop` and
:func:`heapq.heappush` functions in the built-in :mod:`heapq`
module.
"""
@staticmethod
def _pop_entry(backend):
return heappop(backend)
@staticmethod
def _push_entry(backend, entry):
heappush(backend, entry)
| HeapPriorityQueue |
python | ray-project__ray | python/ray/util/state/common.py | {
"start": 60720,
"end": 67705
} | class ____:
# Carried over from ListApiResponse
# We currently use list API for listing the resources
total: int
# Carried over from ListApiResponse
# Number of resources returned by data sources after truncation
num_after_truncation: int
# Number of resources after filtering
num_filtered: int
result: StateSummary = None
partial_failure_warning: Optional[str] = ""
# A list of warnings to print.
warnings: Optional[List[str]] = None
def resource_to_schema(resource: StateResource) -> StateSchema:
if resource == StateResource.ACTORS:
return ActorState
elif resource == StateResource.JOBS:
return JobState
elif resource == StateResource.NODES:
return NodeState
elif resource == StateResource.OBJECTS:
return ObjectState
elif resource == StateResource.PLACEMENT_GROUPS:
return PlacementGroupState
elif resource == StateResource.RUNTIME_ENVS:
return RuntimeEnvState
elif resource == StateResource.TASKS:
return TaskState
elif resource == StateResource.WORKERS:
return WorkerState
elif resource == StateResource.CLUSTER_EVENTS:
return ClusterEventState
else:
assert False, "Unreachable"
def protobuf_message_to_dict(
message,
fields_to_decode: List[str],
preserving_proto_field_name: bool = True,
) -> dict:
"""Convert a protobuf message to dict
Args:
fields_to_decode: field names which will be decoded from binary to hex.
preserving_proto_field_name: a pass-through option for protobuf message
method. See google.protobuf MessageToDict
Return:
Dictionary of the converted rpc protobuf.
"""
return dashboard_utils.message_to_dict(
message,
fields_to_decode,
always_print_fields_with_no_presence=True,
preserving_proto_field_name=preserving_proto_field_name,
)
def protobuf_to_task_state_dict(message: TaskEvents) -> dict:
"""
Convert a TaskEvents to a dic repr of `TaskState`
"""
task_attempt = protobuf_message_to_dict(
message=message,
fields_to_decode=[
"task_id",
"job_id",
"node_id",
"actor_id",
"parent_task_id",
"worker_id",
"placement_group_id",
"component_id",
],
)
task_state = {}
task_info = task_attempt.get("task_info", {})
state_updates = task_attempt.get("state_updates", {})
profiling_data = task_attempt.get("profile_events", {})
if profiling_data:
for event in profiling_data["events"]:
# End/start times are recorded in ns. We convert them to ms.
event["end_time"] = int(event["end_time"]) / 1e6
event["start_time"] = int(event["start_time"]) / 1e6
event["extra_data"] = json.loads(event["extra_data"])
task_state["profiling_data"] = profiling_data
# Convert those settable fields
mappings = [
(
task_info,
[
"task_id",
"name",
"actor_id",
"type",
"func_or_class_name",
"language",
"required_resources",
"runtime_env_info",
"parent_task_id",
"placement_group_id",
"call_site",
"label_selector",
],
),
(task_attempt, ["task_id", "attempt_number", "job_id"]),
(
state_updates,
[
"node_id",
"worker_id",
"task_log_info",
"actor_repr_name",
"worker_pid",
"is_debugger_paused",
],
),
]
for src, keys in mappings:
for key in keys:
task_state[key] = src.get(key)
task_state["creation_time_ms"] = None
task_state["start_time_ms"] = None
task_state["end_time_ms"] = None
events = []
if "state_ts_ns" in state_updates:
state_ts_ns = state_updates["state_ts_ns"]
for state_name, state in TaskStatus.items():
# state_ts_ns is Map[str, str] after protobuf MessageToDict
key = str(state)
if key in state_ts_ns:
# timestamp is recorded as nanosecond from the backend.
# We need to convert it to the second.
ts_ms = int(state_ts_ns[key]) // 1e6
events.append(
{
"state": state_name,
"created_ms": ts_ms,
}
)
if state == TaskStatus.PENDING_ARGS_AVAIL:
task_state["creation_time_ms"] = ts_ms
if state == TaskStatus.RUNNING:
task_state["start_time_ms"] = ts_ms
if state == TaskStatus.FINISHED or state == TaskStatus.FAILED:
task_state["end_time_ms"] = ts_ms
task_state["events"] = events
if len(events) > 0:
latest_state = events[-1]["state"]
else:
latest_state = "NIL"
task_state["state"] = latest_state
# Parse error info
if latest_state == "FAILED":
error_info = state_updates.get("error_info", None)
if error_info:
# We captured colored error message printed to console, e.g.
# "\x1b[31mTraceback (most recent call last):\x1b[0m",
# this is to remove the ANSI escape codes.
task_state["error_message"] = remove_ansi_escape_codes(
error_info.get("error_message", "")
)
task_state["error_type"] = error_info.get("error_type", "")
# Parse actor task name for actor with repr name.
if (
state_updates.get("actor_repr_name")
and task_state["type"] == "ACTOR_TASK"
and task_state["name"]
== task_state["func_or_class_name"] # no name option provided.
):
# If it's an actor task with no name override, and has repr name defined
# for the actor, we override the name.
method_name = task_state["name"].split(".")[-1]
actor_repr_task_name = f"{state_updates['actor_repr_name']}.{method_name}"
task_state["name"] = actor_repr_task_name
return task_state
def remove_ansi_escape_codes(text: str) -> str:
"""Remove ANSI escape codes from a string."""
import re
return re.sub(r"\x1b[^m]*m", "", text)
def dict_to_state(d: Dict, state_resource: StateResource) -> StateSchema:
"""Convert a dict to a state schema.
Args:
d: a dict to convert.
state_resource: the state resource to convert to.
Returns:
A state schema.
"""
try:
return resource_to_schema(state_resource)(**d)
except Exception as e:
raise RayStateApiException(f"Failed to convert {d} to StateSchema: {e}") from e
| SummaryApiResponse |
python | xlwings__xlwings | tests/test_conversion.py | {
"start": 7648,
"end": 23195
} | class ____(TestBase):
def test_dataframe_1(self):
df_expected = pd.DataFrame(
[[1, "test1"], [2, "test2"], [np.nan, None], [3.3, "test3"]],
columns=["a", "b"],
)
self.wb1.sheets[0].range("A1").value = df_expected
df_result = self.wb1.sheets[0].range("A1:C5").options(pd.DataFrame).value
df_result.index = pd.Int64Index(df_result.index)
assert_frame_equal(df_expected, df_result)
def test_dataframe_2(self):
"""test_dataframe_2: Covers GH Issue #31"""
df_expected = pd.DataFrame([1, 3, 5, np.nan, 6, 8], columns=["col1"])
self.wb1.sheets[0].range("A9").value = df_expected
cells = self.wb1.sheets[0].range("B9:B15").value
df_result = DataFrame(cells[1:], columns=[cells[0]])
assert_frame_equal(df_expected, df_result)
def test_dataframe_multiindex(self):
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
["x", "x", "x", "x", "y", "y", "y", "y"],
]
)
)
index = pd.MultiIndex.from_tuples(tuples, names=["first", "second", "third"])
df_expected = pd.DataFrame(
[
[1.1, 2.2],
[3.3, 4.4],
[5.5, 6.6],
[7.7, 8.8],
[9.9, 10.10],
[11.11, 12.12],
[13.13, 14.14],
[15.15, 16.16],
],
index=index,
columns=["one", "two"],
)
self.wb1.sheets[0].range("A20").value = df_expected
cells = self.wb1.sheets[0].range("D20").expand("table").value
multiindex = self.wb1.sheets[0].range("A20:C28").value
ix = pd.MultiIndex.from_tuples(multiindex[1:], names=multiindex[0])
df_result = DataFrame(cells[1:], columns=cells[0], index=ix)
assert_frame_equal(df_expected, df_result)
def test_dataframe_multiheader(self):
header = [["Foo", "Foo", "Bar", "Bar", "Baz"], ["A", "B", "C", "D", "E"]]
df_expected = pd.DataFrame(
[
[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 1.0, 2.0, 3.0, 4.0],
],
columns=pd.MultiIndex.from_arrays(header),
)
self.wb1.sheets[0].range("A52").value = df_expected
cells = self.wb1.sheets[0].range("B52").expand("table").value
df_result = DataFrame(cells[2:], columns=pd.MultiIndex.from_arrays(cells[:2]))
assert_frame_equal(df_expected, df_result)
def test_dataframe_dateindex(self):
rng = pd.date_range("1/1/2012", periods=10, freq="D")
df_expected = pd.DataFrame(
np.arange(50).reshape(10, 5) + 0.1,
index=rng,
columns=["one", "two", "three", "four", "five"],
)
self.wb1.sheets[0].range("A100").value = df_expected
if sys.platform.startswith("win") and self.wb1.app.version == "14.0":
self.wb1.sheets[0].range("A100").expand(
"vertical"
).number_format = "dd/mm/yyyy" # Hack for Excel 2010 bug, see GH #43
cells = self.wb1.sheets[0].range("B100").expand("table").value
index = self.wb1.sheets[0].range("A101").expand("vertical").value
df_result = DataFrame(cells[1:], index=index, columns=cells[0])
assert_frame_equal(df_expected, df_result)
def test_read_df_0header_0index(self):
self.wb1.sheets[0].range("A1").value = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
df1 = pd.DataFrame([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
df2 = (
self.wb1.sheets[0]
.range("A1:C3")
.options(pd.DataFrame, header=0, index=False)
.value
)
assert_frame_equal(df1, df2)
def test_df_1header_0index(self):
self.wb1.sheets[0].range("A1").options(
pd.DataFrame, index=False, header=True
).value = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], columns=["a", "b"])
df = (
self.wb1.sheets[0]
.range("A1")
.options(pd.DataFrame, index=False, header=True, expand="table")
.value
)
assert_frame_equal(
df, pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], columns=["a", "b"])
)
def test_df_0header_1index(self):
self.wb1.sheets[0].range("A1").options(
pd.DataFrame, index=True, header=False
).value = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], index=[10.0, 20.0])
df = (
self.wb1.sheets[0]
.range("A1")
.options(pd.DataFrame, index=True, header=False, expand="table")
.value
)
assert_frame_equal(
df, pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], index=[10.0, 20.0])
)
def test_read_df_1header_1namedindex(self):
self.wb1.sheets[0].range("A1").value = [
["ix1", "c", "d", "c"],
[1, 1, 2, 3],
[2, 4, 5, 6],
]
df1 = pd.DataFrame(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
index=[1.0, 2.0],
columns=["c", "d", "c"],
)
df1.index.name = "ix1"
df2 = self.wb1.sheets[0].range("A1:D3").options(pd.DataFrame).value
assert_frame_equal(df1, df2)
def test_read_df_1header_1unnamedindex(self):
self.wb1.sheets[0].range("A1").value = [
[None, "c", "d", "c"],
[1, 1, 2, 3],
[2, 4, 5, 6],
]
df1 = pd.DataFrame(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
index=pd.Index([1.0, 2.0]),
columns=["c", "d", "c"],
)
df2 = self.wb1.sheets[0].range("A1:D3").options(pd.DataFrame).value
assert_frame_equal(df1, df2)
def test_read_df_2header_1namedindex(self):
self.wb1.sheets[0].range("A1").value = [
[None, "a", "a", "b"],
["ix1", "c", "d", "c"],
[1, 1, 2, 3],
[2, 4, 5, 6],
]
df1 = pd.DataFrame(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
index=[1.0, 2.0],
columns=pd.MultiIndex.from_arrays([["a", "a", "b"], ["c", "d", "c"]]),
)
df1.index.name = "ix1"
df2 = self.wb1.sheets[0].range("A1:D4").options(pd.DataFrame, header=2).value
assert_frame_equal(df1, df2)
def test_read_df_2header_1unnamedindex(self):
self.wb1.sheets[0].range("A1").value = [
[None, "a", "a", "b"],
[None, "c", "d", "c"],
[1, 1, 2, 3],
[2, 4, 5, 6],
]
df1 = pd.DataFrame(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
index=pd.Index([1, 2]),
columns=pd.MultiIndex.from_arrays([["a", "a", "b"], ["c", "d", "c"]]),
)
df2 = self.wb1.sheets[0].range("A1:D4").options(pd.DataFrame, header=2).value
df2.index = pd.Int64Index(df2.index)
assert_frame_equal(df1, df2)
def test_read_df_2header_2namedindex(self):
self.wb1.sheets[0].range("A1").value = [
[None, None, "a", "a", "b"],
["x1", "x2", "c", "d", "c"],
["a", 1, 1, 2, 3],
["a", 2, 4, 5, 6],
["b", 1, 7, 8, 9],
]
df1 = pd.DataFrame(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
index=pd.MultiIndex.from_arrays(
[["a", "a", "b"], [1.0, 2.0, 1.0]], names=["x1", "x2"]
),
columns=pd.MultiIndex.from_arrays([["a", "a", "b"], ["c", "d", "c"]]),
)
df2 = (
self.wb1.sheets[0]
.range("A1:E5")
.options(pd.DataFrame, header=2, index=2)
.value
)
assert_frame_equal(df1, df2)
def test_read_df_2header_2unnamedindex(self):
self.wb1.sheets[0].range("A1").value = [
[None, None, "a", "a", "b"],
[None, None, "c", "d", "c"],
["a", 1, 1, 2, 3],
["a", 2, 4, 5, 6],
["b", 1, 7, 8, 9],
]
df1 = pd.DataFrame(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
index=pd.MultiIndex.from_arrays([["a", "a", "b"], [1.0, 2.0, 1.0]]),
columns=pd.MultiIndex.from_arrays([["a", "a", "b"], ["c", "d", "c"]]),
)
df2 = (
self.wb1.sheets[0]
.range("A1:E5")
.options(pd.DataFrame, header=2, index=2)
.value
)
assert_frame_equal(df1, df2)
def test_read_df_1header_2namedindex(self):
self.wb1.sheets[0].range("A1").value = [
["x1", "x2", "a", "d", "c"],
["a", 1, 1, 2, 3],
["a", 2, 4, 5, 6],
["b", 1, 7, 8, 9],
]
df1 = pd.DataFrame(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
index=pd.MultiIndex.from_arrays(
[["a", "a", "b"], [1.0, 2.0, 1.0]], names=["x1", "x2"]
),
columns=["a", "d", "c"],
)
df2 = (
self.wb1.sheets[0]
.range("A1:E4")
.options(pd.DataFrame, header=1, index=2)
.value
)
assert_frame_equal(df1, df2)
def test_timeseries_1(self):
rng = pd.date_range("1/1/2012", periods=10, freq="D")
series_expected = pd.Series(np.arange(len(rng)) + 0.1, rng)
self.wb1.sheets[0].range("A40").options(header=False).value = series_expected
if sys.platform.startswith("win") and self.wb1.app.version == "14.0":
self.wb1.sheets[0].range("A40").expand(
"vertical"
).number_format = "dd/mm/yyyy" # Hack for Excel 2010 bug, see GH #43
series_result = (
self.wb1.sheets[0].range("A40:B49").options(pd.Series, header=False).value
)
assert_series_equal(series_expected, series_result)
def test_read_series_noheader_noindex(self):
self.wb1.sheets[0].range("A1").value = [[1.0], [2.0], [3.0]]
s = (
self.wb1.sheets[0]
.range("A1:A3")
.options(pd.Series, index=False, header=False)
.value
)
assert_series_equal(s, pd.Series([1.0, 2.0, 3.0]))
def test_read_series_noheader_index(self):
self.wb1.sheets[0].range("A1").value = [[10.0, 1.0], [20.0, 2.0], [30.0, 3.0]]
s = (
self.wb1.sheets[0]
.range("A1:B3")
.options(pd.Series, index=True, header=False)
.value
)
assert_series_equal(s, pd.Series([1.0, 2.0, 3.0], index=[10.0, 20.0, 30.0]))
def test_read_series_header_noindex(self):
self.wb1.sheets[0].range("A1").value = [["name"], [1.0], [2.0], [3.0]]
s = (
self.wb1.sheets[0]
.range("A1:A4")
.options(pd.Series, index=False, header=True)
.value
)
assert_series_equal(s, pd.Series([1.0, 2.0, 3.0], name="name"))
def test_read_series_header_index(self):
# Named index
self.wb1.sheets[0].range("A1").value = [
["ix", "name"],
[10.0, 1.0],
[20.0, 2.0],
[30.0, 3.0],
]
s = (
self.wb1.sheets[0]
.range("A1:B4")
.options(pd.Series, index=True, header=True)
.value
)
assert_series_equal(
s,
pd.Series(
[1.0, 2.0, 3.0],
name="name",
index=pd.Index([10.0, 20.0, 30.0], name="ix"),
),
)
# Nameless index
self.wb1.sheets[0].range("A1").value = [
[None, "name"],
[10.0, 1.0],
[20.0, 2.0],
[30.0, 3.0],
]
s = (
self.wb1.sheets[0]
.range("A1:B4")
.options(pd.Series, index=True, header=True)
.value
)
assert_series_equal(
s, pd.Series([1.0, 2.0, 3.0], name="name", index=[10.0, 20.0, 30.0])
)
def test_write_series_noheader_noindex(self):
self.wb1.sheets[0].range("A1").options(index=False).value = pd.Series(
[1.0, 2.0, 3.0]
)
self.assertEqual(
[[1.0], [2.0], [3.0]],
self.wb1.sheets[0].range("A1").options(ndim=2, expand="table").value,
)
def test_write_series_noheader_index(self):
self.wb1.sheets[0].range("A1").options(index=True).value = pd.Series(
[1.0, 2.0, 3.0], index=[10.0, 20.0, 30.0]
)
self.assertEqual(
[[10.0, 1.0], [20.0, 2.0], [30.0, 3.0]],
self.wb1.sheets[0].range("A1").options(ndim=2, expand="table").value,
)
def test_write_series_header_noindex(self):
self.wb1.sheets[0].range("A1").options(index=False).value = pd.Series(
[1.0, 2.0, 3.0], name="name"
)
self.assertEqual(
[["name"], [1.0], [2.0], [3.0]],
self.wb1.sheets[0].range("A1").options(ndim=2, expand="table").value,
)
def test_write_series_header_index(self):
# Named index
self.wb1.sheets[0].range("A1").value = pd.Series(
[1.0, 2.0, 3.0], name="name", index=pd.Index([10.0, 20.0, 30.0], name="ix")
)
self.assertEqual(
[["ix", "name"], [10.0, 1.0], [20.0, 2.0], [30.0, 3.0]],
self.wb1.sheets[0].range("A1").options(ndim=2, expand="table").value,
)
# Nameless index
self.wb1.sheets[0].range("A1").value = pd.Series(
[1.0, 2.0, 3.0], name="name", index=[10.0, 20.0, 30.0]
)
self.assertEqual(
[[None, "name"], [10.0, 1.0], [20.0, 2.0], [30.0, 3.0]],
self.wb1.sheets[0].range("A1:B4").options(ndim=2).value,
)
def test_dataframe_timezone(self):
np_dt = np.datetime64(1434149887000, "ms")
ix = pd.DatetimeIndex(data=[np_dt], tz="GMT")
df = pd.DataFrame(data=[1], index=ix, columns=["A"])
self.wb1.sheets[0].range("A1").value = df
self.assertEqual(
self.wb1.sheets[0].range("A2").value, dt.datetime(2015, 6, 12, 22, 58, 7)
)
def test_NaT(self):
df = pd.DataFrame(
[pd.Timestamp("20120102"), np.nan], index=[0.0, 1.0], columns=["one"]
)
self.wb1.sheets[0].range("A1").value = df
assert_frame_equal(
df,
self.wb1.sheets[0].range("A1").options(pd.DataFrame, expand="table").value,
)
def test_period_index(self):
idx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3])
df = pd.DataFrame(index=idx, data=[1, 2])
self.wb1.sheets[0].range("A1").value = df
self.assertEqual(
[[None, 0.0], ["2000Q1", 1.0], ["2002Q3", 2.0]],
self.wb1.sheets[0]["A1"].options(expand="table").value,
)
def test_NA(self):
df = pd.DataFrame([pd.NA, np.nan], index=[0.0, 1.0], columns=["one"])
self.wb1.sheets[0].range("A1").value = df
assert_frame_equal(
df,
self.wb1.sheets[0].range("A1").options(pd.DataFrame, expand="table").value,
)
if __name__ == "__main__":
unittest.main()
| TestPandas |
python | astropy__astropy | astropy/modeling/_fitting_parallel.py | {
"start": 1528,
"end": 3536
} | class ____:
"""
This class is intended to contain the object array of all fit_info values
and provide a convenience method to access specific items from fit_info
as arrays.
"""
def __init__(self, fit_info_array):
self._fit_info_array = fit_info_array
@property
def shape(self):
return self._fit_info_array.shape
@property
def ndim(self):
return self._fit_info_array.ndim
def __getitem__(self, item):
result = self._fit_info_array[item]
if hasattr(result, "ndim"):
return FitInfoArrayContainer(result)
else:
return result
def get_property_as_array(self, name):
"""
Return an array of one of the fit information properties
Parameters
----------
name : str
The name of a property present on the individual fit information
objects.
"""
array = None
for index in np.ndindex(self._fit_info_array.shape):
fit_info = self._fit_info_array[index]
if fit_info is not None:
value = np.array(getattr(fit_info, name))
if array is None:
array = np.zeros(self.shape + value.shape, dtype=value.dtype)
if value.shape != array.shape[self.ndim :]:
raise ValueError(
"Property {name} does not have consistent shape in all fit_info"
)
array[index] = value
return array
@property
def properties(self):
"""
The properties available to query with :meth:`~astropy.modeling.fitting.FitInfoArrayContainer.get_property_as_array`
"""
# Find the first non-None .fit_info
for index in np.ndindex(self._fit_info_array.shape):
fit_info = self._fit_info_array[index]
if fit_info is not None:
return tuple(sorted(fit_info))
return ()
| FitInfoArrayContainer |
python | sympy__sympy | sympy/printing/tests/test_latex.py | {
"start": 6007,
"end": 140094
} | class ____(sym.lowergamma):
pass # testing notation inheritance by a subclass with same name
x, y, z, t, w, a, b, c, s, p = symbols('x y z t w a b c s p')
k, m, n = symbols('k m n', integer=True)
def test_printmethod():
class R(Abs):
def _latex(self, printer):
return "foo(%s)" % printer._print(self.args[0])
assert latex(R(x)) == r"foo(x)"
class R(Abs):
def _latex(self, printer):
return "foo"
assert latex(R(x)) == r"foo"
def test_latex_basic():
assert latex(1 + x) == r"x + 1"
assert latex(x**2) == r"x^{2}"
assert latex(x**(1 + x)) == r"x^{x + 1}"
assert latex(x**3 + x + 1 + x**2) == r"x^{3} + x^{2} + x + 1"
assert latex(2*x*y) == r"2 x y"
assert latex(2*x*y, mul_symbol='dot') == r"2 \cdot x \cdot y"
assert latex(3*x**2*y, mul_symbol='\\,') == r"3\,x^{2}\,y"
assert latex(1.5*3**x, mul_symbol='\\,') == r"1.5 \cdot 3^{x}"
assert latex(x**S.Half**5) == r"\sqrt[32]{x}"
assert latex(Mul(S.Half, x**2, -5, evaluate=False)) == r"\frac{1}{2} x^{2} \left(-5\right)"
assert latex(Mul(S.Half, x**2, 5, evaluate=False)) == r"\frac{1}{2} x^{2} \cdot 5"
assert latex(Mul(-5, -5, evaluate=False)) == r"\left(-5\right) \left(-5\right)"
assert latex(Mul(5, -5, evaluate=False)) == r"5 \left(-5\right)"
assert latex(Mul(S.Half, -5, S.Half, evaluate=False)) == r"\frac{1}{2} \left(-5\right) \frac{1}{2}"
assert latex(Mul(5, I, 5, evaluate=False)) == r"5 i 5"
assert latex(Mul(5, I, -5, evaluate=False)) == r"5 i \left(-5\right)"
assert latex(Mul(Pow(x, 2), S.Half*x + 1)) == r"x^{2} \left(\frac{x}{2} + 1\right)"
assert latex(Mul(Pow(x, 3), Rational(2, 3)*x + 1)) == r"x^{3} \left(\frac{2 x}{3} + 1\right)"
assert latex(Mul(Pow(x, 11), 2*x + 1)) == r"x^{11} \left(2 x + 1\right)"
assert latex(Mul(0, 1, evaluate=False)) == r'0 \cdot 1'
assert latex(Mul(1, 0, evaluate=False)) == r'1 \cdot 0'
assert latex(Mul(1, 1, evaluate=False)) == r'1 \cdot 1'
assert latex(Mul(-1, 1, evaluate=False)) == r'\left(-1\right) 1'
assert latex(Mul(1, 1, 1, evaluate=False)) == r'1 \cdot 1 \cdot 1'
assert latex(Mul(1, 2, evaluate=False)) == r'1 \cdot 2'
assert latex(Mul(1, S.Half, evaluate=False)) == r'1 \cdot \frac{1}{2}'
assert latex(Mul(1, 1, S.Half, evaluate=False)) == \
r'1 \cdot 1 \cdot \frac{1}{2}'
assert latex(Mul(1, 1, 2, 3, x, evaluate=False)) == \
r'1 \cdot 1 \cdot 2 \cdot 3 x'
assert latex(Mul(1, -1, evaluate=False)) == r'1 \left(-1\right)'
assert latex(Mul(4, 3, 2, 1, 0, y, x, evaluate=False)) == \
r'4 \cdot 3 \cdot 2 \cdot 1 \cdot 0 y x'
assert latex(Mul(4, 3, 2, 1+z, 0, y, x, evaluate=False)) == \
r'4 \cdot 3 \cdot 2 \left(z + 1\right) 0 y x'
assert latex(Mul(Rational(2, 3), Rational(5, 7), evaluate=False)) == \
r'\frac{2}{3} \cdot \frac{5}{7}'
assert latex(1/x) == r"\frac{1}{x}"
assert latex(1/x, fold_short_frac=True) == r"1 / x"
assert latex(-S(3)/2) == r"- \frac{3}{2}"
assert latex(-S(3)/2, fold_short_frac=True) == r"- 3 / 2"
assert latex(1/x**2) == r"\frac{1}{x^{2}}"
assert latex(1/(x + y)/2) == r"\frac{1}{2 \left(x + y\right)}"
assert latex(x/2) == r"\frac{x}{2}"
assert latex(x/2, fold_short_frac=True) == r"x / 2"
assert latex((x + y)/(2*x)) == r"\frac{x + y}{2 x}"
assert latex((x + y)/(2*x), fold_short_frac=True) == \
r"\left(x + y\right) / 2 x"
assert latex((x + y)/(2*x), long_frac_ratio=0) == \
r"\frac{1}{2 x} \left(x + y\right)"
assert latex((x + y)/x) == r"\frac{x + y}{x}"
assert latex((x + y)/x, long_frac_ratio=3) == r"\frac{x + y}{x}"
assert latex((2*sqrt(2)*x)/3) == r"\frac{2 \sqrt{2} x}{3}"
assert latex((2*sqrt(2)*x)/3, long_frac_ratio=2) == \
r"\frac{2 x}{3} \sqrt{2}"
assert latex(binomial(x, y)) == r"{\binom{x}{y}}"
x_star = Symbol('x^*')
f = Function('f')
assert latex(x_star**2) == r"\left(x^{*}\right)^{2}"
assert latex(x_star**2, parenthesize_super=False) == r"{x^{*}}^{2}"
assert latex(Derivative(f(x_star), x_star,2)) == r"\frac{d^{2}}{d \left(x^{*}\right)^{2}} f{\left(x^{*} \right)}"
assert latex(Derivative(f(x_star), x_star,2), parenthesize_super=False) == r"\frac{d^{2}}{d {x^{*}}^{2}} f{\left(x^{*} \right)}"
assert latex(2*Integral(x, x)/3) == r"\frac{2 \int x\, dx}{3}"
assert latex(2*Integral(x, x)/3, fold_short_frac=True) == \
r"\left(2 \int x\, dx\right) / 3"
assert latex(sqrt(x)) == r"\sqrt{x}"
assert latex(x**Rational(1, 3)) == r"\sqrt[3]{x}"
assert latex(x**Rational(1, 3), root_notation=False) == r"x^{\frac{1}{3}}"
assert latex(sqrt(x)**3) == r"x^{\frac{3}{2}}"
assert latex(sqrt(x), itex=True) == r"\sqrt{x}"
assert latex(x**Rational(1, 3), itex=True) == r"\root{3}{x}"
assert latex(sqrt(x)**3, itex=True) == r"x^{\frac{3}{2}}"
assert latex(x**Rational(3, 4)) == r"x^{\frac{3}{4}}"
assert latex(x**Rational(3, 4), fold_frac_powers=True) == r"x^{3/4}"
assert latex((x + 1)**Rational(3, 4)) == \
r"\left(x + 1\right)^{\frac{3}{4}}"
assert latex((x + 1)**Rational(3, 4), fold_frac_powers=True) == \
r"\left(x + 1\right)^{3/4}"
assert latex(AlgebraicNumber(sqrt(2))) == r"\sqrt{2}"
assert latex(AlgebraicNumber(sqrt(2), [3, -7])) == r"-7 + 3 \sqrt{2}"
assert latex(AlgebraicNumber(sqrt(2), alias='alpha')) == r"\alpha"
assert latex(AlgebraicNumber(sqrt(2), [3, -7], alias='alpha')) == \
r"3 \alpha - 7"
assert latex(AlgebraicNumber(2**(S(1)/3), [1, 3, -7], alias='beta')) == \
r"\beta^{2} + 3 \beta - 7"
k = ZZ.cyclotomic_field(5)
assert latex(k.ext.field_element([1, 2, 3, 4])) == \
r"\zeta^{3} + 2 \zeta^{2} + 3 \zeta + 4"
assert latex(k.ext.field_element([1, 2, 3, 4]), order='old') == \
r"4 + 3 \zeta + 2 \zeta^{2} + \zeta^{3}"
assert latex(k.primes_above(19)[0]) == \
r"\left(19, \zeta^{2} + 5 \zeta + 1\right)"
assert latex(k.primes_above(19)[0], order='old') == \
r"\left(19, 1 + 5 \zeta + \zeta^{2}\right)"
assert latex(k.primes_above(7)[0]) == r"\left(7\right)"
assert latex(1.5e20*x) == r"1.5 \cdot 10^{20} x"
assert latex(1.5e20*x, mul_symbol='dot') == r"1.5 \cdot 10^{20} \cdot x"
assert latex(1.5e20*x, mul_symbol='times') == \
r"1.5 \times 10^{20} \times x"
assert latex(1/sin(x)) == r"\frac{1}{\sin{\left(x \right)}}"
assert latex(sin(x)**-1) == r"\frac{1}{\sin{\left(x \right)}}"
assert latex(sin(x)**Rational(3, 2)) == \
r"\sin^{\frac{3}{2}}{\left(x \right)}"
assert latex(sin(x)**Rational(3, 2), fold_frac_powers=True) == \
r"\sin^{3/2}{\left(x \right)}"
assert latex(~x) == r"\neg x"
assert latex(x & y) == r"x \wedge y"
assert latex(x & y & z) == r"x \wedge y \wedge z"
assert latex(x | y) == r"x \vee y"
assert latex(x | y | z) == r"x \vee y \vee z"
assert latex((x & y) | z) == r"z \vee \left(x \wedge y\right)"
assert latex(Implies(x, y)) == r"x \Rightarrow y"
assert latex(~(x >> ~y)) == r"x \not\Rightarrow \neg y"
assert latex(Implies(Or(x,y), z)) == r"\left(x \vee y\right) \Rightarrow z"
assert latex(Implies(z, Or(x,y))) == r"z \Rightarrow \left(x \vee y\right)"
assert latex(~(x & y)) == r"\neg \left(x \wedge y\right)"
assert latex(~x, symbol_names={x: "x_i"}) == r"\neg x_i"
assert latex(x & y, symbol_names={x: "x_i", y: "y_i"}) == \
r"x_i \wedge y_i"
assert latex(x & y & z, symbol_names={x: "x_i", y: "y_i", z: "z_i"}) == \
r"x_i \wedge y_i \wedge z_i"
assert latex(x | y, symbol_names={x: "x_i", y: "y_i"}) == r"x_i \vee y_i"
assert latex(x | y | z, symbol_names={x: "x_i", y: "y_i", z: "z_i"}) == \
r"x_i \vee y_i \vee z_i"
assert latex((x & y) | z, symbol_names={x: "x_i", y: "y_i", z: "z_i"}) == \
r"z_i \vee \left(x_i \wedge y_i\right)"
assert latex(Implies(x, y), symbol_names={x: "x_i", y: "y_i"}) == \
r"x_i \Rightarrow y_i"
assert latex(Pow(Rational(1, 3), -1, evaluate=False)) == r"\frac{1}{\frac{1}{3}}"
assert latex(Pow(Rational(1, 3), -2, evaluate=False)) == r"\frac{1}{(\frac{1}{3})^{2}}"
assert latex(Pow(Integer(1)/100, -1, evaluate=False)) == r"\frac{1}{\frac{1}{100}}"
p = Symbol('p', positive=True)
assert latex(exp(-p)*log(p)) == r"e^{- p} \log{\left(p \right)}"
assert latex(Pow(Rational(2, 3), -1, evaluate=False)) == r'\frac{1}{\frac{2}{3}}'
assert latex(Pow(Rational(4, 3), -1, evaluate=False)) == r'\frac{1}{\frac{4}{3}}'
assert latex(Pow(Rational(-3, 4), -1, evaluate=False)) == r'\frac{1}{- \frac{3}{4}}'
assert latex(Pow(Rational(-4, 4), -1, evaluate=False)) == r'\frac{1}{-1}'
assert latex(Pow(Rational(1, 3), -1, evaluate=False)) == r'\frac{1}{\frac{1}{3}}'
assert latex(Pow(Rational(-1, 3), -1, evaluate=False)) == r'\frac{1}{- \frac{1}{3}}'
def test_latex_builtins():
assert latex(True) == r"\text{True}"
assert latex(False) == r"\text{False}"
assert latex(None) == r"\text{None}"
assert latex(true) == r"\text{True}"
assert latex(false) == r'\text{False}'
def test_latex_SingularityFunction():
assert latex(SingularityFunction(x, 4, 5)) == \
r"{\left\langle x - 4 \right\rangle}^{5}"
assert latex(SingularityFunction(x, -3, 4)) == \
r"{\left\langle x + 3 \right\rangle}^{4}"
assert latex(SingularityFunction(x, 0, 4)) == \
r"{\left\langle x \right\rangle}^{4}"
assert latex(SingularityFunction(x, a, n)) == \
r"{\left\langle - a + x \right\rangle}^{n}"
assert latex(SingularityFunction(x, 4, -2)) == \
r"{\left\langle x - 4 \right\rangle}^{-2}"
assert latex(SingularityFunction(x, 4, -1)) == \
r"{\left\langle x - 4 \right\rangle}^{-1}"
assert latex(SingularityFunction(x, 4, 5)**3) == \
r"{\left({\langle x - 4 \rangle}^{5}\right)}^{3}"
assert latex(SingularityFunction(x, -3, 4)**3) == \
r"{\left({\langle x + 3 \rangle}^{4}\right)}^{3}"
assert latex(SingularityFunction(x, 0, 4)**3) == \
r"{\left({\langle x \rangle}^{4}\right)}^{3}"
assert latex(SingularityFunction(x, a, n)**3) == \
r"{\left({\langle - a + x \rangle}^{n}\right)}^{3}"
assert latex(SingularityFunction(x, 4, -2)**3) == \
r"{\left({\langle x - 4 \rangle}^{-2}\right)}^{3}"
assert latex((SingularityFunction(x, 4, -1)**3)**3) == \
r"{\left({\langle x - 4 \rangle}^{-1}\right)}^{9}"
def test_latex_cycle():
assert latex(Cycle(1, 2, 4)) == r"\left( 1\; 2\; 4\right)"
assert latex(Cycle(1, 2)(4, 5, 6)) == \
r"\left( 1\; 2\right)\left( 4\; 5\; 6\right)"
assert latex(Cycle()) == r"\left( \right)"
def test_latex_permutation():
assert latex(Permutation(1, 2, 4)) == r"\left( 1\; 2\; 4\right)"
assert latex(Permutation(1, 2)(4, 5, 6)) == \
r"\left( 1\; 2\right)\left( 4\; 5\; 6\right)"
assert latex(Permutation()) == r"\left( \right)"
assert latex(Permutation(2, 4)*Permutation(5)) == \
r"\left( 2\; 4\right)\left( 5\right)"
assert latex(Permutation(5)) == r"\left( 5\right)"
assert latex(Permutation(0, 1), perm_cyclic=False) == \
r"\begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}"
assert latex(Permutation(0, 1)(2, 3), perm_cyclic=False) == \
r"\begin{pmatrix} 0 & 1 & 2 & 3 \\ 1 & 0 & 3 & 2 \end{pmatrix}"
assert latex(Permutation(), perm_cyclic=False) == \
r"\left( \right)"
with warns_deprecated_sympy():
old_print_cyclic = Permutation.print_cyclic
Permutation.print_cyclic = False
assert latex(Permutation(0, 1)(2, 3)) == \
r"\begin{pmatrix} 0 & 1 & 2 & 3 \\ 1 & 0 & 3 & 2 \end{pmatrix}"
Permutation.print_cyclic = old_print_cyclic
def test_latex_Float():
assert latex(Float(1.0e100)) == r"1.0 \cdot 10^{100}"
assert latex(Float(1.0e-100)) == r"1.0 \cdot 10^{-100}"
assert latex(Float(1.0e-100), mul_symbol="times") == \
r"1.0 \times 10^{-100}"
assert latex(Float('10000.0'), full_prec=False, min=-2, max=2) == \
r"1.0 \cdot 10^{4}"
assert latex(Float('10000.0'), full_prec=False, min=-2, max=4) == \
r"1.0 \cdot 10^{4}"
assert latex(Float('10000.0'), full_prec=False, min=-2, max=5) == \
r"10000.0"
assert latex(Float('0.099999'), full_prec=True, min=-2, max=5) == \
r"9.99990000000000 \cdot 10^{-2}"
def test_latex_vector_expressions():
A = CoordSys3D('A')
assert latex(Cross(A.i, A.j*A.x*3+A.k)) == \
r"\mathbf{\hat{i}_{A}} \times \left(\left(3 \mathbf{{x}_{A}}\right)\mathbf{\hat{j}_{A}} + \mathbf{\hat{k}_{A}}\right)"
assert latex(Cross(A.i, A.j)) == \
r"\mathbf{\hat{i}_{A}} \times \mathbf{\hat{j}_{A}}"
assert latex(x*Cross(A.i, A.j)) == \
r"x \left(\mathbf{\hat{i}_{A}} \times \mathbf{\hat{j}_{A}}\right)"
assert latex(Cross(x*A.i, A.j)) == \
r'- \mathbf{\hat{j}_{A}} \times \left(\left(x\right)\mathbf{\hat{i}_{A}}\right)'
assert latex(Curl(3*A.x*A.j)) == \
r"\nabla\times \left(\left(3 \mathbf{{x}_{A}}\right)\mathbf{\hat{j}_{A}}\right)"
assert latex(Curl(3*A.x*A.j+A.i)) == \
r"\nabla\times \left(\mathbf{\hat{i}_{A}} + \left(3 \mathbf{{x}_{A}}\right)\mathbf{\hat{j}_{A}}\right)"
assert latex(Curl(3*x*A.x*A.j)) == \
r"\nabla\times \left(\left(3 \mathbf{{x}_{A}} x\right)\mathbf{\hat{j}_{A}}\right)"
assert latex(x*Curl(3*A.x*A.j)) == \
r"x \left(\nabla\times \left(\left(3 \mathbf{{x}_{A}}\right)\mathbf{\hat{j}_{A}}\right)\right)"
assert latex(Divergence(3*A.x*A.j+A.i)) == \
r"\nabla\cdot \left(\mathbf{\hat{i}_{A}} + \left(3 \mathbf{{x}_{A}}\right)\mathbf{\hat{j}_{A}}\right)"
assert latex(Divergence(3*A.x*A.j)) == \
r"\nabla\cdot \left(\left(3 \mathbf{{x}_{A}}\right)\mathbf{\hat{j}_{A}}\right)"
assert latex(x*Divergence(3*A.x*A.j)) == \
r"x \left(\nabla\cdot \left(\left(3 \mathbf{{x}_{A}}\right)\mathbf{\hat{j}_{A}}\right)\right)"
assert latex(Dot(A.i, A.j*A.x*3+A.k)) == \
r"\mathbf{\hat{i}_{A}} \cdot \left(\left(3 \mathbf{{x}_{A}}\right)\mathbf{\hat{j}_{A}} + \mathbf{\hat{k}_{A}}\right)"
assert latex(Dot(A.i, A.j)) == \
r"\mathbf{\hat{i}_{A}} \cdot \mathbf{\hat{j}_{A}}"
assert latex(Dot(x*A.i, A.j)) == \
r"\mathbf{\hat{j}_{A}} \cdot \left(\left(x\right)\mathbf{\hat{i}_{A}}\right)"
assert latex(x*Dot(A.i, A.j)) == \
r"x \left(\mathbf{\hat{i}_{A}} \cdot \mathbf{\hat{j}_{A}}\right)"
assert latex(Gradient(A.x)) == r"\nabla \mathbf{{x}_{A}}"
assert latex(Gradient(A.x + 3*A.y)) == \
r"\nabla \left(\mathbf{{x}_{A}} + 3 \mathbf{{y}_{A}}\right)"
assert latex(x*Gradient(A.x)) == r"x \left(\nabla \mathbf{{x}_{A}}\right)"
assert latex(Gradient(x*A.x)) == r"\nabla \left(\mathbf{{x}_{A}} x\right)"
assert latex(Laplacian(A.x)) == r"\Delta \mathbf{{x}_{A}}"
assert latex(Laplacian(A.x + 3*A.y)) == \
r"\Delta \left(\mathbf{{x}_{A}} + 3 \mathbf{{y}_{A}}\right)"
assert latex(x*Laplacian(A.x)) == r"x \left(\Delta \mathbf{{x}_{A}}\right)"
assert latex(Laplacian(x*A.x)) == r"\Delta \left(\mathbf{{x}_{A}} x\right)"
def test_latex_symbols():
Gamma, lmbda, rho = symbols('Gamma, lambda, rho')
tau, Tau, TAU, taU = symbols('tau, Tau, TAU, taU')
assert latex(tau) == r"\tau"
assert latex(Tau) == r"\mathrm{T}"
assert latex(TAU) == r"\tau"
assert latex(taU) == r"\tau"
# Check that all capitalized greek letters are handled explicitly
capitalized_letters = {l.capitalize() for l in greek_letters_set}
assert len(capitalized_letters - set(tex_greek_dictionary.keys())) == 0
assert latex(Gamma + lmbda) == r"\Gamma + \lambda"
assert latex(Gamma * lmbda) == r"\Gamma \lambda"
assert latex(Symbol('q1')) == r"q_{1}"
assert latex(Symbol('q21')) == r"q_{21}"
assert latex(Symbol('epsilon0')) == r"\epsilon_{0}"
assert latex(Symbol('omega1')) == r"\omega_{1}"
assert latex(Symbol('91')) == r"91"
assert latex(Symbol('alpha_new')) == r"\alpha_{new}"
assert latex(Symbol('C^orig')) == r"C^{orig}"
assert latex(Symbol('x^alpha')) == r"x^{\alpha}"
assert latex(Symbol('beta^alpha')) == r"\beta^{\alpha}"
assert latex(Symbol('e^Alpha')) == r"e^{\mathrm{A}}"
assert latex(Symbol('omega_alpha^beta')) == r"\omega^{\beta}_{\alpha}"
assert latex(Symbol('omega') ** Symbol('beta')) == r"\omega^{\beta}"
@XFAIL
def test_latex_symbols_failing():
    """Known-broken: multi-letter plain symbol names should print in \\mathrm."""
    rho, mass, volume = symbols('rho, mass, volume')
    # NOTE(review): `volume * rho == mass` is a Python `==` between a sympy
    # expression and a symbol, so `latex` receives a boolean, not an equation
    # object — presumably part of why this is marked XFAIL; confirm.
    assert latex(
        volume * rho == mass) == r"\rho \mathrm{volume} = \mathrm{mass}"
    assert latex(volume / mass * rho == 1) == \
        r"\rho \mathrm{volume} {\mathrm{mass}}^{(-1)} = 1"
    assert latex(mass**3 * volume**3) == \
        r"{\mathrm{mass}}^{3} \cdot {\mathrm{volume}}^{3}"
@_both_exp_pow
def test_latex_functions():
    """Exercise LaTeX printing of applied and unapplied functions.

    Covers: undefined ``Function`` objects (single- and multi-letter names,
    sub/superscript parsing from the name), named special functions
    (trig/inverse-trig styles, gamma/zeta/elliptic/Bessel-adjacent families),
    ``Order``, ``Mod``, and powers/arguments combined with each.

    Fix: removed an exact duplicate of the ``Order(x, x, y)`` assertion that
    appeared twice in a row.
    """
    assert latex(exp(x)) == r"e^{x}"
    assert latex(exp(1) + exp(2)) == r"e + e^{2}"
    f = Function('f')
    assert latex(f(x)) == r'f{\left(x \right)}'
    assert latex(f) == r'f'
    g = Function('g')
    assert latex(g(x, y)) == r'g{\left(x,y \right)}'
    assert latex(g) == r'g'
    h = Function('h')
    assert latex(h(x, y, z)) == r'h{\left(x,y,z \right)}'
    assert latex(h) == r'h'
    Li = Function('Li')
    assert latex(Li) == r'\operatorname{Li}'
    assert latex(Li(x)) == r'\operatorname{Li}{\left(x \right)}'
    mybeta = Function('beta')
    # not to be confused with the beta function
    assert latex(mybeta(x, y, z)) == r"\beta{\left(x,y,z \right)}"
    assert latex(beta(x, y)) == r'\operatorname{B}\left(x, y\right)'
    assert latex(beta(x, evaluate=False)) == r'\operatorname{B}\left(x, x\right)'
    assert latex(beta(x, y)**2) == r'\operatorname{B}^{2}\left(x, y\right)'
    assert latex(mybeta(x)) == r"\beta{\left(x \right)}"
    assert latex(mybeta) == r"\beta"
    g = Function('gamma')
    # not to be confused with the gamma function
    assert latex(g(x, y, z)) == r"\gamma{\left(x,y,z \right)}"
    assert latex(g(x)) == r"\gamma{\left(x \right)}"
    assert latex(g) == r"\gamma"
    a_1 = Function('a_1')
    assert latex(a_1) == r"a_{1}"
    assert latex(a_1(x)) == r"a_{1}{\left(x \right)}"
    assert latex(Function('a_1')) == r"a_{1}"
    # Issue #16925
    # multi letter function names
    # > simple
    assert latex(Function('ab')) == r"\operatorname{ab}"
    assert latex(Function('ab1')) == r"\operatorname{ab}_{1}"
    assert latex(Function('ab12')) == r"\operatorname{ab}_{12}"
    assert latex(Function('ab_1')) == r"\operatorname{ab}_{1}"
    assert latex(Function('ab_12')) == r"\operatorname{ab}_{12}"
    assert latex(Function('ab_c')) == r"\operatorname{ab}_{c}"
    assert latex(Function('ab_cd')) == r"\operatorname{ab}_{cd}"
    # > with argument
    assert latex(Function('ab')(Symbol('x'))) == r"\operatorname{ab}{\left(x \right)}"
    assert latex(Function('ab1')(Symbol('x'))) == r"\operatorname{ab}_{1}{\left(x \right)}"
    assert latex(Function('ab12')(Symbol('x'))) == r"\operatorname{ab}_{12}{\left(x \right)}"
    assert latex(Function('ab_1')(Symbol('x'))) == r"\operatorname{ab}_{1}{\left(x \right)}"
    assert latex(Function('ab_c')(Symbol('x'))) == r"\operatorname{ab}_{c}{\left(x \right)}"
    assert latex(Function('ab_cd')(Symbol('x'))) == r"\operatorname{ab}_{cd}{\left(x \right)}"
    # > with power
    # does not work on functions without brackets
    # > with argument and power combined
    assert latex(Function('ab')()**2) == r"\operatorname{ab}^{2}{\left( \right)}"
    assert latex(Function('ab1')()**2) == r"\operatorname{ab}_{1}^{2}{\left( \right)}"
    assert latex(Function('ab12')()**2) == r"\operatorname{ab}_{12}^{2}{\left( \right)}"
    assert latex(Function('ab_1')()**2) == r"\operatorname{ab}_{1}^{2}{\left( \right)}"
    assert latex(Function('ab_12')()**2) == r"\operatorname{ab}_{12}^{2}{\left( \right)}"
    assert latex(Function('ab')(Symbol('x'))**2) == r"\operatorname{ab}^{2}{\left(x \right)}"
    assert latex(Function('ab1')(Symbol('x'))**2) == r"\operatorname{ab}_{1}^{2}{\left(x \right)}"
    assert latex(Function('ab12')(Symbol('x'))**2) == r"\operatorname{ab}_{12}^{2}{\left(x \right)}"
    assert latex(Function('ab_1')(Symbol('x'))**2) == r"\operatorname{ab}_{1}^{2}{\left(x \right)}"
    assert latex(Function('ab_12')(Symbol('x'))**2) == \
        r"\operatorname{ab}_{12}^{2}{\left(x \right)}"
    # single letter function names
    # > simple
    assert latex(Function('a')) == r"a"
    assert latex(Function('a1')) == r"a_{1}"
    assert latex(Function('a12')) == r"a_{12}"
    assert latex(Function('a_1')) == r"a_{1}"
    assert latex(Function('a_12')) == r"a_{12}"
    # > with argument
    assert latex(Function('a')()) == r"a{\left( \right)}"
    assert latex(Function('a1')()) == r"a_{1}{\left( \right)}"
    assert latex(Function('a12')()) == r"a_{12}{\left( \right)}"
    assert latex(Function('a_1')()) == r"a_{1}{\left( \right)}"
    assert latex(Function('a_12')()) == r"a_{12}{\left( \right)}"
    # > with power
    # does not work on functions without brackets
    # > with argument and power combined
    assert latex(Function('a')()**2) == r"a^{2}{\left( \right)}"
    assert latex(Function('a1')()**2) == r"a_{1}^{2}{\left( \right)}"
    assert latex(Function('a12')()**2) == r"a_{12}^{2}{\left( \right)}"
    assert latex(Function('a_1')()**2) == r"a_{1}^{2}{\left( \right)}"
    assert latex(Function('a_12')()**2) == r"a_{12}^{2}{\left( \right)}"
    assert latex(Function('a')(Symbol('x'))**2) == r"a^{2}{\left(x \right)}"
    assert latex(Function('a1')(Symbol('x'))**2) == r"a_{1}^{2}{\left(x \right)}"
    assert latex(Function('a12')(Symbol('x'))**2) == r"a_{12}^{2}{\left(x \right)}"
    assert latex(Function('a_1')(Symbol('x'))**2) == r"a_{1}^{2}{\left(x \right)}"
    assert latex(Function('a_12')(Symbol('x'))**2) == r"a_{12}^{2}{\left(x \right)}"
    assert latex(Function('a')()**32) == r"a^{32}{\left( \right)}"
    assert latex(Function('a1')()**32) == r"a_{1}^{32}{\left( \right)}"
    assert latex(Function('a12')()**32) == r"a_{12}^{32}{\left( \right)}"
    assert latex(Function('a_1')()**32) == r"a_{1}^{32}{\left( \right)}"
    assert latex(Function('a_12')()**32) == r"a_{12}^{32}{\left( \right)}"
    assert latex(Function('a')(Symbol('x'))**32) == r"a^{32}{\left(x \right)}"
    assert latex(Function('a1')(Symbol('x'))**32) == r"a_{1}^{32}{\left(x \right)}"
    assert latex(Function('a12')(Symbol('x'))**32) == r"a_{12}^{32}{\left(x \right)}"
    assert latex(Function('a_1')(Symbol('x'))**32) == r"a_{1}^{32}{\left(x \right)}"
    assert latex(Function('a_12')(Symbol('x'))**32) == r"a_{12}^{32}{\left(x \right)}"
    # symbolic exponents
    assert latex(Function('a')()**a) == r"a^{a}{\left( \right)}"
    assert latex(Function('a1')()**a) == r"a_{1}^{a}{\left( \right)}"
    assert latex(Function('a12')()**a) == r"a_{12}^{a}{\left( \right)}"
    assert latex(Function('a_1')()**a) == r"a_{1}^{a}{\left( \right)}"
    assert latex(Function('a_12')()**a) == r"a_{12}^{a}{\left( \right)}"
    assert latex(Function('a')(Symbol('x'))**a) == r"a^{a}{\left(x \right)}"
    assert latex(Function('a1')(Symbol('x'))**a) == r"a_{1}^{a}{\left(x \right)}"
    assert latex(Function('a12')(Symbol('x'))**a) == r"a_{12}^{a}{\left(x \right)}"
    assert latex(Function('a_1')(Symbol('x'))**a) == r"a_{1}^{a}{\left(x \right)}"
    assert latex(Function('a_12')(Symbol('x'))**a) == r"a_{12}^{a}{\left(x \right)}"
    ab = Symbol('ab')
    assert latex(Function('a')()**ab) == r"a^{ab}{\left( \right)}"
    assert latex(Function('a1')()**ab) == r"a_{1}^{ab}{\left( \right)}"
    assert latex(Function('a12')()**ab) == r"a_{12}^{ab}{\left( \right)}"
    assert latex(Function('a_1')()**ab) == r"a_{1}^{ab}{\left( \right)}"
    assert latex(Function('a_12')()**ab) == r"a_{12}^{ab}{\left( \right)}"
    assert latex(Function('a')(Symbol('x'))**ab) == r"a^{ab}{\left(x \right)}"
    assert latex(Function('a1')(Symbol('x'))**ab) == r"a_{1}^{ab}{\left(x \right)}"
    assert latex(Function('a12')(Symbol('x'))**ab) == r"a_{12}^{ab}{\left(x \right)}"
    assert latex(Function('a_1')(Symbol('x'))**ab) == r"a_{1}^{ab}{\left(x \right)}"
    assert latex(Function('a_12')(Symbol('x'))**ab) == r"a_{12}^{ab}{\left(x \right)}"
    # carets / double underscores in the *name* become superscripts
    assert latex(Function('a^12')(x)) == R"a^{12}{\left(x \right)}"
    assert latex(Function('a^12')(x) ** ab) == R"\left(a^{12}\right)^{ab}{\left(x \right)}"
    assert latex(Function('a__12')(x)) == R"a^{12}{\left(x \right)}"
    assert latex(Function('a__12')(x) ** ab) == R"\left(a^{12}\right)^{ab}{\left(x \right)}"
    assert latex(Function('a_1__1_2')(x)) == R"a^{1}_{1 2}{\left(x \right)}"
    # issue 5868
    omega1 = Function('omega1')
    assert latex(omega1) == r"\omega_{1}"
    assert latex(omega1(x)) == r"\omega_{1}{\left(x \right)}"
    # trig functions and bracket folding / inverse-trig styles
    assert latex(sin(x)) == r"\sin{\left(x \right)}"
    assert latex(sin(x), fold_func_brackets=True) == r"\sin {x}"
    assert latex(sin(2*x**2), fold_func_brackets=True) == \
        r"\sin {2 x^{2}}"
    assert latex(sin(x**2), fold_func_brackets=True) == \
        r"\sin {x^{2}}"
    assert latex(asin(x)**2) == r"\operatorname{asin}^{2}{\left(x \right)}"
    assert latex(asin(x)**2, inv_trig_style="full") == \
        r"\arcsin^{2}{\left(x \right)}"
    assert latex(asin(x)**2, inv_trig_style="power") == \
        r"\sin^{-1}{\left(x \right)}^{2}"
    assert latex(asin(x**2), inv_trig_style="power",
                 fold_func_brackets=True) == \
        r"\sin^{-1} {x^{2}}"
    assert latex(acsc(x), inv_trig_style="full") == \
        r"\operatorname{arccsc}{\left(x \right)}"
    assert latex(asinh(x), inv_trig_style="full") == \
        r"\operatorname{arsinh}{\left(x \right)}"
    # factorials and binomials
    assert latex(factorial(k)) == r"k!"
    assert latex(factorial(-k)) == r"\left(- k\right)!"
    assert latex(factorial(k)**2) == r"k!^{2}"
    assert latex(subfactorial(k)) == r"!k"
    assert latex(subfactorial(-k)) == r"!\left(- k\right)"
    assert latex(subfactorial(k)**2) == r"\left(!k\right)^{2}"
    assert latex(factorial2(k)) == r"k!!"
    assert latex(factorial2(-k)) == r"\left(- k\right)!!"
    assert latex(factorial2(k)**2) == r"k!!^{2}"
    assert latex(binomial(2, k)) == r"{\binom{2}{k}}"
    assert latex(binomial(2, k)**2) == r"{\binom{2}{k}}^{2}"
    assert latex(FallingFactorial(3, k)) == r"{\left(3\right)}_{k}"
    assert latex(RisingFactorial(3, k)) == r"{3}^{\left(k\right)}"
    assert latex(floor(x)) == r"\left\lfloor{x}\right\rfloor"
    assert latex(ceiling(x)) == r"\left\lceil{x}\right\rceil"
    assert latex(frac(x)) == r"\operatorname{frac}{\left(x\right)}"
    assert latex(floor(x)**2) == r"\left\lfloor{x}\right\rfloor^{2}"
    assert latex(ceiling(x)**2) == r"\left\lceil{x}\right\rceil^{2}"
    assert latex(frac(x)**2) == r"\operatorname{frac}{\left(x\right)}^{2}"
    assert latex(Min(x, 2, x**3)) == r"\min\left(2, x, x^{3}\right)"
    assert latex(Min(x, y)**2) == r"\min\left(x, y\right)^{2}"
    assert latex(Max(x, 2, x**3)) == r"\max\left(2, x, x^{3}\right)"
    assert latex(Max(x, y)**2) == r"\max\left(x, y\right)^{2}"
    assert latex(Abs(x)) == r"\left|{x}\right|"
    assert latex(Abs(x)**2) == r"\left|{x}\right|^{2}"
    assert latex(re(x)) == r"\operatorname{re}{\left(x\right)}"
    assert latex(re(x + y)) == \
        r"\operatorname{re}{\left(x\right)} + \operatorname{re}{\left(y\right)}"
    assert latex(im(x)) == r"\operatorname{im}{\left(x\right)}"
    assert latex(conjugate(x)) == r"\overline{x}"
    assert latex(conjugate(x)**2) == r"\overline{x}^{2}"
    assert latex(conjugate(x**2)) == r"\overline{x}^{2}"
    assert latex(gamma(x)) == r"\Gamma\left(x\right)"
    w = Wild('w')
    assert latex(gamma(w)) == r"\Gamma\left(w\right)"
    # Order with various limit points
    assert latex(Order(x)) == r"O\left(x\right)"
    assert latex(Order(x, x)) == r"O\left(x\right)"
    assert latex(Order(x, (x, 0))) == r"O\left(x\right)"
    assert latex(Order(x, (x, oo))) == r"O\left(x; x\rightarrow \infty\right)"
    assert latex(Order(x - y, (x, y))) == \
        r"O\left(x - y; x\rightarrow y\right)"
    assert latex(Order(x, x, y)) == \
        r"O\left(x; \left( x, \ y\right)\rightarrow \left( 0, \ 0\right)\right)"
    assert latex(Order(x, (x, oo), (y, oo))) == \
        r"O\left(x; \left( x, \ y\right)\rightarrow \left( \infty, \ \infty\right)\right)"
    assert latex(lowergamma(x, y)) == r'\gamma\left(x, y\right)'
    assert latex(lowergamma(x, y)**2) == r'\gamma^{2}\left(x, y\right)'
    assert latex(uppergamma(x, y)) == r'\Gamma\left(x, y\right)'
    assert latex(uppergamma(x, y)**2) == r'\Gamma^{2}\left(x, y\right)'
    assert latex(cot(x)) == r'\cot{\left(x \right)}'
    assert latex(coth(x)) == r'\coth{\left(x \right)}'
    assert latex(re(x)) == r'\operatorname{re}{\left(x\right)}'
    assert latex(im(x)) == r'\operatorname{im}{\left(x\right)}'
    assert latex(root(x, y)) == r'x^{\frac{1}{y}}'
    assert latex(arg(x)) == r'\arg{\left(x \right)}'
    # zeta family and friends
    assert latex(zeta(x)) == r"\zeta\left(x\right)"
    assert latex(zeta(x)**2) == r"\zeta^{2}\left(x\right)"
    assert latex(zeta(x, y)) == r"\zeta\left(x, y\right)"
    assert latex(zeta(x, y)**2) == r"\zeta^{2}\left(x, y\right)"
    assert latex(dirichlet_eta(x)) == r"\eta\left(x\right)"
    assert latex(dirichlet_eta(x)**2) == r"\eta^{2}\left(x\right)"
    assert latex(polylog(x, y)) == r"\operatorname{Li}_{x}\left(y\right)"
    assert latex(
        polylog(x, y)**2) == r"\operatorname{Li}_{x}^{2}\left(y\right)"
    assert latex(lerchphi(x, y, n)) == r"\Phi\left(x, y, n\right)"
    assert latex(lerchphi(x, y, n)**2) == r"\Phi^{2}\left(x, y, n\right)"
    assert latex(stieltjes(x)) == r"\gamma_{x}"
    assert latex(stieltjes(x)**2) == r"\gamma_{x}^{2}"
    assert latex(stieltjes(x, y)) == r"\gamma_{x}\left(y\right)"
    assert latex(stieltjes(x, y)**2) == r"\gamma_{x}\left(y\right)^{2}"
    # elliptic integrals
    assert latex(elliptic_k(z)) == r"K\left(z\right)"
    assert latex(elliptic_k(z)**2) == r"K^{2}\left(z\right)"
    assert latex(elliptic_f(x, y)) == r"F\left(x\middle| y\right)"
    assert latex(elliptic_f(x, y)**2) == r"F^{2}\left(x\middle| y\right)"
    assert latex(elliptic_e(x, y)) == r"E\left(x\middle| y\right)"
    assert latex(elliptic_e(x, y)**2) == r"E^{2}\left(x\middle| y\right)"
    assert latex(elliptic_e(z)) == r"E\left(z\right)"
    assert latex(elliptic_e(z)**2) == r"E^{2}\left(z\right)"
    assert latex(elliptic_pi(x, y, z)) == r"\Pi\left(x; y\middle| z\right)"
    assert latex(elliptic_pi(x, y, z)**2) == \
        r"\Pi^{2}\left(x; y\middle| z\right)"
    assert latex(elliptic_pi(x, y)) == r"\Pi\left(x\middle| y\right)"
    assert latex(elliptic_pi(x, y)**2) == r"\Pi^{2}\left(x\middle| y\right)"
    # exponential integrals
    assert latex(Ei(x)) == r'\operatorname{Ei}{\left(x \right)}'
    assert latex(Ei(x)**2) == r'\operatorname{Ei}^{2}{\left(x \right)}'
    assert latex(expint(x, y)) == r'\operatorname{E}_{x}\left(y\right)'
    assert latex(expint(x, y)**2) == r'\operatorname{E}_{x}^{2}\left(y\right)'
    assert latex(Shi(x)**2) == r'\operatorname{Shi}^{2}{\left(x \right)}'
    assert latex(Si(x)**2) == r'\operatorname{Si}^{2}{\left(x \right)}'
    assert latex(Ci(x)**2) == r'\operatorname{Ci}^{2}{\left(x \right)}'
    assert latex(Chi(x)**2) == r'\operatorname{Chi}^{2}\left(x\right)'
    assert latex(Chi(x)) == r'\operatorname{Chi}\left(x\right)'
    # orthogonal polynomials
    assert latex(jacobi(n, a, b, x)) == \
        r'P_{n}^{\left(a,b\right)}\left(x\right)'
    assert latex(jacobi(n, a, b, x)**2) == \
        r'\left(P_{n}^{\left(a,b\right)}\left(x\right)\right)^{2}'
    assert latex(gegenbauer(n, a, x)) == \
        r'C_{n}^{\left(a\right)}\left(x\right)'
    assert latex(gegenbauer(n, a, x)**2) == \
        r'\left(C_{n}^{\left(a\right)}\left(x\right)\right)^{2}'
    assert latex(chebyshevt(n, x)) == r'T_{n}\left(x\right)'
    assert latex(chebyshevt(n, x)**2) == \
        r'\left(T_{n}\left(x\right)\right)^{2}'
    assert latex(chebyshevu(n, x)) == r'U_{n}\left(x\right)'
    assert latex(chebyshevu(n, x)**2) == \
        r'\left(U_{n}\left(x\right)\right)^{2}'
    assert latex(legendre(n, x)) == r'P_{n}\left(x\right)'
    assert latex(legendre(n, x)**2) == r'\left(P_{n}\left(x\right)\right)^{2}'
    assert latex(assoc_legendre(n, a, x)) == \
        r'P_{n}^{\left(a\right)}\left(x\right)'
    assert latex(assoc_legendre(n, a, x)**2) == \
        r'\left(P_{n}^{\left(a\right)}\left(x\right)\right)^{2}'
    assert latex(laguerre(n, x)) == r'L_{n}\left(x\right)'
    assert latex(laguerre(n, x)**2) == r'\left(L_{n}\left(x\right)\right)^{2}'
    assert latex(assoc_laguerre(n, a, x)) == \
        r'L_{n}^{\left(a\right)}\left(x\right)'
    assert latex(assoc_laguerre(n, a, x)**2) == \
        r'\left(L_{n}^{\left(a\right)}\left(x\right)\right)^{2}'
    assert latex(hermite(n, x)) == r'H_{n}\left(x\right)'
    assert latex(hermite(n, x)**2) == r'\left(H_{n}\left(x\right)\right)^{2}'
    # spherical harmonics
    theta = Symbol("theta", real=True)
    phi = Symbol("phi", real=True)
    assert latex(Ynm(n, m, theta, phi)) == r'Y_{n}^{m}\left(\theta,\phi\right)'
    assert latex(Ynm(n, m, theta, phi)**3) == \
        r'\left(Y_{n}^{m}\left(\theta,\phi\right)\right)^{3}'
    assert latex(Znm(n, m, theta, phi)) == r'Z_{n}^{m}\left(\theta,\phi\right)'
    assert latex(Znm(n, m, theta, phi)**3) == \
        r'\left(Z_{n}^{m}\left(\theta,\phi\right)\right)^{3}'
    # Test latex printing of function names with "_"
    assert latex(polar_lift(0)) == \
        r"\operatorname{polar\_lift}{\left(0 \right)}"
    assert latex(polar_lift(0)**3) == \
        r"\operatorname{polar\_lift}^{3}{\left(0 \right)}"
    # number-theoretic functions
    assert latex(totient(n)) == r'\phi\left(n\right)'
    assert latex(totient(n) ** 2) == r'\left(\phi\left(n\right)\right)^{2}'
    assert latex(reduced_totient(n)) == r'\lambda\left(n\right)'
    assert latex(reduced_totient(n) ** 2) == \
        r'\left(\lambda\left(n\right)\right)^{2}'
    assert latex(divisor_sigma(x)) == r"\sigma\left(x\right)"
    assert latex(divisor_sigma(x)**2) == r"\sigma^{2}\left(x\right)"
    assert latex(divisor_sigma(x, y)) == r"\sigma_y\left(x\right)"
    assert latex(divisor_sigma(x, y)**2) == r"\sigma^{2}_y\left(x\right)"
    assert latex(udivisor_sigma(x)) == r"\sigma^*\left(x\right)"
    assert latex(udivisor_sigma(x)**2) == r"\sigma^*^{2}\left(x\right)"
    assert latex(udivisor_sigma(x, y)) == r"\sigma^*_y\left(x\right)"
    assert latex(udivisor_sigma(x, y)**2) == r"\sigma^*^{2}_y\left(x\right)"
    assert latex(primenu(n)) == r'\nu\left(n\right)'
    assert latex(primenu(n) ** 2) == r'\left(\nu\left(n\right)\right)^{2}'
    assert latex(primeomega(n)) == r'\Omega\left(n\right)'
    assert latex(primeomega(n) ** 2) == \
        r'\left(\Omega\left(n\right)\right)^{2}'
    # Lambert W with branch index
    assert latex(LambertW(n)) == r'W\left(n\right)'
    assert latex(LambertW(n, -1)) == r'W_{-1}\left(n\right)'
    assert latex(LambertW(n, k)) == r'W_{k}\left(n\right)'
    assert latex(LambertW(n) * LambertW(n)) == r"W^{2}\left(n\right)"
    assert latex(Pow(LambertW(n), 2)) == r"W^{2}\left(n\right)"
    assert latex(LambertW(n)**k) == r"W^{k}\left(n\right)"
    assert latex(LambertW(n, k)**p) == r"W^{p}_{k}\left(n\right)"
    # Mod parenthesization
    assert latex(Mod(x, 7)) == r'x \bmod 7'
    assert latex(Mod(x + 1, 7)) == r'\left(x + 1\right) \bmod 7'
    assert latex(Mod(7, x + 1)) == r'7 \bmod \left(x + 1\right)'
    assert latex(Mod(2 * x, 7)) == r'2 x \bmod 7'
    assert latex(Mod(7, 2 * x)) == r'7 \bmod 2 x'
    assert latex(Mod(x, 7) + 1) == r'\left(x \bmod 7\right) + 1'
    assert latex(2 * Mod(x, 7)) == r'2 \left(x \bmod 7\right)'
    assert latex(Mod(7, 2 * x)**n) == r'\left(7 \bmod 2 x\right)^{n}'
    # some unknown function name should get rendered with \operatorname
    fjlkd = Function('fjlkd')
    assert latex(fjlkd(x)) == r'\operatorname{fjlkd}{\left(x \right)}'
    # even when it is referred to without an argument
    assert latex(fjlkd) == r'\operatorname{fjlkd}'
# test that notation passes to subclasses of the same name only
def test_function_subclass_different_name():
    """A subclass of gamma prints under its own class name, not as \\Gamma."""
    class mygamma(gamma):
        pass
    subclass = mygamma
    assert latex(subclass) == r"\operatorname{mygamma}"
    assert latex(subclass(x)) == r"\operatorname{mygamma}{\left(x \right)}"
def test_hyper_printing():
    """meijerg and hyper print as Meijer G / pFq with matrix-style parameters."""
    from sympy.abc import x, z
    assert latex(meijerg(Tuple(pi, pi, x), Tuple(1),
                         (0, 1), Tuple(1, 2, 3/pi), z)) == \
        r'{G_{4, 5}^{2, 3}\left(\begin{matrix} \pi, \pi, x & 1 \\0, 1 & 1, 2, '\
        r'\frac{3}{\pi} \end{matrix} \middle| {z} \right)}'
    # degenerate case: empty parameter groups print as empty matrix cells
    assert latex(meijerg(Tuple(), Tuple(1), (0,), Tuple(), z)) == \
        r'{G_{1, 1}^{1, 0}\left(\begin{matrix} & 1 \\0 & \end{matrix} \middle| {z} \right)}'
    assert latex(hyper((x, 2), (3,), z)) == \
        r'{{}_{2}F_{1}\left(\begin{matrix} 2, x ' \
        r'\\ 3 \end{matrix}\middle| {z} \right)}'
    assert latex(hyper(Tuple(), Tuple(1), z)) == \
        r'{{}_{0}F_{1}\left(\begin{matrix} ' \
        r'\\ 1 \end{matrix}\middle| {z} \right)}'
def test_latex_bessel():
    """Bessel-family functions print with order as subscript and power outside."""
    from sympy.functions.special.bessel import (besselj, bessely, besseli,
                                                besselk, hankel1, hankel2,
                                                jn, yn, hn1, hn2)
    from sympy.abc import z
    assert latex(besselj(n, z**2)**k) == r'J^{k}_{n}\left(z^{2}\right)'
    assert latex(bessely(n, z)) == r'Y_{n}\left(z\right)'
    assert latex(besseli(n, z)) == r'I_{n}\left(z\right)'
    assert latex(besselk(n, z)) == r'K_{n}\left(z\right)'
    # Hankel functions carry a (1)/(2) kind superscript, so powers need parens.
    assert latex(hankel1(n, z**2)**2) == \
        r'\left(H^{(1)}_{n}\left(z^{2}\right)\right)^{2}'
    assert latex(hankel2(n, z)) == r'H^{(2)}_{n}\left(z\right)'
    assert latex(jn(n, z)) == r'j_{n}\left(z\right)'
    assert latex(yn(n, z)) == r'y_{n}\left(z\right)'
    assert latex(hn1(n, z)) == r'h^{(1)}_{n}\left(z\right)'
    assert latex(hn2(n, z)) == r'h^{(2)}_{n}\left(z\right)'
def test_latex_fresnel():
    """Fresnel integrals print as S(z)/C(z), with powers attached to the letter."""
    from sympy.functions.special.error_functions import (fresnels, fresnelc)
    from sympy.abc import z
    for func, letter in ((fresnels, 'S'), (fresnelc, 'C')):
        assert latex(func(z)) == letter + r'\left(z\right)'
        assert latex(func(z)**2) == letter + r'^{2}\left(z\right)'
def test_latex_brackets():
    """A negative base keeps its parentheses when raised to a power."""
    expr = (-1)**x
    assert latex(expr) == r"\left(-1\right)^{x}"
def test_latex_indexed():
    """Indexed/IndexedBase print base and index with braces and bound ranges."""
    Psi_symbol = Symbol('Psi_0', complex=True, real=False)
    Psi_indexed = IndexedBase(Symbol('Psi', complex=True, real=False))
    symbol_latex = latex(Psi_symbol * conjugate(Psi_symbol))
    indexed_latex = latex(Psi_indexed[0] * conjugate(Psi_indexed[0]))
    # \\overline{{\\Psi}_{0}} {\\Psi}_{0} vs. \\Psi_{0} \\overline{\\Psi_{0}}
    assert symbol_latex == r'\Psi_{0} \overline{\Psi_{0}}'
    assert indexed_latex == r'\overline{{\Psi}_{0}} {\Psi}_{0}'
    # Symbol('gamma') gives r'\gamma'
    # shared fragment: the ".." separator printed between Idx range bounds
    interval = '\\mathrel{..}\\nobreak '
    assert latex(Indexed('x1', Symbol('i'))) == r'{x_{1}}_{i}'
    assert latex(Indexed('x2', Idx('i'))) == r'{x_{2}}_{i}'
    assert latex(Indexed('x3', Idx('i', Symbol('N')))) == r'{x_{3}}_{{i}_{0'+interval+'N - 1}}'
    assert latex(Indexed('x3', Idx('i', Symbol('N')+1))) == r'{x_{3}}_{{i}_{0'+interval+'N}}'
    assert latex(Indexed('x4', Idx('i', (Symbol('a'),Symbol('b'))))) == r'{x_{4}}_{{i}_{a'+interval+'b}}'
    assert latex(IndexedBase('gamma')) == r'\gamma'
    assert latex(IndexedBase('a b')) == r'a b'
    assert latex(IndexedBase('a_b')) == r'a_{b}'
def test_latex_derivatives():
    """Derivatives print with d vs \\partial, stacked orders, and parenthesized
    arguments depending on the multiplication symbol."""
    # regular "d" for ordinary derivatives
    assert latex(diff(x**3, x, evaluate=False)) == \
        r"\frac{d}{d x} x^{3}"
    assert latex(diff(sin(x) + x**2, x, evaluate=False)) == \
        r"\frac{d}{d x} \left(x^{2} + \sin{\left(x \right)}\right)"
    assert latex(diff(diff(sin(x) + x**2, x, evaluate=False), evaluate=False))\
        == \
        r"\frac{d^{2}}{d x^{2}} \left(x^{2} + \sin{\left(x \right)}\right)"
    assert latex(diff(diff(diff(sin(x) + x**2, x, evaluate=False), evaluate=False), evaluate=False)) == \
        r"\frac{d^{3}}{d x^{3}} \left(x^{2} + \sin{\left(x \right)}\right)"
    # \partial for partial derivatives
    assert latex(diff(sin(x * y), x, evaluate=False)) == \
        r"\frac{\partial}{\partial x} \sin{\left(x y \right)}"
    assert latex(diff(sin(x * y) + x**2, x, evaluate=False)) == \
        r"\frac{\partial}{\partial x} \left(x^{2} + \sin{\left(x y \right)}\right)"
    assert latex(diff(diff(sin(x*y) + x**2, x, evaluate=False), x, evaluate=False)) == \
        r"\frac{\partial^{2}}{\partial x^{2}} \left(x^{2} + \sin{\left(x y \right)}\right)"
    assert latex(diff(diff(diff(sin(x*y) + x**2, x, evaluate=False), x, evaluate=False), x, evaluate=False)) == \
        r"\frac{\partial^{3}}{\partial x^{3}} \left(x^{2} + \sin{\left(x y \right)}\right)"
    # mixed partial derivatives
    f = Function("f")
    assert latex(diff(diff(f(x, y), x, evaluate=False), y, evaluate=False)) == \
        r"\frac{\partial^{2}}{\partial y\partial x} " + latex(f(x, y))
    assert latex(diff(diff(diff(f(x, y), x, evaluate=False), x, evaluate=False), y, evaluate=False)) == \
        r"\frac{\partial^{3}}{\partial y\partial x^{2}} " + latex(f(x, y))
    # for negative nested Derivative
    assert latex(diff(-diff(y**2,x,evaluate=False),x,evaluate=False)) == r'\frac{d}{d x} \left(- \frac{d}{d x} y^{2}\right)'
    assert latex(diff(diff(-diff(diff(y,x,evaluate=False),x,evaluate=False),x,evaluate=False),x,evaluate=False)) == \
        r'\frac{d^{2}}{d x^{2}} \left(- \frac{d^{2}}{d x^{2}} y\right)'
    # use ordinary d when one of the variables has been integrated out
    assert latex(diff(Integral(exp(-x*y), (x, 0, oo)), y, evaluate=False)) == \
        r"\frac{d}{d y} \int\limits_{0}^{\infty} e^{- x y}\, dx"
    # Derivative wrapped in power:
    assert latex(diff(x, x, evaluate=False)**2) == \
        r"\left(\frac{d}{d x} x\right)^{2}"
    assert latex(diff(f(x), x)**2) == \
        r"\left(\frac{d}{d x} f{\left(x \right)}\right)^{2}"
    # symbolic derivative orders
    assert latex(diff(f(x), (x, n))) == \
        r"\frac{d^{n}}{d x^{n}} f{\left(x \right)}"
    x1 = Symbol('x1')
    x2 = Symbol('x2')
    assert latex(diff(f(x1, x2), x1)) == r'\frac{\partial}{\partial x_{1}} f{\left(x_{1},x_{2} \right)}'
    n1 = Symbol('n1')
    assert latex(diff(f(x), (x, n1))) == r'\frac{d^{n_{1}}}{d x^{n_{1}}} f{\left(x \right)}'
    n2 = Symbol('n2')
    assert latex(diff(f(x), (x, Max(n1, n2)))) == \
        r'\frac{d^{\max\left(n_{1}, n_{2}\right)}}{d x^{\max\left(n_{1}, n_{2}\right)}} f{\left(x \right)}'
    # parenthesizing of the argument
    g = Function("g")
    # addition always parenthesized
    for mul_symbol in (None, 'dot'):
        assert latex(Derivative(f(x) + g(x), x), mul_symbol=mul_symbol) == \
            r"\frac{d}{d x} \left(f{\left(x \right)} + g{\left(x \right)}\right)"
    # multiplication parenthesized only if mul_symbol isn't None
    assert latex(Derivative(f(x) * g(x), x)) == \
        r"\frac{d}{d x} f{\left(x \right)} g{\left(x \right)}"
    assert latex(Derivative(f(x) * g(x), x), mul_symbol='dot') == \
        r"\frac{d}{d x} \left(f{\left(x \right)} \cdot g{\left(x \right)}\right)"
    # set diff operator
    assert latex(diff(f(x), x), diff_operator="rd") == r'\frac{\mathrm{d}}{\mathrm{d} x} f{\left(x \right)}'
def test_latex_subs():
    """Subs prints its substitutions in a \\substack after the evaluation bar."""
    sub_expr = Subs(x*y, (x, y), (1, 2))
    assert latex(sub_expr) == r'\left. x y \right|_{\substack{ x=1\\ y=2 }}'
def test_latex_integrals():
    """Integrals print with \\int/\\iint/... glyphs, limits, and dx ordering."""
    assert latex(Integral(log(x), x)) == r"\int \log{\left(x \right)}\, dx"
    assert latex(Integral(x**2, (x, 0, 1))) == \
        r"\int\limits_{0}^{1} x^{2}\, dx"
    assert latex(Integral(x**2, (x, 10, 20))) == \
        r"\int\limits_{10}^{20} x^{2}\, dx"
    assert latex(Integral(y*x**2, (x, 0, 1), y)) == \
        r"\int\int\limits_{0}^{1} x^{2} y\, dx\, dy"
    # mode / itex flags change the equation wrapper and limit placement
    assert latex(Integral(y*x**2, (x, 0, 1), y), mode='equation*') == \
        r"\begin{equation*}\int\int\limits_{0}^{1} x^{2} y\, dx\, dy\end{equation*}"
    assert latex(Integral(y*x**2, (x, 0, 1), y), mode='equation*', itex=True) \
        == r"$$\int\int_{0}^{1} x^{2} y\, dx\, dy$$"
    assert latex(Integral(x, (x, 0))) == r"\int\limits^{0} x\, dx"
    # repeated unbounded integrals collapse to \iint / \iiint / \iiiint
    assert latex(Integral(x*y, x, y)) == r"\iint x y\, dx\, dy"
    assert latex(Integral(x*y*z, x, y, z)) == r"\iiint x y z\, dx\, dy\, dz"
    assert latex(Integral(x*y*z*t, x, y, z, t)) == \
        r"\iiiint t x y z\, dx\, dy\, dz\, dt"
    assert latex(Integral(x, x, x, x, x, x, x)) == \
        r"\int\int\int\int\int\int x\, dx\, dx\, dx\, dx\, dx\, dx"
    assert latex(Integral(x, x, y, (z, 0, 1))) == \
        r"\int\limits_{0}^{1}\int\int x\, dx\, dy\, dz"
    # for negative nested Integral
    assert latex(Integral(-Integral(y**2,x),x)) == \
        r'\int \left(- \int y^{2}\, dx\right)\, dx'
    assert latex(Integral(-Integral(-Integral(y,x),x),x)) == \
        r'\int \left(- \int \left(- \int y\, dx\right)\, dx\right)\, dx'
    # fix issue #10806
    assert latex(Integral(z, z)**2) == r"\left(\int z\, dz\right)^{2}"
    assert latex(Integral(x + z, z)) == r"\int \left(x + z\right)\, dz"
    assert latex(Integral(x+z/2, z)) == \
        r"\int \left(x + \frac{z}{2}\right)\, dz"
    assert latex(Integral(x**y, z)) == r"\int x^{y}\, dz"
    # set diff operator
    assert latex(Integral(x, x), diff_operator="rd") == r'\int x\, \mathrm{d}x'
    assert latex(Integral(x, (x, 0, 1)), diff_operator="rd") == r'\int\limits_{0}^{1} x\, \mathrm{d}x'
def test_latex_sets():
    """Builtin set/frozenset and FiniteSet all print as sorted brace lists."""
    for s in (frozenset, set):
        assert latex(s([x*y, x**2])) == r"\left\{x^{2}, x y\right\}"
        assert latex(s(range(1, 6))) == r"\left\{1, 2, 3, 4, 5\right\}"
        assert latex(s(range(1, 13))) == \
            r"\left\{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12\right\}"
    # FiniteSet takes elements as *args, not a single iterable
    s = FiniteSet
    assert latex(s(*[x*y, x**2])) == r"\left\{x^{2}, x y\right\}"
    assert latex(s(*range(1, 6))) == r"\left\{1, 2, 3, 4, 5\right\}"
    assert latex(s(*range(1, 13))) == \
        r"\left\{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12\right\}"
def test_latex_SetExpr():
    """SetExpr prints as a wrapper around its underlying set's LaTeX form."""
    wrapped = SetExpr(Interval(1, 3))
    assert latex(wrapped) == r"SetExpr\left(\left[1, 3\right]\right)"
def test_latex_Range():
    """Range prints as an explicit brace list, elided with \\ldots, or falls
    back to \\text{Range}(...) when bounds lack integer assumptions."""
    assert latex(Range(1, 51)) == r'\left\{1, 2, \ldots, 50\right\}'
    assert latex(Range(1, 4)) == r'\left\{1, 2, 3\right\}'
    assert latex(Range(0, 3, 1)) == r'\left\{0, 1, 2\right\}'
    assert latex(Range(0, 30, 1)) == r'\left\{0, 1, \ldots, 29\right\}'
    assert latex(Range(30, 1, -1)) == r'\left\{30, 29, \ldots, 2\right\}'
    assert latex(Range(0, oo, 2)) == r'\left\{0, 2, \ldots\right\}'
    assert latex(Range(oo, -2, -2)) == r'\left\{\ldots, 2, 0\right\}'
    assert latex(Range(-2, -oo, -1)) == r'\left\{-2, -3, \ldots\right\}'
    assert latex(Range(-oo, oo)) == r'\left\{\ldots, -1, 0, 1, \ldots\right\}'
    assert latex(Range(oo, -oo, -1)) == r'\left\{\ldots, 1, 0, -1, \ldots\right\}'
    # symbolic bounds without integer assumptions use the functional form
    a, b, c = symbols('a:c')
    assert latex(Range(a, b, c)) == r'\text{Range}\left(a, b, c\right)'
    assert latex(Range(a, 10, 1)) == r'\text{Range}\left(a, 10\right)'
    assert latex(Range(0, b, 1)) == r'\text{Range}\left(b\right)'
    assert latex(Range(0, 10, c)) == r'\text{Range}\left(0, 10, c\right)'
    # integer-assumed symbols allow the elementwise form again
    i = Symbol('i', integer=True)
    n = Symbol('n', negative=True, integer=True)
    p = Symbol('p', positive=True, integer=True)
    assert latex(Range(i, i + 3)) == r'\left\{i, i + 1, i + 2\right\}'
    assert latex(Range(-oo, n, 2)) == r'\left\{\ldots, n - 4, n - 2\right\}'
    assert latex(Range(p, oo)) == r'\left\{p, p + 1, \ldots\right\}'
    # The following will work if __iter__ is improved
    # assert latex(Range(-3, p + 7)) == r'\left\{-3, -2, \ldots, p + 6\right\}'
    # Must have integer assumptions
    assert latex(Range(a, a + 3)) == r'\text{Range}\left(a, a + 3\right)'
def test_latex_sequences():
    """SeqFormula/SeqPer and their sums/products print as bracketed lists,
    elided with \\ldots on the unbounded side."""
    s1 = SeqFormula(a**2, (0, oo))
    s2 = SeqPer((1, 2))
    latex_str = r'\left[0, 1, 4, 9, \ldots\right]'
    assert latex(s1) == latex_str
    latex_str = r'\left[1, 2, 1, 2, \ldots\right]'
    assert latex(s2) == latex_str
    s3 = SeqFormula(a**2, (0, 2))
    s4 = SeqPer((1, 2), (0, 2))
    latex_str = r'\left[0, 1, 4\right]'
    assert latex(s3) == latex_str
    latex_str = r'\left[1, 2, 1\right]'
    assert latex(s4) == latex_str
    # sequences unbounded on the left elide at the start
    s5 = SeqFormula(a**2, (-oo, 0))
    s6 = SeqPer((1, 2), (-oo, 0))
    latex_str = r'\left[\ldots, 9, 4, 1, 0\right]'
    assert latex(s5) == latex_str
    latex_str = r'\left[\ldots, 2, 1, 2, 1\right]'
    assert latex(s6) == latex_str
    # elementwise sums and products of sequences
    latex_str = r'\left[1, 3, 5, 11, \ldots\right]'
    assert latex(SeqAdd(s1, s2)) == latex_str
    latex_str = r'\left[1, 3, 5\right]'
    assert latex(SeqAdd(s3, s4)) == latex_str
    latex_str = r'\left[\ldots, 11, 5, 3, 1\right]'
    assert latex(SeqAdd(s5, s6)) == latex_str
    latex_str = r'\left[0, 2, 4, 18, \ldots\right]'
    assert latex(SeqMul(s1, s2)) == latex_str
    latex_str = r'\left[0, 2, 4\right]'
    assert latex(SeqMul(s3, s4)) == latex_str
    latex_str = r'\left[\ldots, 18, 4, 2, 0\right]'
    assert latex(SeqMul(s5, s6)) == latex_str
    # Sequences with symbolic limits, issue 12629
    s7 = SeqFormula(a**2, (a, 0, x))
    latex_str = r'\left\{a^{2}\right\}_{a=0}^{x}'
    assert latex(s7) == latex_str
    b = Symbol('b')
    s8 = SeqFormula(b*a**2, (a, 0, 2))
    latex_str = r'\left[0, b, 4 b\right]'
    assert latex(s8) == latex_str
def test_latex_FourierSeries():
    """A Fourier series prints its leading terms followed by \\ldots."""
    series = fourier_series(x, (x, -pi, pi))
    expected = (r'2 \sin{\left(x \right)} - \sin{\left(2 x \right)} + '
                r'\frac{2 \sin{\left(3 x \right)}}{3} + \ldots')
    assert latex(series) == expected
def test_latex_FormalPowerSeries():
    """A formal power series prints as an infinite \\sum over its index."""
    expected = r'\sum_{k=1}^{\infty} - \frac{\left(-1\right)^{- k} x^{k}}{k}'
    assert latex(fps(log(1 + x))) == expected
def test_latex_intervals():
    """Interval endpoints pick bracket vs parenthesis from the open flags."""
    a = Symbol('a', real=True)
    # a degenerate interval collapses to a one-element set
    assert latex(Interval(0, 0)) == r"\left\{0\right\}"
    # two-argument form defaults to closed on both sides
    assert latex(Interval(0, a)) == r"\left[0, a\right]"
    # (left_open, right_open) -> expected delimiters
    cases = [
        (False, False, r"\left[0, a\right]"),
        (True, False, r"\left(0, a\right]"),
        (False, True, r"\left[0, a\right)"),
        (True, True, r"\left(0, a\right)"),
    ]
    for left_open, right_open, expected in cases:
        assert latex(Interval(0, a, left_open, right_open)) == expected
def test_latex_AccumuBounds():
    """AccumBounds prints its endpoints inside angle brackets."""
    a = Symbol('a', real=True)
    bounds_and_expected = [
        (AccumBounds(0, 1), r"\left\langle 0, 1\right\rangle"),
        (AccumBounds(0, a), r"\left\langle 0, a\right\rangle"),
        (AccumBounds(a + 1, a + 2), r"\left\langle a + 1, a + 2\right\rangle"),
    ]
    for bounds, expected in bounds_and_expected:
        assert latex(bounds) == expected
def test_latex_emptyset():
    """The empty set singleton prints as \\emptyset."""
    assert latex(S.EmptySet) == r"\emptyset"
def test_latex_universalset():
    """The universal set singleton prints as blackboard-bold U."""
    assert latex(S.UniversalSet) == r"\mathbb{U}"
def test_latex_commutator():
    """An evaluated reordered commutator prints with a leading minus sign."""
    op_a = Operator('A')
    op_b = Operator('B')
    evaluated = Commutator(op_b, op_a).doit()
    assert latex(evaluated) == r"- (A B - B A)"
def test_latex_union():
    """Unions print with \\cup; degenerate intervals collapse to a finite set."""
    disjoint = Union(Interval(0, 1), Interval(2, 3))
    assert latex(disjoint) == r"\left[0, 1\right] \cup \left[2, 3\right]"
    with_points = Union(Interval(1, 1), Interval(2, 2), Interval(3, 4))
    assert latex(with_points) == r"\left\{1, 2\right\} \cup \left[3, 4\right]"
def test_latex_intersection():
    """Intersections print with \\cap between operand sets."""
    meet = Intersection(Interval(0, 1), Interval(x, y))
    assert latex(meet) == r"\left[0, 1\right] \cap \left[x, y\right]"
def test_latex_symmetric_difference():
    # evaluate=False keeps the symbolic \triangle form instead of computing the set.
    assert latex(SymmetricDifference(Interval(2, 5), Interval(4, 7),
                                     evaluate=False)) == \
        r'\left[2, 5\right] \triangle \left[4, 7\right]'
def test_latex_Complement():
    # Set complement prints with \setminus.
    assert latex(Complement(S.Reals, S.Naturals)) == \
        r"\mathbb{R} \setminus \mathbb{N}"
def test_latex_productset():
    # Cartesian products/powers print with \times and exponents; expected strings
    # are built from the operands' own latex output to stay self-consistent.
    line = Interval(0, 1)
    bigline = Interval(0, 10)
    fset = FiniteSet(1, 2, 3)
    assert latex(line**2) == r"%s^{2}" % latex(line)
    assert latex(line**10) == r"%s^{10}" % latex(line)
    assert latex((line * bigline * fset).flatten()) == r"%s \times %s \times %s" % (
        latex(line), latex(bigline), latex(fset))
def test_latex_powerset():
    # Power set prints as calligraphic P applied to the set.
    fset = FiniteSet(1, 2, 3)
    assert latex(PowerSet(fset)) == r'\mathcal{P}\left(\left\{1, 2, 3\right\}\right)'
def test_latex_ordinals():
    # Ordinal arithmetic: omega powers with coefficients, summed in Cantor normal form.
    w = OrdinalOmega()
    assert latex(w) == r"\omega"
    wp = OmegaPower(2, 3)
    assert latex(wp) == r'3 \omega^{2}'
    assert latex(Ordinal(wp, OmegaPower(1, 1))) == r'3 \omega^{2} + \omega'
    assert latex(Ordinal(OmegaPower(2, 1), OmegaPower(1, 2))) == r'\omega^{2} + 2 \omega'
def test_set_operators_parenthesis():
a, b, c, d = symbols('a:d')
A = FiniteSet(a)
B = FiniteSet(b)
C = FiniteSet(c)
D = FiniteSet(d)
U1 = Union(A, B, evaluate=False)
U2 = Union(C, D, evaluate=False)
I1 = Intersection(A, B, evaluate=False)
I2 = Intersection(C, D, evaluate=False)
C1 = Complement(A, B, evaluate=False)
C2 = Complement(C, D, evaluate=False)
D1 = SymmetricDifference(A, B, evaluate=False)
D2 = SymmetricDifference(C, D, evaluate=False)
# XXX ProductSet does not support evaluate keyword
P1 = ProductSet(A, B)
P2 = ProductSet(C, D)
assert latex(Intersection(A, U2, evaluate=False)) == \
r'\left\{a\right\} \cap ' \
r'\left(\left\{c\right\} \cup \left\{d\right\}\right)'
assert latex(Intersection(U1, U2, evaluate=False)) == \
r'\left(\left\{a\right\} \cup \left\{b\right\}\right) ' \
r'\cap \left(\left\{c\right\} \cup \left\{d\right\}\right)'
assert latex(Intersection(C1, C2, evaluate=False)) == \
r'\left(\left\{a\right\} \setminus ' \
r'\left\{b\right\}\right) \cap \left(\left\{c\right\} ' \
r'\setminus \left\{d\right\}\right)'
assert latex(Intersection(D1, D2, evaluate=False)) == \
r'\left(\left\{a\right\} \triangle ' \
r'\left\{b\right\}\right) \cap \left(\left\{c\right\} ' \
r'\triangle \left\{d\right\}\right)'
assert latex(Intersection(P1, P2, evaluate=False)) == \
r'\left(\left\{a\right\} \times \left\{b\right\}\right) ' \
r'\cap \left(\left\{c\right\} \times ' \
r'\left\{d\right\}\right)'
assert latex(Union(A, I2, evaluate=False)) == \
r'\left\{a\right\} \cup ' \
r'\left(\left\{c\right\} \cap \left\{d\right\}\right)'
assert latex(Union(I1, I2, evaluate=False)) == \
r'\left(\left\{a\right\} \cap \left\{b\right\}\right) ' \
r'\cup \left(\left\{c\right\} \cap \left\{d\right\}\right)'
assert latex(Union(C1, C2, evaluate=False)) == \
r'\left(\left\{a\right\} \setminus ' \
r'\left\{b\right\}\right) \cup \left(\left\{c\right\} ' \
r'\setminus \left\{d\right\}\right)'
assert latex(Union(D1, D2, evaluate=False)) == \
r'\left(\left\{a\right\} \triangle ' \
r'\left\{b\right\}\right) \cup \left(\left\{c\right\} ' \
r'\triangle \left\{d\right\}\right)'
assert latex(Union(P1, P2, evaluate=False)) == \
r'\left(\left\{a\right\} \times \left\{b\right\}\right) ' \
r'\cup \left(\left\{c\right\} \times ' \
r'\left\{d\right\}\right)'
assert latex(Complement(A, C2, evaluate=False)) == \
r'\left\{a\right\} \setminus \left(\left\{c\right\} ' \
r'\setminus \left\{d\right\}\right)'
assert latex(Complement(U1, U2, evaluate=False)) == \
r'\left(\left\{a\right\} \cup \left\{b\right\}\right) ' \
r'\setminus \left(\left\{c\right\} \cup ' \
r'\left\{d\right\}\right)'
assert latex(Complement(I1, I2, evaluate=False)) == \
r'\left(\left\{a\right\} \cap \left\{b\right\}\right) ' \
r'\setminus \left(\left\{c\right\} \cap ' \
r'\left\{d\right\}\right)'
assert latex(Complement(D1, D2, evaluate=False)) == \
r'\left(\left\{a\right\} \triangle ' \
r'\left\{b\right\}\right) \setminus ' \
r'\left(\left\{c\right\} \triangle \left\{d\right\}\right)'
assert latex(Complement(P1, P2, evaluate=False)) == \
r'\left(\left\{a\right\} \times \left\{b\right\}\right) '\
r'\setminus \left(\left\{c\right\} \times '\
r'\left\{d\right\}\right)'
assert latex(SymmetricDifference(A, D2, evaluate=False)) == \
r'\left\{a\right\} \triangle \left(\left\{c\right\} ' \
r'\triangle \left\{d\right\}\right)'
assert latex(SymmetricDifference(U1, U2, evaluate=False)) == \
r'\left(\left\{a\right\} \cup \left\{b\right\}\right) ' \
r'\triangle \left(\left\{c\right\} \cup ' \
r'\left\{d\right\}\right)'
assert latex(SymmetricDifference(I1, I2, evaluate=False)) == \
r'\left(\left\{a\right\} \cap \left\{b\right\}\right) ' \
r'\triangle \left(\left\{c\right\} \cap ' \
r'\left\{d\right\}\right)'
assert latex(SymmetricDifference(C1, C2, evaluate=False)) == \
r'\left(\left\{a\right\} \setminus ' \
r'\left\{b\right\}\right) \triangle ' \
r'\left(\left\{c\right\} \setminus \left\{d\right\}\right)'
assert latex(SymmetricDifference(P1, P2, evaluate=False)) == \
r'\left(\left\{a\right\} \times \left\{b\right\}\right) ' \
r'\triangle \left(\left\{c\right\} \times ' \
r'\left\{d\right\}\right)'
# XXX This can be incorrect since cartesian product is not associative
assert latex(ProductSet(A, P2).flatten()) == \
r'\left\{a\right\} \times \left\{c\right\} \times ' \
r'\left\{d\right\}'
assert latex(ProductSet(U1, U2)) == \
r'\left(\left\{a\right\} \cup \left\{b\right\}\right) ' \
r'\times \left(\left\{c\right\} \cup ' \
r'\left\{d\right\}\right)'
assert latex(ProductSet(I1, I2)) == \
r'\left(\left\{a\right\} \cap \left\{b\right\}\right) ' \
r'\times \left(\left\{c\right\} \cap ' \
r'\left\{d\right\}\right)'
assert latex(ProductSet(C1, C2)) == \
r'\left(\left\{a\right\} \setminus ' \
r'\left\{b\right\}\right) \times \left(\left\{c\right\} ' \
r'\setminus \left\{d\right\}\right)'
assert latex(ProductSet(D1, D2)) == \
r'\left(\left\{a\right\} \triangle ' \
r'\left\{b\right\}\right) \times \left(\left\{c\right\} ' \
r'\triangle \left\{d\right\}\right)'
def test_latex_Complexes():
    # Standard number sets print as blackboard-bold letters.
    assert latex(S.Complexes) == r"\mathbb{C}"
def test_latex_Naturals():
    assert latex(S.Naturals) == r"\mathbb{N}"
def test_latex_Naturals0():
    # Naturals with zero gets a subscript 0.
    assert latex(S.Naturals0) == r"\mathbb{N}_0"
def test_latex_Integers():
    assert latex(S.Integers) == r"\mathbb{Z}"
def test_latex_ImageSet():
    # ImageSet prints in set-builder notation {expr | var in base}; multiple base
    # sets and tuple-valued lambda arguments each get their own membership clause.
    x = Symbol('x')
    assert latex(ImageSet(Lambda(x, x**2), S.Naturals)) == \
        r"\left\{x^{2}\; \middle|\; x \in \mathbb{N}\right\}"
    y = Symbol('y')
    imgset = ImageSet(Lambda((x, y), x + y), {1, 2, 3}, {3, 4})
    assert latex(imgset) == \
        r"\left\{x + y\; \middle|\; x \in \left\{1, 2, 3\right\}, y \in \left\{3, 4\right\}\right\}"
    imgset = ImageSet(Lambda(((x, y),), x + y), ProductSet({1, 2, 3}, {3, 4}))
    assert latex(imgset) == \
        r"\left\{x + y\; \middle|\; \left( x, \ y\right) \in \left\{1, 2, 3\right\} \times \left\{3, 4\right\}\right\}"
def test_latex_ConditionSet():
    # ConditionSet shows the base-set membership clause unless the base is UniversalSet.
    x = Symbol('x')
    assert latex(ConditionSet(x, Eq(x**2, 1), S.Reals)) == \
        r"\left\{x\; \middle|\; x \in \mathbb{R} \wedge x^{2} = 1 \right\}"
    assert latex(ConditionSet(x, Eq(x**2, 1), S.UniversalSet)) == \
        r"\left\{x\; \middle|\; x^{2} = 1 \right\}"
def test_latex_ComplexRegion():
    # Cartesian form prints x + yi; polar form prints r(i sin(theta) + cos(theta)).
    assert latex(ComplexRegion(Interval(3, 5)*Interval(4, 6))) == \
        r"\left\{x + y i\; \middle|\; x, y \in \left[3, 5\right] \times \left[4, 6\right] \right\}"
    assert latex(ComplexRegion(Interval(0, 1)*Interval(0, 2*pi), polar=True)) == \
        r"\left\{r \left(i \sin{\left(\theta \right)} + \cos{\left(\theta "\
        r"\right)}\right)\; \middle|\; r, \theta \in \left[0, 1\right] \times \left[0, 2 \pi\right) \right\}"
def test_latex_Contains():
    # Contains prints as an \in relation.
    x = Symbol('x')
    assert latex(Contains(x, S.Naturals)) == r"x \in \mathbb{N}"
def test_latex_sum():
    # Multi-index sums use \substack for the limits; a Sum of a sum needs
    # parentheses around its summand, and a power wraps the whole \sum.
    assert latex(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \
        r"\sum_{\substack{-2 \leq x \leq 2\\-5 \leq y \leq 5}} x y^{2}"
    assert latex(Sum(x**2, (x, -2, 2))) == \
        r"\sum_{x=-2}^{2} x^{2}"
    assert latex(Sum(x**2 + y, (x, -2, 2))) == \
        r"\sum_{x=-2}^{2} \left(x^{2} + y\right)"
    assert latex(Sum(x**2 + y, (x, -2, 2))**2) == \
        r"\left(\sum_{x=-2}^{2} \left(x^{2} + y\right)\right)^{2}"
def test_latex_product():
    # Same layout rules as Sum, but with \prod.
    assert latex(Product(x*y**2, (x, -2, 2), (y, -5, 5))) == \
        r"\prod_{\substack{-2 \leq x \leq 2\\-5 \leq y \leq 5}} x y^{2}"
    assert latex(Product(x**2, (x, -2, 2))) == \
        r"\prod_{x=-2}^{2} x^{2}"
    assert latex(Product(x**2 + y, (x, -2, 2))) == \
        r"\prod_{x=-2}^{2} \left(x^{2} + y\right)"
    assert latex(Product(x, (x, -2, 2))**2) == \
        r"\left(\prod_{x=-2}^{2} x\right)^{2}"
def test_latex_limits():
    # One-sided limits carry a ^+/^- on the approach point; dir='+-' drops it.
    assert latex(Limit(x, x, oo)) == r"\lim_{x \to \infty} x"
    # issue 8175
    f = Function('f')
    assert latex(Limit(f(x), x, 0)) == r"\lim_{x \to 0^+} f{\left(x \right)}"
    assert latex(Limit(f(x), x, 0, "-")) == \
        r"\lim_{x \to 0^-} f{\left(x \right)}"
    # issue #10806
    assert latex(Limit(f(x), x, 0)**2) == \
        r"\left(\lim_{x \to 0^+} f{\left(x \right)}\right)^{2}"
    # bi-directional limit
    assert latex(Limit(f(x), x, 0, dir='+-')) == \
        r"\lim_{x \to 0} f{\left(x \right)}"
def test_latex_log():
    # ln_notation=True swaps \log for \ln; an explicit base becomes a subscript,
    # which is unaffected by ln_notation.
    assert latex(log(x)) == r"\log{\left(x \right)}"
    assert latex(log(x), ln_notation=True) == r"\ln{\left(x \right)}"
    assert latex(log(x) + log(y)) == \
        r"\log{\left(x \right)} + \log{\left(y \right)}"
    assert latex(log(x) + log(y), ln_notation=True) == \
        r"\ln{\left(x \right)} + \ln{\left(y \right)}"
    assert latex(pow(log(x), x)) == r"\log{\left(x \right)}^{x}"
    assert latex(pow(log(x), x), ln_notation=True) == \
        r"\ln{\left(x \right)}^{x}"
    assert latex(log(x, y, evaluate=False)) == r"\log_y{\left(x \right)}"
    assert latex(log(x, 10, evaluate=False)) == r"\log_{10}{\left(x \right)}"
    assert latex(log(x, y, evaluate=False), ln_notation=True) == r"\log_y{\left(x \right)}"
    assert latex(log(x, 10, evaluate=False), ln_notation=True) == r"\log_{10}{\left(x \right)}"
def test_issue_3568():
    # A symbol named either '\beta' or 'beta' must print as \beta; Add ordering
    # is unspecified, so both orders of the sum are accepted.
    beta = Symbol(r'\beta')
    y = beta + x
    assert latex(y) in [r'\beta + x', r'x + \beta']
    beta = Symbol(r'beta')
    y = beta + x
    assert latex(y) in [r'\beta + x', r'x + \beta']
def test_latex():
    # Smoke test: radical/power simplification, mode/itex wrappers, and list printing.
    assert latex((2*tau)**Rational(7, 2)) == r"8 \sqrt{2} \tau^{\frac{7}{2}}"
    assert latex((2*mu)**Rational(7, 2), mode='equation*') == \
        r"\begin{equation*}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation*}"
    assert latex((2*mu)**Rational(7, 2), mode='equation', itex=True) == \
        r"$$8 \sqrt{2} \mu^{\frac{7}{2}}$$"
    assert latex([2/x, y]) == r"\left[ \frac{2}{x}, \ y\right]"
def test_latex_dict():
    # Both builtin dict and sympy Dict print with entries in the printer's
    # canonical key order, not insertion order.
    d = {Rational(1): 1, x**2: 2, x: 3, x**3: 4}
    assert latex(d) == \
        r'\left\{ 1 : 1, \ x : 3, \ x^{2} : 2, \ x^{3} : 4\right\}'
    D = Dict(d)
    assert latex(D) == \
        r'\left\{ 1 : 1, \ x : 3, \ x^{2} : 2, \ x^{3} : 4\right\}'
def test_latex_list():
    # Lists print element-wise inside \left[ ... \right].
    ll = [Symbol('omega1'), Symbol('a'), Symbol('alpha')]
    assert latex(ll) == r'\left[ \omega_{1}, \ a, \ \alpha\right]'
def test_latex_NumberSymbols():
    """Each named mathematical constant has a fixed LaTeX spelling."""
    expected = [
        (S.Catalan, "G"),
        (S.EulerGamma, r"\gamma"),
        (S.Exp1, "e"),
        (S.GoldenRatio, r"\phi"),
        (S.Pi, r"\pi"),
        (S.TribonacciConstant, r"\text{TribonacciConstant}"),
    ]
    for constant, rendering in expected:
        assert latex(constant) == rendering
def test_latex_rational():
    # tests issue 3973: all three spellings of -1/2 print identically,
    # and the minus sign stays outside the fraction.
    minus_half = r"- \frac{1}{2}"
    for q in (-Rational(1, 2), Rational(-1, 2), Rational(1, -2)):
        assert latex(q) == minus_half
    assert latex(-Rational(-1, 2)) == r"\frac{1}{2}"
    assert latex(-Rational(1, 2)*x) == r"- \frac{x}{2}"
    assert latex(-Rational(1, 2)*x + Rational(-2, 3)*y) == \
        r"- \frac{x}{2} - \frac{2 y}{3}"
def test_latex_inverse():
    # tests issue 4129: reciprocals render as \frac with the full denominator.
    for expr, expected in [
        (1/x, r"\frac{1}{x}"),
        (1/(x + y), r"\frac{1}{x + y}"),
    ]:
        assert latex(expr) == expected
def test_latex_DiracDelta():
    # Derivative order appears as \delta^{(n)}; order 0 prints like plain delta.
    assert latex(DiracDelta(x)) == r"\delta\left(x\right)"
    assert latex(DiracDelta(x)**2) == r"\left(\delta\left(x\right)\right)^{2}"
    assert latex(DiracDelta(x, 0)) == r"\delta\left(x\right)"
    assert latex(DiracDelta(x, 5)) == \
        r"\delta^{\left( 5 \right)}\left( x \right)"
    assert latex(DiracDelta(x, 5)**2) == \
        r"\left(\delta^{\left( 5 \right)}\left( x \right)\right)^{2}"
def test_latex_Heaviside():
    # Heaviside step prints as \theta; powers parenthesize the whole function.
    assert latex(Heaviside(x)) == r"\theta\left(x\right)"
    assert latex(Heaviside(x)**2) == r"\left(\theta\left(x\right)\right)^{2}"
def test_latex_KroneckerDelta():
    # Compound index expressions get a comma separator in the subscript.
    assert latex(KroneckerDelta(x, y)) == r"\delta_{x y}"
    assert latex(KroneckerDelta(x, y + 1)) == r"\delta_{x, y + 1}"
    # issue 6578
    assert latex(KroneckerDelta(x + 1, y)) == r"\delta_{y, x + 1}"
    assert latex(Pow(KroneckerDelta(x, y), 2, evaluate=False)) == \
        r"\left(\delta_{x y}\right)^{2}"
def test_latex_LeviCivita():
    # Like KroneckerDelta: plain symbols pack into the subscript, compound
    # index expressions are comma-separated.
    assert latex(LeviCivita(x, y, z)) == r"\varepsilon_{x y z}"
    assert latex(LeviCivita(x, y, z)**2) == \
        r"\left(\varepsilon_{x y z}\right)^{2}"
    assert latex(LeviCivita(x, y, z + 1)) == r"\varepsilon_{x, y, z + 1}"
    assert latex(LeviCivita(x, y + 1, z)) == r"\varepsilon_{x, y + 1, z}"
    assert latex(LeviCivita(x + 1, y, z)) == r"\varepsilon_{x + 1, y, z}"
def test_mode():
    """The ``mode`` keyword selects the output wrapper; unknown modes raise."""
    expr = x + y
    wrappers = {
        'plain': r'x + y',
        'inline': r'$x + y$',
        'equation*': r'\begin{equation*}x + y\end{equation*}',
        'equation': r'\begin{equation}x + y\end{equation}',
    }
    assert latex(expr) == r'x + y'  # default is plain mode
    for mode, expected in wrappers.items():
        assert latex(expr, mode=mode) == expected
    raises(ValueError, lambda: latex(expr, mode='foo'))
def test_latex_mathieu():
    r"""Mathieu functions print as C/S; primed variants carry ^{\prime}."""
    for func, letter in ((mathieuc, "C"), (mathieus, "S")):
        assert latex(func(x, y, z)) == rf"{letter}\left(x, y, z\right)"
        assert latex(func(x, y, z)**2) == rf"{letter}\left(x, y, z\right)^{{2}}"
    for func, letter in ((mathieucprime, "C"), (mathieusprime, "S")):
        assert latex(func(x, y, z)) == rf"{letter}^{{\prime}}\left(x, y, z\right)"
        assert latex(func(x, y, z)**2) == \
            rf"{letter}^{{\prime}}\left(x, y, z\right)^{{2}}"
def test_latex_Piecewise():
p = Piecewise((x, x < 1), (x**2, True))
assert latex(p) == r"\begin{cases} x & \text{for}\: x < 1 \\x^{2} &" \
r" \text{otherwise} \end{cases}"
assert latex(p, itex=True) == \
r"\begin{cases} x & \text{for}\: x \lt 1 \\x^{2} &" \
r" \text{otherwise} \end{cases}"
p = Piecewise((x, x < 0), (0, x >= 0))
assert latex(p) == r'\begin{cases} x & \text{for}\: x < 0 \\0 &' \
r' \text{otherwise} \end{cases}'
A, B = symbols("A B", commutative=False)
p = Piecewise((A**2, Eq(A, B)), (A*B, True))
s = r"\begin{cases} A^{2} & \text{for}\: A = B \\A B & \text{otherwise} \end{cases}"
assert latex(p) == s
assert latex(A*p) == r"A \left(%s\right)" % s
assert latex(p*A) == r"\left(%s\right) A" % s
assert latex(Piecewise((x, x < 1), (x**2, x < 2))) == \
r'\begin{cases} x & ' \
r'\text{for}\: x < 1 \\x^{2} & \text{for}\: x < 2 \end{cases}'
def test_latex_Matrix():
M = Matrix([[1 + x, y], [y, x - 1]])
assert latex(M) == \
r'\left[\begin{matrix}x + 1 & y\\y & x - 1\end{matrix}\right]'
assert latex(M, mode='inline') == \
r'$\left[\begin{smallmatrix}x + 1 & y\\' \
r'y & x - 1\end{smallmatrix}\right]$'
assert latex(M, mat_str='array') == \
r'\left[\begin{array}{cc}x + 1 & y\\y & x - 1\end{array}\right]'
assert latex(M, mat_str='bmatrix') == \
r'\left[\begin{bmatrix}x + 1 & y\\y & x - 1\end{bmatrix}\right]'
assert latex(M, mat_delim=None, mat_str='bmatrix') == \
r'\begin{bmatrix}x + 1 & y\\y & x - 1\end{bmatrix}'
M2 = Matrix(1, 11, range(11))
assert latex(M2) == \
r'\left[\begin{array}{ccccccccccc}' \
r'0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10\end{array}\right]'
def test_latex_matrix_with_functions():
t = symbols('t')
theta1 = symbols('theta1', cls=Function)
M = Matrix([[sin(theta1(t)), cos(theta1(t))],
[cos(theta1(t).diff(t)), sin(theta1(t).diff(t))]])
expected = (r'\left[\begin{matrix}\sin{\left('
r'\theta_{1}{\left(t \right)} \right)} & '
r'\cos{\left(\theta_{1}{\left(t \right)} \right)'
r'}\\\cos{\left(\frac{d}{d t} \theta_{1}{\left(t '
r'\right)} \right)} & \sin{\left(\frac{d}{d t} '
r'\theta_{1}{\left(t \right)} \right'
r')}\end{matrix}\right]')
assert latex(M) == expected
def test_latex_NDimArray():
x, y, z, w = symbols("x y z w")
for ArrayType in (ImmutableDenseNDimArray, ImmutableSparseNDimArray,
MutableDenseNDimArray, MutableSparseNDimArray):
# Basic: scalar array
M = ArrayType(x)
assert latex(M) == r"x"
M = ArrayType([[1 / x, y], [z, w]])
M1 = ArrayType([1 / x, y, z])
M2 = tensorproduct(M1, M)
M3 = tensorproduct(M, M)
assert latex(M) == \
r'\left[\begin{matrix}\frac{1}{x} & y\\z & w\end{matrix}\right]'
assert latex(M1) == \
r"\left[\begin{matrix}\frac{1}{x} & y & z\end{matrix}\right]"
assert latex(M2) == \
r"\left[\begin{matrix}" \
r"\left[\begin{matrix}\frac{1}{x^{2}} & \frac{y}{x}\\\frac{z}{x} & \frac{w}{x}\end{matrix}\right] & " \
r"\left[\begin{matrix}\frac{y}{x} & y^{2}\\y z & w y\end{matrix}\right] & " \
r"\left[\begin{matrix}\frac{z}{x} & y z\\z^{2} & w z\end{matrix}\right]" \
r"\end{matrix}\right]"
assert latex(M3) == \
r"""\left[\begin{matrix}"""\
r"""\left[\begin{matrix}\frac{1}{x^{2}} & \frac{y}{x}\\\frac{z}{x} & \frac{w}{x}\end{matrix}\right] & """\
r"""\left[\begin{matrix}\frac{y}{x} & y^{2}\\y z & w y\end{matrix}\right]\\"""\
r"""\left[\begin{matrix}\frac{z}{x} & y z\\z^{2} & w z\end{matrix}\right] & """\
r"""\left[\begin{matrix}\frac{w}{x} & w y\\w z & w^{2}\end{matrix}\right]"""\
r"""\end{matrix}\right]"""
Mrow = ArrayType([[x, y, 1/z]])
Mcolumn = ArrayType([[x], [y], [1/z]])
Mcol2 = ArrayType([Mcolumn.tolist()])
assert latex(Mrow) == \
r"\left[\left[\begin{matrix}x & y & \frac{1}{z}\end{matrix}\right]\right]"
assert latex(Mcolumn) == \
r"\left[\begin{matrix}x\\y\\\frac{1}{z}\end{matrix}\right]"
assert latex(Mcol2) == \
r'\left[\begin{matrix}\left[\begin{matrix}x\\y\\\frac{1}{z}\end{matrix}\right]\end{matrix}\right]'
def test_latex_mul_symbol():
    """Each mul_symbol option selects a distinct multiplication glyph."""
    glyphs = {'times': r"\times", 'dot': r"\cdot", 'ldot': r"\,.\,"}
    for option, glyph in glyphs.items():
        assert latex(4*4**x, mul_symbol=option) == rf"4 {glyph} 4^{{x}}"
        assert latex(4*x, mul_symbol=option) == rf"4 {glyph} x"
def test_latex_issue_4381():
    # 4*4**log(2) must not be combined; the explicit \cdot is kept,
    # including inside a reciprocal's denominator.
    y = 4*4**log(2)
    assert latex(y) == r'4 \cdot 4^{\log{\left(2 \right)}}'
    assert latex(1/y) == r'\frac{1}{4 \cdot 4^{\log{\left(2 \right)}}}'
def test_latex_issue_4576():
    """Underscore/caret name parsing: runs of _ and ^ (single or doubled)
    split a symbol name into stacked sub/superscripts."""
    expected_by_name = {
        "beta_13_2": r"\beta_{13 2}",
        "beta_132_20": r"\beta_{132 20}",
        "beta_13": r"\beta_{13}",
        "x_a_b": r"x_{a b}",
        "x_1_2_3": r"x_{1 2 3}",
        "x_a_b1": r"x_{a b1}",
        "x_a_1": r"x_{a 1}",
        "x_1_a": r"x_{1 a}",
        "x_1^aa": r"x^{aa}_{1}",
        "x_1__aa": r"x^{aa}_{1}",
        "x_11^a": r"x^{a}_{11}",
        "x_11__a": r"x^{a}_{11}",
        "x_a_a_a_a": r"x_{a a a a}",
        "x_a_a^a^a": r"x^{a a}_{a a}",
        "x_a_a__a__a": r"x^{a a}_{a a}",
        "alpha_11": r"\alpha_{11}",
        "alpha_11_11": r"\alpha_{11 11}",
        "alpha_alpha": r"\alpha_{\alpha}",
        "alpha^aleph": r"\alpha^{\aleph}",
        "alpha__aleph": r"\alpha^{\aleph}",
    }
    for name, expected in expected_by_name.items():
        assert latex(Symbol(name)) == expected
def test_latex_pow_fraction():
    # Only check the power substring; whitespace is stripped so the test is
    # insensitive to exact spacing in the surrounding Mul/fraction.
    x = Symbol('x')
    # Testing exp
    assert r'e^{-x}' in latex(exp(-x)/2).replace(' ', '')  # Remove Whitespace
    # Testing e^{-x} in case future changes alter behavior of muls or fracs
    # In particular current output is \frac{1}{2}e^{- x} but perhaps this will
    # change to \frac{e^{-x}}{2}
    # Testing general, non-exp, power
    assert r'3^{-x}' in latex(3**-x/2).replace(' ', '')
def test_noncommutative():
    """Factor order of noncommutative symbols must be preserved verbatim."""
    A, B, C = symbols('A,B,C', commutative=False)
    for expr, expected in [
        (A*B*C**-1, r"A B C^{-1}"),
        (C**-1*A*B, r"C^{-1} A B"),
        (A*C**-1*B, r"A C^{-1} B"),
    ]:
        assert latex(expr) == expected
def test_latex_order():
    # The 'order' keyword controls monomial ordering: lex, reverse lex, or
    # the expression's internal (unsorted) order.
    expr = x**3 + x**2*y + y**4 + 3*x*y**3
    assert latex(expr, order='lex') == r"x^{3} + x^{2} y + 3 x y^{3} + y^{4}"
    assert latex(
        expr, order='rev-lex') == r"y^{4} + 3 x y^{3} + x^{2} y + x^{3}"
    assert latex(expr, order='none') == r"x^{3} + y^{4} + y x^{2} + 3 x y^{3}"
def test_latex_Lambda():
    # Lambda prints as (args \mapsto body); tuple arguments keep their parens.
    assert latex(Lambda(x, x + 1)) == r"\left( x \mapsto x + 1 \right)"
    assert latex(Lambda((x, y), x + 1)) == r"\left( \left( x, \ y\right) \mapsto x + 1 \right)"
    assert latex(Lambda(x, x)) == r"\left( x \mapsto x \right)"
def test_latex_PolyElement():
Ruv, u, v = ring("u,v", ZZ)
Rxyz, x, y, z = ring("x,y,z", Ruv)
assert latex(x - x) == r"0"
assert latex(x - 1) == r"x - 1"
assert latex(x + 1) == r"x + 1"
assert latex((u**2 + 3*u*v + 1)*x**2*y + u + 1) == \
r"\left({u}^{2} + 3 u v + 1\right) {x}^{2} y + u + 1"
assert latex((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x) == \
r"\left({u}^{2} + 3 u v + 1\right) {x}^{2} y + \left(u + 1\right) x"
assert latex((u**2 + 3*u*v + 1)*x**2*y + (u + 1)*x + 1) == \
r"\left({u}^{2} + 3 u v + 1\right) {x}^{2} y + \left(u + 1\right) x + 1"
assert latex((-u**2 + 3*u*v - 1)*x**2*y - (u + 1)*x - 1) == \
r"-\left({u}^{2} - 3 u v + 1\right) {x}^{2} y - \left(u + 1\right) x - 1"
assert latex(-(v**2 + v + 1)*x + 3*u*v + 1) == \
r"-\left({v}^{2} + v + 1\right) x + 3 u v + 1"
assert latex(-(v**2 + v + 1)*x - 3*u*v + 1) == \
r"-\left({v}^{2} + v + 1\right) x - 3 u v + 1"
def test_latex_FracElement():
Fuv, u, v = field("u,v", ZZ)
Fxyzt, x, y, z, t = field("x,y,z,t", Fuv)
assert latex(x - x) == r"0"
assert latex(x - 1) == r"x - 1"
assert latex(x + 1) == r"x + 1"
assert latex(x/3) == r"\frac{x}{3}"
assert latex(x/z) == r"\frac{x}{z}"
assert latex(x*y/z) == r"\frac{x y}{z}"
assert latex(x/(z*t)) == r"\frac{x}{z t}"
assert latex(x*y/(z*t)) == r"\frac{x y}{z t}"
assert latex((x - 1)/y) == r"\frac{x - 1}{y}"
assert latex((x + 1)/y) == r"\frac{x + 1}{y}"
assert latex((-x - 1)/y) == r"\frac{-x - 1}{y}"
assert latex((x + 1)/(y*z)) == r"\frac{x + 1}{y z}"
assert latex(-y/(x + 1)) == r"\frac{-y}{x + 1}"
assert latex(y*z/(x + 1)) == r"\frac{y z}{x + 1}"
assert latex(((u + 1)*x*y + 1)/((v - 1)*z - 1)) == \
r"\frac{\left(u + 1\right) x y + 1}{\left(v - 1\right) z - 1}"
assert latex(((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1)) == \
r"\frac{\left(u + 1\right) x y + 1}{\left(v - 1\right) z - u v t - 1}"
def test_latex_Poly():
    # Poly prints its expression, generators, and domain inside \operatorname{Poly}.
    assert latex(Poly(x**2 + 2 * x, x)) == \
        r"\operatorname{Poly}{\left( x^{2} + 2 x, x, domain=\mathbb{Z} \right)}"
    assert latex(Poly(x/y, x)) == \
        r"\operatorname{Poly}{\left( \frac{1}{y} x, x, domain=\mathbb{Z}\left(y\right) \right)}"
    assert latex(Poly(2.0*x + y)) == \
        r"\operatorname{Poly}{\left( 2.0 x + 1.0 y, x, y, domain=\mathbb{R} \right)}"
def test_latex_Poly_order():
assert latex(Poly([a, 1, b, 2, c, 3], x)) == \
r'\operatorname{Poly}{\left( a x^{5} + x^{4} + b x^{3} + 2 x^{2} + c'\
r' x + 3, x, domain=\mathbb{Z}\left[a, b, c\right] \right)}'
assert latex(Poly([a, 1, b+c, 2, 3], x)) == \
r'\operatorname{Poly}{\left( a x^{4} + x^{3} + \left(b + c\right) '\
r'x^{2} + 2 x + 3, x, domain=\mathbb{Z}\left[a, b, c\right] \right)}'
assert latex(Poly(a*x**3 + x**2*y - x*y - c*y**3 - b*x*y**2 + y - a*x + b,
(x, y))) == \
r'\operatorname{Poly}{\left( a x^{3} + x^{2}y - b xy^{2} - xy - '\
r'a x - c y^{3} + y + b, x, y, domain=\mathbb{Z}\left[a, b, c\right] \right)}'
def test_latex_ComplexRootOf():
    # CRootOf prints the polynomial and the root index.
    assert latex(rootof(x**5 + x + 3, 0)) == \
        r"\operatorname{CRootOf} {\left(x^{5} + x + 3, 0\right)}"
def test_latex_RootSum():
    # RootSum prints the polynomial and the summand as a Lambda.
    assert latex(RootSum(x**5 + x + 3, sin)) == \
        r"\operatorname{RootSum} {\left(x^{5} + x + 3, \left( x \mapsto \sin{\left(x \right)} \right)\right)}"
def test_settings():
    # Unknown printer settings must raise TypeError rather than be ignored.
    raises(TypeError, lambda: latex(x*y, method="garbage"))
def test_latex_numbers():
assert latex(catalan(n)) == r"C_{n}"
assert latex(catalan(n)**2) == r"C_{n}^{2}"
assert latex(bernoulli(n)) == r"B_{n}"
assert latex(bernoulli(n, x)) == r"B_{n}\left(x\right)"
assert latex(bernoulli(n)**2) == r"B_{n}^{2}"
assert latex(bernoulli(n, x)**2) == r"B_{n}^{2}\left(x\right)"
assert latex(genocchi(n)) == r"G_{n}"
assert latex(genocchi(n, x)) == r"G_{n}\left(x\right)"
assert latex(genocchi(n)**2) == r"G_{n}^{2}"
assert latex(genocchi(n, x)**2) == r"G_{n}^{2}\left(x\right)"
assert latex(bell(n)) == r"B_{n}"
assert latex(bell(n, x)) == r"B_{n}\left(x\right)"
assert latex(bell(n, m, (x, y))) == r"B_{n, m}\left(x, y\right)"
assert latex(bell(n)**2) == r"B_{n}^{2}"
assert latex(bell(n, x)**2) == r"B_{n}^{2}\left(x\right)"
assert latex(bell(n, m, (x, y))**2) == r"B_{n, m}^{2}\left(x, y\right)"
assert latex(fibonacci(n)) == r"F_{n}"
assert latex(fibonacci(n, x)) == r"F_{n}\left(x\right)"
assert latex(fibonacci(n)**2) == r"F_{n}^{2}"
assert latex(fibonacci(n, x)**2) == r"F_{n}^{2}\left(x\right)"
assert latex(lucas(n)) == r"L_{n}"
assert latex(lucas(n)**2) == r"L_{n}^{2}"
assert latex(tribonacci(n)) == r"T_{n}"
assert latex(tribonacci(n, x)) == r"T_{n}\left(x\right)"
assert latex(tribonacci(n)**2) == r"T_{n}^{2}"
assert latex(tribonacci(n, x)**2) == r"T_{n}^{2}\left(x\right)"
assert latex(mobius(n)) == r"\mu\left(n\right)"
assert latex(mobius(n)**2) == r"\mu^{2}\left(n\right)"
def test_latex_euler():
    # Euler numbers E_n and Euler polynomials E_n(x); powers go on the E.
    assert latex(euler(n)) == r"E_{n}"
    assert latex(euler(n, x)) == r"E_{n}\left(x\right)"
    assert latex(euler(n, x)**2) == r"E_{n}^{2}\left(x\right)"
def test_lamda():
    # 'lamda' (spelled to avoid the Python keyword) renders as a Greek lambda.
    for name, expected in (('lamda', r"\lambda"), ('Lamda', r"\Lambda")):
        assert latex(Symbol(name)) == expected
def test_custom_symbol_names():
    """symbol_names overrides the rendering of chosen symbols only."""
    x = Symbol('x')
    y = Symbol('y')
    override = {x: "x_i"}
    assert latex(x) == r"x"
    assert latex(x, symbol_names=override) == r"x_i"
    assert latex(x + y, symbol_names=override) == r"x_i + y"
    assert latex(x**2, symbol_names=override) == r"x_i^{2}"
    assert latex(x + y, symbol_names={x: "x_i", y: "y_j"}) == r"x_i + y_j"
def test_matAdd():
C = MatrixSymbol('C', 5, 5)
B = MatrixSymbol('B', 5, 5)
n = symbols("n")
h = MatrixSymbol("h", 1, 1)
assert latex(C - 2*B) in [r'- 2 B + C', r'C -2 B']
assert latex(C + 2*B) in [r'2 B + C', r'C + 2 B']
assert latex(B - 2*C) in [r'B - 2 C', r'- 2 C + B']
assert latex(B + 2*C) in [r'B + 2 C', r'2 C + B']
assert latex(n * h - (-h + h.T) * (h + h.T)) == 'n h - \\left(- h + h^{T}\\right) \\left(h + h^{T}\\right)'
assert latex(MatAdd(MatAdd(h, h), MatAdd(h, h))) == '\\left(h + h\\right) + \\left(h + h\\right)'
assert latex(MatMul(MatMul(h, h), MatMul(h, h))) == '\\left(h h\\right) \\left(h h\\right)'
def test_matMul():
A = MatrixSymbol('A', 5, 5)
B = MatrixSymbol('B', 5, 5)
x = Symbol('x')
assert latex(2*A) == r'2 A'
assert latex(2*x*A) == r'2 x A'
assert latex(-2*A) == r'- 2 A'
assert latex(1.5*A) == r'1.5 A'
assert latex(sqrt(2)*A) == r'\sqrt{2} A'
assert latex(-sqrt(2)*A) == r'- \sqrt{2} A'
assert latex(2*sqrt(2)*x*A) == r'2 \sqrt{2} x A'
assert latex(-2*A*(A + 2*B)) in [r'- 2 A \left(A + 2 B\right)',
r'- 2 A \left(2 B + A\right)']
def test_latex_MatrixSlice():
n = Symbol('n', integer=True)
x, y, z, w, t, = symbols('x y z w t')
X = MatrixSymbol('X', n, n)
Y = MatrixSymbol('Y', 10, 10)
Z = MatrixSymbol('Z', 10, 10)
assert latex(MatrixSlice(X, (None, None, None), (None, None, None))) == r'X\left[:, :\right]'
assert latex(X[x:x + 1, y:y + 1]) == r'X\left[x:x + 1, y:y + 1\right]'
assert latex(X[x:x + 1:2, y:y + 1:2]) == r'X\left[x:x + 1:2, y:y + 1:2\right]'
assert latex(X[:x, y:]) == r'X\left[:x, y:\right]'
assert latex(X[:x, y:]) == r'X\left[:x, y:\right]'
assert latex(X[x:, :y]) == r'X\left[x:, :y\right]'
assert latex(X[x:y, z:w]) == r'X\left[x:y, z:w\right]'
assert latex(X[x:y:t, w:t:x]) == r'X\left[x:y:t, w:t:x\right]'
assert latex(X[x::y, t::w]) == r'X\left[x::y, t::w\right]'
assert latex(X[:x:y, :t:w]) == r'X\left[:x:y, :t:w\right]'
assert latex(X[::x, ::y]) == r'X\left[::x, ::y\right]'
assert latex(MatrixSlice(X, (0, None, None), (0, None, None))) == r'X\left[:, :\right]'
assert latex(MatrixSlice(X, (None, n, None), (None, n, None))) == r'X\left[:, :\right]'
assert latex(MatrixSlice(X, (0, n, None), (0, n, None))) == r'X\left[:, :\right]'
assert latex(MatrixSlice(X, (0, n, 2), (0, n, 2))) == r'X\left[::2, ::2\right]'
assert latex(X[1:2:3, 4:5:6]) == r'X\left[1:2:3, 4:5:6\right]'
assert latex(X[1:3:5, 4:6:8]) == r'X\left[1:3:5, 4:6:8\right]'
assert latex(X[1:10:2]) == r'X\left[1:10:2, :\right]'
assert latex(Y[:5, 1:9:2]) == r'Y\left[:5, 1:9:2\right]'
assert latex(Y[:5, 1:10:2]) == r'Y\left[:5, 1::2\right]'
assert latex(Y[5, :5:2]) == r'Y\left[5:6, :5:2\right]'
assert latex(X[0:1, 0:1]) == r'X\left[:1, :1\right]'
assert latex(X[0:1:2, 0:1:2]) == r'X\left[:1:2, :1:2\right]'
assert latex((Y + Z)[2:, 2:]) == r'\left(Y + Z\right)\left[2:, 2:\right]'
def test_latex_RandomDomain():
from sympy.stats import Normal, Die, Exponential, pspace, where
from sympy.stats.rv import RandomDomain
X = Normal('x1', 0, 1)
assert latex(where(X > 0)) == r"\text{Domain: }0 < x_{1} \wedge x_{1} < \infty"
D = Die('d1', 6)
assert latex(where(D > 4)) == r"\text{Domain: }d_{1} = 5 \vee d_{1} = 6"
A = Exponential('a', 1)
B = Exponential('b', 1)
assert latex(
pspace(Tuple(A, B)).domain) == \
r"\text{Domain: }0 \leq a \wedge 0 \leq b \wedge a < \infty \wedge b < \infty"
assert latex(RandomDomain(FiniteSet(x), FiniteSet(1, 2))) == \
r'\text{Domain: }\left\{x\right\} \in \left\{1, 2\right\}'
def test_PrettyPoly():
    # Domain elements (frac field / poly ring) print the same as the plain
    # sympy expressions they were converted from.
    from sympy.polys.domains import QQ
    F = QQ.frac_field(x, y)
    R = QQ[x, y]
    assert latex(F.convert(x/(x + y))) == latex(x/(x + y))
    assert latex(R.convert(x + y)) == latex(x + y)
def test_integral_transforms():
x = Symbol("x")
k = Symbol("k")
f = Function("f")
a = Symbol("a")
b = Symbol("b")
assert latex(MellinTransform(f(x), x, k)) == \
r"\mathcal{M}_{x}\left[f{\left(x \right)}\right]\left(k\right)"
assert latex(InverseMellinTransform(f(k), k, x, a, b)) == \
r"\mathcal{M}^{-1}_{k}\left[f{\left(k \right)}\right]\left(x\right)"
assert latex(LaplaceTransform(f(x), x, k)) == \
r"\mathcal{L}_{x}\left[f{\left(x \right)}\right]\left(k\right)"
assert latex(InverseLaplaceTransform(f(k), k, x, (a, b))) == \
r"\mathcal{L}^{-1}_{k}\left[f{\left(k \right)}\right]\left(x\right)"
assert latex(FourierTransform(f(x), x, k)) == \
r"\mathcal{F}_{x}\left[f{\left(x \right)}\right]\left(k\right)"
assert latex(InverseFourierTransform(f(k), k, x)) == \
r"\mathcal{F}^{-1}_{k}\left[f{\left(k \right)}\right]\left(x\right)"
assert latex(CosineTransform(f(x), x, k)) == \
r"\mathcal{COS}_{x}\left[f{\left(x \right)}\right]\left(k\right)"
assert latex(InverseCosineTransform(f(k), k, x)) == \
r"\mathcal{COS}^{-1}_{k}\left[f{\left(k \right)}\right]\left(x\right)"
assert latex(SineTransform(f(x), x, k)) == \
r"\mathcal{SIN}_{x}\left[f{\left(x \right)}\right]\left(k\right)"
assert latex(InverseSineTransform(f(k), k, x)) == \
r"\mathcal{SIN}^{-1}_{k}\left[f{\left(k \right)}\right]\left(x\right)"
def test_PolynomialRingBase():
    # Old-style polynomial rings print as Q[x, y]; a non-global order ('ilex')
    # adds the localization prefix.
    from sympy.polys.domains import QQ
    assert latex(QQ.old_poly_ring(x, y)) == r"\mathbb{Q}\left[x, y\right]"
    assert latex(QQ.old_poly_ring(x, y, order="ilex")) == \
        r"S_<^{-1}\mathbb{Q}\left[x, y\right]"
def test_categories():
from sympy.categories import (Object, IdentityMorphism,
NamedMorphism, Category, Diagram,
DiagramGrid)
A1 = Object("A1")
A2 = Object("A2")
A3 = Object("A3")
f1 = NamedMorphism(A1, A2, "f1")
f2 = NamedMorphism(A2, A3, "f2")
id_A1 = IdentityMorphism(A1)
K1 = Category("K1")
assert latex(A1) == r"A_{1}"
assert latex(f1) == r"f_{1}:A_{1}\rightarrow A_{2}"
assert latex(id_A1) == r"id:A_{1}\rightarrow A_{1}"
assert latex(f2*f1) == r"f_{2}\circ f_{1}:A_{1}\rightarrow A_{3}"
assert latex(K1) == r"\mathbf{K_{1}}"
d = Diagram()
assert latex(d) == r"\emptyset"
d = Diagram({f1: "unique", f2: S.EmptySet})
assert latex(d) == r"\left\{ f_{2}\circ f_{1}:A_{1}" \
r"\rightarrow A_{3} : \emptyset, \ id:A_{1}\rightarrow " \
r"A_{1} : \emptyset, \ id:A_{2}\rightarrow A_{2} : " \
r"\emptyset, \ id:A_{3}\rightarrow A_{3} : \emptyset, " \
r"\ f_{1}:A_{1}\rightarrow A_{2} : \left\{unique\right\}, " \
r"\ f_{2}:A_{2}\rightarrow A_{3} : \emptyset\right\}"
d = Diagram({f1: "unique", f2: S.EmptySet}, {f2 * f1: "unique"})
assert latex(d) == r"\left\{ f_{2}\circ f_{1}:A_{1}" \
r"\rightarrow A_{3} : \emptyset, \ id:A_{1}\rightarrow " \
r"A_{1} : \emptyset, \ id:A_{2}\rightarrow A_{2} : " \
r"\emptyset, \ id:A_{3}\rightarrow A_{3} : \emptyset, " \
r"\ f_{1}:A_{1}\rightarrow A_{2} : \left\{unique\right\}," \
r" \ f_{2}:A_{2}\rightarrow A_{3} : \emptyset\right\}" \
r"\Longrightarrow \left\{ f_{2}\circ f_{1}:A_{1}" \
r"\rightarrow A_{3} : \left\{unique\right\}\right\}"
# A linear diagram.
A = Object("A")
B = Object("B")
C = Object("C")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
d = Diagram([f, g])
grid = DiagramGrid(d)
assert latex(grid) == r"\begin{array}{cc}" + "\n" \
r"A & B \\" + "\n" \
r" & C " + "\n" \
r"\end{array}" + "\n"
def test_Modules():
from sympy.polys.domains import QQ
from sympy.polys.agca import homomorphism
R = QQ.old_poly_ring(x, y)
F = R.free_module(2)
M = F.submodule([x, y], [1, x**2])
assert latex(F) == r"{\mathbb{Q}\left[x, y\right]}^{2}"
assert latex(M) == \
r"\left\langle {\left[ {x},{y} \right]},{\left[ {1},{x^{2}} \right]} \right\rangle"
I = R.ideal(x**2, y)
assert latex(I) == r"\left\langle {x^{2}},{y} \right\rangle"
Q = F / M
assert latex(Q) == \
r"\frac{{\mathbb{Q}\left[x, y\right]}^{2}}{\left\langle {\left[ {x},"\
r"{y} \right]},{\left[ {1},{x^{2}} \right]} \right\rangle}"
assert latex(Q.submodule([1, x**3/2], [2, y])) == \
r"\left\langle {{\left[ {1},{\frac{x^{3}}{2}} \right]} + {\left"\
r"\langle {\left[ {x},{y} \right]},{\left[ {1},{x^{2}} \right]} "\
r"\right\rangle}},{{\left[ {2},{y} \right]} + {\left\langle {\left[ "\
r"{x},{y} \right]},{\left[ {1},{x^{2}} \right]} \right\rangle}} \right\rangle"
h = homomorphism(QQ.old_poly_ring(x).free_module(2),
QQ.old_poly_ring(x).free_module(2), [0, 0])
assert latex(h) == \
r"{\left[\begin{matrix}0 & 0\\0 & 0\end{matrix}\right]} : "\
r"{{\mathbb{Q}\left[x\right]}^{2}} \to {{\mathbb{Q}\left[x\right]}^{2}}"
def test_QuotientRing():
from sympy.polys.domains import QQ
R = QQ.old_poly_ring(x)/[x**2 + 1]
assert latex(R) == \
r"\frac{\mathbb{Q}\left[x\right]}{\left\langle {x^{2} + 1} \right\rangle}"
assert latex(R.one) == r"{1} + {\left\langle {x^{2} + 1} \right\rangle}"
def test_Tr():
#TODO: Handle indices
A, B = symbols('A B', commutative=False)
t = Tr(A*B)
assert latex(t) == r'\operatorname{tr}\left(A B\right)'
def test_Determinant():
    # Determinants use |...| delimiters; composite arguments (inverses,
    # sums, block matrices) are wrapped appropriately inside the bars.
    from sympy.matrices import Determinant, Inverse, BlockMatrix, OneMatrix, ZeroMatrix

    m = Matrix(((1, 2), (3, 4)))
    assert latex(Determinant(m)) == '\\left|{\\begin{matrix}1 & 2\\\\3 & 4\\end{matrix}}\\right|'
    assert latex(Determinant(Inverse(m))) == \
        '\\left|{\\left[\\begin{matrix}1 & 2\\\\3 & 4\\end{matrix}\\right]^{-1}}\\right|'
    X = MatrixSymbol('X', 2, 2)
    assert latex(Determinant(X)) == '\\left|{X}\\right|'
    assert latex(Determinant(X + m)) == \
        '\\left|{\\left[\\begin{matrix}1 & 2\\\\3 & 4\\end{matrix}\\right] + X}\\right|'
    assert latex(Determinant(BlockMatrix(((OneMatrix(2, 2), X),
                                          (m, ZeroMatrix(2, 2)))))) == \
        '\\left|{\\begin{matrix}1 & X\\\\\\left[\\begin{matrix}1 & 2\\\\3 & 4\\end{matrix}\\right] & 0\\end{matrix}}\\right|'


def test_Adjoint():
    # Adjoint (conjugate transpose) prints with a dagger by default; the
    # `adjoint_style` option selects star / hermitian variants.  Composite
    # operands get parenthesized.
    from sympy.matrices import Adjoint, Inverse, Transpose
    X = MatrixSymbol('X', 2, 2)
    Y = MatrixSymbol('Y', 2, 2)
    assert latex(Adjoint(X)) == r'X^{\dagger}'
    assert latex(Adjoint(X + Y)) == r'\left(X + Y\right)^{\dagger}'
    assert latex(Adjoint(X) + Adjoint(Y)) == r'X^{\dagger} + Y^{\dagger}'
    assert latex(Adjoint(X*Y)) == r'\left(X Y\right)^{\dagger}'
    assert latex(Adjoint(Y)*Adjoint(X)) == r'Y^{\dagger} X^{\dagger}'
    assert latex(Adjoint(X**2)) == r'\left(X^{2}\right)^{\dagger}'
    assert latex(Adjoint(X)**2) == r'\left(X^{\dagger}\right)^{2}'
    assert latex(Adjoint(Inverse(X))) == r'\left(X^{-1}\right)^{\dagger}'
    assert latex(Inverse(Adjoint(X))) == r'\left(X^{\dagger}\right)^{-1}'
    assert latex(Adjoint(Transpose(X))) == r'\left(X^{T}\right)^{\dagger}'
    assert latex(Transpose(Adjoint(X))) == r'\left(X^{\dagger}\right)^{T}'
    assert latex(Transpose(Adjoint(X) + Y)) == r'\left(X^{\dagger} + Y\right)^{T}'
    m = Matrix(((1, 2), (3, 4)))
    assert latex(Adjoint(m)) == '\\left[\\begin{matrix}1 & 2\\\\3 & 4\\end{matrix}\\right]^{\\dagger}'
    assert latex(Adjoint(m+X)) == \
        '\\left(\\left[\\begin{matrix}1 & 2\\\\3 & 4\\end{matrix}\\right] + X\\right)^{\\dagger}'
    from sympy.matrices import BlockMatrix, OneMatrix, ZeroMatrix
    assert latex(Adjoint(BlockMatrix(((OneMatrix(2, 2), X),
                                      (m, ZeroMatrix(2, 2)))))) == \
        '\\left[\\begin{matrix}1 & X\\\\\\left[\\begin{matrix}1 & 2\\\\3 & 4\\end{matrix}\\right] & 0\\end{matrix}\\right]^{\\dagger}'
    # Issue 20959
    Mx = MatrixSymbol('M^x', 2, 2)
    assert latex(Adjoint(Mx)) == r'\left(M^{x}\right)^{\dagger}'
    # adjoint style
    assert latex(Adjoint(X), adjoint_style="star") == r'X^{\ast}'
    assert latex(Adjoint(X + Y), adjoint_style="hermitian") == r'\left(X + Y\right)^{\mathsf{H}}'
    assert latex(Adjoint(X) + Adjoint(Y), adjoint_style="dagger") == r'X^{\dagger} + Y^{\dagger}'
    assert latex(Adjoint(Y)*Adjoint(X)) == r'Y^{\dagger} X^{\dagger}'
    assert latex(Adjoint(X**2), adjoint_style="star") == r'\left(X^{2}\right)^{\ast}'
    assert latex(Adjoint(X)**2, adjoint_style="hermitian") == r'\left(X^{\mathsf{H}}\right)^{2}'


def test_Transpose():
    # Transpose prints as ^{T}; composite operands (sums, powers, Hadamard
    # powers, explicit matrices) are parenthesized or bracketed.
    from sympy.matrices import Transpose, MatPow, HadamardPower
    X = MatrixSymbol('X', 2, 2)
    Y = MatrixSymbol('Y', 2, 2)
    assert latex(Transpose(X)) == r'X^{T}'
    assert latex(Transpose(X + Y)) == r'\left(X + Y\right)^{T}'

    assert latex(Transpose(HadamardPower(X, 2))) == r'\left(X^{\circ {2}}\right)^{T}'
    assert latex(HadamardPower(Transpose(X), 2)) == r'\left(X^{T}\right)^{\circ {2}}'
    assert latex(Transpose(MatPow(X, 2))) == r'\left(X^{2}\right)^{T}'
    assert latex(MatPow(Transpose(X), 2)) == r'\left(X^{T}\right)^{2}'
    m = Matrix(((1, 2), (3, 4)))
    assert latex(Transpose(m)) == '\\left[\\begin{matrix}1 & 2\\\\3 & 4\\end{matrix}\\right]^{T}'
    assert latex(Transpose(m+X)) == \
        '\\left(\\left[\\begin{matrix}1 & 2\\\\3 & 4\\end{matrix}\\right] + X\\right)^{T}'
    from sympy.matrices import BlockMatrix, OneMatrix, ZeroMatrix
    assert latex(Transpose(BlockMatrix(((OneMatrix(2, 2), X),
                                        (m, ZeroMatrix(2, 2)))))) == \
        '\\left[\\begin{matrix}1 & X\\\\\\left[\\begin{matrix}1 & 2\\\\3 & 4\\end{matrix}\\right] & 0\\end{matrix}\\right]^{T}'
    # Issue 20959
    Mx = MatrixSymbol('M^x', 2, 2)
    assert latex(Transpose(Mx)) == r'\left(M^{x}\right)^{T}'


def test_Hadamard():
    # Hadamard (elementwise) product prints with \circ; Hadamard powers use
    # a ^{\circ n} superscript, with negative exponents parenthesized.
    from sympy.matrices import HadamardProduct, HadamardPower
    from sympy.matrices.expressions import MatAdd, MatMul, MatPow
    X = MatrixSymbol('X', 2, 2)
    Y = MatrixSymbol('Y', 2, 2)
    assert latex(HadamardProduct(X, Y*Y)) == r'X \circ Y^{2}'
    assert latex(HadamardProduct(X, Y)*Y) == r'\left(X \circ Y\right) Y'

    assert latex(HadamardPower(X, 2)) == r'X^{\circ {2}}'
    assert latex(HadamardPower(X, -1)) == r'X^{\circ \left({-1}\right)}'
    assert latex(HadamardPower(MatAdd(X, Y), 2)) == \
        r'\left(X + Y\right)^{\circ {2}}'
    assert latex(HadamardPower(MatMul(X, Y), 2)) == \
        r'\left(X Y\right)^{\circ {2}}'

    assert latex(HadamardPower(MatPow(X, -1), -1)) == \
        r'\left(X^{-1}\right)^{\circ \left({-1}\right)}'
    assert latex(MatPow(HadamardPower(X, -1), -1)) == \
        r'\left(X^{\circ \left({-1}\right)}\right)^{-1}'

    assert latex(HadamardPower(X, n+1)) == \
        r'X^{\circ \left({n + 1}\right)}'


def test_MatPow():
    # Matrix powers parenthesize non-atomic bases (products, sums, scaled
    # matrices, symbols containing superscripts).
    from sympy.matrices.expressions import MatPow
    X = MatrixSymbol('X', 2, 2)
    Y = MatrixSymbol('Y', 2, 2)
    assert latex(MatPow(X, 2)) == 'X^{2}'
    assert latex(MatPow(X*X, 2)) == '\\left(X^{2}\\right)^{2}'
    assert latex(MatPow(X*Y, 2)) == '\\left(X Y\\right)^{2}'
    assert latex(MatPow(X + Y, 2)) == '\\left(X + Y\\right)^{2}'
    assert latex(MatPow(X + X, 2)) == '\\left(2 X\\right)^{2}'
    # Issue 20959
    Mx = MatrixSymbol('M^x', 2, 2)
    assert latex(MatPow(Mx, 2)) == r'\left(M^{x}\right)^{2}'
def test_ElementwiseApplyFunction():
    # Elementwise function application prints as a mapped lambda applied
    # with the \circ subscript notation.
    X = MatrixSymbol('X', 2, 2)
    expr = (X.T*X).applyfunc(sin)
    assert latex(expr) == r"{\left( d \mapsto \sin{\left(d \right)} \right)}_{\circ}\left({X^{T} X}\right)"
    expr = X.applyfunc(Lambda(x, 1/x))
    assert latex(expr) == r'{\left( x \mapsto \frac{1}{x} \right)}_{\circ}\left({X}\right)'


def test_MatrixUnit():
    # Matrix units carry their (row, col) indices as subscripts.
    from sympy.matrices.expressions.special import MatrixUnit
    assert latex(MatrixUnit(3, 3, 1, 2), mat_symbol_style='plain') == 'E_{1,2}'
    assert latex(MatrixUnit(3, 3, 1, 2), mat_symbol_style='bold') == r'\mathcal{E}_{1,2}'


def test_ZeroMatrix():
    # Zero matrix: plain "0" or bold depending on mat_symbol_style.
    from sympy.matrices.expressions.special import ZeroMatrix
    assert latex(ZeroMatrix(1, 1), mat_symbol_style='plain') == r"0"
    assert latex(ZeroMatrix(1, 1), mat_symbol_style='bold') == r"\mathbf{0}"


def test_OneMatrix():
    # All-ones matrix: plain "1" or bold depending on mat_symbol_style.
    from sympy.matrices.expressions.special import OneMatrix
    assert latex(OneMatrix(3, 4), mat_symbol_style='plain') == r"1"
    assert latex(OneMatrix(3, 4), mat_symbol_style='bold') == r"\mathbf{1}"


def test_Identity():
    # Identity matrix: blackboard-bold I or bold I depending on style.
    from sympy.matrices.expressions.special import Identity
    assert latex(Identity(1), mat_symbol_style='plain') == r"\mathbb{I}"
    assert latex(Identity(1), mat_symbol_style='bold') == r"\mathbf{I}"


def test_latex_DFT_IDFT():
    # Discrete Fourier transform matrices print as text with a size subscript.
    from sympy.matrices.expressions.fourier import DFT, IDFT
    assert latex(DFT(13)) == r"\text{DFT}_{13}"
    assert latex(IDFT(x)) == r"\text{IDFT}_{x}"
def test_boolean_args_order():
    # N-ary boolean operators print their arguments in sorted order,
    # joined by the corresponding LaTeX connective.
    syms = symbols('a:f')

    expr = And(*syms)
    assert latex(expr) == r'a \wedge b \wedge c \wedge d \wedge e \wedge f'

    expr = Or(*syms)
    assert latex(expr) == r'a \vee b \vee c \vee d \vee e \vee f'

    expr = Equivalent(*syms)
    assert latex(expr) == \
        r'a \Leftrightarrow b \Leftrightarrow c \Leftrightarrow d \Leftrightarrow e \Leftrightarrow f'

    expr = Xor(*syms)
    assert latex(expr) == \
        r'a \veebar b \veebar c \veebar d \veebar e \veebar f'


def test_imaginary():
    # sqrt(-1) evaluates to the imaginary unit, printed as "i".
    i = sqrt(-1)
    assert latex(i) == r'i'


def test_builtins_without_args():
    # Bare (unapplied) functions print as their LaTeX operator names.
    assert latex(sin) == r'\sin'
    assert latex(cos) == r'\cos'
    assert latex(tan) == r'\tan'
    assert latex(log) == r'\log'
    assert latex(Ei) == r'\operatorname{Ei}'
    assert latex(zeta) == r'\zeta'


def test_latex_greek_functions():
    # bug because capital greeks that have roman equivalents should not use
    # \Alpha, \Beta, \Eta, etc.
    s = Function('Alpha')
    assert latex(s) == r'\mathrm{A}'
    assert latex(s(x)) == r'\mathrm{A}{\left(x \right)}'
    s = Function('Beta')
    assert latex(s) == r'\mathrm{B}'
    s = Function('Eta')
    assert latex(s) == r'\mathrm{H}'
    assert latex(s(x)) == r'\mathrm{H}{\left(x \right)}'

    # bug because sympy.core.numbers.Pi is special
    p = Function('Pi')
    # assert latex(p(x)) == r'\Pi{\left(x \right)}'
    assert latex(p) == r'\Pi'

    # bug because not all greeks are included
    c = Function('chi')
    assert latex(c(x)) == r'\chi{\left(x \right)}'
    assert latex(c) == r'\chi'


def test_translate():
    # translate() maps greek names (and modifier suffixes) to LaTeX macros.
    s = 'Alpha'
    assert translate(s) == r'\mathrm{A}'
    s = 'Beta'
    assert translate(s) == r'\mathrm{B}'
    s = 'Eta'
    assert translate(s) == r'\mathrm{H}'
    s = 'omicron'
    assert translate(s) == r'o'
    s = 'Pi'
    assert translate(s) == r'\Pi'
    s = 'pi'
    assert translate(s) == r'\pi'
    s = 'LamdaHatDOT'
    assert translate(s) == r'\dot{\hat{\Lambda}}'


def test_other_symbols():
    # Every name in `other_symbols` prints as the backslash-prefixed macro.
    from sympy.printing.latex import other_symbols
    for s in other_symbols:
        # r"" "\\" + s is just "\" + s (adjacent-literal concatenation).
        assert latex(symbols(s)) == r"" "\\" + s
def test_modifiers():
    # Test each modifier individually in the simplest case
    # (with funny capitalizations)
    assert latex(symbols("xMathring")) == r"\mathring{x}"
    assert latex(symbols("xCheck")) == r"\check{x}"
    assert latex(symbols("xBreve")) == r"\breve{x}"
    assert latex(symbols("xAcute")) == r"\acute{x}"
    assert latex(symbols("xGrave")) == r"\grave{x}"
    assert latex(symbols("xTilde")) == r"\tilde{x}"
    assert latex(symbols("xPrime")) == r"{x}'"
    assert latex(symbols("xddDDot")) == r"\ddddot{x}"
    assert latex(symbols("xDdDot")) == r"\dddot{x}"
    assert latex(symbols("xDDot")) == r"\ddot{x}"
    assert latex(symbols("xBold")) == r"\boldsymbol{x}"
    assert latex(symbols("xnOrM")) == r"\left\|{x}\right\|"
    assert latex(symbols("xAVG")) == r"\left\langle{x}\right\rangle"
    assert latex(symbols("xHat")) == r"\hat{x}"
    assert latex(symbols("xDot")) == r"\dot{x}"
    assert latex(symbols("xBar")) == r"\bar{x}"
    assert latex(symbols("xVec")) == r"\vec{x}"
    assert latex(symbols("xAbs")) == r"\left|{x}\right|"
    assert latex(symbols("xMag")) == r"\left|{x}\right|"
    assert latex(symbols("xPrM")) == r"{x}'"
    assert latex(symbols("xBM")) == r"\boldsymbol{x}"
    # Test strings that are *only* the names of modifiers
    assert latex(symbols("Mathring")) == r"Mathring"
    assert latex(symbols("Check")) == r"Check"
    assert latex(symbols("Breve")) == r"Breve"
    assert latex(symbols("Acute")) == r"Acute"
    assert latex(symbols("Grave")) == r"Grave"
    assert latex(symbols("Tilde")) == r"Tilde"
    assert latex(symbols("Prime")) == r"Prime"
    # "DDot" alone parses as base "D" with a "Dot" modifier.
    assert latex(symbols("DDot")) == r"\dot{D}"
    assert latex(symbols("Bold")) == r"Bold"
    assert latex(symbols("NORm")) == r"NORm"
    assert latex(symbols("AVG")) == r"AVG"
    assert latex(symbols("Hat")) == r"Hat"
    assert latex(symbols("Dot")) == r"Dot"
    assert latex(symbols("Bar")) == r"Bar"
    assert latex(symbols("Vec")) == r"Vec"
    assert latex(symbols("Abs")) == r"Abs"
    assert latex(symbols("Mag")) == r"Mag"
    assert latex(symbols("PrM")) == r"PrM"
    assert latex(symbols("BM")) == r"BM"
    assert latex(symbols("hbar")) == r"\hbar"
    # Check a few combinations
    assert latex(symbols("xvecdot")) == r"\dot{\vec{x}}"
    assert latex(symbols("xDotVec")) == r"\vec{\dot{x}}"
    assert latex(symbols("xHATNorm")) == r"\left\|{\hat{x}}\right\|"
    # Check a couple big, ugly combinations
    assert latex(symbols('xMathringBm_yCheckPRM__zbreveAbs')) == \
        r"\boldsymbol{\mathring{x}}^{\left|{\breve{z}}\right|}_{{\check{y}}'}"
    assert latex(symbols('alphadothat_nVECDOT__tTildePrime')) == \
        r"\hat{\dot{\alpha}}^{{\tilde{t}}'}_{\dot{\vec{n}}}"


def test_greek_symbols():
    # Every lowercase/uppercase greek name maps to its LaTeX macro;
    # capitals with roman equivalents print via \mathrm instead.
    assert latex(Symbol('alpha')) == r'\alpha'
    assert latex(Symbol('beta')) == r'\beta'
    assert latex(Symbol('gamma')) == r'\gamma'
    assert latex(Symbol('delta')) == r'\delta'
    assert latex(Symbol('epsilon')) == r'\epsilon'
    assert latex(Symbol('zeta')) == r'\zeta'
    assert latex(Symbol('eta')) == r'\eta'
    assert latex(Symbol('theta')) == r'\theta'
    assert latex(Symbol('iota')) == r'\iota'
    assert latex(Symbol('kappa')) == r'\kappa'
    assert latex(Symbol('lambda')) == r'\lambda'
    assert latex(Symbol('mu')) == r'\mu'
    assert latex(Symbol('nu')) == r'\nu'
    assert latex(Symbol('xi')) == r'\xi'
    assert latex(Symbol('omicron')) == r'o'
    assert latex(Symbol('pi')) == r'\pi'
    assert latex(Symbol('rho')) == r'\rho'
    assert latex(Symbol('sigma')) == r'\sigma'
    assert latex(Symbol('tau')) == r'\tau'
    assert latex(Symbol('upsilon')) == r'\upsilon'
    assert latex(Symbol('phi')) == r'\phi'
    assert latex(Symbol('chi')) == r'\chi'
    assert latex(Symbol('psi')) == r'\psi'
    assert latex(Symbol('omega')) == r'\omega'

    assert latex(Symbol('Alpha')) == r'\mathrm{A}'
    assert latex(Symbol('Beta')) == r'\mathrm{B}'
    assert latex(Symbol('Gamma')) == r'\Gamma'
    assert latex(Symbol('Delta')) == r'\Delta'
    assert latex(Symbol('Epsilon')) == r'\mathrm{E}'
    assert latex(Symbol('Zeta')) == r'\mathrm{Z}'
    assert latex(Symbol('Eta')) == r'\mathrm{H}'
    assert latex(Symbol('Theta')) == r'\Theta'
    assert latex(Symbol('Iota')) == r'\mathrm{I}'
    assert latex(Symbol('Kappa')) == r'\mathrm{K}'
    assert latex(Symbol('Lambda')) == r'\Lambda'
    assert latex(Symbol('Mu')) == r'\mathrm{M}'
    assert latex(Symbol('Nu')) == r'\mathrm{N}'
    assert latex(Symbol('Xi')) == r'\Xi'
    assert latex(Symbol('Omicron')) == r'\mathrm{O}'
    assert latex(Symbol('Pi')) == r'\Pi'
    assert latex(Symbol('Rho')) == r'\mathrm{P}'
    assert latex(Symbol('Sigma')) == r'\Sigma'
    assert latex(Symbol('Tau')) == r'\mathrm{T}'
    assert latex(Symbol('Upsilon')) == r'\Upsilon'
    assert latex(Symbol('Phi')) == r'\Phi'
    assert latex(Symbol('Chi')) == r'\mathrm{X}'
    assert latex(Symbol('Psi')) == r'\Psi'
    assert latex(Symbol('Omega')) == r'\Omega'

    assert latex(Symbol('varepsilon')) == r'\varepsilon'
    assert latex(Symbol('varkappa')) == r'\varkappa'
    assert latex(Symbol('varphi')) == r'\varphi'
    assert latex(Symbol('varpi')) == r'\varpi'
    assert latex(Symbol('varrho')) == r'\varrho'
    assert latex(Symbol('varsigma')) == r'\varsigma'
    assert latex(Symbol('vartheta')) == r'\vartheta'
def test_fancyset_symbols():
    # Standard number sets print in blackboard-bold.
    assert latex(S.Rationals) == r'\mathbb{Q}'
    assert latex(S.Naturals) == r'\mathbb{N}'
    assert latex(S.Naturals0) == r'\mathbb{N}_0'
    assert latex(S.Integers) == r'\mathbb{Z}'
    assert latex(S.Reals) == r'\mathbb{R}'
    assert latex(S.Complexes) == r'\mathbb{C}'


@XFAIL
def test_builtin_without_args_mismatched_names():
    # Known failure: class name and printed name disagree for transforms.
    assert latex(CosineTransform) == r'\mathcal{COS}'


def test_builtin_no_args():
    # Unapplied special functions print as their conventional symbols.
    assert latex(Chi) == r'\operatorname{Chi}'
    assert latex(beta) == r'\operatorname{B}'
    assert latex(gamma) == r'\Gamma'
    assert latex(KroneckerDelta) == r'\delta'
    assert latex(DiracDelta) == r'\delta'
    assert latex(lowergamma) == r'\gamma'


def test_issue_6853():
    # A user-defined Function named 'Pi' must not clash with the constant pi.
    p = Function('Pi')
    assert latex(p(x)) == r"\Pi{\left(x \right)}"


def test_Mul():
    # Unevaluated Mul keeps factors/parenthesization; evaluated Mul expands.
    e = Mul(-2, x + 1, evaluate=False)
    assert latex(e) == r'- 2 \left(x + 1\right)'
    e = Mul(2, x + 1, evaluate=False)
    assert latex(e) == r'2 \left(x + 1\right)'
    e = Mul(S.Half, x + 1, evaluate=False)
    assert latex(e) == r'\frac{x + 1}{2}'
    e = Mul(y, x + 1, evaluate=False)
    assert latex(e) == r'y \left(x + 1\right)'
    e = Mul(-y, x + 1, evaluate=False)
    assert latex(e) == r'- y \left(x + 1\right)'
    e = Mul(-2, x + 1)
    assert latex(e) == r'- 2 x - 2'
    e = Mul(2, x + 1)
    assert latex(e) == r'2 x + 2'


def test_Pow():
    # Powers: unevaluated, negative fractional exponents, and bases whose
    # names already contain superscripts get extra parentheses.
    e = Pow(2, 2, evaluate=False)
    assert latex(e) == r'2^{2}'
    assert latex(x**(Rational(-1, 3))) == r'\frac{1}{\sqrt[3]{x}}'
    x2 = Symbol(r'x^2')
    assert latex(x2**2) == r'\left(x^{2}\right)^{2}'
    # Issue 11011
    assert latex(S('1.453e4500')**x) == r'{1.453 \cdot 10^{4500}}^{x}'


def test_issue_7180():
    # Equivalent and its negation.
    assert latex(Equivalent(x, y)) == r"x \Leftrightarrow y"
    assert latex(Not(Equivalent(x, y))) == r"x \not\Leftrightarrow y"


def test_issue_8409():
    # Rational bases in powers keep their fraction form, parenthesized.
    assert latex(S.Half**n) == r"\left(\frac{1}{2}\right)^{n}"


def test_issue_8470():
    # Unevaluated product with a negated trailing factor.
    from sympy.parsing.sympy_parser import parse_expr
    e = parse_expr("-B*A", evaluate=False)
    assert latex(e) == r"A \left(- B\right)"


def test_issue_15439():
    # Substituting negated matrix factors keeps explicit parentheses.
    x = MatrixSymbol('x', 2, 2)
    y = MatrixSymbol('y', 2, 2)
    assert latex((x * y).subs(y, -y)) == r"x \left(- y\right)"
    assert latex((x * y).subs(y, -2*y)) == r"x \left(- 2 y\right)"
    assert latex((x * y).subs(x, -x)) == r"\left(- x\right) y"


def test_issue_2934():
    # Symbol names that are already LaTeX pass through untouched.
    assert latex(Symbol(r'\frac{a_1}{b_1}')) == r'\frac{a_1}{b_1}'


def test_issue_10489():
    # Braced subscripts in symbol names survive round-trip printing.
    latexSymbolWithBrace = r'C_{x_{0}}'
    s = Symbol(latexSymbolWithBrace)
    assert latex(s) == latexSymbolWithBrace
    assert latex(cos(s)) == r'\cos{\left(C_{x_{0}} \right)}'


def test_issue_12886():
    # Symbols with double-underscore superscripts need parentheses when
    # raised to a power.
    m__1, l__1 = symbols('m__1, l__1')
    assert latex(m__1**2 + l__1**2) == \
        r'\left(l^{1}\right)^{2} + \left(m^{1}\right)^{2}'


def test_issue_13559():
    # Unevaluated division 5/1 keeps its fraction form.
    from sympy.parsing.sympy_parser import parse_expr
    expr = parse_expr('5/1', evaluate=False)
    assert latex(expr) == r"\frac{5}{1}"


def test_issue_13651():
    # -(a + b) inside a sum prints as a subtracted group.
    expr = c + Mul(-1, a + b, evaluate=False)
    assert latex(expr) == r"c - \left(a + b\right)"


def test_latex_UnevaluatedExpr():
    # UnevaluatedExpr prints like its argument but blocks simplification
    # (e.g. x * 1/x stays visible).
    x = symbols("x")
    he = UnevaluatedExpr(1/x)
    assert latex(he) == latex(1/x) == r"\frac{1}{x}"
    assert latex(he**2) == r"\left(\frac{1}{x}\right)^{2}"
    assert latex(he + 1) == r"1 + \frac{1}{x}"
    assert latex(x*he) == r"x \frac{1}{x}"
def test_MatrixElement_printing():
    # test cases for issue #11821
    A = MatrixSymbol("A", 1, 3)
    B = MatrixSymbol("B", 1, 3)
    C = MatrixSymbol("C", 1, 3)

    assert latex(A[0, 0]) == r"{A}_{0,0}"
    assert latex(3 * A[0, 0]) == r"3 {A}_{0,0}"

    F = C[0, 0].subs(C, A - B)
    assert latex(F) == r"{\left(A - B\right)}_{0,0}"

    i, j, k = symbols("i j k")
    M = MatrixSymbol("M", k, k)
    N = MatrixSymbol("N", k, k)
    # Elements of a matrix product expand into an explicit summation.
    assert latex((M*N)[i, j]) == \
        r'\sum_{i_{1}=0}^{k - 1} {M}_{i,i_{1}} {N}_{i_{1},j}'

    X_a = MatrixSymbol('X_a', 3, 3)
    assert latex(X_a[0, 0]) == r"{X_{a}}_{0,0}"


def test_MatrixSymbol_printing():
    # test cases for issue #14237
    A = MatrixSymbol("A", 3, 3)
    B = MatrixSymbol("B", 3, 3)
    C = MatrixSymbol("C", 3, 3)

    assert latex(-A) == r"- A"
    assert latex(A - A*B - B) == r"A - A B - B"
    assert latex(-A*B - A*B*C - B) == r"- A B - A B C - B"


def test_DotProduct_printing():
    # Dot products use \cdot; a scalar-multiplied dot product is grouped.
    X = MatrixSymbol('X', 3, 1)
    Y = MatrixSymbol('Y', 3, 1)
    a = Symbol('a')

    assert latex(DotProduct(X, Y)) == r"X \cdot Y"
    assert latex(DotProduct(a * X, Y)) == r"a X \cdot Y"
    assert latex(a * DotProduct(X, Y)) == r"a \left(X \cdot Y\right)"


def test_KroneckerProduct_printing():
    # Kronecker product prints with \otimes.
    A = MatrixSymbol('A', 3, 3)
    B = MatrixSymbol('B', 2, 2)
    assert latex(KroneckerProduct(A, B)) == r'A \otimes B'
def test_Series_printing():
    # SISO Series: cascaded transfer functions print as a product of
    # parenthesized fractions; MIMO series/parallel use the _\tau-tagged
    # matrix notation with \cdot between factors.
    tf1 = TransferFunction(x*y**2 - z, y**3 - t**3, y)
    tf2 = TransferFunction(x - y, x + y, y)
    tf3 = TransferFunction(t*x**2 - t**w*x + w, t - y, y)

    assert latex(Series(tf1, tf2)) == \
        r'\left(\frac{x y^{2} - z}{- t^{3} + y^{3}}\right) \left(\frac{x - y}{x + y}\right)'
    assert latex(Series(tf1, tf2, tf3)) == \
        r'\left(\frac{x y^{2} - z}{- t^{3} + y^{3}}\right) \left(\frac{x - y}{x + y}\right) \left(\frac{t x^{2} - t^{w} x + w}{t - y}\right)'
    assert latex(Series(-tf2, tf1)) == \
        r'\left(\frac{- x + y}{x + y}\right) \left(\frac{x y^{2} - z}{- t^{3} + y^{3}}\right)'

    M_1 = Matrix([[5/s], [5/(2*s)]])
    T_1 = TransferFunctionMatrix.from_Matrix(M_1, s)
    M_2 = Matrix([[5, 6*s**3]])
    T_2 = TransferFunctionMatrix.from_Matrix(M_2, s)
    # Brackets
    assert latex(T_1*(T_2 + T_2)) == \
        r'\left[\begin{matrix}\frac{5}{s}\\\frac{5}{2 s}\end{matrix}\right]_\tau\cdot\left(\left[\begin{matrix}\frac{5}{1} &' \
        r' \frac{6 s^{3}}{1}\end{matrix}\right]_\tau + \left[\begin{matrix}\frac{5}{1} & \frac{6 s^{3}}{1}\end{matrix}\right]_\tau\right)' \
        == latex(MIMOSeries(MIMOParallel(T_2, T_2), T_1))
    # No Brackets
    M_3 = Matrix([[5, 6], [6, 5/s]])
    T_3 = TransferFunctionMatrix.from_Matrix(M_3, s)
    assert latex(T_1*T_2 + T_3) == r'\left[\begin{matrix}\frac{5}{s}\\\frac{5}{2 s}\end{matrix}\right]_\tau\cdot\left[\begin{matrix}' \
        r'\frac{5}{1} & \frac{6 s^{3}}{1}\end{matrix}\right]_\tau + \left[\begin{matrix}\frac{5}{1} & \frac{6}{1}\\\frac{6}{1} & ' \
        r'\frac{5}{s}\end{matrix}\right]_\tau' == latex(MIMOParallel(MIMOSeries(T_2, T_1), T_3))


def test_TransferFunction_printing():
    # Continuous transfer functions print as plain fractions.
    tf1 = TransferFunction(x - 1, x + 1, x)
    assert latex(tf1) == r"\frac{x - 1}{x + 1}"
    tf2 = TransferFunction(x + 1, 2 - y, x)
    assert latex(tf2) == r"\frac{x + 1}{2 - y}"
    tf3 = TransferFunction(y, y**2 + 2*y + 3, y)
    assert latex(tf3) == r"\frac{y}{y^{2} + 2 y + 3}"


def test_DiscreteTransferFunction_printing():
    # Discrete transfer functions append the sampling time in a text tag.
    tf1 = DiscreteTransferFunction(x - 1, x + 1, x)
    assert latex(tf1) == r"\frac{x - 1}{x + 1} \text{ [st: } {1} \text{]}"
    tf2 = DiscreteTransferFunction(x + 1, 2 - y, x, Symbol('T'))
    assert latex(tf2) == r"\frac{x + 1}{2 - y} \text{ [st: } {T} \text{]}"
    tf3 = DiscreteTransferFunction(y, y**2 + 2*y + 3, y, 0.1)
    assert latex(tf3) == \
        r"\frac{y}{y^{2} + 2 y + 3} \text{ [st: } {0.1} \text{]}"


def test_Parallel_printing():
    # SISO Parallel prints as a sum of fractions; MIMO parallel flattens
    # regardless of association (chained equality checks all groupings).
    tf1 = TransferFunction(x*y**2 - z, y**3 - t**3, y)
    tf2 = TransferFunction(x - y, x + y, y)

    assert latex(Parallel(tf1, tf2)) == \
        r'\frac{x y^{2} - z}{- t^{3} + y^{3}} + \frac{x - y}{x + y}'
    assert latex(Parallel(-tf2, tf1)) == \
        r'\frac{- x + y}{x + y} + \frac{x y^{2} - z}{- t^{3} + y^{3}}'

    M_1 = Matrix([[5, 6], [6, 5/s]])
    T_1 = TransferFunctionMatrix.from_Matrix(M_1, s)
    M_2 = Matrix([[5/s, 6], [6, 5/(s - 1)]])
    T_2 = TransferFunctionMatrix.from_Matrix(M_2, s)
    M_3 = Matrix([[6, 5/(s*(s - 1))], [5, 6]])
    T_3 = TransferFunctionMatrix.from_Matrix(M_3, s)
    assert latex(T_1 + T_2 + T_3) == r'\left[\begin{matrix}\frac{5}{1} & \frac{6}{1}\\\frac{6}{1} & \frac{5}{s}\end{matrix}\right]' \
        r'_\tau + \left[\begin{matrix}\frac{5}{s} & \frac{6}{1}\\\frac{6}{1} & \frac{5}{s - 1}\end{matrix}\right]_\tau + \left[\begin{matrix}' \
        r'\frac{6}{1} & \frac{5}{s \left(s - 1\right)}\\\frac{5}{1} & \frac{6}{1}\end{matrix}\right]_\tau' \
        == latex(MIMOParallel(T_1, T_2, T_3)) == latex(MIMOParallel(T_1, MIMOParallel(T_2, T_3))) == latex(MIMOParallel(MIMOParallel(T_1, T_2), T_3))


def test_TransferFunctionMatrix_printing():
    # Continuous TFMs use the _\tau-tagged bracket matrix; discrete TFMs
    # additionally annotate the sampling time under the matrix.
    tf1 = TransferFunction(p, p + x, p)
    tf2 = TransferFunction(-s + p, p + s, p)
    tf3 = TransferFunction(p, y**2 + 2*y + 3, p)
    assert latex(TransferFunctionMatrix([[tf1], [tf2]])) == \
        r'\left[\begin{matrix}\frac{p}{p + x}\\\frac{p - s}{p + s}\end{matrix}\right]_\tau'
    assert latex(TransferFunctionMatrix([[tf1, tf2], [tf3, -tf1]])) == \
        r'\left[\begin{matrix}\frac{p}{p + x} & \frac{p - s}{p + s}\\\frac{p}{y^{2} + 2 y + 3} & \frac{\left(-1\right) p}{p + x}\end{matrix}\right]_\tau'
    dtf1 = DiscreteTransferFunction(p, p + x, p, 0.1)
    dtf2 = DiscreteTransferFunction(-s + p, p + s, p, 0.1)
    assert latex(TransferFunctionMatrix([[dtf1], [dtf2]])) == \
        r'\underset{[st:\ {0.100000000000000}]}{\left[\begin{matrix}\frac{p}{p + x}\\\frac{p - s}{p + s}\end{matrix}\right]_k}'


def test_Feedback_printing():
    # Feedback prints as plant / (1 ± plant*controller); the sign of the
    # denominator follows the feedback sign convention.
    tf1 = TransferFunction(p, p + x, p)
    tf2 = TransferFunction(-s + p, p + s, p)
    # Negative Feedback (Default)
    assert latex(Feedback(tf1, tf2)) == \
        r'\frac{\frac{p}{p + x}}{\frac{1}{1} + \left(\frac{p}{p + x}\right) \left(\frac{p - s}{p + s}\right)}'
    assert latex(Feedback(tf1*tf2, TransferFunction(1, 1, p))) == \
        r'\frac{\left(\frac{p}{p + x}\right) \left(\frac{p - s}{p + s}\right)}{\frac{1}{1} + \left(\frac{p}{p + x}\right) \left(\frac{p - s}{p + s}\right)}'
    # Positive Feedback
    assert latex(Feedback(tf1, tf2, 1)) == \
        r'\frac{\frac{p}{p + x}}{\frac{1}{1} - \left(\frac{p}{p + x}\right) \left(\frac{p - s}{p + s}\right)}'
    assert latex(Feedback(tf1*tf2, sign=1)) == \
        r'\frac{\left(\frac{p}{p + x}\right) \left(\frac{p - s}{p + s}\right)}{\frac{1}{1} - \left(\frac{p}{p + x}\right) \left(\frac{p - s}{p + s}\right)}'


def test_MIMOFeedback_printing():
    # MIMO feedback prints as (I ± G*H)^{-1} · G with the _\tau matrices.
    tf1 = TransferFunction(1, s, s)
    tf2 = TransferFunction(s, s**2 - 1, s)
    tf3 = TransferFunction(s, s - 1, s)
    tf4 = TransferFunction(s**2, s**2 - 1, s)

    tfm_1 = TransferFunctionMatrix([[tf1, tf2], [tf3, tf4]])
    tfm_2 = TransferFunctionMatrix([[tf4, tf3], [tf2, tf1]])

    # Negative Feedback (Default)
    assert latex(MIMOFeedback(tfm_1, tfm_2)) == \
        r'\left(I_{\tau} + \left[\begin{matrix}\frac{1}{s} & \frac{s}{s^{2} - 1}\\\frac{s}{s - 1} & \frac{s^{2}}{s^{2} - 1}\end{matrix}\right]_\tau\cdot\left[' \
        r'\begin{matrix}\frac{s^{2}}{s^{2} - 1} & \frac{s}{s - 1}\\\frac{s}{s^{2} - 1} & \frac{1}{s}\end{matrix}\right]_\tau\right)^{-1} \cdot \left[\begin{matrix}' \
        r'\frac{1}{s} & \frac{s}{s^{2} - 1}\\\frac{s}{s - 1} & \frac{s^{2}}{s^{2} - 1}\end{matrix}\right]_\tau'
    # Positive Feedback
    assert latex(MIMOFeedback(tfm_1*tfm_2, tfm_1, 1)) == \
        r'\left(I_{\tau} - \left[\begin{matrix}\frac{1}{s} & \frac{s}{s^{2} - 1}\\\frac{s}{s - 1} & \frac{s^{2}}{s^{2} - 1}\end{matrix}\right]_\tau\cdot\left' \
        r'[\begin{matrix}\frac{s^{2}}{s^{2} - 1} & \frac{s}{s - 1}\\\frac{s}{s^{2} - 1} & \frac{1}{s}\end{matrix}\right]_\tau\cdot\left[\begin{matrix}\frac{1}{s} & \frac{s}{s^{2} - 1}' \
        r'\\\frac{s}{s - 1} & \frac{s^{2}}{s^{2} - 1}\end{matrix}\right]_\tau\right)^{-1} \cdot \left[\begin{matrix}\frac{1}{s} & \frac{s}{s^{2} - 1}' \
        r'\\\frac{s}{s - 1} & \frac{s^{2}}{s^{2} - 1}\end{matrix}\right]_\tau\cdot\left[\begin{matrix}\frac{s^{2}}{s^{2} - 1} & \frac{s}{s - 1}\\\frac{s}{s^{2} - 1}' \
        r' & \frac{1}{s}\end{matrix}\right]_\tau'
def test_Quaternion_latex_printing():
    # Quaternions print as a + b i + c j + d k, with compound components
    # parenthesized.
    q = Quaternion(x, y, z, t)
    assert latex(q) == r"x + y i + z j + t k"
    q = Quaternion(x, y, z, x*t)
    assert latex(q) == r"x + y i + z j + t x k"
    q = Quaternion(x, y, z, x + t)
    assert latex(q) == r"x + y i + z j + \left(t + x\right) k"


def test_TensorProduct_printing():
    # Tensor product of matrix symbols prints with \otimes.
    from sympy.tensor.functions import TensorProduct
    A = MatrixSymbol("A", 3, 3)
    B = MatrixSymbol("B", 3, 3)
    assert latex(TensorProduct(A, B)) == r"A \otimes B"


def test_WedgeProduct_printing():
    # Wedge product of differential forms prints with \wedge.
    from sympy.diffgeom.rn import R2
    from sympy.diffgeom import WedgeProduct
    wp = WedgeProduct(R2.dx, R2.dy)
    assert latex(wp) == r"\operatorname{d}x \wedge \operatorname{d}y"


def test_issue_9216():
    # Powers of 1 stay unevaluated in the printed form except when the
    # exponent is a plain negative integer on a base != 1.
    expr_1 = Pow(1, -1, evaluate=False)
    assert latex(expr_1) == r"1^{-1}"

    expr_2 = Pow(1, Pow(1, -1, evaluate=False), evaluate=False)
    assert latex(expr_2) == r"1^{1^{-1}}"

    expr_3 = Pow(3, -2, evaluate=False)
    assert latex(expr_3) == r"\frac{1}{9}"

    expr_4 = Pow(1, -2, evaluate=False)
    assert latex(expr_4) == r"1^{-2}"
def test_latex_printer_tensor():
    # Tensor printing: contravariant indices render as {}^{...}, covariant
    # as {}_{...}; contracted dummies get auto-generated names (L_0, ...).
    from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, tensor_heads
    L = TensorIndexType("L")
    i, j, k, l = tensor_indices("i j k l", L)
    i0 = tensor_indices("i_0", L)
    A, B, C, D = tensor_heads("A B C D", [L])
    H = TensorHead("H", [L, L])
    K = TensorHead("K", [L, L, L, L])

    assert latex(i) == r"{}^{i}"
    assert latex(-i) == r"{}_{i}"

    expr = A(i)
    assert latex(expr) == r"A{}^{i}"

    expr = A(i0)
    assert latex(expr) == r"A{}^{i_{0}}"

    expr = A(-i)
    assert latex(expr) == r"A{}_{i}"

    expr = -3*A(i)
    assert latex(expr) == r"-3A{}^{i}"

    expr = K(i, j, -k, -i0)
    assert latex(expr) == r"K{}^{ij}{}_{ki_{0}}"

    expr = K(i, -j, -k, i0)
    assert latex(expr) == r"K{}^{i}{}_{jk}{}^{i_{0}}"

    expr = K(i, -j, k, -i0)
    assert latex(expr) == r"K{}^{i}{}_{j}{}^{k}{}_{i_{0}}"

    expr = H(i, -j)
    assert latex(expr) == r"H{}^{i}{}_{j}"

    expr = H(i, j)
    assert latex(expr) == r"H{}^{ij}"

    expr = H(-i, -j)
    assert latex(expr) == r"H{}_{ij}"

    expr = (1+x)*A(i)
    assert latex(expr) == r"\left(x + 1\right)A{}^{i}"

    expr = H(i, -i)
    assert latex(expr) == r"H{}^{L_{0}}{}_{L_{0}}"

    expr = H(i, -j)*A(j)*B(k)
    assert latex(expr) == r"H{}^{i}{}_{L_{0}}A{}^{L_{0}}B{}^{k}"

    expr = A(i) + 3*B(i)
    assert latex(expr) == r"3B{}^{i} + A{}^{i}"

    # Test ``TensorElement``:
    from sympy.tensor.tensor import TensorElement

    expr = TensorElement(K(i, j, k, l), {i: 3, k: 2})
    assert latex(expr) == r'K{}^{i=3,j,k=2,l}'

    expr = TensorElement(K(i, j, k, l), {i: 3})
    assert latex(expr) == r'K{}^{i=3,jkl}'

    expr = TensorElement(K(i, -j, k, l), {i: 3, k: 2})
    assert latex(expr) == r'K{}^{i=3}{}_{j}{}^{k=2,l}'

    expr = TensorElement(K(i, -j, k, -l), {i: 3, k: 2})
    assert latex(expr) == r'K{}^{i=3}{}_{j}{}^{k=2}{}_{l}'

    expr = TensorElement(K(i, j, -k, -l), {i: 3, -k: 2})
    assert latex(expr) == r'K{}^{i=3,j}{}_{k=2,l}'

    expr = TensorElement(K(i, j, -k, -l), {i: 3})
    assert latex(expr) == r'K{}^{i=3,j}{}_{kl}'

    expr = PartialDerivative(A(i), A(i))
    assert latex(expr) == r"\frac{\partial}{\partial {A{}^{L_{0}}}}{A{}^{L_{0}}}"

    expr = PartialDerivative(A(-i), A(-j))
    assert latex(expr) == r"\frac{\partial}{\partial {A{}_{j}}}{A{}_{i}}"

    expr = PartialDerivative(K(i, j, -k, -l), A(m), A(-n))
    assert latex(expr) == r"\frac{\partial^{2}}{\partial {A{}^{m}} \partial {A{}_{n}}}{K{}^{ij}{}_{kl}}"

    expr = PartialDerivative(B(-i) + A(-i), A(-j), A(-n))
    assert latex(expr) == r"\frac{\partial^{2}}{\partial {A{}_{j}} \partial {A{}_{n}}}{\left(A{}_{i} + B{}_{i}\right)}"

    expr = PartialDerivative(3*A(-i), A(-j), A(-n))
    assert latex(expr) == r"\frac{\partial^{2}}{\partial {A{}_{j}} \partial {A{}_{n}}}{\left(3A{}_{i}\right)}"
def test_multiline_latex():
    # multiline_latex() splits a long sum across lines; the terms-per-line
    # count, environment name, and dot continuation are all configurable.
    a, b, c, d, e, f = symbols('a b c d e f')

    expr = -a + 2*b -3*c +4*d -5*e
    expected = r"\begin{eqnarray}" + "\n"\
        r"f & = &- a \nonumber\\" + "\n"\
        r"& & + 2 b \nonumber\\" + "\n"\
        r"& & - 3 c \nonumber\\" + "\n"\
        r"& & + 4 d \nonumber\\" + "\n"\
        r"& & - 5 e " + "\n"\
        r"\end{eqnarray}"
    assert multiline_latex(f, expr, environment="eqnarray") == expected

    expected2 = r'\begin{eqnarray}' + '\n'\
        r'f & = &- a + 2 b \nonumber\\' + '\n'\
        r'& & - 3 c + 4 d \nonumber\\' + '\n'\
        r'& & - 5 e ' + '\n'\
        r'\end{eqnarray}'
    assert multiline_latex(f, expr, 2, environment="eqnarray") == expected2

    expected3 = r'\begin{eqnarray}' + '\n'\
        r'f & = &- a + 2 b - 3 c \nonumber\\'+ '\n'\
        r'& & + 4 d - 5 e ' + '\n'\
        r'\end{eqnarray}'
    assert multiline_latex(f, expr, 3, environment="eqnarray") == expected3

    expected3dots = r'\begin{eqnarray}' + '\n'\
        r'f & = &- a + 2 b - 3 c \dots\nonumber\\'+ '\n'\
        r'& & + 4 d - 5 e ' + '\n'\
        r'\end{eqnarray}'
    assert multiline_latex(f, expr, 3, environment="eqnarray", use_dots=True) == expected3dots

    expected3align = r'\begin{align*}' + '\n'\
        r'f = &- a + 2 b - 3 c \\'+ '\n'\
        r'& + 4 d - 5 e ' + '\n'\
        r'\end{align*}'
    # align* is the default environment.
    assert multiline_latex(f, expr, 3) == expected3align
    assert multiline_latex(f, expr, 3, environment='align*') == expected3align

    expected2ieee = r'\begin{IEEEeqnarray}{rCl}' + '\n'\
        r'f & = &- a + 2 b \nonumber\\' + '\n'\
        r'& & - 3 c + 4 d \nonumber\\' + '\n'\
        r'& & - 5 e ' + '\n'\
        r'\end{IEEEeqnarray}'
    assert multiline_latex(f, expr, 2, environment="IEEEeqnarray") == expected2ieee

    # Unknown environments are rejected.
    raises(ValueError, lambda: multiline_latex(f, expr, environment="foo"))


def test_issue_15353():
    a, x = symbols('a x')
    # Obtained from nonlinsolve([(sin(a*x)),cos(a*x)],[x,a])
    sol = ConditionSet(
        Tuple(x, a), Eq(sin(a*x), 0) & Eq(cos(a*x), 0), S.Complexes**2)
    assert latex(sol) == \
        r'\left\{\left( x, \ a\right)\; \middle|\; \left( x, \ a\right) \in ' \
        r'\mathbb{C}^{2} \wedge \sin{\left(a x \right)} = 0 \wedge ' \
        r'\cos{\left(a x \right)} = 0 \right\}'
def test_latex_symbolic_probability():
    # Unevaluated probability operators: E[.], Var(.), P(.), Cov(., .).
    mu = symbols("mu")
    sigma = symbols("sigma", positive=True)
    X = Normal("X", mu, sigma)
    assert latex(Expectation(X)) == r'\operatorname{E}\left[X\right]'
    assert latex(Variance(X)) == r'\operatorname{Var}\left(X\right)'
    assert latex(Probability(X > 0)) == r'\operatorname{P}\left(X > 0\right)'
    Y = Normal("Y", mu, sigma)
    assert latex(Covariance(X, Y)) == r'\operatorname{Cov}\left(X, Y\right)'


def test_trace():
    # Issue 15303
    from sympy.matrices.expressions.trace import trace
    A = MatrixSymbol("A", 2, 2)
    assert latex(trace(A)) == r"\operatorname{tr}\left(A \right)"
    assert latex(trace(A**2)) == r"\operatorname{tr}\left(A^{2} \right)"


def test_print_basic():
    # Issue 15303
    from sympy.core.basic import Basic
    from sympy.core.expr import Expr

    # dummy class for testing printing where the function is not
    # implemented in latex.py
    class UnimplementedExpr(Expr):
        def __new__(cls, e):
            return Basic.__new__(cls, e)

    # dummy function for testing
    def unimplemented_expr(expr):
        return UnimplementedExpr(expr).doit()

    # override class name to use superscript / subscript
    def unimplemented_expr_sup_sub(expr):
        result = UnimplementedExpr(expr)
        result.__class__.__name__ = 'UnimplementedExpr_x^1'
        return result

    assert latex(unimplemented_expr(x)) == r'\operatorname{UnimplementedExpr}\left(x\right)'
    assert latex(unimplemented_expr(x**2)) == \
        r'\operatorname{UnimplementedExpr}\left(x^{2}\right)'
    assert latex(unimplemented_expr_sup_sub(x)) == \
        r'\operatorname{UnimplementedExpr^{1}_{x}}\left(x\right)'


def test_MatrixSymbol_bold():
    # Issue #15871
    from sympy.matrices.expressions.trace import trace
    A = MatrixSymbol("A", 2, 2)
    assert latex(trace(A), mat_symbol_style='bold') == \
        r"\operatorname{tr}\left(\mathbf{A} \right)"
    assert latex(trace(A), mat_symbol_style='plain') == \
        r"\operatorname{tr}\left(A \right)"

    A = MatrixSymbol("A", 3, 3)
    B = MatrixSymbol("B", 3, 3)
    C = MatrixSymbol("C", 3, 3)

    assert latex(-A, mat_symbol_style='bold') == r"- \mathbf{A}"
    assert latex(A - A*B - B, mat_symbol_style='bold') == \
        r"\mathbf{A} - \mathbf{A} \mathbf{B} - \mathbf{B}"
    assert latex(-A*B - A*B*C - B, mat_symbol_style='bold') == \
        r"- \mathbf{A} \mathbf{B} - \mathbf{A} \mathbf{B} \mathbf{C} - \mathbf{B}"

    A_k = MatrixSymbol("A_k", 3, 3)
    assert latex(A_k, mat_symbol_style='bold') == r"\mathbf{A}_{k}"

    # A symbol name that is already a LaTeX macro still gets bolded.
    A = MatrixSymbol(r"\nabla_k", 3, 3)
    assert latex(A, mat_symbol_style='bold') == r"\mathbf{\nabla}_{k}"
def test_AppliedPermutation():
p = Permutation(0, 1, 2)
x = Symbol('x')
assert latex(AppliedPermutation(p, x)) == \
r'\sigma_{\left( 0\; 1\; 2\right)}(x)'
def test_PermutationMatrix():
p = Permutation(0, 1, 2)
assert latex(PermutationMatrix(p)) == r'P_{\left( 0\; 1\; 2\right)}'
p = Permutation(0, 3)(1, 2)
assert latex(PermutationMatrix(p)) == \
r'P_{\left( 0\; 3\right)\left( 1\; 2\right)}'
def test_issue_21758():
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.series.fourier import FourierSeries
x = Symbol('x')
k, n = symbols('k n')
fo = FourierSeries(x, (x, -pi, pi), (0, SeqFormula(0, (k, 1, oo)), SeqFormula(
Piecewise((-2*pi*cos(n*pi)/n + 2*sin(n*pi)/n**2, (n > -oo) & (n < oo) & Ne(n, 0)),
(0, True))*sin(n*x)/pi, (n, 1, oo))))
assert latex(piecewise_fold(fo)) == '\\begin{cases} 2 \\sin{\\left(x \\right)}' \
' - \\sin{\\left(2 x \\right)} + \\frac{2 \\sin{\\left(3 x \\right)}}{3} +' \
' \\ldots & \\text{for}\\: n > -\\infty \\wedge n < \\infty \\wedge ' \
'n \\neq 0 \\\\0 & \\text{otherwise} \\end{cases}'
assert latex(FourierSeries(x, (x, -pi, pi), (0, SeqFormula(0, (k, 1, oo)),
SeqFormula(0, (n, 1, oo))))) == '0'
def test_imaginary_unit():
assert latex(1 + I) == r'1 + i'
assert latex(1 + I, imaginary_unit='i') == r'1 + i'
assert latex(1 + I, imaginary_unit='j') == r'1 + j'
assert latex(1 + I, imaginary_unit='foo') == r'1 + foo'
assert latex(I, imaginary_unit="ti") == r'\text{i}'
assert latex(I, imaginary_unit="tj") == r'\text{j}'
def test_text_re_im():
assert latex(im(x), gothic_re_im=True) == r'\Im{\left(x\right)}'
assert latex(im(x), gothic_re_im=False) == r'\operatorname{im}{\left(x\right)}'
assert latex(re(x), gothic_re_im=True) == r'\Re{\left(x\right)}'
assert latex(re(x), gothic_re_im=False) == r'\operatorname{re}{\left(x\right)}'
def test_latex_diffgeom():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField, Differential
from sympy.diffgeom.rn import R2
x,y = symbols('x y', real=True)
m = Manifold('M', 2)
assert latex(m) == r'\text{M}'
p = Patch('P', m)
assert latex(p) == r'\text{P}_{\text{M}}'
rect = CoordSystem('rect', p, [x, y])
assert latex(rect) == r'\text{rect}^{\text{P}}_{\text{M}}'
b = BaseScalarField(rect, 0)
assert latex(b) == r'\mathbf{x}'
g = Function('g')
s_field = g(R2.x, R2.y)
assert latex(Differential(s_field)) == \
r'\operatorname{d}\left(g{\left(\mathbf{x},\mathbf{y} \right)}\right)'
def test_unit_printing():
assert latex(5*meter) == r'5 \text{m}'
assert latex(3*gibibyte) == r'3 \text{gibibyte}'
assert latex(4*microgram/second) == r'\frac{4 \mu\text{g}}{\text{s}}'
assert latex(4*micro*gram/second) == r'\frac{4 \mu \text{g}}{\text{s}}'
assert latex(5*milli*meter) == r'5 \text{m} \text{m}'
assert latex(milli) == r'\text{m}'
def test_issue_17092():
x_star = Symbol('x^*')
assert latex(Derivative(x_star, x_star,2)) == r'\frac{d^{2}}{d \left(x^{*}\right)^{2}} x^{*}'
def test_latex_decimal_separator():
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
# comma decimal_separator
assert(latex([1, 2.3, 4.5], decimal_separator='comma') == r'\left[ 1; \ 2{,}3; \ 4{,}5\right]')
assert(latex(FiniteSet(1, 2.3, 4.5), decimal_separator='comma') == r'\left\{1; 2{,}3; 4{,}5\right\}')
assert(latex((1, 2.3, 4.6), decimal_separator = 'comma') == r'\left( 1; \ 2{,}3; \ 4{,}6\right)')
assert(latex((1,), decimal_separator='comma') == r'\left( 1;\right)')
# period decimal_separator
assert(latex([1, 2.3, 4.5], decimal_separator='period') == r'\left[ 1, \ 2.3, \ 4.5\right]' )
assert(latex(FiniteSet(1, 2.3, 4.5), decimal_separator='period') == r'\left\{1, 2.3, 4.5\right\}')
assert(latex((1, 2.3, 4.6), decimal_separator = 'period') == r'\left( 1, \ 2.3, \ 4.6\right)')
assert(latex((1,), decimal_separator='period') == r'\left( 1,\right)')
# default decimal_separator
assert(latex([1, 2.3, 4.5]) == r'\left[ 1, \ 2.3, \ 4.5\right]')
assert(latex(FiniteSet(1, 2.3, 4.5)) == r'\left\{1, 2.3, 4.5\right\}')
assert(latex((1, 2.3, 4.6)) == r'\left( 1, \ 2.3, \ 4.6\right)')
assert(latex((1,)) == r'\left( 1,\right)')
assert(latex(Mul(3.4,5.3), decimal_separator = 'comma') == r'18{,}02')
assert(latex(3.4*5.3, decimal_separator = 'comma') == r'18{,}02')
x = symbols('x')
y = symbols('y')
z = symbols('z')
assert(latex(x*5.3 + 2**y**3.4 + 4.5 + z, decimal_separator = 'comma') == r'2^{y^{3{,}4}} + 5{,}3 x + z + 4{,}5')
assert(latex(0.987, decimal_separator='comma') == r'0{,}987')
assert(latex(S(0.987), decimal_separator='comma') == r'0{,}987')
assert(latex(.3, decimal_separator='comma') == r'0{,}3')
assert(latex(S(.3), decimal_separator='comma') == r'0{,}3')
assert(latex(5.8*10**(-7), decimal_separator='comma') == r'5{,}8 \cdot 10^{-7}')
assert(latex(S(5.7)*10**(-7), decimal_separator='comma') == r'5{,}7 \cdot 10^{-7}')
assert(latex(S(5.7*10**(-7)), decimal_separator='comma') == r'5{,}7 \cdot 10^{-7}')
x = symbols('x')
assert(latex(1.2*x+3.4, decimal_separator='comma') == r'1{,}2 x + 3{,}4')
assert(latex(FiniteSet(1, 2.3, 4.5), decimal_separator='period') == r'\left\{1, 2.3, 4.5\right\}')
# Error Handling tests
raises(ValueError, lambda: latex([1,2.3,4.5], decimal_separator='non_existing_decimal_separator_in_list'))
raises(ValueError, lambda: latex(FiniteSet(1,2.3,4.5), decimal_separator='non_existing_decimal_separator_in_set'))
raises(ValueError, lambda: latex((1,2.3,4.5), decimal_separator='non_existing_decimal_separator_in_tuple'))
def test_Str():
from sympy.core.symbol import Str
assert str(Str('x')) == r'x'
def test_latex_escape():
assert latex_escape(r"~^\&%$#_{}") == "".join([
r'\textasciitilde',
r'\textasciicircum',
r'\textbackslash',
r'\&',
r'\%',
r'\$',
r'\#',
r'\_',
r'\{',
r'\}',
])
def test_emptyPrinter():
class MyObject:
def __repr__(self):
return "<MyObject with {...}>"
# unknown objects are monospaced
assert latex(MyObject()) == r"\mathtt{\text{<MyObject with \{...\}>}}"
# even if they are nested within other objects
assert latex((MyObject(),)) == r"\left( \mathtt{\text{<MyObject with \{...\}>}},\right)"
def test_global_settings():
import inspect
# settings should be visible in the signature of `latex`
assert inspect.signature(latex).parameters['imaginary_unit'].default == r'i'
assert latex(I) == r'i'
try:
# but changing the defaults...
LatexPrinter.set_global_settings(imaginary_unit='j')
# ... should change the signature
assert inspect.signature(latex).parameters['imaginary_unit'].default == r'j'
assert latex(I) == r'j'
finally:
# there's no public API to undo this, but we need to make sure we do
# so as not to impact other tests
del LatexPrinter._global_settings['imaginary_unit']
# check we really did undo it
assert inspect.signature(latex).parameters['imaginary_unit'].default == r'i'
assert latex(I) == r'i'
def test_pickleable():
# this tests that the _PrintFunction instance is pickleable
import pickle
assert pickle.loads(pickle.dumps(latex)) is latex
def test_printing_latex_array_expressions():
assert latex(ArraySymbol("A", (2, 3, 4))) == "A"
assert latex(ArrayElement("A", (2, 1/(1-x), 0))) == "{{A}_{2, \\frac{1}{1 - x}, 0}}"
M = MatrixSymbol("M", 3, 3)
N = MatrixSymbol("N", 3, 3)
assert latex(ArrayElement(M*N, [x, 0])) == "{{\\left(M N\\right)}_{x, 0}}"
def test_Array():
arr = Array(range(10))
assert latex(arr) == r'\left[\begin{matrix}0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9\end{matrix}\right]'
arr = Array(range(11))
# fill the empty argument with a bunch of 'c' to avoid latex errors
assert latex(arr) == r'\left[\begin{array}{ccccccccccc}0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10\end{array}\right]'
def test_latex_with_unevaluated():
with evaluate(False):
assert latex(a * a) == r"a a"
def test_latex_disable_split_super_sub():
assert latex(Symbol('u^a_b')) == 'u^{a}_{b}'
assert latex(Symbol('u^a_b'), disable_split_super_sub=False) == 'u^{a}_{b}'
assert latex(Symbol('u^a_b'), disable_split_super_sub=True) == 'u\\^a\\_b'
| lowergamma |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/version.py | {
"start": 20249,
"end": 21929
} | class ____(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
numeric_re = re.compile(r'^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
# by zero-filling numeric values so simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
| LegacyMatcher |
python | PrefectHQ__prefect | tests/server/models/test_flow_run_input.py | {
"start": 232,
"end": 2074
} | class ____:
async def test_creates_flow_run_input(self, session: AsyncSession, flow_run):
flow_run_input = await models.flow_run_input.create_flow_run_input(
session=session,
flow_run_input=schemas.core.FlowRunInput(
flow_run_id=flow_run.id,
key="my-key",
value=json.dumps({"complex": True}),
),
)
assert isinstance(flow_run_input, schemas.core.FlowRunInput)
assert flow_run_input.flow_run_id == flow_run.id
assert flow_run_input.key == "my-key"
assert flow_run_input.value == json.dumps({"complex": True})
async def test_flow_run_id_and_key_are_unique(
self, session: AsyncSession, flow_run
):
await models.flow_run_input.create_flow_run_input(
session=session,
flow_run_input=schemas.core.FlowRunInput(
flow_run_id=flow_run.id, key="my-key", value="my-value"
),
)
with pytest.raises(IntegrityError):
await models.flow_run_input.create_flow_run_input(
session=session,
flow_run_input=schemas.core.FlowRunInput(
flow_run_id=flow_run.id, key="my-key", value="my-value"
),
)
@pytest.mark.parametrize(
"key", ["my key", "user!123", "product?description", "my(key)", "my&key"]
)
async def test_non_url_safe_keys_invalid(
self, key, session: AsyncSession, flow_run
):
with pytest.raises(ValidationError):
await models.flow_run_input.create_flow_run_input(
session=session,
flow_run_input=schemas.core.FlowRunInput(
flow_run_id=flow_run.id, key=key, value="my-value"
),
)
| TestCreateFlowRunInput |
python | huggingface__transformers | tests/models/codegen/test_tokenization_codegen.py | {
"start": 248,
"end": 2499
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = ["Salesforce/codegen-350M-mono"]
tokenizer_class = CodeGenTokenizer
integration_expected_tokens = ['This', 'Ġis', 'Ġa', 'Ġtest', 'ĠðŁĺ', 'Ĭ', 'Ċ', 'I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ92', '000', ',', 'Ġand', 'Ġthis', 'Ġis', 'Ġfals', 'é', '.', 'Ċ', 'çĶŁ', 'æ', '´', '»', 'çļĦ', 'çľ', 'Ł', 'è', '°', 'Ľ', 'æĺ¯', 'Ċ', 'Hi', ' ', 'Hello', 'Ċ', 'Hi', ' ', 'Hello', 'ĊĊ', 'Ġ', 'Ċ', ' ', 'Ċ', 'ĠHello', 'Ċ', '<', 's', '>', 'Ċ', 'hi', '<', 's', '>', 'there', 'Ċ', 'The', 'Ġfollowing', 'Ġstring', 'Ġshould', 'Ġbe', 'Ġproperly', 'Ġencoded', ':', 'ĠHello', '.', 'Ċ', 'But', 'Ġ', 'ird', 'Ġand', 'Ġ', 'à¸', 'Ľ', 'à¸', 'µ', ' ', 'ird', ' ', 'à¸', 'Ķ', 'Ċ', 'Hey', 'Ġhow', 'Ġare', 'Ġyou', 'Ġdoing'] # fmt: skip
integration_expected_token_ids = [1212, 318, 257, 1332, 30325, 232, 198, 40, 373, 4642, 287, 10190, 830, 11, 290, 428, 318, 27807, 2634, 13, 198, 37955, 162, 112, 119, 21410, 40367, 253, 164, 108, 249, 42468, 198, 17250, 50286, 15496, 198, 17250, 50285, 15496, 628, 220, 198, 50286, 198, 18435, 198, 27, 82, 29, 198, 5303, 27, 82, 29, 8117, 198, 464, 1708, 4731, 815, 307, 6105, 30240, 25, 18435, 13, 198, 1537, 220, 1447, 290, 220, 19567, 249, 19567, 113, 50285, 1447, 50285, 19567, 242, 198, 10814, 703, 389, 345, 1804] # fmt: skip
expected_tokens_from_ids = ['This', 'Ġis', 'Ġa', 'Ġtest', 'ĠðŁĺ', 'Ĭ', 'Ċ', 'I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ92', '000', ',', 'Ġand', 'Ġthis', 'Ġis', 'Ġfals', 'é', '.', 'Ċ', 'çĶŁ', 'æ', '´', '»', 'çļĦ', 'çľ', 'Ł', 'è', '°', 'Ľ', 'æĺ¯', 'Ċ', 'Hi', ' ', 'Hello', 'Ċ', 'Hi', ' ', 'Hello', 'ĊĊ', 'Ġ', 'Ċ', ' ', 'Ċ', 'ĠHello', 'Ċ', '<', 's', '>', 'Ċ', 'hi', '<', 's', '>', 'there', 'Ċ', 'The', 'Ġfollowing', 'Ġstring', 'Ġshould', 'Ġbe', 'Ġproperly', 'Ġencoded', ':', 'ĠHello', '.', 'Ċ', 'But', 'Ġ', 'ird', 'Ġand', 'Ġ', 'à¸', 'Ľ', 'à¸', 'µ', ' ', 'ird', ' ', 'à¸', 'Ķ', 'Ċ', 'Hey', 'Ġhow', 'Ġare', 'Ġyou', 'Ġdoing'] # fmt: skip
integration_expected_decoded_text = "This is a test 😊\nI was born in 92000, and this is falsé.\n生活的真谛是\nHi Hello\nHi Hello\n\n \n \n Hello\n<s>\nhi<s>there\nThe following string should be properly encoded: Hello.\nBut ird and ปี ird ด\nHey how are you doing"
| CodeGenTokenizationTest |
python | keras-team__keras | examples/demo_custom_jax_workflow.py | {
"start": 858,
"end": 3226
} | class ____(Model):
def __init__(self, hidden_dim, output_dim):
super().__init__()
self.dense1 = MyDense(hidden_dim)
self.dense2 = MyDense(hidden_dim)
self.dense3 = MyDense(output_dim)
def call(self, x):
x = jax.nn.relu(self.dense1(x))
x = jax.nn.relu(self.dense2(x))
return self.dense3(x)
def Dataset():
for _ in range(20):
yield (np.random.random((32, 128)), np.random.random((32, 4)))
def loss_fn(y_true, y_pred):
return ops.sum((y_true - y_pred) ** 2)
model = MyModel(hidden_dim=256, output_dim=4)
optimizer = optimizers.SGD(learning_rate=0.001)
dataset = Dataset()
# Build model
x = np.random.random((1, 128))
model(x)
# Build optimizer
optimizer.build(model.trainable_variables)
######### Custom JAX workflow ###############
def compute_loss_and_updates(
trainable_variables, non_trainable_variables, x, y
):
y_pred, non_trainable_variables = model.stateless_call(
trainable_variables, non_trainable_variables, x
)
loss = loss_fn(y, y_pred)
return loss, non_trainable_variables
grad_fn = jax.value_and_grad(compute_loss_and_updates, has_aux=True)
@jax.jit
def train_step(state, data):
trainable_variables, non_trainable_variables, optimizer_variables = state
x, y = data
(loss, non_trainable_variables), grads = grad_fn(
trainable_variables, non_trainable_variables, x, y
)
trainable_variables, optimizer_variables = optimizer.stateless_apply(
optimizer_variables, grads, trainable_variables
)
# Return updated state
return loss, (
trainable_variables,
non_trainable_variables,
optimizer_variables,
)
trainable_variables = model.trainable_variables
non_trainable_variables = model.non_trainable_variables
optimizer_variables = optimizer.variables
state = trainable_variables, non_trainable_variables, optimizer_variables
# Training loop
for data in dataset:
loss, state = train_step(state, data)
print("Loss:", loss)
# Post-processing model state update
trainable_variables, non_trainable_variables, optimizer_variables = state
for variable, value in zip(model.trainable_variables, trainable_variables):
variable.assign(value)
for variable, value in zip(
model.non_trainable_variables, non_trainable_variables
):
variable.assign(value)
| MyModel |
python | redis__redis-py | redis/maint_notifications.py | {
"start": 213,
"end": 322
} | class ____(enum.Enum):
NONE = "none"
MOVING = "moving"
MAINTENANCE = "maintenance"
| MaintenanceState |
python | modin-project__modin | modin/core/storage_formats/pandas/parsers.py | {
"start": 12914,
"end": 13873
} | class ____(PandasParser):
@staticmethod
@doc(_doc_parse_func, parameters=_doc_parse_parameters_common2)
def parse(fname, common_read_kwargs, **kwargs):
return PandasParser.generic_parse(
fname,
callback=PandasCSVParser.read_callback,
**common_read_kwargs,
**kwargs,
)
@staticmethod
def read_callback(*args, **kwargs):
"""
Parse data on each partition.
Parameters
----------
*args : list
Positional arguments to be passed to the callback function.
**kwargs : dict
Keyword arguments to be passed to the callback function.
Returns
-------
pandas.DataFrame or pandas.io.parsers.TextParser
Function call result.
"""
return pandas.read_csv(*args, **kwargs)
@doc(_doc_pandas_parser_class, data_type="tables with fixed-width formatted lines")
| PandasCSVParser |
python | imageio__imageio | imageio/plugins/grab.py | {
"start": 142,
"end": 1236
} | class ____(Format):
"""Base format for grab formats."""
_pillow_imported = False
_ImageGrab = None
def __init__(self, *args, **kwargs):
super(BaseGrabFormat, self).__init__(*args, **kwargs)
self._lock = threading.RLock()
def _can_write(self, request):
return False
def _init_pillow(self):
with self._lock:
if not self._pillow_imported:
self._pillow_imported = True # more like tried to import
import PIL
if not hasattr(PIL, "__version__"): # pragma: no cover
raise ImportError("Imageio Pillow requires " "Pillow, not PIL!")
try:
from PIL import ImageGrab
except ImportError:
return None
self._ImageGrab = ImageGrab
return self._ImageGrab
class Reader(Format.Reader):
def _open(self):
pass
def _close(self):
pass
def _get_data(self, index):
return self.format._get_data(index)
| BaseGrabFormat |
python | hynek__structlog | tests/processors/test_renderers.py | {
"start": 701,
"end": 3354
} | class ____:
def test_sort_keys(self, event_dict):
"""
Keys are sorted if sort_keys is set.
"""
rv = KeyValueRenderer(sort_keys=True)(None, None, event_dict)
assert r"a=<A(\o/)> b=[3, 4] x=7 y='test' z=(1, 2)" == rv
def test_order_complete(self, event_dict):
"""
Orders keys according to key_order.
"""
rv = KeyValueRenderer(key_order=["y", "b", "a", "z", "x"])(
None, None, event_dict
)
assert r"y='test' b=[3, 4] a=<A(\o/)> z=(1, 2) x=7" == rv
def test_order_missing(self, event_dict):
"""
Missing keys get rendered as None.
"""
rv = KeyValueRenderer(key_order=["c", "y", "b", "a", "z", "x"])(
None, None, event_dict
)
assert r"c=None y='test' b=[3, 4] a=<A(\o/)> z=(1, 2) x=7" == rv
def test_order_missing_dropped(self, event_dict):
"""
Missing keys get dropped
"""
rv = KeyValueRenderer(
key_order=["c", "y", "b", "a", "z", "x"], drop_missing=True
)(None, None, event_dict)
assert r"y='test' b=[3, 4] a=<A(\o/)> z=(1, 2) x=7" == rv
def test_order_extra(self, event_dict):
"""
Extra keys get sorted if sort_keys=True.
"""
event_dict["B"] = "B"
event_dict["A"] = "A"
rv = KeyValueRenderer(
key_order=["c", "y", "b", "a", "z", "x"], sort_keys=True
)(None, None, event_dict)
assert (
r"c=None y='test' b=[3, 4] a=<A(\o/)> z=(1, 2) x=7 A='A' B='B'"
) == rv
def test_order_sorted_missing_dropped(self, event_dict):
"""
Keys get sorted if sort_keys=True and extras get dropped.
"""
event_dict["B"] = "B"
event_dict["A"] = "A"
rv = KeyValueRenderer(
key_order=["c", "y", "b", "a", "z", "x"],
sort_keys=True,
drop_missing=True,
)(None, None, event_dict)
assert r"y='test' b=[3, 4] a=<A(\o/)> z=(1, 2) x=7 A='A' B='B'" == rv
def test_random_order(self, event_dict):
"""
No special ordering doesn't blow up.
"""
rv = KeyValueRenderer()(None, None, event_dict)
assert isinstance(rv, str)
@pytest.mark.parametrize("rns", [True, False])
def test_repr_native_str(self, rns):
"""
repr_native_str=False doesn't repr on native strings.
"""
rv = KeyValueRenderer(repr_native_str=rns)(
None, None, {"event": "哈", "key": 42, "key2": "哈"}
)
cnt = rv.count("哈")
assert 2 == cnt
| TestKeyValueRenderer |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/plugins/test_listener.py | {
"start": 2978,
"end": 3495
} | class ____:
def __init__(self, *args, **kwargs):
self.submitted = False
self.succeeded = False
self.result = None
def submit(self, fn, /, *args, **kwargs):
self.submitted = True
try:
fn(*args, **kwargs)
self.succeeded = True
except Exception:
pass
return MagicMock()
def shutdown(self, *args, **kwargs):
print("Shutting down")
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Airflow 2 tests")
| MockExecutor |
python | mlflow__mlflow | mlflow/genai/scorers/base.py | {
"start": 3673,
"end": 43462
} | class ____(BaseModel):
name: str
aggregations: list[_AggregationType] | None = None
description: str | None = None
_cached_dump: dict[str, Any] | None = PrivateAttr(default=None)
_sampling_config: ScorerSamplingConfig | None = PrivateAttr(default=None)
_registered_backend: str | None = PrivateAttr(default=None)
@property
def is_session_level_scorer(self) -> bool:
"""Get whether this scorer is a session-level scorer.
Defaults to False. Child classes can override this property to return True
or compute the value dynamically based on their configuration.
"""
return False
@property
def sample_rate(self) -> float | None:
"""Get the sample rate for this scorer. Available when registered for monitoring."""
return self._sampling_config.sample_rate if self._sampling_config else None
@property
def filter_string(self) -> str | None:
"""Get the filter string for this scorer."""
return self._sampling_config.filter_string if self._sampling_config else None
@property
def status(self) -> ScorerStatus:
"""Get the status of this scorer, using only the local state."""
if self._registered_backend is None:
return ScorerStatus.UNREGISTERED
return ScorerStatus.STARTED if (self.sample_rate or 0) > 0 else ScorerStatus.STOPPED
def __repr__(self) -> str:
# Get the standard representation from the parent class
base_repr = super().__repr__()
filter_string = self.filter_string
if filter_string is not None:
filter_string = f"'{filter_string}'"
# Inject the property's value into the repr string
return f"{base_repr[:-1]}, sample_rate={self.sample_rate}, filter_string={filter_string})"
def model_dump(self, **kwargs) -> dict[str, Any]:
"""Override model_dump to include source code."""
# Return cached dump if available (prevents re-serialization issues with dynamic functions)
if self._cached_dump is not None:
return self._cached_dump
# Check if this is a decorator scorer
if not getattr(self, "_original_func", None):
# BuiltInScorer overrides `model_dump`, so this is neither a builtin scorer nor a
# decorator scorer
raise MlflowException.invalid_parameter_value(
f"Unsupported scorer type: {self.__class__.__name__}. "
f"Scorer serialization only supports:\n"
f"1. Builtin scorers (from mlflow.genai.scorers.builtin_scorers)\n"
f"2. Decorator-created scorers (using @scorer decorator)\n"
f"Direct subclassing of Scorer is not supported for serialization. "
f"Please use the @scorer decorator instead."
)
# Decorator scorer - extract and store source code
source_info = self._extract_source_code_info()
# Create serialized scorer with all fields at once
serialized = SerializedScorer(
name=self.name,
description=self.description,
aggregations=self.aggregations,
mlflow_version=mlflow.__version__,
serialization_version=_SERIALIZATION_VERSION,
call_source=source_info.get("call_source"),
call_signature=source_info.get("call_signature"),
original_func_name=source_info.get("original_func_name"),
)
self._cached_dump = asdict(serialized)
return self._cached_dump
def _extract_source_code_info(self) -> dict[str, str | None]:
"""Extract source code information for the original decorated function."""
from mlflow.genai.scorers.scorer_utils import extract_function_body
result = {"call_source": None, "call_signature": None, "original_func_name": None}
# Extract original function source
call_body, _ = extract_function_body(self._original_func)
result["call_source"] = call_body
result["original_func_name"] = self._original_func.__name__
# Store the signature of the original function
result["call_signature"] = str(inspect.signature(self._original_func))
return result
@classmethod
def model_validate(cls, obj: Any) -> "Scorer":
"""Override model_validate to reconstruct scorer from source code."""
# Handle SerializedScorer object
if isinstance(obj, SerializedScorer):
serialized = obj
# Handle dict object
elif isinstance(obj, dict):
# Parse the serialized data using our dataclass
try:
serialized = SerializedScorer(**obj)
except Exception as e:
raise MlflowException.invalid_parameter_value(
f"Failed to parse serialized scorer data: {e}"
)
else:
raise MlflowException.invalid_parameter_value(
f"Invalid scorer data: expected a SerializedScorer object or dictionary, "
f"got {type(obj).__name__}. Scorer data must be either a SerializedScorer object "
"or a dictionary containing serialized scorer information."
)
# Log version information for debugging
if serialized.mlflow_version:
_logger.debug(
f"Deserializing scorer created with MLflow version: {serialized.mlflow_version}"
)
if serialized.serialization_version:
_logger.debug(f"Scorer serialization version: {serialized.serialization_version}")
if serialized.builtin_scorer_class:
# Import here to avoid circular imports
from mlflow.genai.scorers.builtin_scorers import BuiltInScorer
return BuiltInScorer.model_validate(obj)
# Handle decorator scorers
elif serialized.call_source and serialized.call_signature and serialized.original_func_name:
return cls._reconstruct_decorator_scorer(serialized)
# Handle InstructionsJudge scorers
elif serialized.instructions_judge_pydantic_data is not None:
from mlflow.genai.judges.instructions_judge import InstructionsJudge
data = serialized.instructions_judge_pydantic_data
field_specs = {
"instructions": str,
"model": str,
}
errors = []
for field, expected_type in field_specs.items():
if field not in data:
errors.append(f"missing required field '{field}'")
elif not isinstance(data[field], expected_type):
actual_type = type(data[field]).__name__
errors.append(
f"field '{field}' must be {expected_type.__name__}, got {actual_type}"
)
if errors:
raise MlflowException.invalid_parameter_value(
f"Failed to deserialize InstructionsJudge scorer '{serialized.name}': "
f"{'; '.join(errors)}"
)
feedback_value_type = str # default to str
if "feedback_value_type" in data and data["feedback_value_type"] is not None:
feedback_value_type = InstructionsJudge._deserialize_feedback_value_type(
data["feedback_value_type"]
)
try:
return InstructionsJudge(
name=serialized.name,
description=serialized.description,
instructions=data["instructions"],
model=data["model"],
feedback_value_type=feedback_value_type,
# TODO: add aggregations here once we support boolean/numeric judge outputs
)
except Exception as e:
raise MlflowException.invalid_parameter_value(
f"Failed to create InstructionsJudge scorer '{serialized.name}': {e}"
)
# Invalid serialized data
else:
raise MlflowException.invalid_parameter_value(
f"Failed to load scorer '{serialized.name}'. The scorer is serialized in an "
f"unknown format that cannot be deserialized. Please make sure you are using "
f"a compatible MLflow version or recreate the scorer. "
f"Scorer was created with MLflow version: "
f"{serialized.mlflow_version or 'unknown'}, "
f"serialization version: {serialized.serialization_version or 'unknown'}, "
f"current MLflow version: {mlflow.__version__}."
)
@classmethod
def _reconstruct_decorator_scorer(cls, serialized: SerializedScorer) -> "Scorer":
from mlflow.genai.scorers.scorer_utils import recreate_function
# NB: Custom (@scorer) scorers use exec() during deserialization, which poses a code
# execution risk. Only allow loading in Databricks runtime environments where the
# execution environment is controlled.
if not is_in_databricks_runtime():
code_snippet = (
"\n\nfrom mlflow.genai import scorer\n\n"
f"@scorer\ndef {serialized.original_func_name}{serialized.call_signature}:\n"
)
for line in serialized.call_source.split("\n"):
code_snippet += f" {line}\n"
is_databricks_remote = is_databricks_uri(get_tracking_uri())
if is_databricks_remote:
error_msg = (
f"Loading custom scorer '{serialized.name}' via remote access is not "
"supported. You are connected to a Databricks workspace but executing code "
"outside of it. Custom scorers require arbitrary code execution during "
"deserialization and must be loaded within the Databricks workspace for "
"security reasons.\n\n"
"To use this scorer:\n"
"1. Run your code inside the Databricks workspace (notebook or job), or\n"
"2. Copy the code below and use it directly in your source code, or\n"
"3. Use built-in scorers or make_judge() scorers instead\n\n"
f"Registered scorer code:\n{code_snippet}"
)
else:
error_msg = (
f"Loading custom scorer '{serialized.name}' is not supported outside of "
"Databricks runtime environments due to security concerns. Custom scorers "
"require arbitrary code execution during deserialization.\n\n"
"To use this scorer, please:\n"
"1. Copy the code below and save it in your source code repository\n"
"2. Import and use it directly in your code, or\n"
"3. Use built-in scorers or make_judge() scorers instead\n\n"
f"Registered scorer code:\n{code_snippet}"
)
raise MlflowException.invalid_parameter_value(error_msg)
try:
recreated_func = recreate_function(
serialized.call_source, serialized.call_signature, serialized.original_func_name
)
except Exception as e:
raise MlflowException.invalid_parameter_value(
f"Failed to recreate function from source code. "
f"Scorer was created with MLflow version: "
f"{serialized.mlflow_version or 'unknown'}, "
f"serialization version: {serialized.serialization_version or 'unknown'}. "
f"Current MLflow version: {mlflow.__version__}. "
f"Error: {e}"
)
# Apply the scorer decorator to recreate the scorer
# Rather than serializing and deserializing the `run` method of `Scorer`, we recreate the
# Scorer using the original function and the `@scorer` decorator. This should be safe so
# long as `@scorer` is a stable API.
scorer_instance = scorer(
recreated_func,
name=serialized.name,
description=serialized.description,
aggregations=serialized.aggregations,
)
# Cache the serialized data to prevent re-serialization issues with dynamic functions
original_serialized_data = asdict(serialized)
object.__setattr__(scorer_instance, "_cached_dump", original_serialized_data)
return scorer_instance
def run(self, *, inputs=None, outputs=None, expectations=None, trace=None, session=None):
    """Invoke the scorer with only the arguments its ``__call__`` accepts, then validate the result.

    This is the evaluation-harness entry point: it bundles every possible
    scorer input, filters the bundle down to the parameters declared by the
    concrete scorer's ``__call__`` signature, calls the scorer, and checks
    the returned value's type before handing it back.

    Args:
        inputs: A single input to the target model/app, or None.
        outputs: A single output from the target model/app, or None.
        expectations: Ground-truth/expectation dict for the row, or None.
        trace: The trace corresponding to the prediction, or None.
        session: Traces belonging to the same conversation session, or None
            (only used by session-level scorers).

    Returns:
        The scorer's result: an int, float, bool, str, a single
        Feedback/Assessment, a list of Feedback/Assessment objects, or None.

    Raises:
        MlflowException: If the scorer returns a value of an unsupported type.
    """
    # Imported lazily to avoid a circular import with mlflow.evaluation.
    from mlflow.evaluation import Assessment as LegacyAssessment

    merged = {
        "inputs": inputs,
        "outputs": outputs,
        "expectations": expectations,
        "trace": trace,
        "session": session,
    }
    # Filter to only the parameters the function actually expects
    sig = inspect.signature(self.__call__)
    filtered = {k: v for k, v in merged.items() if k in sig.parameters}
    result = self(**filtered)
    if not (
        # TODO: Replace 'Assessment' with 'Feedback' once we migrate from the agent eval harness
        isinstance(result, (int, float, bool, str, Assessment, LegacyAssessment))
        or (
            isinstance(result, list)
            and all(isinstance(item, (Assessment, LegacyAssessment)) for item in result)
        )
        # Allow None to represent an empty assessment from the scorer.
        or result is None
    ):
        # Build a readable type name for the error message, e.g. "list[dict]".
        if isinstance(result, list) and len(result) > 0:
            result_type = "list[" + type(result[0]).__name__ + "]"
        else:
            result_type = type(result).__name__
        raise MlflowException.invalid_parameter_value(
            f"{self.name} must return one of int, float, bool, str, "
            f"Feedback, or list[Feedback]. Got {result_type}"
        )
    if isinstance(result, Feedback) and result.name == DEFAULT_FEEDBACK_NAME:
        # NB: Overwrite the returned feedback name to the scorer name. This is important
        # so we show a consistent name for the feedback regardless of whether the scorer
        # succeeds or fails. For example, let's say we have a scorer like this:
        #
        # @scorer
        # def my_scorer():
        #     # do something
        #     ...
        #     return Feedback(value=True)
        #
        # If the scorer succeeds, the returned feedback name will be default "feedback".
        # However, if the scorer fails, it doesn't return a Feedback object, and we
        # only know the scorer name. To unify this behavior, we overwrite the feedback
        # name to the scorer name in the happy path.
        # This will not apply when the scorer returns a list of Feedback objects.
        # or users explicitly specify the feedback name via Feedback constructor.
        result.name = self.name
    return result
def __call__(
    self,
    *,
    inputs: Any = None,
    outputs: Any = None,
    expectations: dict[str, Any] | None = None,
    trace: Trace | None = None,
    session: list[Trace] | None = None,
) -> int | float | bool | str | Feedback | list[Feedback]:
    """Evaluate a single evaluation-dataset row; subclasses must override this.

    A concrete scorer does not need to accept every parameter below — declare
    only the ones it needs, and :meth:`run` will pass exactly that subset.

    Args:
        inputs: A single input to the target model/app. Taken from the
            dataset's ``inputs`` column as-is, or derived from the root span
            of the trace when traces are used as the evaluation dataset.
        outputs: A single output from the target model/app. Taken from the
            dataset's ``outputs`` column, from the result of calling
            ``predict_fn`` on ``inputs``, or from the trace's ``response``
            field when traces are used as the evaluation dataset.
        expectations: Ground truth or any expectation for the prediction,
            e.g. expected retrieved docs. Taken from the dataset's
            ``expectations`` column, or, for trace datasets, a dictionary of
            ``{assessment name: assessment value}``.
        trace: The trace object corresponding to the prediction for the row.
            Specified via a ``trace`` column in the dataset, or generated
            during the prediction.
        session: A list of trace objects belonging to the same conversation
            session. Supplied only to session-level scorers (scorers with
            ``is_session_level_scorer = True``); all traces must share the
            same ``mlflow.trace.session`` metadata value, otherwise an error
            is raised.

    Returns:
        One of ``int``, ``float``, ``bool``, ``str``, a single
        :class:`~mlflow.entities.Feedback`, or a list of ``Feedback`` objects.

    Example:
        .. code-block:: python

            class NotEmpty(BaseScorer):
                name = "not_empty"

                def __call__(self, *, outputs) -> bool:
                    return outputs != ""


            class ExactMatch(BaseScorer):
                name = "exact_match"

                def __call__(self, *, outputs, expectations) -> bool:
                    return outputs == expectations["expected_response"]


            class NumToolCalls(BaseScorer):
                name = "num_tool_calls"

                def __call__(self, *, trace) -> int:
                    spans = trace.search_spans(name="tool_call")
                    return len(spans)


            # Use the scorer in an evaluation
            mlflow.genai.evaluate(
                data=data,
                scorers=[NotEmpty(), ExactMatch(), NumToolCalls()],
            )
    """
    # The base class defines the contract only; every concrete scorer
    # (subclass or @scorer-decorated function) supplies the implementation.
    raise NotImplementedError("Implementation of __call__ is required for Scorer class")
@property
def kind(self) -> ScorerKind:
    """The mechanism this scorer was defined with.

    Subclass-based scorers report ``ScorerKind.CLASS``; the ``@scorer``
    decorator's generated subclass overrides this property to report
    ``ScorerKind.DECORATOR``.
    """
    return ScorerKind.CLASS
def register(self, *, name: str | None = None, experiment_id: str | None = None) -> "Scorer":
    """Register this scorer with the MLflow server.

    A copy of the scorer is registered under ``name`` (or this scorer's
    current ``name`` when not given) in the target experiment, making it
    available for automatic trace evaluation. The original instance is left
    untouched; the returned copy carries the registration information and can
    subsequently be activated with :meth:`start`.

    Args:
        name: Optional registered name for the scorer. Defaults to the
            current ``name`` property value.
        experiment_id: The ID of the MLflow experiment to register the
            scorer for. If None, uses the currently active experiment.

    Returns:
        A new ``Scorer`` instance with server registration information.

    Example:
        .. code-block:: python

            import mlflow
            from mlflow.genai.scorers import RelevanceToQuery

            mlflow.set_experiment("my_genai_app")
            registered = RelevanceToQuery().register(name="relevance_scorer")
    """
    # Imported lazily to avoid a circular import with the registry module.
    from mlflow.genai.scorers.registry import DatabricksStore, _get_scorer_store

    self._check_can_be_registered()
    scorer_store = _get_scorer_store()

    # Register a copy so the caller's instance is never mutated.
    registered = self._create_copy()
    if name:
        registered.name = name
        # Keep the cached serialized form consistent with the new name.
        if registered._cached_dump is not None:
            registered._cached_dump["name"] = name

    scorer_store.register_scorer(experiment_id, registered)
    registered._registered_backend = (
        "databricks" if isinstance(scorer_store, DatabricksStore) else "tracking"
    )
    return registered
def start(
    self,
    *,
    name: str | None = None,
    experiment_id: str | None = None,
    sampling_config: ScorerSamplingConfig,
) -> "Scorer":
    """Activate registered scoring with the given sampling configuration.

    Once started, the scorer automatically evaluates a fraction of incoming
    traces determined by ``sampling_config.sample_rate``, optionally narrowed
    by ``sampling_config.filter_string``.

    Args:
        name: Optional scorer name. Defaults to this scorer's registered or
            default name.
        experiment_id: The ID of the MLflow experiment containing the scorer.
            If None, uses the currently active experiment.
        sampling_config: Sampling settings. ``sample_rate`` (fraction of
            traces to evaluate, 0.0-1.0) must be greater than 0 when
            starting; ``filter_string`` is an optional
            ``search_traces``-compatible filter.

    Returns:
        A new ``Scorer`` instance with the updated sampling configuration.

    Raises:
        MlflowException: If the tracking URI is not a Databricks URI, or the
            provided sample rate is not greater than 0.

    Example:
        .. code-block:: python

            import mlflow
            from mlflow.genai.scorers import RelevanceToQuery, ScorerSamplingConfig

            mlflow.set_experiment("my_genai_app")
            scorer = RelevanceToQuery().register()
            active = scorer.start(sampling_config=ScorerSamplingConfig(sample_rate=0.5))
    """
    from mlflow.genai.scorers.registry import DatabricksStore
    from mlflow.tracking._tracking_service.utils import get_tracking_uri
    from mlflow.utils.uri import is_databricks_uri

    # Scheduled (server-side) scoring is only available on Databricks.
    if not is_databricks_uri(get_tracking_uri()):
        raise MlflowException(
            "Scheduling scorers is only supported by Databricks tracking URI."
        )
    self._check_can_be_registered()

    rate = sampling_config.sample_rate
    # A zero/negative rate would "start" a scorer that never runs.
    if rate is not None and rate <= 0:
        raise MlflowException.invalid_parameter_value(
            "When starting a scorer, provided sample rate must be greater than 0"
        )

    effective_name = name or self.name
    # Push the new configuration to the server.
    return DatabricksStore.update_registered_scorer(
        name=effective_name,
        scorer=self,
        sample_rate=rate,
        filter_string=sampling_config.filter_string,
        experiment_id=experiment_id,
    )
def update(
    self,
    *,
    name: str | None = None,
    experiment_id: str | None = None,
    sampling_config: ScorerSamplingConfig,
) -> "Scorer":
    """Change the sampling configuration of an already-registered scorer.

    Adjusts the sample rate and/or trace filter for a registered scorer
    without stopping and restarting it — e.g. to scale evaluation up or down
    with traffic, or to add filtering criteria on the fly.

    Args:
        name: Optional scorer name. Defaults to this scorer's registered or
            default name.
        experiment_id: The ID of the MLflow experiment containing the scorer.
            If None, uses the currently active experiment.
        sampling_config: New sampling settings. Both ``sample_rate``
            (fraction of traces to evaluate, 0.0-1.0) and ``filter_string``
            (``search_traces``-compatible filter) are optional.

    Returns:
        A new ``Scorer`` instance with the updated configuration.

    Raises:
        MlflowException: If the tracking URI is not a Databricks URI.

    Example:
        .. code-block:: python

            import mlflow
            from mlflow.genai.scorers import RelevanceToQuery, ScorerSamplingConfig

            mlflow.set_experiment("my_genai_app")
            scorer = RelevanceToQuery().register()
            active = scorer.start(sampling_config=ScorerSamplingConfig(sample_rate=0.1))
            # Raise the sampling rate during high traffic.
            updated = active.update(sampling_config=ScorerSamplingConfig(sample_rate=0.5))
    """
    from mlflow.genai.scorers.registry import DatabricksStore
    from mlflow.tracking._tracking_service.utils import get_tracking_uri
    from mlflow.utils.uri import is_databricks_uri

    # Scheduled (server-side) scoring is only available on Databricks.
    if not is_databricks_uri(get_tracking_uri()):
        raise MlflowException(
            "Updating scheduled scorers is only supported by Databricks tracking URI."
        )
    self._check_can_be_registered()

    effective_name = name or self.name
    # Push the new configuration to the server.
    return DatabricksStore.update_registered_scorer(
        name=effective_name,
        scorer=self,
        sample_rate=sampling_config.sample_rate,
        filter_string=sampling_config.filter_string,
        experiment_id=experiment_id,
    )
def stop(self, *, name: str | None = None, experiment_id: str | None = None) -> "Scorer":
    """Deactivate registered scoring by setting the sample rate to 0.

    The scorer stays registered and can be reactivated later with
    :meth:`start`.

    Args:
        name: Optional scorer name. Defaults to this scorer's registered or
            default name.
        experiment_id: The ID of the MLflow experiment containing the scorer.
            If None, uses the currently active experiment.

    Returns:
        A new ``Scorer`` instance with its sample rate set to 0.

    Raises:
        MlflowException: If the tracking URI is not a Databricks URI.

    Example:
        .. code-block:: python

            import mlflow
            from mlflow.genai.scorers import RelevanceToQuery, ScorerSamplingConfig

            mlflow.set_experiment("my_genai_app")
            scorer = RelevanceToQuery().register()
            active = scorer.start(sampling_config=ScorerSamplingConfig(sample_rate=0.5))
            stopped = active.stop()
            # Still registered; can be restarted later.
            restarted = stopped.start(sampling_config=ScorerSamplingConfig(sample_rate=0.3))
    """
    from mlflow.tracking._tracking_service.utils import get_tracking_uri
    from mlflow.utils.uri import is_databricks_uri

    if not is_databricks_uri(get_tracking_uri()):
        raise MlflowException(
            "Stopping scheduled scorers is only supported by Databricks tracking URI."
        )
    self._check_can_be_registered()

    # Stopping is just an update to a zero sample rate.
    return self.update(
        name=name or self.name,
        experiment_id=experiment_id,
        sampling_config=ScorerSamplingConfig(sample_rate=0.0),
    )
def _create_copy(self) -> "Scorer":
    """Return a deep copy of this scorer, including its cached serialized form."""
    self._check_can_be_registered(
        error_message="Scorer must be a builtin or decorator scorer to be copied."
    )
    duplicate = self.model_copy(deep=True)
    # Give the copy its own cached-dump dict so later edits (e.g. renaming on
    # registration) don't leak back into this instance.
    if self._cached_dump is not None:
        object.__setattr__(duplicate, "_cached_dump", dict(self._cached_dump))
    return duplicate
def _check_can_be_registered(self, error_message: str | None = None) -> None:
    """Validate that this scorer is eligible for server-side registration.

    Args:
        error_message: Optional override for the error raised when the
            scorer's kind is not registrable.

    Raises:
        MlflowException: If the scorer kind is not registrable, if a custom
            (decorator) scorer is used outside a Databricks tracking
            environment, or if a Databricks-registered scorer's judge model
            is not provided by Databricks.
    """
    from mlflow.genai.scorers.registry import DatabricksStore, _get_scorer_store

    # Only builtin and @scorer-decorated scorers can be registered.
    if self.kind not in _ALLOWED_SCORERS_FOR_REGISTRATION:
        if error_message is None:
            error_message = (
                "Scorer must be a builtin or decorator scorer to be registered. "
                f"Got {self.kind}."
            )
        raise MlflowException.invalid_parameter_value(error_message)
    # NB: Custom (@scorer) scorers use exec() during deserialization, which poses a code
    # execution risk. Only allow registration when using Databricks tracking URI.
    # Registration itself is safe (just stores code), but we restrict it to Databricks
    # to ensure loaded scorers can only be executed in controlled environments.
    if self.kind == ScorerKind.DECORATOR and not is_databricks_uri(get_tracking_uri()):
        raise MlflowException.invalid_parameter_value(
            "Custom scorer registration (using @scorer decorator) is not supported "
            "outside of Databricks tracking environments due to security concerns. "
            "Custom scorers require arbitrary code execution during deserialization.\n\n"
            "To use custom scorers:\n"
            "1. Configure MLflow to use a Databricks tracking URI, or\n"
            "2. Manage your custom scorer code in a source code repository "
            "(e.g., GitHub) and import it directly, or\n"
            "3. Use built-in scorers or make_judge() scorers instead"
        )
    store = _get_scorer_store()
    # Databricks-backed registration additionally requires the judge model to
    # be hosted by Databricks.
    if (
        isinstance(store, DatabricksStore)
        and (model := getattr(self, "model", None))
        and not model.startswith("databricks")
    ):
        raise MlflowException.invalid_parameter_value(
            "The scorer's judge model must use Databricks as a model provider "
            "in order to be registered or updated. Please use the default judge model or "
            "specify a model value starting with `databricks:/`. "
            f"Got {model}."
        )
def scorer(
    func=None,
    *,
    name: str | None = None,
    description: str | None = None,
    aggregations: list[_AggregationType] | None = None,
):
    """Decorator that turns a plain function into a custom scorer for ``mlflow.genai.evaluate()``.

    The wrapped function may declare any **subset** of the parameters
    ``inputs``, ``outputs``, ``expectations``, and ``trace``; the evaluation
    harness inspects its signature and passes exactly that subset:

    * ``inputs`` — a single input to the target model/app, from the dataset's
      ``inputs`` column or from the root span of the trace.
    * ``outputs`` — a single output from the target model/app, from the
      dataset's ``outputs`` column, from ``predict_fn``, or from the trace's
      ``response`` field.
    * ``expectations`` — ground truth or other expectations for the
      prediction, from the dataset's ``expectations`` column or, for trace
      datasets, a ``{assessment name: assessment value}`` dictionary.
    * ``trace`` — the trace object corresponding to the prediction for the
      row.

    The function must return one of: ``bool``, ``int``, ``float``, ``str``, a
    single :class:`~mlflow.entities.Feedback`, or a list of ``Feedback``
    objects.

    .. note::
        The metric name is the function's name unless overridden via ``name``.

    Args:
        func: The scorer function to decorate (bound automatically when the
            decorator is used with keyword arguments).
        name: The name of the scorer.
        description: A description of what the scorer evaluates.
        aggregations: Aggregation functions applied to the scorer's output —
            either strings from ``["min", "max", "mean", "median",
            "variance", "p90"]`` or callables mapping a list of values to a
            single value. Defaults to "mean".

    Example:
        .. code-block:: python

            from mlflow.genai.scorers import scorer


            @scorer
            def not_empty(outputs) -> bool:
                return outputs != ""


            @scorer
            def exact_match(outputs, expectations) -> bool:
                return outputs == expectations["expected_response"]


            @scorer
            def num_tool_calls(trace) -> int:
                return len(trace.search_spans(name="tool_call"))


            mlflow.genai.evaluate(
                data=data,
                scorers=[not_empty, exact_match, num_tool_calls],
            )
    """
    # Used as @scorer(...) with keyword arguments only: return a decorator
    # that re-enters this function with `func` bound.
    if func is None:
        return functools.partial(
            scorer, name=name, description=description, aggregations=aggregations
        )

    class CustomScorer(Scorer):
        # Keep a handle on the wrapped function (used e.g. for serialization).
        _original_func: Callable[..., Any] | None = PrivateAttr(default=None)

        def __init__(self, **data):
            super().__init__(**data)
            # Bypass pydantic's attribute machinery: plain assignment to a
            # private attribute may be ignored or fail during model
            # initialization, so set it through object.__setattr__.
            object.__setattr__(self, "_original_func", func)

        def __call__(self, *args, **kwargs):
            return func(*args, **kwargs)

        @property
        def kind(self) -> ScorerKind:
            return ScorerKind.DECORATOR

    # Advertise the wrapped function's parameters (plus `self`) on __call__ so
    # MLflow's signature inspection passes the correct subset of arguments.
    wrapped_sig = inspect.signature(func)
    self_param = inspect.Parameter("self", inspect.Parameter.POSITIONAL_OR_KEYWORD)
    CustomScorer.__call__.__signature__ = wrapped_sig.replace(
        parameters=[self_param, *wrapped_sig.parameters.values()]
    )

    return CustomScorer(
        name=name or func.__name__,
        description=description,
        aggregations=aggregations,
    )
| Scorer |
python | scipy__scipy | scipy/optimize/tests/test_minimize_constrained.py | {
"start": 7490,
"end": 8014
} | class ____(Rosenbrock):
"""Rosenbrock subject to inequality constraints.
The following optimization problem:
minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
subject to: -2 <= x[0] <= 0
0 <= x[1] <= 2
Taken from matlab ``fmincon`` documentation.
"""
def __init__(self, random_state=0):
Rosenbrock.__init__(self, 2, random_state)
self.x0 = [-0.2, 0.2]
self.x_opt = None
self.bounds = Bounds([-2, 0], [0, 2])
| BoundedRosenbrock |
python | sympy__sympy | sympy/utilities/codegen.py | {
"start": 10390,
"end": 12835
} | class ____:
"""Represents a typed variable."""
def __init__(self, name, datatype=None, dimensions=None, precision=None):
"""Return a new variable.
Parameters
==========
name : Symbol or MatrixSymbol
datatype : optional
When not given, the data type will be guessed based on the
assumptions on the symbol argument.
dimensions : sequence containing tuples, optional
If present, the argument is interpreted as an array, where this
sequence of tuples specifies (lower, upper) bounds for each
index of the array.
precision : int, optional
Controls the precision of floating point constants.
"""
if not isinstance(name, (Symbol, MatrixSymbol)):
raise TypeError("The first argument must be a SymPy symbol.")
if datatype is None:
datatype = get_default_datatype(name)
elif not isinstance(datatype, DataType):
raise TypeError("The (optional) `datatype' argument must be an "
"instance of the DataType class.")
if dimensions and not isinstance(dimensions, (tuple, list)):
raise TypeError(
"The dimensions argument must be a sequence of tuples")
self._name = name
self._datatype = {
'C': datatype.cname,
'FORTRAN': datatype.fname,
'JULIA': datatype.jlname,
'OCTAVE': datatype.octname,
'PYTHON': datatype.pyname,
'RUST': datatype.rsname,
}
self.dimensions = dimensions
self.precision = precision
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.name)
__repr__ = __str__
@property
def name(self):
return self._name
def get_datatype(self, language):
"""Returns the datatype string for the requested language.
Examples
========
>>> from sympy import Symbol
>>> from sympy.utilities.codegen import Variable
>>> x = Variable(Symbol('x'))
>>> x.get_datatype('c')
'double'
>>> x.get_datatype('fortran')
'REAL*8'
"""
try:
return self._datatype[language.upper()]
except KeyError:
raise CodeGenError("Has datatypes for languages: %s" %
", ".join(self._datatype))
| Variable |
python | django__django | tests/auth_tests/models/custom_user.py | {
"start": 2527,
"end": 3559
} | class ____:
"""
A context manager to temporarily remove the groups and user_permissions M2M
fields from the AbstractUser class, so they don't clash with the
related_name sets.
"""
def __enter__(self):
self._old_au_local_m2m = AbstractUser._meta.local_many_to_many
self._old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many
groups = models.ManyToManyField(Group, blank=True)
groups.contribute_to_class(PermissionsMixin, "groups")
user_permissions = models.ManyToManyField(Permission, blank=True)
user_permissions.contribute_to_class(PermissionsMixin, "user_permissions")
PermissionsMixin._meta.local_many_to_many = [groups, user_permissions]
AbstractUser._meta.local_many_to_many = [groups, user_permissions]
def __exit__(self, exc_type, exc_value, traceback):
AbstractUser._meta.local_many_to_many = self._old_au_local_m2m
PermissionsMixin._meta.local_many_to_many = self._old_pm_local_m2m
| RemoveGroupsAndPermissions |
python | huggingface__transformers | src/transformers/generation/utils.py | {
"start": 7131,
"end": 10796
} | class ____(ModelOutput):
"""
Outputs of encoder-decoder generation models, when using non-beam methods.
Args:
sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`):
Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
past_key_values (`Cache`, *optional*, returned when `use_cache=True`):
Returns the model cache, used to speed up decoding. Different models have a different cache format, check
the model's documentation. Usually, a [`~cache_utils.Cache`] instance.
"""
sequences: torch.LongTensor
scores: tuple[torch.FloatTensor] | None = None
logits: tuple[torch.FloatTensor] | None = None
encoder_attentions: tuple[torch.FloatTensor] | None = None
encoder_hidden_states: tuple[torch.FloatTensor] | None = None
decoder_attentions: tuple[tuple[torch.FloatTensor]] | None = None
cross_attentions: tuple[tuple[torch.FloatTensor]] | None = None
decoder_hidden_states: tuple[tuple[torch.FloatTensor]] | None = None
past_key_values: Cache | None = None
@dataclass
| GenerateEncoderDecoderOutput |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ext.py | {
"start": 13608,
"end": 14436
} | class ____(_regconfig_fn):
"""The PostgreSQL ``phraseto_tsquery`` SQL function.
This function applies automatic casting of the REGCONFIG argument
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
and applies a return type of :class:`_postgresql.TSQUERY`.
Assuming the PostgreSQL dialect has been imported, either by invoking
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
engine using ``create_engine("postgresql...")``,
:class:`_postgresql.phraseto_tsquery` will be used automatically when
invoking ``sqlalchemy.func.phraseto_tsquery()``, ensuring the correct
argument and return type handlers are used at compile and execution time.
.. versionadded:: 2.0.0rc1
"""
inherit_cache = True
type = types.TSQUERY
| phraseto_tsquery |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 91600,
"end": 92704
} | class ____(APITestCase):
def setUp(self):
user = self.create_user(is_staff=False, is_superuser=False)
self.org = self.create_organization()
self.org.save()
team = self.create_team(organization=self.org)
self.project = self.create_project(name="foo", organization=self.org, teams=[team])
self.create_member(teams=[team], user=user, organization=self.org)
self.login_as(user=user)
@cached_property
def url(self):
raise NotImplementedError(f"implement for {type(self).__module__}.{type(self).__name__}")
def assert_commit(self, commit, repo_id, key, author_id, message):
assert commit.organization_id == self.org.id
assert commit.repository_id == repo_id
assert commit.key == key
assert commit.author_id == author_id
assert commit.message == message
def assert_file_change(self, file_change, type, filename, commit_id):
assert file_change.type == type
assert file_change.filename == filename
assert file_change.commit_id == commit_id
| ReleaseCommitPatchTest |
python | pypa__hatch | backend/src/hatchling/builders/hooks/version.py | {
"start": 182,
"end": 2416
} | class ____(BuildHookInterface):
PLUGIN_NAME = "version"
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.__config_path: str | None = None
self.__config_template: str | None = None
self.__config_pattern: str | bool | None = None
@property
def config_path(self) -> str:
if self.__config_path is None:
path = self.config.get("path", "")
if not isinstance(path, str):
message = f"Option `path` for build hook `{self.PLUGIN_NAME}` must be a string"
raise TypeError(message)
if not path:
message = f"Option `path` for build hook `{self.PLUGIN_NAME}` is required"
raise ValueError(message)
self.__config_path = path
return self.__config_path
@property
def config_template(self) -> str:
if self.__config_template is None:
template = self.config.get("template", "")
if not isinstance(template, str):
message = f"Option `template` for build hook `{self.PLUGIN_NAME}` must be a string"
raise TypeError(message)
self.__config_template = template
return self.__config_template
@property
def config_pattern(self) -> str | bool:
if self.__config_pattern is None:
pattern = self.config.get("pattern", "")
if not isinstance(pattern, (str, bool)):
message = f"Option `pattern` for build hook `{self.PLUGIN_NAME}` must be a string or a boolean"
raise TypeError(message)
self.__config_pattern = pattern
return self.__config_pattern
def initialize(
self,
version: str, # noqa: ARG002
build_data: dict[str, Any],
) -> None:
version_file = VersionFile(self.root, self.config_path)
if self.config_pattern:
version_file.read(pattern=self.config_pattern)
version_file.set_version(self.metadata.version)
else:
version_file.write(self.metadata.version, self.config_template)
build_data["artifacts"].append(f"/{self.config_path}")
| VersionBuildHook |
python | numba__llvmlite | llvmlite/binding/newpassmanagers.py | {
"start": 19573,
"end": 19785
} | class ____(ffi.ObjectRef):
def __init__(self):
super().__init__(ffi.lib.LLVMPY_CreateTimePassesHandler())
def _dispose(self):
ffi.lib.LLVMPY_DisposeTimePassesHandler(self)
| TimePassesHandler |
python | getsentry__sentry | tests/sentry/integrations/slack/notifications/test_escalating.py | {
"start": 506,
"end": 5558
} | class ____(SlackActivityNotificationTest, PerformanceIssueTestCase):
def create_notification(self, group):
return EscalatingActivityNotification(
Activity(
project=self.project,
group=group,
user_id=self.user.id,
type=ActivityType.SET_ESCALATING,
data={"forecast": 100},
)
)
def test_escalating_block(self) -> None:
"""
Test that a Slack message is sent with the expected payload when an issue escalates
and block kit is enabled.
"""
with self.tasks():
self.create_notification(self.group).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert fallback_text == "Issue marked as escalating"
assert blocks[0]["text"]["text"] == fallback_text
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
assert (
blocks[1]["text"]["text"]
== f"<http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}/?referrer=escalating_activity-slack¬ification_uuid={notification_uuid}|*{self.group.title}*> \nSentry flagged this issue as escalating because over 100 events happened in an hour."
)
assert (
blocks[2]["elements"][0]["text"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=escalating_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_PERF_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_escalating_performance_issue_block(self, occurrence: mock.MagicMock) -> None:
"""
Test that a Slack message is sent with the expected payload when a performance issue escalates
and block kit is enabled.
"""
event = self.create_performance_issue()
assert event.group is not None
with self.tasks():
self.create_notification(event.group).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert fallback_text == "Issue marked as escalating"
assert blocks[0]["text"]["text"] == fallback_text
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
assert (
blocks[1]["text"]["text"]
== f"<http://testserver/organizations/{self.organization.slug}/issues/{event.group.id}/?referrer=escalating_activity-slack¬ification_uuid={notification_uuid}|*{event.group.title}*> \nSentry flagged this issue as escalating because over 100 events happened in an hour."
)
assert (
blocks[2]["elements"][0]["text"]
== f"{self.project.slug} | production | <http://testserver/settings/account/notifications/workflow/?referrer=escalating_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
@mock.patch(
"sentry.services.eventstore.models.GroupEvent.occurrence",
return_value=TEST_ISSUE_OCCURRENCE,
new_callable=mock.PropertyMock,
)
def test_escalating_generic_issue_block(self, occurrence: mock.MagicMock) -> None:
"""
Test that a Slack message is sent with the expected payload when a generic issue type escalates
and block kit is enabled.
"""
event = self.store_event(
data={"message": "Hellboy's world", "level": "error"}, project_id=self.project.id
)
group_event = event.for_group(event.groups[0])
with self.tasks():
self.create_notification(group_event.group).send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert fallback_text == "Issue marked as escalating"
assert blocks[0]["text"]["text"] == fallback_text
notification_uuid = self.get_notification_uuid(blocks[1]["text"]["text"])
assert (
blocks[1]["text"]["text"]
== f"<http://testserver/organizations/{self.organization.slug}/issues/{group_event.group.id}/?referrer=escalating_activity-slack¬ification_uuid={notification_uuid}|*{TEST_ISSUE_OCCURRENCE.issue_title}*> \nSentry flagged this issue as escalating because over 100 events happened in an hour."
)
assert (
blocks[2]["elements"][0]["text"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=escalating_activity-slack-user¬ification_uuid={notification_uuid}&organizationId={self.organization.id}|Notification Settings>"
)
| SlackRegressionNotificationTest |
python | huggingface__transformers | src/transformers/models/internvl/modeling_internvl.py | {
"start": 19523,
"end": 20484
} | class ____(nn.Module):
def __init__(self, config: InternVLConfig):
super().__init__()
self.layer_norm = nn.LayerNorm(config.vision_config.hidden_size * int(1 / config.downsample_ratio) ** 2)
self.linear_1 = nn.Linear(
config.vision_config.hidden_size * int(1 / config.downsample_ratio) ** 2, config.text_config.hidden_size
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size)
def forward(self, image_features):
hidden_states = self.layer_norm(image_features)
hidden_states = self.linear_1(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
@dataclass
@auto_docstring(
custom_intro="""
Base class for InternVL outputs, with hidden states and attentions.
"""
)
| InternVLMultiModalProjector |
python | getsentry__sentry | src/sentry/management/commands/createsuperuser.py | {
"start": 95,
"end": 359
} | class ____(DjangoCommand):
help = "Performs any pending database migrations and upgrades"
def handle(self, **options):
from sentry.runner import call_command
call_command("sentry.runner.commands.createuser.createuser", superuser=True)
| Command |
python | jazzband__django-model-utils | model_utils/managers.py | {
"start": 13098,
"end": 13192
} | class ____(SoftDeletableQuerySetMixin[ModelT], QuerySet[ModelT]):
pass
| SoftDeletableQuerySet |
python | mitmproxy__pdoc | test/testdata/flavors_numpy.py | {
"start": 5314,
"end": 6494
} | class ____(Exception):
"""Exceptions are documented in the same way as classes.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Note
----
Do not include the `self` parameter in the ``Parameters`` section.
Parameters
----------
msg : str
Human readable string describing the exception.
code : :obj:`int`, optional
Numeric error code.
Attributes
----------
msg : str
Human readable string describing the exception.
code : int
Numeric error code.
"""
def __init__(self, msg, code):
self.msg = msg
self.code = code
def add_note(self, note: str):
"""This method is present on Python 3.11+ and manually added here so that snapshots are consistent."""
def with_traceback(self, object, /):
"""This method has a changed docstring in Python 3.13+ and is manually added here so that snapshots are consistent."""
| ExampleError |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 115860,
"end": 119903
} | class ____(Qwen3OmniMoePreTrainedModel):
config_class = Qwen3OmniMoeTalkerCodePredictorConfig
base_model_prefix = "talker.code_predictor.model"
_can_record_outputs = {
"attentions": Qwen3OmniMoeTalkerCodePredictorAttention,
"hidden_states": Qwen3OmniMoeTalkerCodePredictorDecoderLayer,
}
def __init__(self, config: Qwen3OmniMoeTalkerCodePredictorConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.layers = nn.ModuleList(
[
Qwen3OmniMoeTalkerCodePredictorDecoderLayer(config, layer_idx)
for layer_idx in range(config.num_hidden_layers)
]
)
self.norm = Qwen3OmniMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Qwen3OmniMoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.has_sliding_layers = "sliding_attention" in self.config.layer_types
self.codec_embedding = nn.ModuleList(
[nn.Embedding(config.vocab_size, config.hidden_size) for _ in range(config.num_code_groups - 1)]
)
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if input_ids is not None:
raise ValueError("`input_ids` is expected to be `None`")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
}
hidden_states = inputs_embeds
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
def get_input_embeddings(self):
return self.codec_embedding
@auto_docstring
| Qwen3OmniMoeTalkerCodePredictorModel |
python | kamyu104__LeetCode-Solutions | Python/number-of-distinct-averages.py | {
"start": 66,
"end": 426
} | class ____(object):
def distinctAverages(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
lookup = set()
nums.sort()
left, right = 0, len(nums)-1
while left < right:
lookup.add(nums[left]+nums[right])
left, right = left+1, right-1
return len(lookup)
| Solution |
python | facebook__pyre-check | client/commands/infer.py | {
"start": 1363,
"end": 1987
} | class ____:
"""
Data structure for configuration options the backend infer command can recognize.
Need to keep in sync with `source/command/inferCommand.ml`
"""
base_arguments: backend_arguments.BaseArguments
paths_to_modify: Optional[Set[Path]] = None
def serialize(self) -> Dict[str, Any]:
return {
**self.base_arguments.serialize(),
**(
{}
if self.paths_to_modify is None
else {"paths_to_modify": [str(path) for path in self.paths_to_modify]}
),
}
@dataclasses.dataclass(frozen=True)
| Arguments |
python | python-markdown__markdown | markdown/inlinepatterns.py | {
"start": 36776,
"end": 37253
} | class ____(InlineProcessor):
""" Return a link Element given an auto-link (`<http://example/com>`). """
def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:
""" Return an `a` [`Element`][xml.etree.ElementTree.Element] of `group(1)`. """
el = etree.Element("a")
el.set('href', self.unescape(m.group(1)))
el.text = util.AtomicString(m.group(1))
return el, m.start(0), m.end(0)
| AutolinkInlineProcessor |
python | modin-project__modin | modin/core/dataframe/algebra/tree_reduce.py | {
"start": 1129,
"end": 3212
} | class ____(Operator):
"""Builder class for TreeReduce operator."""
@classmethod
def register(
cls,
map_function: Optional[Callable[..., pandas.DataFrame]],
reduce_function: Optional[Callable[..., pandas.Series]] = None,
axis: Optional[int] = None,
compute_dtypes: Optional[Callable[..., DtypeObj]] = None,
) -> Callable[..., PandasQueryCompiler]:
"""
Build TreeReduce operator.
Parameters
----------
map_function : callable(pandas.DataFrame, *args, **kwargs) -> pandas.DataFrame
Source map function.
reduce_function : callable(pandas.DataFrame, *args, **kwargs) -> pandas.Series, optional
Source reduce function.
axis : int, optional
Specifies axis to apply function along.
compute_dtypes : callable(pandas.Series, *func_args, **func_kwargs) -> DtypeObj, optional
Callable for computing dtypes.
Returns
-------
callable
Function that takes query compiler and executes passed functions
with TreeReduce algorithm.
"""
if reduce_function is None:
reduce_function = map_function
def caller(
query_compiler: PandasQueryCompiler, *args: tuple, **kwargs: dict
) -> PandasQueryCompiler:
"""Execute TreeReduce function against passed query compiler."""
_axis = kwargs.get("axis") if axis is None else axis
new_dtypes = None
if compute_dtypes and query_compiler.frame_has_materialized_dtypes:
new_dtypes = str(compute_dtypes(query_compiler.dtypes, *args, **kwargs))
return query_compiler.__constructor__(
query_compiler._modin_frame.tree_reduce(
cls.validate_axis(_axis),
lambda x: map_function(x, *args, **kwargs),
lambda y: reduce_function(y, *args, **kwargs),
dtypes=new_dtypes,
)
)
return caller
| TreeReduce |
python | python-attrs__attrs | tests/test_functional.py | {
"start": 1160,
"end": 1239
} | class ____(Frozen):
y = attr.ib()
@attr.s(frozen=True, slots=False)
| SubFrozen |
python | facebook__pyre-check | client/libcst_vendored_visitors/_apply_type_annotations.py | {
"start": 18331,
"end": 43333
} | class ____(ContextAwareTransformer):
"""
Apply type annotations to a source module using the given stub mdules.
You can also pass in explicit annotations for functions and attributes and
pass in new class definitions that need to be added to the source module.
This is one of the transforms that is available automatically to you when
running a codemod. To use it in this manner, import
:class:`~libcst.codemod.visitors.ApplyTypeAnnotationsVisitor` and then call
the static
:meth:`~libcst.codemod.visitors.ApplyTypeAnnotationsVisitor.store_stub_in_context`
method, giving it the current context (found as ``self.context`` for all
subclasses of :class:`~libcst.codemod.Codemod`), the stub module from which
you wish to add annotations.
For example, you can store the type annotation ``int`` for ``x`` using::
stub_module = parse_module("x: int = ...")
ApplyTypeAnnotationsVisitor.store_stub_in_context(self.context, stub_module)
You can apply the type annotation using::
source_module = parse_module("x = 1")
ApplyTypeAnnotationsVisitor.transform_module(source_module)
This will produce the following code::
x: int = 1
If the function or attribute already has a type annotation, it will not be
overwritten.
To overwrite existing annotations when applying annotations from a stub,
use the keyword argument ``overwrite_existing_annotations=True`` when
constructing the codemod or when calling ``store_stub_in_context``.
"""
CONTEXT_KEY = "ApplyTypeAnnotationsVisitor"
def __init__(
self,
context: CodemodContext,
annotations: Optional[Annotations] = None,
overwrite_existing_annotations: bool = False,
use_future_annotations: bool = False,
strict_posargs_matching: bool = True,
strict_annotation_matching: bool = False,
) -> None:
super().__init__(context)
# Qualifier for storing the canonical name of the current function.
self.qualifier: List[str] = []
self.annotations: Annotations = (
Annotations.empty() if annotations is None else annotations
)
self.toplevel_annotations: Dict[str, cst.Annotation] = {}
self.visited_classes: Set[str] = set()
self.overwrite_existing_annotations = overwrite_existing_annotations
self.use_future_annotations = use_future_annotations
self.strict_posargs_matching = strict_posargs_matching
self.strict_annotation_matching = strict_annotation_matching
# We use this to determine the end of the import block so that we can
# insert top-level annotations.
self.import_statements: List[cst.ImportFrom] = []
# We use this to report annotations added, as well as to determine
# whether to abandon the codemod in edge cases where we may have
# only made changes to the imports.
self.annotation_counts: AnnotationCounts = AnnotationCounts()
# We use this to collect typevars, to avoid importing existing ones from the pyi file
self.current_assign: Optional[cst.Assign] = None
self.typevars: Dict[str, cst.Assign] = {}
# Global variables and classes defined on the toplevel of the target module.
# Used to help determine which names we need to check are in scope, and add
# quotations to avoid undefined forward references in type annotations.
self.global_names: Set[str] = set()
@staticmethod
def store_stub_in_context(
context: CodemodContext,
stub: cst.Module,
overwrite_existing_annotations: bool = False,
use_future_annotations: bool = False,
strict_posargs_matching: bool = True,
strict_annotation_matching: bool = False,
) -> None:
"""
Store a stub module in the :class:`~libcst.codemod.CodemodContext` so
that type annotations from the stub can be applied in a later
invocation of this class.
If the ``overwrite_existing_annotations`` flag is ``True``, the
codemod will overwrite any existing annotations.
If you call this function multiple times, only the last values of
``stub`` and ``overwrite_existing_annotations`` will take effect.
"""
context.scratch[ApplyTypeAnnotationsVisitor.CONTEXT_KEY] = (
stub,
overwrite_existing_annotations,
use_future_annotations,
strict_posargs_matching,
strict_annotation_matching,
)
def transform_module_impl(
self,
tree: cst.Module,
) -> cst.Module:
"""
Collect type annotations from all stubs and apply them to ``tree``.
Gather existing imports from ``tree`` so that we don't add duplicate imports.
Gather global names from ``tree`` so forward references are quoted.
"""
import_gatherer = GatherImportsVisitor(CodemodContext())
tree.visit(import_gatherer)
existing_import_names = _get_imported_names(import_gatherer.all_imports)
global_names_gatherer = GatherGlobalNamesVisitor(CodemodContext())
tree.visit(global_names_gatherer)
self.global_names = global_names_gatherer.global_names.union(
global_names_gatherer.class_names
)
context_contents = self.context.scratch.get(
ApplyTypeAnnotationsVisitor.CONTEXT_KEY
)
if context_contents is not None:
(
stub,
overwrite_existing_annotations,
use_future_annotations,
strict_posargs_matching,
strict_annotation_matching,
) = context_contents
self.overwrite_existing_annotations = (
self.overwrite_existing_annotations or overwrite_existing_annotations
)
self.use_future_annotations = (
self.use_future_annotations or use_future_annotations
)
self.strict_posargs_matching = (
self.strict_posargs_matching and strict_posargs_matching
)
self.strict_annotation_matching = (
self.strict_annotation_matching or strict_annotation_matching
)
visitor = TypeCollector(existing_import_names, self.context)
cst.MetadataWrapper(stub).visit(visitor)
self.annotations.update(visitor.annotations)
if self.use_future_annotations:
AddImportsVisitor.add_needed_import(
self.context, "__future__", "annotations"
)
tree_with_imports = AddImportsVisitor(self.context).transform_module(tree)
tree_with_changes = tree_with_imports.visit(self)
# don't modify the imports if we didn't actually add any type information
if self.annotation_counts.any_changes_applied():
return tree_with_changes
else:
return tree
# helpers for processing annotation nodes
def _quote_future_annotations(self, annotation: cst.Annotation) -> cst.Annotation:
# TODO: We probably want to make sure references to classes defined in the current
# module come to us fully qualified - so we can do the dequalification here and
# know to look for what is in-scope without also catching builtins like "None" in the
# quoting. This should probably also be extended to handle what imports are in scope,
# as well as subscriptable types.
# Note: We are collecting all imports and passing this to the type collector grabbing
# annotations from the stub file; should consolidate import handling somewhere too.
node = annotation.annotation
if (
isinstance(node, cst.Name)
and (node.value in self.global_names)
and not (node.value in self.visited_classes)
):
return annotation.with_changes(
annotation=cst.SimpleString(value=f'"{node.value}"')
)
return annotation
# smart constructors: all applied annotations happen via one of these
def _apply_annotation_to_attribute_or_global(
self,
name: str,
annotation: cst.Annotation,
value: Optional[cst.BaseExpression],
) -> cst.AnnAssign:
if len(self.qualifier) == 0:
self.annotation_counts.global_annotations += 1
else:
self.annotation_counts.attribute_annotations += 1
return cst.AnnAssign(
cst.Name(name),
self._quote_future_annotations(annotation),
value,
)
def _apply_annotation_to_parameter(
self,
parameter: cst.Param,
annotation: cst.Annotation,
) -> cst.Param:
self.annotation_counts.parameter_annotations += 1
return parameter.with_changes(
annotation=self._quote_future_annotations(annotation),
)
def _apply_annotation_to_return(
self,
function_def: cst.FunctionDef,
annotation: cst.Annotation,
) -> cst.FunctionDef:
self.annotation_counts.return_annotations += 1
return function_def.with_changes(
returns=self._quote_future_annotations(annotation),
)
# private methods used in the visit and leave methods
def _qualifier_name(self) -> str:
return ".".join(self.qualifier)
def _annotate_single_target(
self,
node: cst.Assign,
updated_node: cst.Assign,
) -> Union[cst.Assign, cst.AnnAssign]:
only_target = node.targets[0].target
if isinstance(only_target, (cst.Tuple, cst.List)):
for element in only_target.elements:
value = element.value
name = get_full_name_for_node(value)
if name is not None and name != "_":
self._add_to_toplevel_annotations(name)
elif isinstance(only_target, (cst.Subscript)):
pass
else:
name = get_full_name_for_node(only_target)
if name is not None:
self.qualifier.append(name)
if (
self._qualifier_name() in self.annotations.attributes
and not isinstance(only_target, cst.Subscript)
):
annotation = self.annotations.attributes[self._qualifier_name()]
self.qualifier.pop()
return self._apply_annotation_to_attribute_or_global(
name=name,
annotation=annotation,
value=node.value,
)
else:
self.qualifier.pop()
return updated_node
def _split_module(
self,
module: cst.Module,
updated_module: cst.Module,
) -> Tuple[
List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]],
List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]],
]:
import_add_location = 0
# This works under the principle that while we might modify node contents,
# we have yet to modify the number of statements. So we can match on the
# original tree but break up the statements of the modified tree. If we
# change this assumption in this visitor, we will have to change this code.
for i, statement in enumerate(module.body):
if isinstance(statement, cst.SimpleStatementLine):
for possible_import in statement.body:
for last_import in self.import_statements:
if possible_import is last_import:
import_add_location = i + 1
break
return (
list(updated_module.body[:import_add_location]),
list(updated_module.body[import_add_location:]),
)
def _add_to_toplevel_annotations(
self,
name: str,
) -> None:
self.qualifier.append(name)
if self._qualifier_name() in self.annotations.attributes:
annotation = self.annotations.attributes[self._qualifier_name()]
self.toplevel_annotations[name] = annotation
self.qualifier.pop()
def _update_parameters(
self,
annotations: FunctionAnnotation,
updated_node: cst.FunctionDef,
) -> cst.Parameters:
# Update params and default params with annotations
# Don't override existing annotations or default values unless asked
# to overwrite existing annotations.
def update_annotation(
parameters: Sequence[cst.Param],
annotations: Sequence[cst.Param],
positional: bool,
) -> List[cst.Param]:
parameter_annotations = {}
annotated_parameters = []
positional = positional and not self.strict_posargs_matching
for i, parameter in enumerate(annotations):
key = i if positional else parameter.name.value
if parameter.annotation:
parameter_annotations[key] = parameter.annotation.with_changes(
whitespace_before_indicator=cst.SimpleWhitespace(value="")
)
for i, parameter in enumerate(parameters):
key = i if positional else parameter.name.value
if key in parameter_annotations and (
self.overwrite_existing_annotations or not parameter.annotation
):
parameter = self._apply_annotation_to_parameter(
parameter=parameter,
annotation=parameter_annotations[key],
)
annotated_parameters.append(parameter)
return annotated_parameters
return updated_node.params.with_changes(
params=update_annotation(
updated_node.params.params,
annotations.parameters.params,
positional=True,
),
kwonly_params=update_annotation(
updated_node.params.kwonly_params,
annotations.parameters.kwonly_params,
positional=False,
),
posonly_params=update_annotation(
updated_node.params.posonly_params,
annotations.parameters.posonly_params,
positional=True,
),
)
def _insert_empty_line(
self,
statements: List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]],
) -> List[Union[cst.SimpleStatementLine, cst.BaseCompoundStatement]]:
if len(statements) < 1:
# No statements, nothing to add to
return statements
if len(statements[0].leading_lines) == 0:
# Statement has no leading lines, add one!
return [
statements[0].with_changes(leading_lines=(cst.EmptyLine(),)),
*statements[1:],
]
if statements[0].leading_lines[0].comment is None:
# First line is empty, so its safe to leave as-is
return statements
# Statement has a comment first line, so lets add one more empty line
return [
statements[0].with_changes(
leading_lines=(cst.EmptyLine(), *statements[0].leading_lines)
),
*statements[1:],
]
def _match_signatures( # noqa: C901: Too complex
self,
function: cst.FunctionDef,
annotations: FunctionAnnotation,
) -> bool:
"""Check that function annotations on both signatures are compatible."""
def compatible(
p: Optional[cst.Annotation],
q: Optional[cst.Annotation],
) -> bool:
if (
self.overwrite_existing_annotations
or not _is_non_sentinel(p)
or not _is_non_sentinel(q)
):
return True
if not self.strict_annotation_matching:
# We will not overwrite clashing annotations, but the signature as a
# whole will be marked compatible so that holes can be filled in.
return True
return p.annotation.deep_equals(q.annotation) # pyre-ignore[16]
def match_posargs(
ps: Sequence[cst.Param],
qs: Sequence[cst.Param],
) -> bool:
if len(ps) != len(qs):
return False
for p, q in zip(ps, qs):
if self.strict_posargs_matching and not p.name.value == q.name.value:
return False
if not compatible(p.annotation, q.annotation):
return False
return True
def match_kwargs(
ps: Sequence[cst.Param],
qs: Sequence[cst.Param],
) -> bool:
ps_dict = {x.name.value: x for x in ps}
qs_dict = {x.name.value: x for x in qs}
if set(ps_dict.keys()) != set(qs_dict.keys()):
return False
for k in ps_dict.keys():
if not compatible(ps_dict[k].annotation, qs_dict[k].annotation):
return False
return True
def match_star(
p: StarParamType,
q: StarParamType,
) -> bool:
return _is_non_sentinel(p) == _is_non_sentinel(q)
def match_params(
f: cst.FunctionDef,
g: FunctionAnnotation,
) -> bool:
p, q = f.params, g.parameters
return (
match_posargs(p.params, q.params)
and match_posargs(p.posonly_params, q.posonly_params)
and match_kwargs(p.kwonly_params, q.kwonly_params)
and match_star(p.star_arg, q.star_arg)
and match_star(p.star_kwarg, q.star_kwarg)
)
def match_return(
f: cst.FunctionDef,
g: FunctionAnnotation,
) -> bool:
return compatible(f.returns, g.returns)
return match_params(function, annotations) and match_return(
function, annotations
)
# transform API methods
def visit_ClassDef(
self,
node: cst.ClassDef,
) -> None:
self.qualifier.append(node.name.value)
def leave_ClassDef(
self,
original_node: cst.ClassDef,
updated_node: cst.ClassDef,
) -> cst.ClassDef:
self.visited_classes.add(original_node.name.value)
cls_name = ".".join(self.qualifier)
self.qualifier.pop()
definition = self.annotations.class_definitions.get(cls_name)
if definition:
b1 = _find_generic_base(definition)
b2 = _find_generic_base(updated_node)
if b1 and not b2:
new_bases = list(updated_node.bases) + [b1]
self.annotation_counts.typevars_and_generics_added += 1
return updated_node.with_changes(bases=new_bases)
return updated_node
def visit_FunctionDef(
self,
node: cst.FunctionDef,
) -> bool:
self.qualifier.append(node.name.value)
# pyi files don't support inner functions, return False to stop the traversal.
return False
def leave_FunctionDef(
self,
original_node: cst.FunctionDef,
updated_node: cst.FunctionDef,
) -> cst.FunctionDef:
key = FunctionKey.make(self._qualifier_name(), updated_node.params)
self.qualifier.pop()
if key in self.annotations.functions:
function_annotation = self.annotations.functions[key]
# Only add new annotation if:
# * we have matching function signatures and
# * we are explicitly told to overwrite existing annotations or
# * there is no existing annotation
if not self._match_signatures(updated_node, function_annotation):
return updated_node
set_return_annotation = (
self.overwrite_existing_annotations or updated_node.returns is None
)
if set_return_annotation and function_annotation.returns is not None:
updated_node = self._apply_annotation_to_return(
function_def=updated_node,
annotation=function_annotation.returns,
)
# Don't override default values when annotating functions
new_parameters = self._update_parameters(function_annotation, updated_node)
return updated_node.with_changes(params=new_parameters)
return updated_node
def visit_Assign(
self,
node: cst.Assign,
) -> None:
self.current_assign = node
@m.call_if_inside(m.Assign())
@m.visit(m.Call(func=m.Name("TypeVar")))
def record_typevar(
self,
node: cst.Call,
) -> None:
# pyre-ignore current_assign is never None here
name = get_full_name_for_node(self.current_assign.targets[0].target)
if name is not None:
# Preserve the whole node, even though we currently just use the
# name, so that we can match bounds and variance at some point and
# determine if two typevars with the same name are indeed the same.
# pyre-ignore current_assign is never None here
self.typevars[name] = self.current_assign
self.current_assign = None
def leave_Assign(
self,
original_node: cst.Assign,
updated_node: cst.Assign,
) -> Union[cst.Assign, cst.AnnAssign]:
self.current_assign = None
if len(original_node.targets) > 1:
for assign in original_node.targets:
target = assign.target
if isinstance(target, (cst.Name, cst.Attribute)):
name = get_full_name_for_node(target)
if name is not None and name != "_":
# Add separate top-level annotations for `a = b = 1`
# as `a: int` and `b: int`.
self._add_to_toplevel_annotations(name)
return updated_node
else:
return self._annotate_single_target(original_node, updated_node)
def leave_ImportFrom(
self,
original_node: cst.ImportFrom,
updated_node: cst.ImportFrom,
) -> cst.ImportFrom:
self.import_statements.append(original_node)
return updated_node
def leave_Module(
self,
original_node: cst.Module,
updated_node: cst.Module,
) -> cst.Module:
fresh_class_definitions = [
definition
for name, definition in self.annotations.class_definitions.items()
if name not in self.visited_classes
]
# NOTE: The entire change will also be abandoned if
# self.annotation_counts is all 0s, so if adding any new category make
# sure to record it there.
if not (
self.toplevel_annotations
or fresh_class_definitions
or self.annotations.typevars
):
return updated_node
toplevel_statements = []
# First, find the insertion point for imports
statements_before_imports, statements_after_imports = self._split_module(
original_node, updated_node
)
# Make sure there's at least one empty line before the first non-import
statements_after_imports = self._insert_empty_line(statements_after_imports)
for name, annotation in self.toplevel_annotations.items():
annotated_assign = self._apply_annotation_to_attribute_or_global(
name=name,
annotation=annotation,
value=None,
)
toplevel_statements.append(cst.SimpleStatementLine([annotated_assign]))
# TypeVar definitions could be scattered through the file, so do not
# attempt to put new ones with existing ones, just add them at the top.
typevars = {
k: v for k, v in self.annotations.typevars.items() if k not in self.typevars
}
if typevars:
for var, stmt in typevars.items():
toplevel_statements.append(cst.Newline())
toplevel_statements.append(stmt)
self.annotation_counts.typevars_and_generics_added += 1
toplevel_statements.append(cst.Newline())
self.annotation_counts.classes_added = len(fresh_class_definitions)
toplevel_statements.extend(fresh_class_definitions)
return updated_node.with_changes(
body=[
*statements_before_imports,
*toplevel_statements,
*statements_after_imports,
]
)
| ApplyTypeAnnotationsVisitor |
python | huggingface__transformers | src/transformers/models/llava_onevision/processing_llava_onevision.py | {
"start": 1510,
"end": 16607
} | class ____(ProcessorMixin):
r"""
Constructs a LLaVa-Onevision processor which wraps a LLaVa-Onevision video processor, LLaVa-NeXT image processor and a LLaMa tokenizer into a single processor.
[`LlavaNextProcessor`] offers all the functionalities of [`LlavaOnevisionVideoProcessor`], [`LlavaOnevisionImageProcessor`] and [`LlamaTokenizerFast`]. See the
[`~LlavaOnevisionVideoProcessor.__call__`], [`~LlavaNextProcessor.__call__`] and [`~LlavaNextProcessor.decode`] for more information.
Args:
image_processor ([`LlavaOnevisionImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`], *optional*):
The tokenizer is a required input.
video_processor ([`LlavaOnevisionVideoProcessor`], *optional*):
The video processor is a required input.
num_image_tokens (`int`, *optional*):
Number of image tokens for one imagethat will be returned by vision tower.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
Should be same as in model's config
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
image_token (`str`, *optional*, defaults to `"<image>"`):
Special token used to denote image location.
video_token (`str`, *optional*, defaults to `"<video>"`):
Special token used to denote video location.
vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`):
Aspect ratio used when processong image features. The default value is "anyres_max_9".
"""
def __init__(
self,
image_processor=None,
tokenizer=None,
video_processor=None,
num_image_tokens=None,
vision_feature_select_strategy=None,
chat_template=None,
image_token="<image>",
video_token="<video>",
vision_aspect_ratio="anyres_max_9",
**kwargs,
):
self.num_image_tokens = num_image_tokens
self.vision_feature_select_strategy = vision_feature_select_strategy
self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
self.video_token = tokenizer.video_token if hasattr(tokenizer, "video_token") else video_token
self.image_token_id = (
tokenizer.image_token_id
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token)
)
self.video_token_id = (
tokenizer.video_token_id
if getattr(tokenizer, "video_token_id", None)
else tokenizer.convert_tokens_to_ids(self.video_token)
)
self.vision_aspect_ratio = vision_aspect_ratio
super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
videos: Optional[VideoInput] = None,
**kwargs: Unpack[LlavaOnevisionProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of a video input to be fed to a model. Returned when `videos` is not `None`.
- **image_sizes** -- Size of each image that will be used to unpad an image. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
LlavaOnevisionProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
image_inputs = video_inputs = {}
if images is not None:
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
batch_num_images = iter(image_inputs["batch_num_images"])
image_sizes = iter(image_inputs["image_sizes"])
height, width = get_image_size(
to_numpy_array(image_inputs["pixel_values"][0][0]),
channel_dim=output_kwargs["images_kwargs"].get("data_format"),
)
text, num_image_tokens = self._expand_image_tokens(
text, image_sizes, height, width, self.image_token, batch_num_images
)
if videos is not None:
video_inputs = self.video_processor(videos, **output_kwargs["videos_kwargs"])
one_video = video_inputs.get("pixel_values_videos")[0]
if isinstance(video_inputs.get("pixel_values_videos")[0], (list, tuple)):
one_video = np.array(one_video)
else:
one_video = to_numpy_array(one_video)
height, width = get_image_size(one_video[0], channel_dim=output_kwargs["images_kwargs"].get("data_format"))
num_frames = one_video.shape[0] # frame dim is always after batch dim
patches_height_width = int(math.sqrt(self.num_image_tokens))
pooled_height_width = math.ceil(patches_height_width / 2)
num_video_tokens = (num_frames * pooled_height_width * pooled_height_width) + 1 # +1 for newline token
text = [sample.replace(self.video_token, self.video_token * num_video_tokens) for sample in text]
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(text, text_inputs, modalities=["image"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **video_inputs}, tensor_type=return_tensors)
def _expand_image_tokens(
self,
text: list[TextInput],
image_sizes: Iterable[Union[list[int], int]],
height: int,
width: int,
special_token: str,
batch_num_images: Iterable[int],
):
prompt_strings = []
max_num_vision_tokens = 0
for sample in text:
if special_token in sample:
num_images = next(batch_num_images) # should consume iterable
is_multi_image = num_images != 1
else:
is_multi_image = False
while special_token in sample:
original_size = next(image_sizes) # should consume iterable
if is_multi_image:
num_image_tokens = self.num_image_tokens + 1 # one for image_newline
else:
if not isinstance(original_size, (list, tuple)):
# cast to list to avoid numerical precision errors when calculating unpadding
original_size = original_size.tolist()
orig_height, orig_width = original_size
num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width)
max_num_vision_tokens = max(max_num_vision_tokens, num_image_tokens)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
sample = sample.replace(special_token, "<placeholder>" * num_image_tokens, 1)
prompt_strings.append(sample)
text = [sample.replace("<placeholder>", special_token) for sample in prompt_strings]
return text, max_num_vision_tokens
def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
image_grid_pinpoints = self.image_processor.image_grid_pinpoints
height_best_resolution, width_best_resolution = select_best_resolution(
[orig_height, orig_width], image_grid_pinpoints
)
scale_height, scale_width = height_best_resolution // height, width_best_resolution // width
patches_height = patches_width = int(math.sqrt(self.num_image_tokens))
unpadded_features, newline_features = self._get_unpadded_features(
orig_height, orig_width, patches_height, patches_width, scale_height, scale_width
)
# The base patch covers the entire image (no CLS for SigLIP)
base_features = self.num_image_tokens
num_image_tokens = unpadded_features + newline_features + base_features
return num_image_tokens
# Adapted from transformers.models.llava_next.processing_llava_next.LlavaNextProcessor._get_unpadded_features
def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width):
"""
Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA
because it divided each image into patches depending on its resolution. Therefore we need to calculate how many
patches an image is divided into and get the number of features from that.
"""
current_height = patches_height * scale_height
current_width = patches_width * scale_width
original_aspect_ratio = width / height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
new_height = int(round(height * (current_width / width), 7))
padding = (current_height - new_height) // 2
current_height -= padding * 2
else:
new_width = int(round(width * (current_height / height), 7))
padding = (current_width - new_width) // 2
current_width -= padding * 2
unpadded_features = current_height * current_width
newline_features = current_height
max_num_patches = int(self.vision_aspect_ratio.strip("anyres_max_"))
ratio = math.sqrt(current_height * current_width / (max_num_patches * patches_height**2))
if ratio > 1.1:
unpadded_features = int(current_height // ratio) * int(current_width // ratio)
newline_features = int(current_height // ratio)
return (unpadded_features, newline_features)
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (list[list[str]], *optional*):
The input sizes formatted as (height, width) per each image.
video_sizes (list[list[str]], *optional*):
The input sizes formatted as (num_frames, height, width) per each video.
audio_lengths (list[int], *optional*):
The input length formatted as per each audio.
Returns:
dict[str, list[int]]: A dictionary mapping each modality ("image", "video", "audio")
to a list containing the number of placeholder tokens required. If the model doesn't accept
a certain modality or no input sizes are provided, the dict value is set to an empty list.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = LlavaOnevisionProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
size = images_kwargs.get("size", None) or self.image_processor.size
size = (
(size["shortest_edge"], size["shortest_edge"])
if "shortest_edge" in size
else (min(size["height"], size["width"]), min(size["height"], size["width"]))
)
processed_height, processed_width = size
batch_num_image_tokens = []
num_image_patches = [1] * len(image_sizes) # llava-ov doesn't batch pixels as Idefics, thus `1` patch`
for image_size in image_sizes:
orig_height, orig_width = image_size
num_image_tokens = self._get_number_of_features(
orig_height, orig_width, processed_height, processed_width
)
if self.vision_feature_select_strategy == "default":
num_image_tokens -= 1
batch_num_image_tokens.append(num_image_tokens)
vision_data.update({"num_image_tokens": batch_num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
__all__ = ["LlavaOnevisionProcessor"]
| LlavaOnevisionProcessor |
python | pydata__xarray | xarray/computation/rolling.py | {
"start": 44143,
"end": 47202
} | class ____(Coarsen["DataArray"]):
__slots__ = ()
_reduce_extra_args_docstring = """"""
@classmethod
def _reduce_method(
cls, func: Callable, include_skipna: bool = False, numeric_only: bool = False
) -> Callable[..., DataArray]:
"""
Return a wrapped function for injecting reduction methods.
see ops.inject_reduce_methods
"""
kwargs: dict[str, Any] = {}
if include_skipna:
kwargs["skipna"] = None
def wrapped_func(
self: DataArrayCoarsen, keep_attrs: bool | None = None, **kwargs
) -> DataArray:
from xarray.core.dataarray import DataArray
keep_attrs = self._get_keep_attrs(keep_attrs)
reduced = self.obj.variable.coarsen(
self.windows, func, self.boundary, self.side, keep_attrs, **kwargs
)
coords = {}
for c, v in self.obj.coords.items():
if c == self.obj.name:
coords[c] = reduced
elif any(d in self.windows for d in v.dims):
coords[c] = v.variable.coarsen(
self.windows,
self.coord_func[c],
self.boundary,
self.side,
keep_attrs,
**kwargs,
)
else:
coords[c] = v
return DataArray(
reduced, dims=self.obj.dims, coords=coords, name=self.obj.name
)
return wrapped_func
def reduce(
self, func: Callable, keep_attrs: bool | None = None, **kwargs
) -> DataArray:
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : callable
Function which can be called in the form `func(x, axis, **kwargs)`
to return the result of collapsing an np.ndarray over the coarsening
dimensions. It must be possible to provide the `axis` argument
with a tuple of integers.
keep_attrs : bool, default: None
If True, the attributes (``attrs``) will be copied from the original
object to the new one. If False, the new object will be returned
without attributes. If None uses the global default.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : DataArray
Array with summarized data.
Examples
--------
>>> da = xr.DataArray(np.arange(8).reshape(2, 4), dims=("a", "b"))
>>> coarsen = da.coarsen(b=2)
>>> coarsen.reduce(np.sum)
<xarray.DataArray (a: 2, b: 2)> Size: 32B
array([[ 1, 5],
[ 9, 13]])
Dimensions without coordinates: a, b
"""
wrapped_func = self._reduce_method(func)
return wrapped_func(self, keep_attrs=keep_attrs, **kwargs)
| DataArrayCoarsen |
python | PrefectHQ__prefect | tests/server/models/test_work_queues.py | {
"start": 8951,
"end": 15055
} | class ____:
running_flow_states = [
schemas.states.StateType.PENDING,
schemas.states.StateType.CANCELLING,
schemas.states.StateType.RUNNING,
]
@pytest.fixture
async def work_queue_2(self, session):
work_queue = await models.work_queues.create_work_queue(
session=session,
work_queue=schemas.actions.WorkQueueCreate(name="wq-2"),
)
await session.commit()
return work_queue
@pytest.fixture
async def scheduled_flow_runs(self, session, deployment, work_queue, work_queue_2):
for i in range(3):
for wq in [work_queue, work_queue_2]:
current_time = now("UTC") + datetime.timedelta(minutes=i)
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=deployment.flow_id,
deployment_id=deployment.id,
work_queue_name=wq.name,
state=schemas.states.State(
type="SCHEDULED",
timestamp=current_time,
state_details=dict(scheduled_time=current_time),
),
),
)
await session.commit()
@pytest.fixture
async def running_flow_runs(self, session, deployment, work_queue, work_queue_2):
for state_type in self.running_flow_states:
for wq in [work_queue, work_queue_2]:
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=deployment.flow_id,
deployment_id=deployment.id,
work_queue_name=wq.name,
state=schemas.states.State(
type=state_type,
timestamp=now("UTC") - datetime.timedelta(seconds=10),
),
),
)
await session.commit()
async def test_get_runs_in_queue(
self, session, work_queue, work_queue_2, scheduled_flow_runs, running_flow_runs
):
queue1, runs_wq1 = await models.work_queues.get_runs_in_work_queue(
session=session, work_queue_id=work_queue.id
)
queue2, runs_wq2 = await models.work_queues.get_runs_in_work_queue(
session=session, work_queue_id=work_queue_2.id
)
assert queue1.id == work_queue.id
assert queue2.id == work_queue_2.id
assert len(runs_wq1) == len(runs_wq2) == 3
assert all(r.work_queue_name == work_queue.name for r in runs_wq1)
assert all(r.work_queue_name == work_queue_2.name for r in runs_wq2)
assert set([r.id for r in runs_wq1]) != set([r.id for r in runs_wq2])
@pytest.mark.parametrize("limit", [2, 0])
async def test_get_runs_in_queue_limit(
self,
session,
work_queue,
scheduled_flow_runs,
running_flow_runs,
limit,
):
_, runs_wq1 = await models.work_queues.get_runs_in_work_queue(
session=session, work_queue_id=work_queue.id, limit=limit
)
assert len(runs_wq1) == limit
async def test_get_runs_in_queue_scheduled_before(
self, session, work_queue, scheduled_flow_runs, running_flow_runs
):
_, runs_wq1 = await models.work_queues.get_runs_in_work_queue(
session=session,
work_queue_id=work_queue.id,
scheduled_before=now("UTC"),
)
assert len(runs_wq1) == 1
async def test_get_runs_in_queue_nonexistant(
self, session, work_queue, scheduled_flow_runs, running_flow_runs
):
with pytest.raises(ObjectNotFoundError):
await models.work_queues.get_runs_in_work_queue(
session=session, work_queue_id=uuid4()
)
async def test_get_runs_in_queue_paused(
self, session, work_queue, scheduled_flow_runs, running_flow_runs
):
await models.work_queues.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(is_paused=True),
)
_, runs_wq1 = await models.work_queues.get_runs_in_work_queue(
session=session, work_queue_id=work_queue.id
)
assert runs_wq1 == []
@pytest.mark.parametrize("concurrency_limit", [10, 5, 1])
async def test_get_runs_in_queue_concurrency_limit(
self,
session,
work_queue,
scheduled_flow_runs,
running_flow_runs,
concurrency_limit,
):
await models.work_queues.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(
concurrency_limit=concurrency_limit
),
)
_, runs_wq1 = await models.work_queues.get_runs_in_work_queue(
session=session, work_queue_id=work_queue.id
)
assert len(runs_wq1) == max(
0, min(3, concurrency_limit - len(self.running_flow_states))
)
@pytest.mark.parametrize("limit", [10, 1])
async def test_get_runs_in_queue_concurrency_limit_and_limit(
self,
session,
work_queue,
scheduled_flow_runs,
running_flow_runs,
limit,
):
concurrency_limit = 5
await models.work_queues.update_work_queue(
session=session,
work_queue_id=work_queue.id,
work_queue=schemas.actions.WorkQueueUpdate(
concurrency_limit=concurrency_limit
),
)
_, runs_wq1 = await models.work_queues.get_runs_in_work_queue(
session=session, work_queue_id=work_queue.id, limit=limit
)
assert len(runs_wq1) == min(
limit, concurrency_limit - len(self.running_flow_states)
)
| TestGetRunsInWorkQueue |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/binary_operators.py | {
"start": 228,
"end": 408
} | class ____:
def __add__(self, other):
_test_sink(other)
def test1():
add = Add()
add + _test_source()
def test2():
add = Add()
add += _test_source()
| Add |
python | dask__distributed | distributed/diagnostics/plugin.py | {
"start": 13065,
"end": 13578
} | class ____(SchedulerPlugin):
name = "upload_file"
def __init__(self, filepath: str, load: bool = True):
"""
Initialize the plugin by reading in the data from the given file.
"""
self.filename = os.path.basename(filepath)
self.load = load
with open(filepath, "rb") as f:
self.data = f.read()
async def start(self, scheduler: Scheduler) -> None:
await scheduler.upload_file(self.filename, self.data, load=self.load)
| SchedulerUploadFile |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/norm_op_test.py | {
"start": 1211,
"end": 4810
} | class ____(test_lib.TestCase):
@test_util.run_v1_only("b/120545219")
def testBadOrder(self):
matrix = [[0., 1.], [2., 3.]]
for ord_ in "fro", -7, -1.1, 0:
with self.assertRaisesRegex(ValueError,
"'ord' must be a supported vector norm"):
linalg_ops.norm(matrix, ord=ord_)
for ord_ in "fro", -7, -1.1, 0:
with self.assertRaisesRegex(ValueError,
"'ord' must be a supported vector norm"):
linalg_ops.norm(matrix, ord=ord_, axis=-1)
for ord_ in "foo", -7, -1.1, 1.1:
with self.assertRaisesRegex(ValueError,
"'ord' must be a supported matrix norm"):
linalg_ops.norm(matrix, ord=ord_, axis=[-2, -1])
@test_util.run_v1_only("b/120545219")
def testInvalidAxis(self):
matrix = [[0., 1.], [2., 3.]]
for axis_ in [], [1, 2, 3], [[1]], [[1], [2]], [3.1415], [1, 1]:
error_prefix = ("'axis' must be None, an integer, or a tuple of 2 unique "
"integers")
with self.assertRaisesRegex(ValueError, error_prefix):
linalg_ops.norm(matrix, axis=axis_)
def _GetNormOpTest(dtype_, shape_, ord_, axis_, keep_dims_, use_static_shape_):
def _CompareNorm(self, matrix):
np_norm = np.linalg.norm(matrix, ord=ord_, axis=axis_, keepdims=keep_dims_)
with self.cached_session() as sess:
if use_static_shape_:
tf_matrix = constant_op.constant(matrix)
tf_norm = linalg_ops.norm(
tf_matrix, ord=ord_, axis=axis_, keepdims=keep_dims_)
tf_norm_val = self.evaluate(tf_norm)
else:
tf_matrix = array_ops.placeholder(dtype_)
tf_norm = linalg_ops.norm(
tf_matrix, ord=ord_, axis=axis_, keepdims=keep_dims_)
tf_norm_val = sess.run(tf_norm, feed_dict={tf_matrix: matrix})
self.assertAllClose(np_norm, tf_norm_val, rtol=1e-5, atol=1e-5)
@test_util.run_v1_only("b/120545219")
def Test(self):
is_matrix_norm = (isinstance(axis_, tuple) or
isinstance(axis_, list)) and len(axis_) == 2
is_fancy_p_norm = np.isreal(ord_) and np.floor(ord_) != ord_
if ((not is_matrix_norm and ord_ == "fro") or
(is_matrix_norm and is_fancy_p_norm)):
self.skipTest("Not supported by neither numpy.linalg.norm nor tf.norm")
if ord_ == "euclidean" or (axis_ is None and len(shape) > 2):
self.skipTest("Not supported by numpy.linalg.norm")
matrix = np.random.randn(*shape_).astype(dtype_)
if dtype_ in (np.complex64, np.complex128):
matrix += 1j * np.random.randn(*shape_).astype(dtype_)
_CompareNorm(self, matrix)
return Test
# pylint: disable=redefined-builtin
if __name__ == "__main__":
for use_static_shape in False, True:
for dtype in np.float32, np.float64, np.complex64, np.complex128:
for rows in 2, 5:
for cols in 2, 5:
for batch in [], [2], [2, 3]:
shape = batch + [rows, cols]
for ord in "euclidean", "fro", 0.5, 1, 2, np.inf:
for axis in [
None, (-2, -1), (-1, -2), -len(shape), 0, len(shape) - 1
]:
for keep_dims in False, True:
name = "%s_%s_ord_%s_axis_%s_%s_%s" % (
dtype.__name__, "_".join(map(str, shape)), ord, axis,
keep_dims, use_static_shape)
_AddTest(NormOpTest, "Norm_" + name,
_GetNormOpTest(dtype, shape, ord, axis, keep_dims,
use_static_shape))
test_lib.main()
| NormOpTest |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py | {
"start": 2801,
"end": 5094
} | class ____(CloudProvider):
"""
Galaxy plugin. Sets up pulp (ansible-galaxy) servers for tests.
The pulp source itself resides at: https://github.com/pulp/pulp-oci-images
"""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.image = os.environ.get(
'ANSIBLE_PULP_CONTAINER',
'quay.io/pulp/galaxy:4.7.1'
)
self.uses_docker = True
def setup(self) -> None:
"""Setup cloud resource before delegation and reg cleanup callback."""
super().setup()
with tempfile.NamedTemporaryFile(mode='w+') as env_fd:
settings = '\n'.join(
f'{key}={value}' for key, value in SETTINGS.items()
)
env_fd.write(settings)
env_fd.flush()
display.info(f'>>> galaxy_ng Configuration\n{settings}', verbosity=3)
descriptor = run_support_container(
self.args,
self.platform,
self.image,
GALAXY_HOST_NAME,
[
80,
],
aliases=[
GALAXY_HOST_NAME,
],
start=True,
options=[
'--env-file', env_fd.name,
],
)
if not descriptor:
return
injected_files = [
('/etc/galaxy-importer/galaxy-importer.cfg', GALAXY_IMPORTER, 'galaxy-importer'),
]
for path, content, friendly_name in injected_files:
with tempfile.NamedTemporaryFile() as temp_fd:
temp_fd.write(content)
temp_fd.flush()
display.info(f'>>> {friendly_name} Configuration\n{to_text(content)}', verbosity=3)
docker_exec(self.args, descriptor.container_id, ['mkdir', '-p', os.path.dirname(path)], True)
docker_cp_to(self.args, descriptor.container_id, temp_fd.name, path)
docker_exec(self.args, descriptor.container_id, ['chown', 'pulp:pulp', path], True)
self._set_cloud_config('PULP_HOST', GALAXY_HOST_NAME)
self._set_cloud_config('PULP_USER', 'admin')
self._set_cloud_config('PULP_PASSWORD', 'password')
| GalaxyProvider |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/schemas/test_user_schema.py | {
"start": 3447,
"end": 5094
} | class ____(TestUserBase):
def test_serialize(self):
user_model = User(
first_name="Foo",
last_name="Bar",
username="test",
password="test",
email=TEST_EMAIL,
created_on=timezone.parse(DEFAULT_TIME),
changed_on=timezone.parse(DEFAULT_TIME),
)
self.session.add(user_model)
self.session.commit()
user = self.session.scalars(select(User).where(User.email == TEST_EMAIL)).first()
deserialized_user = user_schema.dump(user)
# No user_id and password in dump
assert deserialized_user == {
"roles": [],
"created_on": DEFAULT_TIME,
"email": "test@example.org",
"changed_on": DEFAULT_TIME,
"active": True,
"last_login": None,
"last_name": "Bar",
"fail_login_count": None,
"first_name": "Foo",
"username": "test",
"login_count": None,
}
def test_deserialize_user(self):
user_dump = {
"roles": [{"name": "TestRole"}],
"email": "test@example.org",
"last_name": "Bar",
"first_name": "Foo",
"username": "test",
"password": "test", # loads password
}
result = user_schema.load(user_dump)
assert result == {
"roles": [{"name": "TestRole"}],
"email": "test@example.org",
"last_name": "Bar",
"first_name": "Foo",
"username": "test",
"password": "test", # Password loaded
}
| TestUserSchema |
python | django__django | tests/urlpatterns_reverse/tests.py | {
"start": 32410,
"end": 52742
} | class ____(SimpleTestCase):
def test_ambiguous_object(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
("urlobject-view", [], {}),
("urlobject-view", [37, 42], {}),
("urlobject-view", [], {"arg1": 42, "arg2": 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_ambiguous_urlpattern(self):
"""
Names deployed via dynamic URL objects that require namespaces can't
be resolved.
"""
test_urls = [
("inner-nothing", [], {}),
("inner-nothing", [37, 42], {}),
("inner-nothing", [], {"arg1": 42, "arg2": 37}),
]
for name, args, kwargs in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, args=args, kwargs=kwargs)
def test_non_existent_namespace(self):
"""Nonexistent namespaces raise errors."""
test_urls = [
"blahblah:urlobject-view",
"test-ns1:blahblah:urlobject-view",
]
for name in test_urls:
with self.subTest(name=name):
with self.assertRaises(NoReverseMatch):
reverse(name)
def test_normal_name(self):
"""Normal lookups work as expected."""
test_urls = [
("normal-view", [], {}, "/normal/"),
("normal-view", [37, 42], {}, "/normal/37/42/"),
("normal-view", [], {"arg1": 42, "arg2": 37}, "/normal/42/37/"),
("special-view", [], {}, "/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_simple_included_name(self):
"""Normal lookups work on names included from other patterns."""
test_urls = [
("included_namespace_urls:inc-normal-view", [], {}, "/included/normal/"),
(
"included_namespace_urls:inc-normal-view",
[37, 42],
{},
"/included/normal/37/42/",
),
(
"included_namespace_urls:inc-normal-view",
[],
{"arg1": 42, "arg2": 37},
"/included/normal/42/37/",
),
("included_namespace_urls:inc-special-view", [], {}, "/included/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_object(self):
"""Dynamic URL objects can be found using a namespace."""
test_urls = [
("test-ns1:urlobject-view", [], {}, "/test1/inner/"),
("test-ns1:urlobject-view", [37, 42], {}, "/test1/inner/37/42/"),
(
"test-ns1:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/test1/inner/42/37/",
),
("test-ns1:urlobject-special-view", [], {}, "/test1/inner/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object(self):
"""
Dynamic URL objects can return a (pattern, app_name) 2-tuple, and
include() can set the namespace.
"""
test_urls = [
("new-ns1:urlobject-view", [], {}, "/newapp1/inner/"),
("new-ns1:urlobject-view", [37, 42], {}, "/newapp1/inner/37/42/"),
(
"new-ns1:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/newapp1/inner/42/37/",
),
("new-ns1:urlobject-special-view", [], {}, "/newapp1/inner/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_object_default_namespace(self):
"""
Namespace defaults to app_name when including a (pattern, app_name)
2-tuple.
"""
test_urls = [
("newapp:urlobject-view", [], {}, "/new-default/inner/"),
("newapp:urlobject-view", [37, 42], {}, "/new-default/inner/37/42/"),
(
"newapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/new-default/inner/42/37/",
),
("newapp:urlobject-special-view", [], {}, "/new-default/inner/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_embedded_namespace_object(self):
"""Namespaces can be installed anywhere in the URL pattern tree."""
test_urls = [
(
"included_namespace_urls:test-ns3:urlobject-view",
[],
{},
"/included/test3/inner/",
),
(
"included_namespace_urls:test-ns3:urlobject-view",
[37, 42],
{},
"/included/test3/inner/37/42/",
),
(
"included_namespace_urls:test-ns3:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/included/test3/inner/42/37/",
),
(
"included_namespace_urls:test-ns3:urlobject-special-view",
[],
{},
"/included/test3/inner/+%5C$*/",
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern(self):
"""Namespaces can be applied to include()'d urlpatterns."""
test_urls = [
("inc-ns1:inc-normal-view", [], {}, "/ns-included1/normal/"),
("inc-ns1:inc-normal-view", [37, 42], {}, "/ns-included1/normal/37/42/"),
(
"inc-ns1:inc-normal-view",
[],
{"arg1": 42, "arg2": 37},
"/ns-included1/normal/42/37/",
),
("inc-ns1:inc-special-view", [], {}, "/ns-included1/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_name_pattern(self):
"""
Namespaces can be applied to include()'d urlpatterns that set an
app_name attribute.
"""
test_urls = [
("app-ns1:inc-normal-view", [], {}, "/app-included1/normal/"),
("app-ns1:inc-normal-view", [37, 42], {}, "/app-included1/normal/37/42/"),
(
"app-ns1:inc-normal-view",
[],
{"arg1": 42, "arg2": 37},
"/app-included1/normal/42/37/",
),
("app-ns1:inc-special-view", [], {}, "/app-included1/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespace_pattern_with_variable_prefix(self):
"""
Using include() with namespaces when there is a regex variable in front
of it.
"""
test_urls = [
("inc-outer:inc-normal-view", [], {"outer": 42}, "/ns-outer/42/normal/"),
("inc-outer:inc-normal-view", [42], {}, "/ns-outer/42/normal/"),
(
"inc-outer:inc-normal-view",
[],
{"arg1": 37, "arg2": 4, "outer": 42},
"/ns-outer/42/normal/37/4/",
),
("inc-outer:inc-normal-view", [42, 37, 4], {}, "/ns-outer/42/normal/37/4/"),
("inc-outer:inc-special-view", [], {"outer": 42}, "/ns-outer/42/+%5C$*/"),
("inc-outer:inc-special-view", [42], {}, "/ns-outer/42/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_multiple_namespace_pattern(self):
"""Namespaces can be embedded."""
test_urls = [
("inc-ns1:test-ns3:urlobject-view", [], {}, "/ns-included1/test3/inner/"),
(
"inc-ns1:test-ns3:urlobject-view",
[37, 42],
{},
"/ns-included1/test3/inner/37/42/",
),
(
"inc-ns1:test-ns3:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/ns-included1/test3/inner/42/37/",
),
(
"inc-ns1:test-ns3:urlobject-special-view",
[],
{},
"/ns-included1/test3/inner/+%5C$*/",
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_namespace_pattern(self):
"""Namespaces can be nested."""
test_urls = [
(
"inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view",
[],
{},
"/ns-included1/ns-included4/ns-included1/test3/inner/",
),
(
"inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view",
[37, 42],
{},
"/ns-included1/ns-included4/ns-included1/test3/inner/37/42/",
),
(
"inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/ns-included1/ns-included4/ns-included1/test3/inner/42/37/",
),
(
"inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view",
[],
{},
"/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/",
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object(self):
"""A default application namespace can be used for lookup."""
test_urls = [
("testapp:urlobject-view", [], {}, "/default/inner/"),
("testapp:urlobject-view", [37, 42], {}, "/default/inner/37/42/"),
(
"testapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"/default/inner/42/37/",
),
("testapp:urlobject-special-view", [], {}, "/default/inner/+%5C$*/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_app_lookup_object_with_default(self):
"""A default application namespace is sensitive to the current app."""
test_urls = [
("testapp:urlobject-view", [], {}, "test-ns3", "/default/inner/"),
(
"testapp:urlobject-view",
[37, 42],
{},
"test-ns3",
"/default/inner/37/42/",
),
(
"testapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"test-ns3",
"/default/inner/42/37/",
),
(
"testapp:urlobject-special-view",
[],
{},
"test-ns3",
"/default/inner/+%5C$*/",
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(
name=name, args=args, kwargs=kwargs, current_app=current_app
):
self.assertEqual(
reverse(name, args=args, kwargs=kwargs, current_app=current_app),
expected,
)
def test_app_lookup_object_without_default(self):
"""
An application namespace without a default is sensitive to the current
app.
"""
test_urls = [
("nodefault:urlobject-view", [], {}, None, "/other2/inner/"),
("nodefault:urlobject-view", [37, 42], {}, None, "/other2/inner/37/42/"),
(
"nodefault:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
None,
"/other2/inner/42/37/",
),
("nodefault:urlobject-special-view", [], {}, None, "/other2/inner/+%5C$*/"),
("nodefault:urlobject-view", [], {}, "other-ns1", "/other1/inner/"),
(
"nodefault:urlobject-view",
[37, 42],
{},
"other-ns1",
"/other1/inner/37/42/",
),
(
"nodefault:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"other-ns1",
"/other1/inner/42/37/",
),
(
"nodefault:urlobject-special-view",
[],
{},
"other-ns1",
"/other1/inner/+%5C$*/",
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(
name=name, args=args, kwargs=kwargs, current_app=current_app
):
self.assertEqual(
reverse(name, args=args, kwargs=kwargs, current_app=current_app),
expected,
)
def test_special_chars_namespace(self):
test_urls = [
(
"special:included_namespace_urls:inc-normal-view",
[],
{},
"/+%5C$*/included/normal/",
),
(
"special:included_namespace_urls:inc-normal-view",
[37, 42],
{},
"/+%5C$*/included/normal/37/42/",
),
(
"special:included_namespace_urls:inc-normal-view",
[],
{"arg1": 42, "arg2": 37},
"/+%5C$*/included/normal/42/37/",
),
(
"special:included_namespace_urls:inc-special-view",
[],
{},
"/+%5C$*/included/+%5C$*/",
),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_namespaces_with_variables(self):
"""Namespace prefixes can capture variables."""
test_urls = [
("inc-ns5:inner-nothing", [], {"outer": "70"}, "/inc70/"),
(
"inc-ns5:inner-extra",
[],
{"extra": "foobar", "outer": "78"},
"/inc78/extra/foobar/",
),
("inc-ns5:inner-nothing", ["70"], {}, "/inc70/"),
("inc-ns5:inner-extra", ["78", "foobar"], {}, "/inc78/extra/foobar/"),
]
for name, args, kwargs, expected in test_urls:
with self.subTest(name=name, args=args, kwargs=kwargs):
self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)
def test_nested_app_lookup(self):
"""
A nested current_app should be split in individual namespaces (#24904).
"""
test_urls = [
(
"inc-ns1:testapp:urlobject-view",
[],
{},
None,
"/ns-included1/test4/inner/",
),
(
"inc-ns1:testapp:urlobject-view",
[37, 42],
{},
None,
"/ns-included1/test4/inner/37/42/",
),
(
"inc-ns1:testapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
None,
"/ns-included1/test4/inner/42/37/",
),
(
"inc-ns1:testapp:urlobject-special-view",
[],
{},
None,
"/ns-included1/test4/inner/+%5C$*/",
),
(
"inc-ns1:testapp:urlobject-view",
[],
{},
"inc-ns1:test-ns3",
"/ns-included1/test3/inner/",
),
(
"inc-ns1:testapp:urlobject-view",
[37, 42],
{},
"inc-ns1:test-ns3",
"/ns-included1/test3/inner/37/42/",
),
(
"inc-ns1:testapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"inc-ns1:test-ns3",
"/ns-included1/test3/inner/42/37/",
),
(
"inc-ns1:testapp:urlobject-special-view",
[],
{},
"inc-ns1:test-ns3",
"/ns-included1/test3/inner/+%5C$*/",
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(
name=name, args=args, kwargs=kwargs, current_app=current_app
):
self.assertEqual(
reverse(name, args=args, kwargs=kwargs, current_app=current_app),
expected,
)
def test_current_app_no_partial_match(self):
"""current_app shouldn't be used unless it matches the whole path."""
test_urls = [
(
"inc-ns1:testapp:urlobject-view",
[],
{},
"nonexistent:test-ns3",
"/ns-included1/test4/inner/",
),
(
"inc-ns1:testapp:urlobject-view",
[37, 42],
{},
"nonexistent:test-ns3",
"/ns-included1/test4/inner/37/42/",
),
(
"inc-ns1:testapp:urlobject-view",
[],
{"arg1": 42, "arg2": 37},
"nonexistent:test-ns3",
"/ns-included1/test4/inner/42/37/",
),
(
"inc-ns1:testapp:urlobject-special-view",
[],
{},
"nonexistent:test-ns3",
"/ns-included1/test4/inner/+%5C$*/",
),
]
for name, args, kwargs, current_app, expected in test_urls:
with self.subTest(
name=name, args=args, kwargs=kwargs, current_app=current_app
):
self.assertEqual(
reverse(name, args=args, kwargs=kwargs, current_app=current_app),
expected,
)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
| NamespaceTests |
python | pytorch__pytorch | test/distributed/checkpoint/test_checkpoint.py | {
"start": 5448,
"end": 6589
} | class ____(TestStorageBase, StorageWriter):
def __init__(self, fail_conf):
super().__init__(fail_conf)
def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None:
return
def set_up_storage_writer(
self, is_coordinator: bool, *args: Any, **kwargs: Any
) -> None:
self._fail_rank("fail_set_up_storage_writer")
def prepare_local_plan(self, plan: SavePlan) -> SavePlan:
self._fail_rank("fail_prepare_local_plan")
return plan
def prepare_global_plan(self, plans: list[SavePlan]) -> list[SavePlan]:
self._fail_rank("fail_prepare_global_plan")
return plans
def write_data(
self, plan: SavePlan, planner: SavePlanner
) -> Future[list[WriteResult]]:
self._fail_rank("fail_write_data")
return self._fail_rank_async("fail_write_data_async", [])
def finish(self, metadata: Metadata, results: list[list[WriteResult]]) -> None:
self._fail_rank("fail_finish")
@classmethod
def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
return True
| FaultyStorageWriter |
python | walkccc__LeetCode | solutions/430. Flatten a Multilevel Doubly Linked List/430.py | {
"start": 0,
"end": 344
} | class ____:
def flatten(self, head: 'Node') -> 'Node':
def flatten(head: 'Node', rest: 'Node') -> 'Node':
if not head:
return rest
head.next = flatten(head.child, flatten(head.next, rest))
if head.next:
head.next.prev = head
head.child = None
return head
return flatten(head, None)
| Solution |
python | RaRe-Technologies__gensim | gensim/models/translation_matrix.py | {
"start": 5514,
"end": 14286
} | class ____(utils.SaveLoad):
"""Objects of this class realize the translation matrix which maps the source language to the target language.
The main methods are:
We map it to the other language space by computing z = Wx, then return the
word whose representation is close to z.
For details on use, see the tutorial notebook [3]_
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models import KeyedVectors
>>> from gensim.test.utils import datapath
>>> en = datapath("EN.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt")
>>> it = datapath("IT.1-10.cbow1_wind5_hs0_neg10_size300_smpl1e-05.txt")
>>> model_en = KeyedVectors.load_word2vec_format(en)
>>> model_it = KeyedVectors.load_word2vec_format(it)
>>>
>>> word_pairs = [
... ("one", "uno"), ("two", "due"), ("three", "tre"), ("four", "quattro"), ("five", "cinque"),
... ("seven", "sette"), ("eight", "otto"),
... ("dog", "cane"), ("pig", "maiale"), ("fish", "cavallo"), ("birds", "uccelli"),
... ("apple", "mela"), ("orange", "arancione"), ("grape", "acino"), ("banana", "banana")
... ]
>>>
>>> trans_model = TranslationMatrix(model_en, model_it)
>>> trans_model.train(word_pairs)
>>> trans_model.translate(["dog", "one"], topn=3)
OrderedDict([('dog', [u'cane', u'gatto', u'cavallo']), ('one', [u'uno', u'due', u'tre'])])
References
----------
.. [3] https://github.com/RaRe-Technologies/gensim/blob/3.2.0/docs/notebooks/translation_matrix.ipynb
"""
def __init__(self, source_lang_vec, target_lang_vec, word_pairs=None, random_state=None):
"""
Parameters
----------
source_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`
Word vectors for source language.
target_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`
Word vectors for target language.
word_pairs : list of (str, str), optional
Pairs of words that will be used for training.
random_state : {None, int, array_like}, optional
Seed for random state.
"""
self.source_word = None
self.target_word = None
self.source_lang_vec = source_lang_vec
self.target_lang_vec = target_lang_vec
self.random_state = utils.get_random_state(random_state)
self.translation_matrix = None
self.source_space = None
self.target_space = None
if word_pairs is not None:
if len(word_pairs[0]) != 2:
raise ValueError("Each training data item must contain two different language words.")
self.train(word_pairs)
def train(self, word_pairs):
"""Build the translation matrix to map from source space to target space.
Parameters
----------
word_pairs : list of (str, str), optional
Pairs of words that will be used for training.
"""
self.source_word, self.target_word = zip(*word_pairs)
self.source_space = Space.build(self.source_lang_vec, set(self.source_word))
self.target_space = Space.build(self.target_lang_vec, set(self.target_word))
self.source_space.normalize()
self.target_space.normalize()
m1 = self.source_space.mat[[self.source_space.word2index[item] for item in self.source_word], :]
m2 = self.target_space.mat[[self.target_space.word2index[item] for item in self.target_word], :]
self.translation_matrix = np.linalg.lstsq(m1, m2, -1)[0]
def save(self, *args, **kwargs):
"""Save the model to a file. Ignores (doesn't store) the `source_space` and `target_space` attributes."""
kwargs['ignore'] = kwargs.get('ignore', ['source_space', 'target_space'])
super(TranslationMatrix, self).save(*args, **kwargs)
def apply_transmat(self, words_space):
"""Map the source word vector to the target word vector using translation matrix.
Parameters
----------
words_space : :class:`~gensim.models.translation_matrix.Space`
`Space` object constructed for the words to be translated.
Returns
-------
:class:`~gensim.models.translation_matrix.Space`
`Space` object constructed for the mapped words.
"""
return Space(np.dot(words_space.mat, self.translation_matrix), words_space.index2word)
def translate(self, source_words, topn=5, gc=0, sample_num=None, source_lang_vec=None, target_lang_vec=None):
"""Translate the word from the source language to the target language.
Parameters
----------
source_words : {str, list of str}
Single word or a list of words to be translated
topn : int, optional
Number of words that will be returned as translation for each `source_words`
gc : int, optional
Define translation algorithm, if `gc == 0` - use standard NN retrieval,
otherwise, use globally corrected neighbour retrieval method (as described in [1]_).
sample_num : int, optional
Number of words to sample from the source lexicon, if `gc == 1`, then `sample_num` **must** be provided.
source_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`, optional
New source language vectors for translation, by default, used the model's source language vector.
target_lang_vec : :class:`~gensim.models.keyedvectors.KeyedVectors`, optional
New target language vectors for translation, by default, used the model's target language vector.
Returns
-------
:class:`collections.OrderedDict`
Ordered dict where each item is `word`: [`translated_word_1`, `translated_word_2`, ...]
"""
if isinstance(source_words, str):
# pass only one word to translate
source_words = [source_words]
# If the language word vector not provided by user, use the model's
# language word vector as default
if source_lang_vec is None:
warnings.warn(
"The parameter source_lang_vec isn't specified, "
"use the model's source language word vector as default."
)
source_lang_vec = self.source_lang_vec
if target_lang_vec is None:
warnings.warn(
"The parameter target_lang_vec isn't specified, "
"use the model's target language word vector as default."
)
target_lang_vec = self.target_lang_vec
# If additional is provided, bootstrapping vocabulary from the source language word vector model.
if gc:
if sample_num is None:
raise RuntimeError(
"When using the globally corrected neighbour retrieval method, "
"the `sample_num` parameter(i.e. the number of words sampled from source space) must be provided."
)
lexicon = set(source_lang_vec.index_to_key)
addition = min(sample_num, len(lexicon) - len(source_words))
lexicon = self.random_state.choice(list(lexicon.difference(source_words)), addition)
source_space = Space.build(source_lang_vec, set(source_words).union(set(lexicon)))
else:
source_space = Space.build(source_lang_vec, source_words)
target_space = Space.build(target_lang_vec, )
# Normalize the source vector and target vector
source_space.normalize()
target_space.normalize()
# Map the source language to the target language
mapped_source_space = self.apply_transmat(source_space)
# Use the cosine similarity metric
sim_matrix = -np.dot(target_space.mat, mapped_source_space.mat.T)
# If `gc=1`, using corrected retrieval method
if gc:
srtd_idx = np.argsort(np.argsort(sim_matrix, axis=1), axis=1)
sim_matrix_idx = np.argsort(srtd_idx + sim_matrix, axis=0)
else:
sim_matrix_idx = np.argsort(sim_matrix, axis=0)
# Translate the words and for each word return the `topn` similar words
translated_word = OrderedDict()
for idx, word in enumerate(source_words):
translated_target_word = []
# Search the most `topn` similar words
for j in range(topn):
map_space_id = sim_matrix_idx[j, source_space.word2index[word]]
translated_target_word.append(target_space.index2word[map_space_id])
translated_word[word] = translated_target_word
return translated_word
| TranslationMatrix |
python | huggingface__transformers | src/transformers/models/moonshine/modular_moonshine.py | {
"start": 26675,
"end": 31588
} | class ____(LlamaModel):
main_input_name = "input_ids"
_can_record_outputs = {
"attentions": OutputRecorder(MoonshineAttention, index=1, layer_name="self_attn"),
"hidden_states": MoonshineDecoderLayer,
"cross_attentions": OutputRecorder(MoonshineAttention, index=1, layer_name="encoder_attn"),
}
def __init__(self, config: MoonshineConfig):
super().__init__(config)
self.norm = nn.LayerNorm(config.hidden_size, bias=False)
self.layers = nn.ModuleList(
[MoonshineDecoderLayer(config, idx) for idx in range(config.decoder_num_hidden_layers)]
)
@check_model_inputs()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding indices in `encoder_hidden_states`. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
if encoder_attention_mask is not None:
mask_len = encoder_hidden_states.shape[-2]
downsample_stride = 64 * 3 * 2 # conv strides
encoder_attention_mask = encoder_attention_mask[..., ::downsample_stride][..., :mask_len]
if self.config._attn_implementation == "flash_attention_2":
encoder_attention_mask = encoder_attention_mask if (encoder_attention_mask == 0.0).any() else None
elif self.config._attn_implementation == "sdpa":
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
encoder_attention_mask, hidden_states.dtype, hidden_states.shape[-2]
)
else:
encoder_attention_mask = _prepare_4d_attention_mask(
encoder_attention_mask, hidden_states.dtype, hidden_states.shape[-2]
)
for decoder_layer in self.layers:
hidden_states = decoder_layer(
hidden_states,
causal_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
| MoonshineDecoder |
python | getsentry__sentry | src/sentry/core/endpoints/organization_user_details.py | {
"start": 463,
"end": 1113
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (MemberPermission,)
def get(self, request: Request, organization, user_id) -> Response:
try:
int(user_id)
except ValueError:
raise ValidationError(f"user_id({user_id}) must be an integer")
users = user_service.serialize_many(
filter={"user_ids": [user_id], "organization_id": organization.id}, as_user=request.user
)
if len(users) == 0:
return Response(status=404)
return Response(users[0])
| OrganizationUserDetailsEndpoint |
python | optuna__optuna | optuna/storages/journal/_base.py | {
"start": 158,
"end": 1469
} | class ____(abc.ABC):
"""Base class for Journal storages.
Storage classes implementing this base class must guarantee process safety. This means,
multiple processes might concurrently call ``read_logs`` and ``append_logs``. If the
backend storage does not internally support mutual exclusion mechanisms, such as locks,
you might want to use :class:`~optuna.storages.journal.JournalFileSymlinkLock` or
:class:`~optuna.storages.journal.JournalFileOpenLock` for creating a critical section.
"""
@abc.abstractmethod
def read_logs(self, log_number_from: int) -> Iterable[dict[str, Any]]:
"""Read logs with a log number greater than or equal to ``log_number_from``.
If ``log_number_from`` is 0, read all the logs.
Args:
log_number_from:
A non-negative integer value indicating which logs to read.
Returns:
Logs with log number greater than or equal to ``log_number_from``.
"""
raise NotImplementedError
@abc.abstractmethod
def append_logs(self, logs: list[dict[str, Any]]) -> None:
"""Append logs to the backend.
Args:
logs:
A list that contains json-serializable logs.
"""
raise NotImplementedError
| BaseJournalBackend |
python | jmcnamara__XlsxWriter | xlsxwriter/exceptions.py | {
"start": 622,
"end": 713
} | class ____(XlsxInputError):
"""Worksheet table name already exists."""
| DuplicateTableName |
python | sqlalchemy__sqlalchemy | examples/asyncio/gather_orm_statements.py | {
"start": 1107,
"end": 3380
} | class ____(Base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str]
def __repr__(self):
id_, data = self.id, self.data
return f"A({id_=}, {data=})"
async def run_out_of_band(async_sessionmaker, statement, merge_results=True):
"""run an ORM statement in a distinct session,
returning the frozen results
"""
async with async_sessionmaker() as oob_session:
# use AUTOCOMMIT for each connection to reduce transaction
# overhead / contention
await oob_session.connection(
execution_options={"isolation_level": "AUTOCOMMIT"}
)
result = await oob_session.execute(statement)
if merge_results:
return result.freeze()
else:
await result.close()
async def async_main():
engine = create_async_engine(
"postgresql+asyncpg://scott:tiger@localhost/test",
echo=True,
)
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
await conn.run_sync(Base.metadata.create_all)
async_session = async_sessionmaker(engine, expire_on_commit=False)
async with async_session() as session, session.begin():
session.add_all([A(data="a_%d" % i) for i in range(100)])
statements = [
select(A).where(A.data == "a_%d" % random.choice(range(100)))
for i in range(30)
]
frozen_results = await asyncio.gather(
*(
run_out_of_band(async_session, statement)
for statement in statements
)
)
results = [
# merge_results means the ORM objects from the result
# will be merged back into the original session.
# load=False means we can use the objects directly without
# re-selecting them. however this merge operation is still
# more expensive CPU-wise than a regular ORM load because the
# objects are copied into new instances
(
await session.run_sync(
merge_frozen_result, statement, result, load=False
)
)()
for statement, result in zip(statements, frozen_results)
]
print(f"results: {[r.all() for r in results]}")
asyncio.run(async_main())
| A |
python | pypa__warehouse | tests/unit/admin/views/test_projects.py | {
"start": 29337,
"end": 31448
} | class ____:
def test_no_confirm(self):
project = pretend.stub(name="foo", normalized_name="foo")
request = pretend.stub(
POST={},
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
route_path=lambda *a, **kw: "/foo/bar/",
)
with pytest.raises(HTTPSeeOther) as exc:
views.delete_project(project, request)
assert exc.value.status_code == 303
assert exc.value.headers["Location"] == "/foo/bar/"
assert request.session.flash.calls == [
pretend.call("Confirm the request", queue="error")
]
def test_wrong_confirm(self):
project = pretend.stub(name="foo", normalized_name="foo")
request = pretend.stub(
POST={"confirm_project_name": "bar"},
session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
route_path=lambda *a, **kw: "/foo/bar/",
)
with pytest.raises(HTTPSeeOther) as exc:
views.delete_project(project, request)
assert exc.value.status_code == 303
assert exc.value.headers["Location"] == "/foo/bar/"
assert request.session.flash.calls == [
pretend.call(
"Could not delete project - 'bar' is not the same as 'foo'",
queue="error",
)
]
def test_deletes_project(self, db_request):
project = ProjectFactory.create(name="foo")
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "/admin/projects/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST["confirm_project_name"] = project.name
db_request.user = UserFactory.create()
views.delete_project(project, db_request)
assert db_request.session.flash.calls == [
pretend.call("Deleted the project 'foo'", queue="success")
]
assert not (db_request.db.query(Project).filter(Project.name == "foo").count())
| TestDeleteProject |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 267907,
"end": 268265
} | class ____(StatNode):
# Global variable declaration.
#
# names [string]
child_attrs = []
def analyse_declarations(self, env):
for name in self.names:
env.declare_global(name, self.pos)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
| GlobalNode |
python | pyenv__pyenv | plugins/python-build/scripts/add_miniconda.py | {
"start": 2660,
"end": 2746
} | class ____(StrEnum):
TWO = "2"
THREE = "3"
NONE = ""
PyVersion = None
| Suffix |
python | Netflix__metaflow | metaflow/datastore/exceptions.py | {
"start": 44,
"end": 120
} | class ____(MetaflowException):
headline = "Data store error"
| DataException |
python | walkccc__LeetCode | solutions/3116. Kth Smallest Amount With Single Denomination Combination/3116.py | {
"start": 0,
"end": 830
} | class ____:
def findKthSmallest(self, coins: list[int], k: int) -> int:
sizeToLcms = self._getSizeToLcms(coins)
def count(m: int) -> int:
"""Returns the number of denominations <= m."""
res = 0
for sz, lcms in enumerate(sizeToLcms):
for lcm in lcms:
# Principle of Inclusion-Exclusion (PIE)
res += m // lcm * pow(-1, sz + 1)
return res
return bisect.bisect_left(range(k * min(coins)), k, key=count)
def _getSizeToLcms(self, coins: list[int]) -> list[list[int]]:
# Returns the LCMs for each number of combination of coins.
sizeToLcms = [[] for _ in range(len(coins) + 1)]
for sz in range(1, len(coins) + 1):
for combination in itertools.combinations(coins, sz):
sizeToLcms[sz].append(math.lcm(*combination))
return sizeToLcms
| Solution |
python | django__django | tests/model_inheritance/models.py | {
"start": 1041,
"end": 1113
} | class ____(models.Model):
title = models.CharField(max_length=50)
| Post |
python | huggingface__transformers | src/transformers/models/ibert/quant_modules.py | {
"start": 957,
"end": 3713
} | class ____(nn.Module):
"""
Quantized version of `torch.nn.Embedding`. Adds quantization-specific arguments on top of `torch.nn.Embedding`.
Args:
weight_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the quantized weight.
momentum (`float`, *optional*, defaults to `0.95`):
Momentum for updating the activation quantization range.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
"""
def __init__(
self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
weight_bit=8,
momentum=0.95,
quant_mode=False,
):
super().__init__()
self.num_ = num_embeddings
self.dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
self.register_buffer("weight_scaling_factor", torch.zeros(1))
self.register_buffer("weight_integer", torch.zeros_like(self.weight))
self.weight_bit = weight_bit
self.momentum = momentum
self.quant_mode = quant_mode
self.percentile_mode = False
self.weight_function = SymmetricQuantFunction.apply
def forward(self, x, positions=None, incremental_state=None):
if not self.quant_mode:
return (
nn.functional.embedding(
x,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
),
None,
)
w = self.weight
w_transform = w.data.detach()
w_min = w_transform.min().expand(1)
w_max = w_transform.max().expand(1)
self.weight_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, False)
self.weight_integer = self.weight_function(
self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor
)
emb_int = nn.functional.embedding(
x,
self.weight_integer,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
return emb_int * self.weight_scaling_factor, self.weight_scaling_factor
| QuantEmbedding |
python | coleifer__peewee | tests/regressions.py | {
"start": 40739,
"end": 40851
} | class ____(TestModel):
user = ForeignKeyField(BCUser, field=BCUser.username)
content = TextField()
| BCTweet |
python | pytorch__pytorch | torch/cuda/jiterator.py | {
"start": 1457,
"end": 6861
} | class ____:
def __init__(
self, code_string: str, return_by_ref: bool, num_outputs: int, **kwargs
):
self.code_string = code_string
assert return_by_ref or num_outputs == 1, (
"Return by value only works for single output. "
)
self.return_by_ref = return_by_ref
self.num_outputs = num_outputs
parsed_code = _CodeParser(code_string)
self.kernel_name = parsed_code.function_name
self.kwargs_dict = kwargs
self.is_cuda_available = torch.cuda.is_available()
def __call__(self, *tensors: Tensor, **kwargs):
# Jiterator follow torch.cuda's lazy initialization behavior
# Defer checking cuda's availability at the function invocation time
assert self.is_cuda_available, (
"Jiterator is only supported on CUDA and ROCm GPUs, none are available."
)
assert len(tensors) <= 8, "jiterator only supports up to 8 tensor inputs."
expanded_kwargs = self.kwargs_dict.copy()
for key, value in kwargs.items():
if key in self.kwargs_dict:
expanded_kwargs[key] = value
else:
raise KeyError(f"{key} is not declared in function definition")
return torch._C._cuda_jiterator_compile_and_launch_kernel(
self.code_string,
self.kernel_name,
self.return_by_ref,
self.num_outputs,
tensors,
expanded_kwargs,
)
def _create_jit_fn(code_string: str, **kwargs) -> Callable:
"""
Create a jiterator-generated cuda kernel for an elementwise op.
The code string has to be a valid CUDA function that describes the computation for a single element. The code
string has to follow the c++ template pattern, as shown in the example below. This function will be inlined
into elementwise kernel template, and compiled on the fly. Compiled kernel will be cached in memory, as well as
local temp dir.
Jiterator-generated kernels accepts noncontiguous tensors, and supports broadcasting and type promotion.
Args:
code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return by value.
kwargs (Dict, optional): Keyword arguments for generated function
Example::
code_string = "template <typename T> T my_kernel(T x, T y, T alpha) { return -x + alpha * y; }"
jitted_fn = create_jit_fn(code_string, alpha=1.0)
a = torch.rand(3, device="cuda")
b = torch.rand(3, device="cuda")
# invoke jitted function like a regular python function
result = jitted_fn(a, b, alpha=3.14)
code_string also allows multiple function definitions, and the last function will be treated as the entry function.
Example::
code_string = (
"template <typename T> T util_fn(T x, T y) { return ::sin(x) + ::cos(y); }"
)
code_string += "template <typename T> T my_kernel(T x, T y, T val) { return ::min(val, util_fn(x, y)); }"
jitted_fn = create_jit_fn(code_string, val=0.0)
a = torch.rand(3, device="cuda")
b = torch.rand(3, device="cuda")
# invoke jitted function like a regular python function
result = jitted_fn(a, b) # using default val=0.0
Jiterator can be used together with python registration to override an operator's cuda kernel.
Following example is overriding gelu's cuda kernel with relu.
Example::
code_string = "template <typename T> T my_gelu(T a) { return a > 0 ? a : 0; }"
my_gelu = create_jit_fn(code_string)
my_lib = torch.library.Library("aten", "IMPL")
my_lib.impl("aten::gelu", my_gelu, "CUDA")
# torch.nn.GELU and torch.nn.function.gelu are now overridden
a = torch.rand(3, device="cuda")
torch.allclose(torch.nn.functional.gelu(a), torch.nn.functional.relu(a))
.. warning::
This API is in beta and may change in future releases.
.. warning::
This API only supports up to 8 inputs and 1 output
.. warning::
All input tensors must live in CUDA device
"""
return _JittedFunction(code_string, return_by_ref=False, num_outputs=1, **kwargs)
def _create_multi_output_jit_fn(
code_string: str, num_outputs: int, **kwargs
) -> Callable:
"""
Create a jiterator-generated cuda kernel for an elementwise op that supports returning one or more outputs.
Args:
code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return value by reference.
num_outputs(int): number of outputs return by the kernel
kwargs (Dict, optional): Keyword arguments for generated function
Example::
code_string = "template <typename T> void my_kernel(T x, T y, T alpha, T& out) { out = -x + alpha * y; }"
jitted_fn = create_jit_fn(code_string, alpha=1.0)
a = torch.rand(3, device="cuda")
b = torch.rand(3, device="cuda")
# invoke jitted function like a regular python function
result = jitted_fn(a, b, alpha=3.14)
.. warning::
This API is in beta and may change in future releases.
.. warning::
This API only supports up to 8 inputs and 8 outputs
"""
return _JittedFunction(
code_string, return_by_ref=True, num_outputs=num_outputs, **kwargs
)
| _JittedFunction |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 8333,
"end": 11676
} | class ____(GoogleCloudBaseOperator):
"""
Delete the task resource.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the task belongs to.
:param dataplex_task_id: Required. Task identifier.
:param api_version: The version of the api that will be requested for example 'v3'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("project_id", "dataplex_task_id", "impersonation_chain")
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
dataplex_task_id: str,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.dataplex_task_id = dataplex_task_id
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting Dataplex task %s", self.dataplex_task_id)
operation = hook.delete_task(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
dataplex_task_id=self.dataplex_task_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Dataplex task %s deleted successfully!", self.dataplex_task_id)
| DataplexDeleteTaskOperator |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_sparkline12.py | {
"start": 345,
"end": 3978
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with no cell data."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.name = "Sheet1"
worksheet.excel_version = 2010
data = [-2, 2, 3, -1, 0]
worksheet.write_row("A1", data)
# Set up sparklines.
worksheet.add_sparkline("F1", {"range": "Sheet1!A1:E1", "max": 4, "min": 0})
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1:E1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData>
<row r="1" spans="1:5" x14ac:dyDescent="0.25">
<c r="A1">
<v>-2</v>
</c>
<c r="B1">
<v>2</v>
</c>
<c r="C1">
<v>3</v>
</c>
<c r="D1">
<v>-1</v>
</c>
<c r="E1">
<v>0</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
<x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:sparklineGroup manualMax="4" manualMin="0" displayEmptyCellsAs="gap" minAxisType="custom" maxAxisType="custom">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921"/>
<x14:colorLast theme="4" tint="0.39997558519241921"/>
<x14:colorHigh theme="4"/>
<x14:colorLow theme="4"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A1:E1</xm:f>
<xm:sqref>F1</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
</x14:sparklineGroups>
</ext>
</extLst>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.