language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
django__django
|
tests/syndication_tests/feeds.py
|
{
"start": 3774,
"end": 4104
}
|
class ____(TestRss2Feed):
"""
A feed to test that RSS feeds work with a single enclosure.
"""
def item_enclosure_url(self, item):
return "http://example.com"
def item_enclosure_size(self, item):
return 0
def item_mime_type(self, item):
return "image/png"
|
TestSingleEnclosureRSSFeed
|
python
|
openai__openai-python
|
src/openai/types/responses/response_function_web_search.py
|
{
"start": 380,
"end": 535
}
|
class ____(BaseModel):
type: Literal["url"]
"""The type of source. Always `url`."""
url: str
"""The URL of the source."""
|
ActionSearchSource
|
python
|
ansible__ansible
|
lib/ansible/_internal/_json/_profiles/_legacy.py
|
{
"start": 3081,
"end": 6831
}
|
class ____(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
visitor_type = _LegacyVariableVisitor
@classmethod
def serialize_untrusted(cls, value: _Untrusted) -> dict[str, str] | str:
return dict(
__ansible_unsafe=_datatag.AnsibleTagHelper.untag(value.value),
)
@classmethod
def serialize_tagged_str(cls, value: _datatag.AnsibleTaggedObject) -> _t.Any:
if ciphertext := _vault.VaultHelper.get_ciphertext(value, with_tags=False):
return dict(
__ansible_vault=ciphertext,
)
return _datatag.AnsibleTagHelper.untag(value)
@classmethod
def deserialize_unsafe(cls, value: dict[str, _t.Any]) -> _Untrusted:
ansible_unsafe = value['__ansible_unsafe']
if type(ansible_unsafe) is not str: # pylint: disable=unidiomatic-typecheck
raise TypeError(f"__ansible_unsafe is {type(ansible_unsafe)} not {str}")
return _Untrusted(ansible_unsafe)
@classmethod
def deserialize_vault(cls, value: dict[str, _t.Any]) -> _vault.EncryptedString:
ansible_vault = value['__ansible_vault']
if type(ansible_vault) is not str: # pylint: disable=unidiomatic-typecheck
raise TypeError(f"__ansible_vault is {type(ansible_vault)} not {str}")
encrypted_string = _vault.EncryptedString(ciphertext=ansible_vault)
return encrypted_string
@classmethod
def serialize_encrypted_string(cls, value: _vault.EncryptedString) -> dict[str, str]:
return dict(
__ansible_vault=_vault.VaultHelper.get_ciphertext(value, with_tags=False),
)
@classmethod
def post_init(cls) -> None:
cls.serialize_map = {
set: cls.serialize_as_list,
tuple: cls.serialize_as_list,
_datetime.date: cls.serialize_as_isoformat, # existing devel behavior
_datetime.time: cls.serialize_as_isoformat, # always failed pre-2.18, so okay to include for consistency
_datetime.datetime: cls.serialize_as_isoformat, # existing devel behavior
_datatag._AnsibleTaggedDate: cls.discard_tags,
_datatag._AnsibleTaggedTime: cls.discard_tags,
_datatag._AnsibleTaggedDateTime: cls.discard_tags,
_vault.EncryptedString: cls.serialize_encrypted_string,
_datatag._AnsibleTaggedStr: cls.serialize_tagged_str, # for VaultedValue tagged str
_datatag._AnsibleTaggedInt: cls.discard_tags,
_datatag._AnsibleTaggedFloat: cls.discard_tags,
_datatag._AnsibleTaggedList: cls.discard_tags,
_datatag._AnsibleTaggedSet: cls.discard_tags,
_datatag._AnsibleTaggedTuple: cls.discard_tags,
_datatag._AnsibleTaggedDict: cls.discard_tags,
_Untrusted: cls.serialize_untrusted, # equivalent to AnsibleJSONEncoder(preprocess_unsafe=True) in devel
}
cls.deserialize_map = {
'__ansible_unsafe': cls.deserialize_unsafe,
'__ansible_vault': cls.deserialize_vault,
}
cls.handle_key = cls._handle_key_str_fallback # type: ignore[method-assign] # legacy stdlib-compatible key behavior
@classmethod
def pre_serialize(cls, encoder: Encoder, o: _t.Any) -> _t.Any:
# DTFIX7: these conversion args probably aren't needed
avv = cls.visitor_type(invert_trust=True, convert_mapping_to_dict=True, convert_sequence_to_list=True, convert_custom_scalars=True)
return avv.visit(o)
@classmethod
def post_deserialize(cls, decoder: Decoder, o: _t.Any) -> _t.Any:
avv = cls.visitor_type(trusted_as_template=decoder._trusted_as_template, origin=decoder._origin)
return avv.visit(o)
|
_Profile
|
python
|
huggingface__transformers
|
src/transformers/models/reformer/modeling_reformer.py
|
{
"start": 78796,
"end": 80334
}
|
class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
corresponds to `sequence_length`.
past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element
being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the
second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`).
Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed
up sequential decoding.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_buckets_states: Optional[list[tuple[torch.LongTensor, torch.FloatTensor]]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@auto_docstring
|
ReformerModelWithLMHeadOutput
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/hooks/test_quicksight.py
|
{
"start": 3266,
"end": 10559
}
|
class ____:
def test_get_conn_returns_a_boto3_connection(self):
hook = QuickSightHook(aws_conn_id="aws_default", region_name="us-east-1")
assert hook.conn is not None
@pytest.mark.parametrize(
("response", "expected_status"),
[
pytest.param(MOCK_DESCRIBE_INGESTION_SUCCESS, "COMPLETED", id="completed"),
pytest.param(MOCK_DESCRIBE_INGESTION_FAILURE, "Failed", id="failed"),
],
)
@pytest.mark.parametrize(("aws_account_id", "expected_account_id"), ACCOUNT_TEST_CASES)
def test_get_job_status(
self, response, expected_status, aws_account_id, expected_account_id, mocked_account_id, mocked_client
):
"""Test get job status."""
mocked_client.describe_ingestion.return_value = response
hook = QuickSightHook(aws_conn_id=None, region_name="us-east-1")
assert (
hook.get_status(
data_set_id="DemoDataSet",
ingestion_id="DemoDataSet_Ingestion",
aws_account_id=aws_account_id,
)
== expected_status
)
mocked_client.describe_ingestion.assert_called_with(
AwsAccountId=expected_account_id,
DataSetId="DemoDataSet",
IngestionId="DemoDataSet_Ingestion",
)
@pytest.mark.parametrize(
("exception", "error_match"),
[
pytest.param(KeyError("Foo"), "Could not get status", id="key-error"),
pytest.param(
ClientError(error_response={}, operation_name="fake"),
"AWS request failed",
id="botocore-client",
),
],
)
def test_get_job_status_exception(self, exception, error_match, mocked_client, mocked_account_id):
mocked_client.describe_ingestion.side_effect = exception
hook = QuickSightHook(aws_conn_id=None, region_name="us-east-1")
with pytest.raises(AirflowException, match=error_match):
assert hook.get_status(
data_set_id="DemoDataSet",
ingestion_id="DemoDataSet_Ingestion",
aws_account_id=None,
)
@pytest.mark.parametrize(
"error_info",
[
pytest.param({"foo": "bar"}, id="error-info-exists"),
pytest.param(None, id="error-info-not-exists"),
],
)
@pytest.mark.parametrize(("aws_account_id", "expected_account_id"), ACCOUNT_TEST_CASES)
def test_get_error_info(
self, error_info, aws_account_id, expected_account_id, mocked_client, mocked_account_id
):
mocked_response = {"Ingestion": {}}
if error_info:
mocked_response["Ingestion"]["ErrorInfo"] = error_info
mocked_client.describe_ingestion.return_value = mocked_response
hook = QuickSightHook(aws_conn_id=None, region_name="us-east-1")
assert (
hook.get_error_info(
data_set_id="DemoDataSet", ingestion_id="DemoDataSet_Ingestion", aws_account_id=None
)
== error_info
)
@mock.patch.object(QuickSightHook, "get_status", return_value="FAILED")
@mock.patch.object(QuickSightHook, "get_error_info")
@pytest.mark.parametrize(("aws_account_id", "expected_account_id"), ACCOUNT_TEST_CASES)
def test_wait_for_state_failure(
self,
mocked_get_error_info,
mocked_get_status,
aws_account_id,
expected_account_id,
mocked_client,
mocked_account_id,
):
mocked_get_error_info.return_value = "Something Bad Happen"
hook = QuickSightHook(aws_conn_id=None, region_name="us-east-1")
with pytest.raises(AirflowException, match="Error info: Something Bad Happen"):
hook.wait_for_state(
aws_account_id, "data_set_id", "ingestion_id", target_state={"COMPLETED"}, check_interval=0
)
mocked_get_status.assert_called_with(expected_account_id, "data_set_id", "ingestion_id")
mocked_get_error_info.assert_called_with(expected_account_id, "data_set_id", "ingestion_id")
@mock.patch.object(QuickSightHook, "get_status", return_value="CANCELLED")
def test_wait_for_state_canceled(self, _):
hook = QuickSightHook(aws_conn_id=None, region_name="us-east-1")
with pytest.raises(AirflowException, match="The Amazon QuickSight SPICE ingestion cancelled"):
hook.wait_for_state(
"aws_account_id", "data_set_id", "ingestion_id", target_state={"COMPLETED"}, check_interval=0
)
@mock.patch.object(QuickSightHook, "get_status")
def test_wait_for_state_completed(self, mocked_get_status):
mocked_get_status.side_effect = ["INITIALIZED", "QUEUED", "RUNNING", "COMPLETED"]
hook = QuickSightHook(aws_conn_id=None, region_name="us-east-1")
assert (
hook.wait_for_state(
"aws_account_id", "data_set_id", "ingestion_id", target_state={"COMPLETED"}, check_interval=0
)
== "COMPLETED"
)
assert mocked_get_status.call_count == 4
@pytest.mark.parametrize(
"wait_for_completion", [pytest.param(True, id="wait"), pytest.param(False, id="no-wait")]
)
@pytest.mark.parametrize(("aws_account_id", "expected_account_id"), ACCOUNT_TEST_CASES)
def test_create_ingestion(
self, wait_for_completion, aws_account_id, expected_account_id, mocked_account_id, mocked_client
):
mocked_client.create_ingestion.return_value = MOCK_CREATE_INGESTION_RESPONSE
hook = QuickSightHook(aws_conn_id=None, region_name="us-east-1")
with mock.patch.object(QuickSightHook, "wait_for_state") as mocked_wait_for_state:
assert (
hook.create_ingestion(
data_set_id="DemoDataSet",
ingestion_id="DemoDataSet_Ingestion",
ingestion_type="INCREMENTAL_REFRESH",
aws_account_id=aws_account_id,
wait_for_completion=wait_for_completion,
check_interval=0,
)
== MOCK_CREATE_INGESTION_RESPONSE
)
if wait_for_completion:
mocked_wait_for_state.assert_called_once_with(
aws_account_id=expected_account_id,
data_set_id="DemoDataSet",
ingestion_id="DemoDataSet_Ingestion",
target_state={"COMPLETED"},
check_interval=0,
)
else:
mocked_wait_for_state.assert_not_called()
mocked_client.create_ingestion.assert_called_with(AwsAccountId=expected_account_id, **MOCK_DATA)
def test_create_ingestion_exception(self, mocked_account_id, mocked_client, caplog):
mocked_client.create_ingestion.side_effect = ValueError("Fake Error")
hook = QuickSightHook(aws_conn_id=None)
with pytest.raises(ValueError, match="Fake Error"):
hook.create_ingestion(
data_set_id="DemoDataSet",
ingestion_id="DemoDataSet_Ingestion",
ingestion_type="INCREMENTAL_REFRESH",
)
assert "create_ingestion API, error: Fake Error" in caplog.text
|
TestQuicksight
|
python
|
getsentry__sentry
|
tests/sentry/monitors/endpoints/test_base_monitor_checkin_index.py
|
{
"start": 509,
"end": 9957
}
|
class ____(MonitorTestCase):
__test__ = False
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
def create_error(self, platform, trace_id, project_id, timestamp):
data = load_data(platform, timestamp=timestamp)
if "contexts" not in data:
data["contexts"] = {}
data["contexts"]["trace"] = {
"type": "trace",
"trace_id": trace_id,
"span_id": uuid.uuid4().hex[:16],
}
return self.store_event(data, project_id=project_id)
def test_options_cors(self) -> None:
monitor = self._create_monitor()
monitor_environment = self._create_monitor_environment(monitor)
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added - timedelta(minutes=2),
status=CheckInStatus.OK,
)
resp = self.get_success_response(
self.organization.slug,
monitor.slug,
method="OPTIONS",
statsPeriod="1d",
)
assert resp.status_code == 200
assert resp["Access-Control-Allow-Origin"]
assert resp["Access-Control-Allow-Headers"]
def test_simple(self) -> None:
monitor = self._create_monitor()
monitor_environment = self._create_monitor_environment(monitor)
checkin1 = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added - timedelta(minutes=2),
status=CheckInStatus.OK,
)
checkin2 = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added - timedelta(minutes=1),
status=CheckInStatus.OK,
)
checkin3 = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added,
status=CheckInStatus.OK,
)
resp = self.get_success_response(
self.organization.slug,
monitor.slug,
**{"statsPeriod": "1d"},
)
assert len(resp.data) == 3
# Newest first
assert resp.data[0]["id"] == str(checkin3.guid)
assert resp.data[1]["id"] == str(checkin2.guid)
assert resp.data[2]["id"] == str(checkin1.guid)
def test_statsperiod_constraints(self) -> None:
monitor = self._create_monitor()
monitor_environment = self._create_monitor_environment(monitor)
checkin = MonitorCheckIn.objects.create(
project_id=self.project.id,
monitor_id=monitor.id,
monitor_environment_id=monitor_environment.id,
status=MonitorStatus.OK,
date_added=timezone.now() - timedelta(hours=12),
)
end = timezone.now()
startOneHourAgo = end - timedelta(hours=1)
startOneDayAgo = end - timedelta(days=1)
resp = self.get_response(self.organization.slug, monitor.slug, **{"statsPeriod": "1h"})
assert resp.data == []
resp = self.get_response(
self.organization.slug,
monitor.slug,
**{"start": startOneHourAgo.isoformat(), "end": end.isoformat()},
)
assert resp.data == []
resp = self.get_response(self.organization.slug, monitor.slug, **{"statsPeriod": "1d"})
assert resp.data[0]["id"] == str(checkin.guid)
resp = self.get_response(
self.organization.slug,
monitor.slug,
**{"start": startOneDayAgo.isoformat(), "end": end.isoformat()},
)
assert resp.data[0]["id"] == str(checkin.guid)
def test_simple_environment(self) -> None:
self.login_as(self.user)
monitor = self._create_monitor()
monitor_environment = self._create_monitor_environment(monitor, name="jungle")
monitor_environment_2 = self._create_monitor_environment(monitor, name="volcano")
checkin1 = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added - timedelta(minutes=2),
status=CheckInStatus.OK,
)
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment_2,
project_id=self.project.id,
date_added=monitor.date_added - timedelta(minutes=1),
status=CheckInStatus.OK,
)
resp = self.get_success_response(
self.organization.slug, monitor.slug, **{"statsPeriod": "1d", "environment": "jungle"}
)
assert len(resp.data) == 1
assert resp.data[0]["id"] == str(checkin1.guid)
assert resp.data[0]["environment"] == checkin1.monitor_environment.get_environment().name
def test_bad_monitorenvironment(self) -> None:
self.login_as(self.user)
monitor = self._create_monitor()
monitor_environment = self._create_monitor_environment(monitor, name="jungle")
Environment.objects.create(name="volcano", organization_id=self.organization.id)
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added - timedelta(minutes=2),
status=CheckInStatus.OK,
)
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added - timedelta(minutes=1),
status=CheckInStatus.OK,
)
resp = self.get_success_response(
self.organization.slug, monitor.slug, **{"statsPeriod": "1d", "environment": "volcano"}
)
assert len(resp.data) == 0
def test_trace_ids(self) -> None:
monitor = self._create_monitor()
monitor_environment = self._create_monitor_environment(monitor)
trace_id = uuid.uuid4().hex
error = self.create_error(
platform="python",
trace_id=trace_id,
project_id=self.project.id,
timestamp=monitor.date_added,
)
group = Group.objects.get(id=error.group_id)
checkin1 = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added - timedelta(minutes=2),
status=CheckInStatus.OK,
trace_id=trace_id,
)
checkin2 = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added - timedelta(minutes=1),
status=CheckInStatus.OK,
)
resp = self.get_success_response(
self.organization.slug,
monitor.slug,
**{"statsPeriod": "1d", "expand": ["groups"]},
)
assert len(resp.data) == 2
# Newest first
assert resp.data[0]["id"] == str(checkin2.guid)
assert resp.data[0]["groups"] == []
assert resp.data[1]["id"] == str(checkin1.guid)
assert resp.data[1]["groups"] == [{"id": group.id, "shortId": group.qualified_short_id}]
def test_serializes_monitor_config_correctly(self) -> None:
monitor = self.create_monitor(project=self.project)
config = {
"schedule": "0 0 * * *",
"schedule_type": ScheduleType.CRONTAB,
"timezone": "US/Arizona",
"max_runtime": None,
"checkin_margin": None,
}
monitor_environment = self._create_monitor_environment(monitor)
self.create_monitor_checkin(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
date_added=monitor.date_added - timedelta(minutes=2),
status=CheckInStatus.OK,
monitor_config=config,
)
# Mutating the monitor config to test that the check-in config is used
monitor.config = {
"schedule": "0 * * * *",
"schedule_type": ScheduleType.INTERVAL,
"timezone": "CA/Toronto",
"max_runtime": 1000,
"checkin_margin": 100,
}
monitor.save()
response = self.get_success_response(
self.project.organization.slug,
monitor.slug,
)
assert response.data[0]["monitorConfig"]["schedule_type"] == ScheduleType.get_name(
config["schedule_type"]
)
assert response.data[0]["monitorConfig"]["schedule"] == config["schedule"]
assert response.data[0]["monitorConfig"]["timezone"] == config["timezone"]
assert response.data[0]["monitorConfig"]["max_runtime"] == config["max_runtime"]
assert response.data[0]["monitorConfig"]["checkin_margin"] == config["checkin_margin"]
|
BaseListMonitorCheckInsTest
|
python
|
pyparsing__pyparsing
|
pyparsing/core.py
|
{
"start": 152063,
"end": 152643
}
|
class ____(PositionToken):
"""
Matches if current position is at the end of the parse string
"""
def __init__(self) -> None:
super().__init__()
self.set_name("end of text")
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
if loc == len(instring):
return loc + 1, []
if loc > len(instring):
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
|
StringEnd
|
python
|
doocs__leetcode
|
solution/3400-3499/3476.Maximize Profit from Task Assignment/Solution.py
|
{
"start": 0,
"end": 469
}
|
class ____:
def maxProfit(self, workers: List[int], tasks: List[List[int]]) -> int:
d = defaultdict(SortedList)
for skill, profit in tasks:
d[skill].add(profit)
ans = 0
for skill in workers:
if not d[skill]:
continue
ans += d[skill].pop()
mx = 0
for ls in d.values():
if ls:
mx = max(mx, ls[-1])
ans += mx
return ans
|
Solution
|
python
|
Netflix__metaflow
|
metaflow/plugins/pypi/parsers.py
|
{
"start": 133,
"end": 8556
}
|
class ____(MetaflowException):
headline = "Value error"
def requirements_txt_parser(content: str):
"""
Parse non-comment lines from a requirements.txt file as strictly valid
PEP 508 requirements.
Recognizes direct references (e.g. "my_lib @ git+https://..."), extras
(e.g. "requests[security]"), and version specifiers (e.g. "==2.0"). If
the package name is "python", its specifier is stored in the "python"
key instead of "packages".
Parameters
----------
content : str
Contents of a requirements.txt file.
Returns
-------
dict
A dictionary with two keys:
- "packages": dict(str -> str)
Mapping from package name (plus optional extras/references) to a
version specifier string.
- "python": str or None
The Python version constraints if present, otherwise None.
Raises
------
ParserValueError
If a requirement line is invalid PEP 508 or if environment markers are
detected, or if multiple Python constraints are specified.
"""
import re
from metaflow._vendor.packaging.requirements import Requirement, InvalidRequirement
parsed = {"packages": {}, "python": None}
inline_comment_pattern = re.compile(r"\s+#.*$")
for line in content.splitlines():
line = line.strip()
# support Rye lockfiles by skipping lines not compliant with requirements
if line == "-e file:.":
continue
if not line or line.startswith("#"):
continue
line = inline_comment_pattern.sub("", line).strip()
if not line:
continue
try:
req = Requirement(line)
except InvalidRequirement:
raise ParserValueError(f"Not a valid PEP 508 requirement: '{line}'")
if req.marker is not None:
raise ParserValueError(
"Environment markers (e.g. 'platform_system==\"Linux\"') "
f"are not supported for line: '{line}'"
)
dep_key = req.name
if req.extras:
dep_key += f"[{','.join(req.extras)}]"
if req.url:
dep_key += f"@{req.url}"
dep_spec = str(req.specifier).lstrip(" =")
if req.name.lower() == "python":
if parsed["python"] is not None and dep_spec:
raise ParserValueError(
f"Multiple Python version specs not allowed: '{line}'"
)
parsed["python"] = dep_spec or None
else:
parsed["packages"][dep_key] = dep_spec
return parsed
def pyproject_toml_parser(content: str):
"""
Parse a pyproject.toml file per PEP 621.
Reads the 'requires-python' and 'dependencies' fields from the "[project]" section.
Each dependency line must be a valid PEP 508 requirement. If the package name is
"python", its specifier is stored in the "python" key instead of "packages".
Parameters
----------
content : str
Contents of a pyproject.toml file.
Returns
-------
dict
A dictionary with two keys:
- "packages": dict(str -> str)
Mapping from package name (plus optional extras/references) to a
version specifier string.
- "python": str or None
The Python version constraints if present, otherwise None.
Raises
------
RuntimeError
If no TOML library (tomllib in Python 3.11+ or tomli in earlier versions) is found.
ParserValueError
If a dependency is not valid PEP 508, if environment markers are used, or if
multiple Python constraints are specified.
"""
try:
import tomllib as toml # Python 3.11+
except ImportError:
try:
import tomli as toml # Python < 3.11 (requires "tomli" package)
except ImportError:
raise RuntimeError(
"Could not import a TOML library. For Python <3.11, please install 'tomli'."
)
from metaflow._vendor.packaging.requirements import Requirement, InvalidRequirement
data = toml.loads(content)
project = data.get("project", {})
requirements = project.get("dependencies", [])
requires_python = project.get("requires-python")
parsed = {"packages": {}, "python": None}
if requires_python is not None:
# If present, store verbatim; note that PEP 621 does not necessarily
# require "python" to be a dependency in the usual sense.
# Example: "requires-python" = ">=3.7,<4"
parsed["python"] = requires_python.lstrip("=").strip()
for dep_line in requirements:
dep_line_stripped = dep_line.strip()
try:
req = Requirement(dep_line_stripped)
except InvalidRequirement:
raise ParserValueError(
f"Not a valid PEP 508 requirement: '{dep_line_stripped}'"
)
if req.marker is not None:
raise ParserValueError(
f"Environment markers not supported for line: '{dep_line_stripped}'"
)
dep_key = req.name
if req.extras:
dep_key += f"[{','.join(req.extras)}]"
if req.url:
dep_key += f"@{req.url}"
dep_spec = str(req.specifier).lstrip("=")
if req.name.lower() == "python":
if parsed["python"] is not None and dep_spec:
raise ParserValueError(
f"Multiple Python version specs not allowed: '{dep_line_stripped}'"
)
parsed["python"] = dep_spec or None
else:
parsed["packages"][dep_key] = dep_spec
return parsed
def conda_environment_yml_parser(content: str):
"""
Parse a minimal environment.yml file under strict assumptions.
The file must contain a 'dependencies:' line, after which each dependency line
appears with a '- ' prefix. Python can appear as 'python=3.9', etc.; other
packages as 'numpy=1.21.2' or simply 'numpy'. Non-compliant lines raise ParserValueError.
Parameters
----------
content : str
Contents of a environment.yml file.
Returns
-------
dict
A dictionary with keys:
{
"packages": dict(str -> str),
"python": str or None
}
Raises
------
ParserValueError
If the file has malformed lines or unsupported sections.
"""
import re
packages = {}
python_version = None
inside_dependencies = False
# Basic pattern for lines like "numpy=1.21.2"
# Group 1: package name
# Group 2: optional operator + version (could be "=1.21.2", "==1.21.2", etc.)
line_regex = re.compile(r"^([A-Za-z0-9_\-\.]+)(\s*[=<>!~].+\s*)?$")
inline_comment_pattern = re.compile(r"\s+#.*$")
for line in content.splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
line = inline_comment_pattern.sub("", line).strip()
if not line:
continue
if line.lower().startswith("dependencies:"):
inside_dependencies = True
continue
if inside_dependencies and not line.startswith("-"):
inside_dependencies = False
continue
if not inside_dependencies:
continue
dep_line = line.lstrip("-").strip()
if dep_line.endswith(":"):
raise ParserValueError(
f"Unsupported subsection '{dep_line}' in environment.yml."
)
match = line_regex.match(dep_line)
if not match:
raise ParserValueError(
f"Line '{dep_line}' is not a valid conda package specifier."
)
pkg_name, pkg_version_part = match.groups()
version_spec = pkg_version_part.strip() if pkg_version_part else ""
if version_spec.startswith("="):
version_spec = version_spec.lstrip("=").strip()
if pkg_name.lower() == "python":
if python_version is not None and version_spec:
raise ParserValueError(
f"Multiple Python version specs detected: '{dep_line}'"
)
python_version = version_spec
else:
packages[pkg_name] = version_spec
return {"packages": packages, "python": python_version}
|
ParserValueError
|
python
|
joke2k__faker
|
tests/test_unique.py
|
{
"start": 90,
"end": 3093
}
|
class ____:
def test_uniqueness(self):
fake = Faker("en_US")
names = set()
# There are (at time of writing 690) first names in the
# US identity provider. Birthday paradox puts the chances of
# no duplicates in 250 selections as low enough to be impossible
for i in range(250):
first_name = fake.unique.first_name()
assert first_name not in names
names.add(first_name)
def test_sanity_escape(self):
fake = Faker()
# Those of you who are especially astute may realise
# there are only 2 booleans, so the third boolean cannot
# be unique.
with pytest.raises(UniquenessException, match=r"Got duplicated values after [\d,]+ iterations."):
for i in range(3):
_ = fake.unique.boolean()
def test_uniqueness_clear(self):
fake = Faker()
for i in range(2):
fake.unique.boolean()
fake.unique.clear()
# Because we cleared the generated values, this will not
# throw an exception
fake.unique.boolean()
def test_exclusive_arguments(self):
"""Calls through the "unique" portal will only affect
calls with that specific function signature.
"""
fake = Faker()
for i in range(10):
fake.unique.random_int(min=1, max=10)
# Different signature, so new pool. If they shared a pool
# this would throw a sanity exception
fake.unique.random_int(min=2, max=10)
def test_functions_only(self):
"""Accessing non-functions through the `.unique` attribute
will throw a TypeError."""
fake = Faker()
with pytest.raises(TypeError, match="Accessing non-functions through .unique is not supported."):
fake.unique.locales
def test_complex_return_types_is_supported(self):
"""The unique decorator supports complex return types
like the ones used in the profile provider."""
fake = Faker()
for i in range(10):
fake.unique.pydict()
for i in range(10):
fake.unique.pylist()
for i in range(10):
fake.unique.pyset()
def test_unique_locale_access(self):
"""Accessing locales through UniqueProxy with subscript notation
maintains global uniqueness across all locales."""
fake = Faker(["en_US", "fr_FR", "ja_JP"])
generated = set()
for i in range(5):
value = fake.unique["en_US"].random_int(min=1, max=10)
assert value not in generated
generated.add(value)
for i in range(5):
value = fake.unique["fr_FR"].random_int(min=1, max=10)
assert value not in generated
generated.add(value)
with pytest.raises(UniquenessException, match=r"Got duplicated values after [\d,]+ iterations."):
fake.unique["ja_JP"].random_int(min=1, max=10)
|
TestUniquenessClass
|
python
|
huggingface__transformers
|
src/transformers/models/clvp/modeling_clvp.py
|
{
"start": 53296,
"end": 61403
}
|
class ____(ClvpPreTrainedModel, GenerationMixin):
def __init__(self, config):
super().__init__(config)
self.config = config
self.model = ClvpModel(self.config)
self.final_norm = nn.LayerNorm(self.config.hidden_size)
self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=True)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return None
def get_input_embeddings(self):
return self.model.decoder.input_embeds_layer
def set_input_embeddings(self, new_embeddings):
self.model.decoder.input_embeds_layer = new_embeddings
def _prepare_model_inputs(
self,
inputs: Optional[torch.Tensor] = None,
bos_token_id: Optional[int] = None,
model_kwargs: Optional[dict[str, torch.Tensor]] = None,
) -> tuple[torch.Tensor, Optional[str], dict[str, torch.Tensor]]:
"""
This function extracts the model-specific `inputs` for generation.
"""
input_name = self.main_input_name
model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None}
inputs_kwarg = model_kwargs.pop(input_name, None)
if inputs_kwarg is not None and inputs is not None:
raise ValueError(
f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed."
f"Make sure to either pass {inputs} or {input_name}=..."
)
elif inputs_kwarg is not None:
inputs = inputs_kwarg
if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
inputs, bos_token_id, model_kwargs=model_kwargs
)
inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
# Check if conditioning_embeds are provided or not, if yes then concatenate the bos_token_id at the end of the conditioning_embeds.
# Then we must subtract the positional_ids because during the forward pass it will be added anyways, so we must cancel them out here.
conditioning_embeds = model_kwargs.get("conditioning_embeds")
if conditioning_embeds is not None:
mel_start_token_embedding = self.model.decoder.input_embeds_layer(
torch.full(
(conditioning_embeds.shape[0], 1),
fill_value=self.config.bos_token_id,
device=conditioning_embeds.device,
)
)
mel_start_token_embedding += self.model.decoder.position_embeds_layer(
torch.full((conditioning_embeds.shape[0], 1), fill_value=0, device=conditioning_embeds.device)
)
conditioning_embeds = torch.concat([conditioning_embeds, mel_start_token_embedding], dim=1)
# subtract the positional_ids here
if hasattr(model_kwargs, "attention_mask"):
position_ids = model_kwargs["attention_mask"].long().cumsum(-1) - 1
else:
position_ids = torch.arange(
0, conditioning_embeds.shape[1], dtype=torch.long, device=conditioning_embeds.device
)
position_ids = position_ids.unsqueeze(0).repeat(conditioning_embeds.shape[0], 1)
model_kwargs["inputs_embeds"] = conditioning_embeds - self.model.decoder.position_embeds_layer(
position_ids
)
model_kwargs["input_ids"] = (
torch.ones((model_kwargs["inputs_embeds"].shape[0], 1), dtype=torch.long, device=self.device)
* self.config.bos_token_id
)
return model_kwargs["inputs_embeds"], "inputs_embeds", model_kwargs
inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
return inputs, input_name, model_kwargs
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
conditioning_embeds=None,
cache_position=None,
**kwargs,
):
# Overwritten: has `conditioning_embeds`-related logic
input_ids_length = input_ids.shape[-1]
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
**kwargs,
)
if conditioning_embeds is not None and cache_position[0] != 0:
model_inputs["position_ids"] = torch.tensor([input_ids_length], dtype=torch.long, device=input_ids.device)
return model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0]
lm_logits = self.final_norm(hidden_states)
lm_logits = self.lm_head(lm_logits)
loss = None
if labels is not None:
labels = labels.to(lm_logits.device)
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
The composite CLVP model with a text encoder, speech encoder and speech decoder model.
"""
)
|
ClvpForCausalLM
|
python
|
keon__algorithms
|
algorithms/tree/trie/trie.py
|
{
"start": 296,
"end": 981
}
|
class ____:
def __init__(self):
self.root = TrieNode()
def insert(self, word):
current = self.root
for letter in word:
current = current.children[letter]
current.is_word = True
def search(self, word):
current = self.root
for letter in word:
current = current.children.get(letter)
if current is None:
return False
return current.is_word
def starts_with(self, prefix):
current = self.root
for letter in prefix:
current = current.children.get(letter)
if current is None:
return False
return True
|
Trie
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/metadata_service/orchestrator/orchestrator/resources/gcp.py
|
{
"start": 695,
"end": 6910
}
|
class ____(GCSFileManager):
"""
Slighlty modified dagster_gcp.gcs.file_manager.GCSFileManager
to allow setting the content type of the file
"""
def get_content_type(self, ext):
if ext == "csv":
return "text/csv"
elif ext == "json":
return "application/json"
elif ext == "html":
return "text/html"
elif ext == "md":
return "text/markdown"
else:
return "text/plain"
def get_full_key(self, *args, **kwargs):
full_key = super().get_full_key(*args, **kwargs)
# remove the first slash if it exists to prevent double slashes
if full_key.startswith("/"):
full_key = full_key[1:]
return full_key
def write(self, file_obj, mode="wb", ext=None, key: Optional[str] = None) -> PublicGCSFileHandle:
"""
Reworked from dagster_gcp.gcs.file_manager.GCSFileManager.write
As the original method does not allow to set the content type of the file
"""
key = check.opt_str_param(key, "key", default=str(uuid.uuid4()))
check_file_like_obj(file_obj)
gcs_key = self.get_full_key(key + (("." + ext) if ext is not None else ""))
bucket_obj = self._client.bucket(self._gcs_bucket)
blob = bucket_obj.blob(gcs_key)
# Set Cache-Control header to no-cache to avoid caching issues
# This is IMPORTANT because if we don't set this header, the metadata file will be cached by GCS
# and the next time we try to download it, we will get the stale version
blob.cache_control = "no-cache"
blob.content_type = self.get_content_type(ext)
blob.upload_from_file(file_obj)
return PublicGCSFileHandle(self._gcs_bucket, gcs_key)
def delete_by_key(self, key: str, ext: Optional[str] = None) -> Optional[PublicGCSFileHandle]:
gcs_key = self.get_full_key(key + (("." + ext) if ext is not None else ""))
bucket_obj = self._client.bucket(self._gcs_bucket)
blob = bucket_obj.blob(gcs_key)
# if the file does not exist, return None
if not blob.exists():
return None
blob.delete()
return PublicGCSFileHandle(self._gcs_bucket, gcs_key)
@resource(config_schema={"gcp_gcs_cred_string": StringSource})
def gcp_gcs_client(resource_context: InitResourceContext) -> storage.Client:
"""Create a connection to gcs."""
resource_context.log.info("retrieving gcp_gcs_client")
gcp_gcs_cred_string = resource_context.resource_config["gcp_gcs_cred_string"]
gcp_gsm_cred_json = json.loads(gcp_gcs_cred_string)
credentials = service_account.Credentials.from_service_account_info(gcp_gsm_cred_json)
return storage.Client(
credentials=credentials,
project=credentials.project_id,
)
@resource(
required_resource_keys={"gcp_gcs_client"},
config_schema={"gcs_bucket": StringSource, "prefix": StringSource},
)
def gcs_file_manager(resource_context) -> GCSFileManager:
"""FileManager that provides abstract access to GCS.
Implements the :py:class:`~dagster._core.storage.file_manager.FileManager` API.
"""
storage_client = resource_context.resources.gcp_gcs_client
return ContentTypeAwareGCSFileManager(
client=storage_client,
gcs_bucket=resource_context.resource_config["gcs_bucket"],
gcs_base_key=resource_context.resource_config["prefix"],
)
@resource(
required_resource_keys={"gcp_gcs_client"},
config_schema={
"gcs_bucket": StringSource,
"prefix": Noneable(StringSource),
"gcs_filename": StringSource,
},
)
def gcs_file_blob(resource_context: InitResourceContext) -> storage.Blob:
"""
Create a connection to a gcs file blob.
This is implemented so we are able to retrieve the metadata of a file
before committing to downloading the file.
"""
gcs_bucket = resource_context.resource_config["gcs_bucket"]
storage_client = resource_context.resources.gcp_gcs_client
bucket = storage_client.get_bucket(gcs_bucket)
prefix = resource_context.resource_config["prefix"]
gcs_filename = resource_context.resource_config["gcs_filename"]
gcs_file_path = f"{prefix}/{gcs_filename}" if prefix else gcs_filename
resource_context.log.info(f"retrieving gcs file blob {gcs_file_path} in bucket: {gcs_bucket}")
gcs_file_blob = bucket.get_blob(gcs_file_path)
if not gcs_file_blob or not gcs_file_blob.exists():
raise Exception(f"File does not exist at path: {gcs_file_path}")
return gcs_file_blob
@resource(
required_resource_keys={"gcp_gcs_client"},
config_schema={
"gcs_bucket": StringSource,
"prefix": StringSource,
"match_regex": StringSource,
"only_one": Field(config=bool, default_value=False),
"sort_key": Field(config=str, is_required=False),
"reverse_sort": Field(config=bool, default_value=False),
},
)
def gcs_directory_blobs(resource_context: InitResourceContext) -> Union[List[storage.Blob], storage.Blob]:
"""
List all blobs in a bucket that match the prefix.
"""
gcs_bucket = resource_context.resource_config["gcs_bucket"]
prefix = resource_context.resource_config["prefix"]
match_regex = resource_context.resource_config["match_regex"]
only_one = resource_context.resource_config["only_one"]
sort_key = resource_context.resource_config.get("sort_key")
reverse_sort = resource_context.resource_config["reverse_sort"]
storage_client = resource_context.resources.gcp_gcs_client
bucket = storage_client.get_bucket(gcs_bucket)
resource_context.log.info(f"retrieving gcs file blobs for prefix: {prefix}, match_regex: {match_regex}, in bucket: {gcs_bucket}")
gcs_file_blobs = bucket.list_blobs(prefix=prefix)
if match_regex:
gcs_file_blobs = [blob for blob in gcs_file_blobs if re.match(match_regex, blob.name)]
if sort_key:
gcs_file_blobs = sorted(gcs_file_blobs, key=lambda x: getattr(x, sort_key), reverse=reverse_sort)
if only_one:
return gcs_file_blobs[0] if gcs_file_blobs else None
return gcs_file_blobs
|
ContentTypeAwareGCSFileManager
|
python
|
pytorch__pytorch
|
test/inductor/test_ordered_set.py
|
{
"start": 41398,
"end": 43867
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
self.values = ["a", "b", "c"]
self.OrderedSet = OrderedSet(self.values)
def test_add_present(self):
self.OrderedSet.add("c")
self.assertEqual(self.OrderedSet, OrderedSet("abc"))
def test_add_absent(self):
self.OrderedSet.add("d")
self.assertEqual(self.OrderedSet, OrderedSet("abcd"))
def test_add_until_full(self):
tmp = OrderedSet()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1 # noqa: SIM113
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.OrderedSet)
def test_remove_present(self):
self.OrderedSet.remove("b")
self.assertEqual(self.OrderedSet, OrderedSet("ac"))
def test_remove_absent(self):
try:
self.OrderedSet.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.OrderedSet)
for v in self.values:
self.OrderedSet.remove(v)
expected_len -= 1
self.assertEqual(len(self.OrderedSet), expected_len)
def test_discard_present(self):
self.OrderedSet.discard("c")
self.assertEqual(self.OrderedSet, OrderedSet("ab"))
def test_discard_absent(self):
self.OrderedSet.discard("d")
self.assertEqual(self.OrderedSet, OrderedSet("abc"))
def test_clear(self):
self.OrderedSet.clear()
self.assertEqual(len(self.OrderedSet), 0)
def test_pop(self):
popped = {}
while self.OrderedSet:
popped[self.OrderedSet.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.assertIn(v, popped)
def test_update_empty_tuple(self):
self.OrderedSet.update(())
self.assertEqual(self.OrderedSet, OrderedSet(self.values))
def test_update_unit_tuple_overlap(self):
self.OrderedSet.update(("a",))
self.assertEqual(self.OrderedSet, OrderedSet(self.values))
def test_update_unit_tuple_non_overlap(self):
self.OrderedSet.update(("a", "z"))
self.assertEqual(self.OrderedSet, OrderedSet(self.values + ["z"]))
# ==============================================================================
|
TestMutate
|
python
|
Netflix__metaflow
|
metaflow/plugins/datatools/s3/s3.py
|
{
"start": 2952,
"end": 3034
}
|
class ____(MetaflowException):
headline = "S3 access failed"
|
MetaflowS3Exception
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/data_mocks/modin_mocks.py
|
{
"start": 1519,
"end": 2269
}
|
class ____:
"""This is dummy Series class, which imitates modin.pandas.series.Series class
for testing purposes. We use this to make sure that our code does a special handling
if it detects a modin Series.
This allows testing of the functionality without having the library installed,
but it won't capture changes in the API of the library. This requires
integration tests.
"""
__module__ = "modin.pandas.series"
def __init__(self, data: pd.Series):
self._data: pd.Series = data
def _to_pandas(self) -> pd.Series:
return self._data
def head(self, n: int) -> Series:
"""Returns the top n element of a mock version of Modin Series."""
return Series(self._data.head(n))
|
Series
|
python
|
openai__openai-python
|
src/openai/_module_client.py
|
{
"start": 3215,
"end": 3362
}
|
class ____(LazyProxy["Completions"]):
@override
def __load__(self) -> Completions:
return _load_client().completions
|
CompletionsProxy
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/metrics_test.py
|
{
"start": 171485,
"end": 173596
}
|
class ____(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.false_positives_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0.15, 0.5, 0.85])
_assert_metric_variables(self, ('false_positives/false_positives:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.false_positives_at_thresholds(
predictions=predictions, labels=labels, thresholds=[0.15, 0.5, 0.85])
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp)
self.assertAllEqual((7, 4, 2), fp_update_op)
self.assertAllEqual((7, 4, 2), fp)
@test_util.run_deprecated_v1
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.false_positives_at_thresholds(
predictions=predictions,
labels=labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)),
thresholds=[0.15, 0.5, 0.85])
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp)
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op)
self.assertAllEqual((125.0, 42.0, 12.0), fp)
|
FalsePositivesAtThresholdsTest
|
python
|
ipython__ipython
|
IPython/terminal/ipapp.py
|
{
"start": 6663,
"end": 12512
}
|
class ____(BaseIPythonApplication, InteractiveShellApp):
name = "ipython"
description = usage.cl_usage
crash_handler_class = IPAppCrashHandler # typing: ignore[assignment]
examples = _examples
flags = flags
aliases = aliases
classes = List()
interactive_shell_class = Type(
klass=object, # use default_value otherwise which only allow subclasses.
default_value=TerminalInteractiveShell,
help="Class to use to instantiate the TerminalInteractiveShell object. Useful for custom Frontends"
).tag(config=True)
@default('classes')
def _classes_default(self):
"""This has to be in a method, for TerminalIPythonApp to be available."""
return [
InteractiveShellApp, # ShellApp comes before TerminalApp, because
self.__class__, # it will also affect subclasses (e.g. QtConsole)
TerminalInteractiveShell,
HistoryManager,
MagicsManager,
ProfileDir,
PlainTextFormatter,
IPCompleter,
ScriptMagics,
LoggingMagics,
StoreMagics,
]
subcommands = dict(
profile = ("IPython.core.profileapp.ProfileApp",
"Create and manage IPython profiles."
),
kernel = ("ipykernel.kernelapp.IPKernelApp",
"Start a kernel without an attached frontend."
),
locate=('IPython.terminal.ipapp.LocateIPythonApp',
LocateIPythonApp.description
),
history=('IPython.core.historyapp.HistoryApp',
"Manage the IPython history database."
),
)
# *do* autocreate requested profile, but don't create the config file.
auto_create = Bool(True).tag(config=True)
# configurables
quick = Bool(False,
help="""Start IPython quickly by skipping the loading of config files."""
).tag(config=True)
@observe('quick')
def _quick_changed(self, change):
if change['new']:
self.load_config_file = lambda *a, **kw: None
display_banner = Bool(True,
help="Whether to display a banner upon starting IPython."
).tag(config=True)
# if there is code of files to run from the cmd line, don't interact
# unless the --i flag (App.force_interact) is true.
force_interact = Bool(False,
help="""If a command or file is given via the command-line,
e.g. 'ipython foo.py', start an interactive shell after executing the
file or command."""
).tag(config=True)
@observe('force_interact')
def _force_interact_changed(self, change):
if change['new']:
self.interact = True
@observe('file_to_run', 'code_to_run', 'module_to_run')
def _file_to_run_changed(self, change):
new = change['new']
if new:
self.something_to_run = True
if new and not self.force_interact:
self.interact = False
# internal, not-configurable
something_to_run=Bool(False)
@catch_config_error
def initialize(self, argv=None):
"""Do actions after construct, but before starting the app."""
super(TerminalIPythonApp, self).initialize(argv)
if self.subapp is not None:
# don't bother initializing further, starting subapp
return
# print(self.extra_args)
if self.extra_args and not self.something_to_run:
self.file_to_run = self.extra_args[0]
self.init_path()
# create the shell
self.init_shell()
# and draw the banner
self.init_banner()
# Now a variety of things that happen after the banner is printed.
self.init_gui_pylab()
self.init_extensions()
self.init_code()
def init_shell(self):
"""initialize the InteractiveShell instance"""
# Create an InteractiveShell instance.
# shell.display_banner should always be False for the terminal
# based app, because we call shell.show_banner() by hand below
# so the banner shows *before* all extension loading stuff.
self.shell = self.interactive_shell_class.instance(parent=self,
profile_dir=self.profile_dir,
ipython_dir=self.ipython_dir, user_ns=self.user_ns)
self.shell.configurables.append(self)
def init_banner(self):
"""optionally display the banner"""
if self.display_banner and self.interact:
self.shell.show_banner()
# Make sure there is a space below the banner.
if self.log_level <= logging.INFO: print()
def _pylab_changed(self, name, old, new):
"""Replace --pylab='inline' with --pylab='auto'"""
if new == 'inline':
warnings.warn("'inline' not available as pylab backend, "
"using 'auto' instead.")
self.pylab = 'auto'
def start(self):
if self.subapp is not None:
return self.subapp.start()
# perform any prexec steps:
if self.interact:
self.log.debug("Starting IPython's mainloop...")
self.shell.mainloop()
else:
self.log.debug("IPython not interactive...")
self.shell.restore_term_title()
if not self.shell.last_execution_succeeded:
sys.exit(1)
def load_default_config(ipython_dir=None):
"""Load the default config file from the default ipython_dir.
This is useful for embedded shells.
"""
if ipython_dir is None:
ipython_dir = get_ipython_dir()
profile_dir = os.path.join(ipython_dir, 'profile_default')
app = TerminalIPythonApp()
app.config_file_paths.append(profile_dir)
app.load_config_file()
return app.config
launch_new_instance = TerminalIPythonApp.launch_instance
|
TerminalIPythonApp
|
python
|
numba__numba
|
numba/core/compiler_machinery.py
|
{
"start": 12834,
"end": 14391
}
|
class ____(object):
"""
Pass registry singleton class.
"""
_id = 0
_registry = dict()
def register(self, mutates_CFG, analysis_only):
def make_festive(pass_class):
assert not self.is_registered(pass_class)
assert not self._does_pass_name_alias(pass_class.name())
pass_class.pass_id = self._id
self._id += 1
self._registry[pass_class] = pass_info(pass_class(), mutates_CFG,
analysis_only)
return pass_class
return make_festive
def is_registered(self, clazz):
return clazz in self._registry.keys()
def get(self, clazz):
assert self.is_registered(clazz)
return self._registry[clazz]
def _does_pass_name_alias(self, check):
for k, v in self._registry.items():
if v.pass_inst.name == check:
return True
return False
def find_by_name(self, class_name):
assert isinstance(class_name, str)
for k, v in self._registry.items():
if v.pass_inst.name == class_name:
return v
else:
raise ValueError("No pass with name %s is registered" % class_name)
def dump(self):
for k, v in self._registry.items():
print("%s: %s" % (k, v))
_pass_registry = PassRegistry()
del PassRegistry
"""
register_pass is used to register a compiler pass class for use with PassManager
instances.
"""
register_pass = _pass_registry.register
|
PassRegistry
|
python
|
scipy__scipy
|
scipy/linalg/_testutils.py
|
{
"start": 158,
"end": 1807
}
|
class ____:
def __init__(self, data):
self._data = data
def __array__(self, dtype=None, copy=None):
if copy:
return self._data.copy()
return self._data
def _get_array(shape, dtype):
"""
Get a test array of given shape and data type.
Returned NxN matrices are posdef, and 2xN are banded-posdef.
"""
if len(shape) == 2 and shape[0] == 2:
# yield a banded positive definite one
x = np.zeros(shape, dtype=dtype)
x[0, 1:] = -1
x[1] = 2
return x
elif len(shape) == 2 and shape[0] == shape[1]:
# always yield a positive definite matrix
x = np.zeros(shape, dtype=dtype)
j = np.arange(shape[0])
x[j, j] = 2
x[j[:-1], j[:-1]+1] = -1
x[j[:-1]+1, j[:-1]] = -1
return x
else:
np.random.seed(1234)
return np.random.randn(*shape).astype(dtype)
def _id(x):
return x
def assert_no_overwrite(call, shapes, dtypes=None):
"""
Test that a call does not overwrite its input arguments
"""
if dtypes is None:
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
for dtype in dtypes:
for order in ["C", "F"]:
for faker in [_id, _FakeMatrix, _FakeMatrix2]:
orig_inputs = [_get_array(s, dtype) for s in shapes]
inputs = [faker(x.copy(order)) for x in orig_inputs]
call(*inputs)
msg = f"call modified inputs [{dtype!r}, {faker!r}]"
for a, b in zip(inputs, orig_inputs):
np.testing.assert_equal(a, b, err_msg=msg)
|
_FakeMatrix2
|
python
|
django__django
|
django/views/generic/detail.py
|
{
"start": 249,
"end": 3664
}
|
class ____(ContextMixin):
"""
Provide the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = "slug"
context_object_name = None
slug_url_kwarg = "slug"
pk_url_kwarg = "pk"
query_pk_and_slug = False
def get_object(self, queryset=None):
"""
Return the object the view is displaying.
Require `self.queryset` and a `pk` or `slug` argument in the URLconf.
Subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get(self.pk_url_kwarg)
slug = self.kwargs.get(self.slug_url_kwarg)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
if slug is not None and (pk is None or self.query_pk_and_slug):
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
if pk is None and slug is None:
raise AttributeError(
"Generic detail view %s must be called with either an object "
"pk or a slug in the URLconf." % self.__class__.__name__
)
try:
# Get the single item from the filtered queryset
obj = queryset.get()
except queryset.model.DoesNotExist:
raise Http404(
_("No %(verbose_name)s found matching the query")
% {"verbose_name": queryset.model._meta.verbose_name}
)
return obj
def get_queryset(self):
"""
Return the `QuerySet` that will be used to look up the object.
This method is called by the default implementation of get_object() and
may not be called if get_object() is overridden.
"""
if self.queryset is None:
if self.model:
return self.model._default_manager.all()
else:
raise ImproperlyConfigured(
"%(cls)s is missing a QuerySet. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {"cls": self.__class__.__name__}
)
return self.queryset.all()
def get_slug_field(self):
"""Get the name of a slug field to be used to look up by slug."""
return self.slug_field
def get_context_object_name(self, obj):
"""Get the name to use for the object."""
if self.context_object_name:
return self.context_object_name
elif isinstance(obj, models.Model):
return obj._meta.model_name
else:
return None
def get_context_data(self, **kwargs):
"""Insert the single object into the context dict."""
context = {}
if self.object:
context["object"] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
context.update(kwargs)
return super().get_context_data(**context)
|
SingleObjectMixin
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/generic3.py
|
{
"start": 708,
"end": 888
}
|
class ____(Generic[_T1], Generic[_T2]):
pass
K = TypeVar("K")
V = TypeVar("V")
# This should generate an error because V isn't included
# in the Generic type variable list.
|
Bar
|
python
|
pappasam__jedi-language-server
|
jedi_language_server/text_edit_utils.py
|
{
"start": 4336,
"end": 5045
}
|
class ____(NamedTuple):
"""Typed opcode.
Op can be one of the following values:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
"""
op: str
old_start: int
old_end: int
new_start: int
new_end: int
def get_opcodes(old: str, new: str) -> List[Opcode]:
"""Obtain typed opcodes from two files (old and new)."""
diff = difflib.SequenceMatcher(a=old, b=new)
return [Opcode(*opcode) for opcode in diff.get_opcodes()]
|
Opcode
|
python
|
huggingface__transformers
|
tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py
|
{
"start": 2696,
"end": 4337
}
|
class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None
fast_image_processing_class = MobileNetV1ImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = MobileNetV1ImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "center_crop"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 20})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
|
MobileNetV1ImageProcessingTest
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/event_log/sqlite/sqlite_event_log.py
|
{
"start": 21684,
"end": 23346
}
|
class ____(PatternMatchingEventHandler):
def __init__(
self,
event_log_storage: SqliteEventLogStorage,
run_id: str,
callback: EventHandlerFn,
cursor: Optional[str],
**kwargs: Any,
):
self._event_log_storage = check.inst_param(
event_log_storage, "event_log_storage", SqliteEventLogStorage
)
self._run_id = check.str_param(run_id, "run_id")
self._cb = check.callable_param(callback, "callback")
self._log_path = event_log_storage.path_for_shard(run_id)
self._cursor = cursor
super().__init__(patterns=[self._log_path], **kwargs)
def _process_log(self) -> None:
connection = self._event_log_storage.get_records_for_run(self._run_id, self._cursor)
if connection.cursor:
self._cursor = connection.cursor
for record in connection.records:
status = None
try:
status = self._cb(
record.event_log_entry, str(EventLogCursor.from_storage_id(record.storage_id))
)
except Exception:
logging.exception("Exception in callback for event watch on run %s.", self._run_id)
if (
status == DagsterRunStatus.SUCCESS
or status == DagsterRunStatus.FAILURE
or status == DagsterRunStatus.CANCELED
):
self._event_log_storage.end_watch(self._run_id, self._cb)
def on_modified(self, event: FileSystemEvent) -> None:
check.invariant(event.src_path == self._log_path)
self._process_log()
|
SqliteEventLogStorageWatchdog
|
python
|
Textualize__textual
|
docs/examples/how-to/layout.py
|
{
"start": 835,
"end": 1113
}
|
class ____(Screen):
def compose(self) -> ComposeResult:
yield Header(id="Header")
yield Footer(id="Footer")
with HorizontalScroll():
yield Column()
yield Column()
yield Column()
yield Column()
|
TweetScreen
|
python
|
google__pytype
|
pytype/overlays/special_builtins.py
|
{
"start": 21972,
"end": 25635
}
|
class ____(abstract.Function, mixin.HasSlots):
"""Property instance (constructed by Property.call())."""
def __init__(self, ctx, name, cls, fget=None, fset=None, fdel=None, doc=None):
super().__init__("property", ctx)
mixin.HasSlots.init_mixin(self)
self.name = name # Reports the correct decorator in error messages.
is_abstract = False
for var in [fget, fset, fdel]:
if not var:
continue
is_abstract |= _is_fn_abstract(var)
for v in var.data:
v.is_attribute_of_class = True
self.is_abstract = is_abstract
self.fget = fget
self.fset = fset
self.fdel = fdel
self.doc = doc
self.cls = cls
self.set_native_slot("__get__", self.fget_slot)
self.set_native_slot("__set__", self.fset_slot)
self.set_native_slot("__delete__", self.fdelete_slot)
self.set_native_slot("getter", self.getter_slot)
self.set_native_slot("setter", self.setter_slot)
self.set_native_slot("deleter", self.deleter_slot)
self.is_method = True
self.bound_class = abstract.BoundFunction
def fget_slot(self, node, obj, objtype):
obj_val = abstract_utils.get_atomic_value(
obj, default=self.ctx.convert.unsolvable
)
# If this property is defined on a generic class, we need to annotate self
# with a parameterized type for the property return type to be computed
# properly, e.g.:
# @property
# def x(self) -> T: ...
# is changed to:
# @property
# def x(self: Foo[T]) -> T: ...
t = abstract_utils.get_generic_type(obj_val)
generic = t and any(self in member.data for member in t.members.values())
with contextlib.ExitStack() as stack:
if generic:
for f in self.fget.data:
if f.should_set_self_annot():
stack.enter_context(f.set_self_annot(t))
return function.call_function(
self.ctx, node, self.fget, function.Args((obj,))
)
def fset_slot(self, node, obj, value):
return function.call_function(
self.ctx, node, self.fset, function.Args((obj, value))
)
def fdelete_slot(self, node, obj):
return function.call_function(
self.ctx, node, self.fdel, function.Args((obj,))
)
def getter_slot(self, node, fget):
prop = PropertyInstance(
self.ctx, self.name, self.cls, fget, self.fset, self.fdel, self.doc
)
result = self.ctx.program.NewVariable([prop], fget.bindings, node)
return node, result
def setter_slot(self, node, fset):
prop = PropertyInstance(
self.ctx, self.name, self.cls, self.fget, fset, self.fdel, self.doc
)
result = self.ctx.program.NewVariable([prop], fset.bindings, node)
return node, result
def deleter_slot(self, node, fdel):
prop = PropertyInstance(
self.ctx, self.name, self.cls, self.fget, self.fset, fdel, self.doc
)
result = self.ctx.program.NewVariable([prop], fdel.bindings, node)
return node, result
def update_signature_scope(self, cls):
for fvar in (self.fget, self.fset, self.fdel):
if fvar:
for f in fvar.data:
if isinstance(f, abstract.Function):
f.update_signature_scope(cls)
def _check_method_decorator_arg(fn_var, name, ctx):
"""Check that @classmethod or @staticmethod are applied to a function."""
for d in fn_var.data:
try:
_ = function.get_signatures(d)
except NotImplementedError:
# We are wrapping something that is not a function in a method decorator.
details = f"@{name} applied to something that is not a function."
ctx.errorlog.not_callable(ctx.vm.stack(), d, details)
return False
return True
|
PropertyInstance
|
python
|
openai__openai-python
|
tests/api_resources/audio/test_transcriptions.py
|
{
"start": 4606,
"end": 9149
}
|
class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
transcription = await async_client.audio.transcriptions.create(
file=b"raw file contents",
model="gpt-4o-transcribe",
)
assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"])
@parametrize
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
transcription = await async_client.audio.transcriptions.create(
file=b"raw file contents",
model="gpt-4o-transcribe",
chunking_strategy="auto",
include=["logprobs"],
known_speaker_names=["string"],
known_speaker_references=["string"],
language="language",
prompt="prompt",
response_format="json",
stream=False,
temperature=0,
timestamp_granularities=["word"],
)
assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"])
@parametrize
async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
response = await async_client.audio.transcriptions.with_raw_response.create(
file=b"raw file contents",
model="gpt-4o-transcribe",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
transcription = response.parse()
assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"])
@parametrize
async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
async with async_client.audio.transcriptions.with_streaming_response.create(
file=b"raw file contents",
model="gpt-4o-transcribe",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
transcription = await response.parse()
assert_matches_type(TranscriptionCreateResponse, transcription, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
transcription_stream = await async_client.audio.transcriptions.create(
file=b"raw file contents",
model="gpt-4o-transcribe",
stream=True,
)
await transcription_stream.response.aclose()
@parametrize
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
transcription_stream = await async_client.audio.transcriptions.create(
file=b"raw file contents",
model="gpt-4o-transcribe",
stream=True,
chunking_strategy="auto",
include=["logprobs"],
known_speaker_names=["string"],
known_speaker_references=["string"],
language="language",
prompt="prompt",
response_format="json",
temperature=0,
timestamp_granularities=["word"],
)
await transcription_stream.response.aclose()
@parametrize
async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
response = await async_client.audio.transcriptions.with_raw_response.create(
file=b"raw file contents",
model="gpt-4o-transcribe",
stream=True,
)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
await stream.close()
@parametrize
async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
async with async_client.audio.transcriptions.with_streaming_response.create(
file=b"raw file contents",
model="gpt-4o-transcribe",
stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = await response.parse()
await stream.close()
assert cast(Any, response.is_closed) is True
|
TestAsyncTranscriptions
|
python
|
google__jax
|
jax/_src/mesh.py
|
{
"start": 7331,
"end": 15117
}
|
class ____(BaseMesh, contextlib.ContextDecorator):
"""Declare the hardware resources available in the scope of this manager.
See `Distributed arrays and automatic parallelization`_ and
`Explicit Sharding`_ tutorials.
Args:
devices: A NumPy ndarray object containing JAX device objects (as
obtained e.g. from :py:func:`jax.devices`).
axis_names: A sequence of resource axis names to be assigned to the
dimensions of the ``devices`` argument. Its length should match the
rank of ``devices``.
axis_types: and optional tuple of :class:`jax.sharding.AxisType` entries corresponding to
the ``axis_names``. See `Explicit Sharding`_ for more information.
Examples:
>>> from jax.sharding import Mesh
>>> from jax.sharding import PartitionSpec as P, NamedSharding
>>> import numpy as np
...
>>> # Declare a 2D mesh with axes `x` and `y`.
>>> devices = np.array(jax.devices()).reshape(4, 2)
>>> mesh = Mesh(devices, ('x', 'y'))
>>> inp = np.arange(16).reshape(8, 2)
>>> arr = jax.device_put(inp, NamedSharding(mesh, P('x', 'y')))
>>> out = jax.jit(lambda x: x * 2)(arr)
>>> assert out.sharding == NamedSharding(mesh, P('x', 'y'))
.. _Distributed arrays and automatic parallelization: https://docs.jax.dev/en/latest/notebooks/Distributed_arrays_and_automatic_parallelization.html
.. _Explicit Sharding: https://docs.jax.dev/en/latest/notebooks/explicit-sharding.html
"""
devices: np.ndarray
axis_names: tuple[MeshAxisName, ...]
def __new__(cls, devices: np.ndarray | Sequence[xc.Device],
axis_names: str | Sequence[MeshAxisName],
axis_types: tuple[AxisType, ...] | None = None):
if not isinstance(devices, np.ndarray):
devices = np.array(devices)
if isinstance(axis_names, str):
axis_names = (axis_names,)
axis_names = tuple(axis_names)
if any(i is None for i in axis_names):
raise ValueError(f"Mesh axis names cannot be None. Got: {axis_names}")
if devices.ndim != len(axis_names):
raise ValueError(
"Mesh requires the ndim of its first argument (`devices`) to equal "
"the length of its second argument (`axis_names`), but got "
f"devices.ndim == {devices.ndim} and "
f"len(axis_names) == {len(axis_names)}.")
axis_types = _normalize_axis_types(axis_names, axis_types, 'Mesh')
key = (axis_names, devices.shape, tuple(devices.flat), axis_types)
val = _mesh_object_dict.get(key, None)
if val is not None:
return val
self = super().__new__(cls)
self.devices = devices.copy()
self.devices.flags.writeable = False
self.axis_names = axis_names
self.axis_types = axis_types
self._size = math.prod(self.shape.values()) if self.devices.ndim else 0
_mesh_object_dict[key] = self
return self
def __reduce__(self):
return (_unpicke_mesh, (self.devices, self.axis_names, self.axis_types))
def __eq__(self, other):
# This is a performance optimization. Comparing thousands of devices
# can be expensive.
if self is other:
return True
if not isinstance(other, Mesh):
return False
return (self.axis_names == other.axis_names and
self.devices.shape == other.devices.shape and
self.axis_types == other.axis_types and
self._internal_device_list == other._internal_device_list)
def __hash__(self):
if not hasattr(self, '_hash'):
self._hash = hash(
(self.axis_names, self._internal_device_list, self.devices.shape,
self.axis_types))
return self._hash
def __setattr__(self, name, value):
if hasattr(self, name):
if getattr(self, name) == value:
# This can to happen if two threads race, for example if two threads
# are trying to hash the same Mesh instance.
return
raise RuntimeError(
f"Cannot reassign attributes ({name}) of immutable mesh objects"
)
super().__setattr__(name, value)
def __enter__(self):
if jax_config.disallow_mesh_context_manager.value:
raise RuntimeError("Mesh context manager is disabled.")
new_env = thread_resources.stack[-1].with_mesh(self)
thread_resources.stack.append(new_env)
thread_resources.env = new_env
jax_config.mesh_context_manager.set_local(
tuple(t.physical_mesh for t in thread_resources.stack
if not t.physical_mesh.empty))
return self
def __exit__(self, exc_type, exc_value, traceback):
thread_resources.stack.pop()
thread_resources.env = thread_resources.stack[-1]
jax_config.mesh_context_manager.set_local(
tuple(t.physical_mesh for t in thread_resources.stack
if not t.physical_mesh.empty))
return False
def update(self, devices=None, axis_names=None, axis_types=None):
if devices is None:
devices = self.devices
if axis_names is None:
axis_names = self.axis_names
if axis_types is None:
axis_types = self.axis_types
return Mesh(devices, axis_names, axis_types)
@functools.cached_property
def shape(self):
return collections.OrderedDict(
(name, size)
for name, size in safe_zip(self.axis_names, self.devices.shape))
@functools.cached_property
def shape_tuple(self):
return tuple(
(name, size)
for name, size in safe_zip(self.axis_names, self.devices.shape))
@property
def axis_sizes(self) -> tuple[int, ...]:
return self.devices.shape
@property
def size(self):
return self._size
@property
def empty(self):
return self.size == 0
@functools.cached_property
def is_multi_process(self):
return self.devices.size != len(self.local_devices)
@property
def local_mesh(self):
return self._local_mesh(xb.process_index())
def _local_mesh(self, process_index):
return _get_local_mesh(self, process_index)
@functools.cached_property
def device_ids(self):
assert not self.empty
return np.vectorize(lambda d: d.id, otypes=[int])(self.devices)
@functools.cached_property
def _local_devices_set(self):
return set(self.local_devices)
@functools.cached_property
def _flat_devices_tuple(self):
return tuple(self.devices.flat)
@functools.cached_property
def _internal_device_list(self):
return xc.DeviceList(self._flat_devices_tuple)
@functools.cached_property
def _flat_devices_set(self):
return set(self.devices.flat)
def __str__(self):
if self.empty:
return "Mesh()"
mesh_str = ", ".join(f"'{k}': {v}" for k, v in self.shape.items())
atr = f", axis_types={self.axis_types}"
return f"Mesh({mesh_str}{atr})"
@functools.cached_property
def _repr(self):
if self.empty:
return "Mesh(axis_sizes=(), axis_names=())"
atr = f", axis_types={self.axis_types}"
return (f"Mesh(axis_sizes={self.device_ids.shape}, "
f"axis_names={self.axis_names!r}{atr})")
def __repr__(self):
return self._repr
@functools.cached_property
def local_devices(self):
return [d for d in self.devices.flat
if d.process_index == d.client.process_index()]
@functools.cached_property
def abstract_mesh(self):
d = self.devices.flat[0]
if d is None:
abstract_device = None
else:
if d.platform == 'tpu':
num_cores = getattr(d, 'num_cores', None)
elif d.platform == 'gpu':
num_cores = getattr(d, 'core_count', None)
else:
num_cores = None
abstract_device = AbstractDevice(
device_kind=d.device_kind, num_cores=num_cores)
return AbstractMesh(
self.axis_sizes, self.axis_names, axis_types=self.axis_types,
abstract_device=abstract_device)
EMPTY_ENV = ResourceEnv(Mesh(np.empty((), dtype=object), ()))
|
Mesh
|
python
|
automl__auto-sklearn
|
autosklearn/metalearning/metafeatures/metafeatures.py
|
{
"start": 20819,
"end": 21139
}
|
class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
kurts = helper_functions.get_value("Kurtosisses")
maximum = np.nanmax(kurts) if len(kurts) > 0 else 0
return maximum if np.isfinite(maximum) else 0
@metafeatures.define("KurtosisMean", dependency="Kurtosisses")
|
KurtosisMax
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_lambdas.py
|
{
"start": 1477,
"end": 58342
}
|
class ____(
fixtures.TestBase, testing.AssertsExecutionResults, AssertsCompiledSQL
):
__dialect__ = "default"
def test_reject_methods(self):
"""test #7032"""
t1 = table("t1", column("q"), column("p"))
subq = select(t1).subquery
with expect_raises_message(
exc.ArgumentError,
"Method <bound method SelectBase.subquery .* may not be "
"passed as a SQL expression",
):
select(func.count()).select_from(subq)
self.assert_compile(
select(func.count()).select_from(subq()),
"SELECT count(*) AS count_1 FROM "
"(SELECT t1.q AS q, t1.p AS p FROM t1) AS anon_1",
)
def test_select_whereclause(self):
t1 = table("t1", column("q"), column("p"))
x = 10
y = 5
def go():
return select(t1).where(lambda: and_(t1.c.q == x, t1.c.p == y))
self.assert_compile(
go(), "SELECT t1.q, t1.p FROM t1 WHERE t1.q = :x_1 AND t1.p = :y_1"
)
self.assert_compile(
go(), "SELECT t1.q, t1.p FROM t1 WHERE t1.q = :x_1 AND t1.p = :y_1"
)
def test_global_tracking(self):
t1 = table("t1", column("q"), column("p"))
global global_x, global_y
global_x = 10
global_y = 17
def go():
return select(t1).where(
lambda: and_(t1.c.q == global_x, t1.c.p == global_y)
)
self.assert_compile(
go(),
"SELECT t1.q, t1.p FROM t1 WHERE t1.q = :global_x_1 "
"AND t1.p = :global_y_1",
checkparams={"global_x_1": 10, "global_y_1": 17},
)
global_y = 9
self.assert_compile(
go(),
"SELECT t1.q, t1.p FROM t1 WHERE t1.q = :global_x_1 "
"AND t1.p = :global_y_1",
checkparams={"global_x_1": 10, "global_y_1": 9},
)
def test_boolean_constants(self):
t1 = table("t1", column("q"), column("p"))
def go():
xy = True
stmt = select(t1).where(lambda: t1.c.q == xy)
return stmt
self.assert_compile(
go(), "SELECT t1.q, t1.p FROM t1 WHERE t1.q = :xy_1"
)
def test_execute_boolean(self, boolean_table_fixture, connection):
boolean_data = boolean_table_fixture
connection.execute(
boolean_data.insert(),
[{"id": 1, "data": True}, {"id": 2, "data": False}],
)
xy = True
def go():
stmt = select(lambda: boolean_data.c.id).where(
lambda: boolean_data.c.data == xy
)
return connection.execute(stmt)
result = go()
eq_(result.all(), [(1,)])
xy = False
result = go()
eq_(result.all(), [(2,)])
def test_in_expressions(self, user_address_fixture, connection):
"""test #6397. we initially were going to use two different
forms for "empty in" vs. regular "in", but instead we have an
improved substitution for "empty in". regardless, as there's more
going on with these, make sure lambdas work with them including
caching.
"""
users, _ = user_address_fixture
data = [
{"id": 1, "name": "u1"},
{"id": 2, "name": "u2"},
{"id": 3, "name": "u3"},
]
connection.execute(users.insert(), data)
def go(val):
stmt = lambdas.lambda_stmt(lambda: select(users.c.id))
stmt += lambda s: s.where(users.c.name.in_(val))
stmt += lambda s: s.order_by(users.c.id)
return connection.execute(stmt)
for case in [
[],
["u1", "u2"],
["u3"],
[],
["u1", "u2"],
]:
with testing.assertsql.assert_engine(testing.db) as asserter_:
result = go(case)
asserter_.assert_(
CompiledSQL(
"SELECT users.id FROM users WHERE users.name "
"IN (__[POSTCOMPILE_val_1]) ORDER BY users.id",
params={"val_1": case},
)
)
eq_(result.all(), [(e["id"],) for e in data if e["name"] in case])
def test_in_expr_compile(self, user_address_fixture):
users, _ = user_address_fixture
def go(val):
stmt = lambdas.lambda_stmt(lambda: select(users.c.id))
stmt += lambda s: s.where(users.c.name.in_(val))
stmt += lambda s: s.order_by(users.c.id)
return stmt
# note this also requires the type of the bind is copied
self.assert_compile(
go([]),
"SELECT users.id FROM users "
"WHERE users.name IN (NULL) AND (1 != 1) ORDER BY users.id",
literal_binds=True,
)
self.assert_compile(
go(["u1", "u2"]),
"SELECT users.id FROM users "
"WHERE users.name IN ('u1', 'u2') ORDER BY users.id",
literal_binds=True,
)
def test_bind_type(self, user_address_fixture):
users, _ = user_address_fixture
def go(val):
stmt = lambdas.lambda_stmt(lambda: select(users.c.id))
stmt += lambda s: s.where(users.c.name == val)
return stmt
self.assert_compile(
go("u1"),
"SELECT users.id FROM users WHERE users.name = 'u1'",
literal_binds=True,
)
def test_stale_checker_embedded(self):
def go(x):
stmt = select(lambda: x)
return stmt
c1 = column("x")
s1 = go(c1)
s2 = go(c1)
self.assert_compile(s1, "SELECT x")
self.assert_compile(s2, "SELECT x")
c1 = column("q")
s3 = go(c1)
self.assert_compile(s3, "SELECT q")
def test_stale_checker_statement(self):
def go(x):
stmt = lambdas.lambda_stmt(lambda: select(x))
return stmt
c1 = column("x")
s1 = go(c1)
s2 = go(c1)
self.assert_compile(s1, "SELECT x")
self.assert_compile(s2, "SELECT x")
c1 = column("q")
s3 = go(c1)
self.assert_compile(s3, "SELECT q")
def test_stale_checker_linked(self):
def go(x, y):
stmt = lambdas.lambda_stmt(lambda: select(x)) + (
lambda s: s.where(y > 5)
)
return stmt
c1 = oldc1 = column("x")
c2 = oldc2 = column("y")
s1 = go(c1, c2)
s2 = go(c1, c2)
self.assert_compile(s1, "SELECT x WHERE y > :y_1")
self.assert_compile(s2, "SELECT x WHERE y > :y_1")
c1 = column("q")
c2 = column("p")
s3 = go(c1, c2)
self.assert_compile(s3, "SELECT q WHERE p > :p_1")
s4 = go(c1, c2)
self.assert_compile(s4, "SELECT q WHERE p > :p_1")
s5 = go(oldc1, oldc2)
self.assert_compile(s5, "SELECT x WHERE y > :y_1")
def test_maintain_required_bindparam(self):
"""test that the "required" flag doesn't go away for bound
parameters"""
def go():
col_expr = column("x")
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt += lambda stmt: stmt.where(col_expr == bindparam(None))
return stmt
s1 = go()
with expect_raises_message(
exc.InvalidRequestError, "A value is required for bind parameter"
):
s1.compile().construct_params({})
s2 = go()
with expect_raises_message(
exc.InvalidRequestError, "A value is required for bind parameter"
):
s2.compile().construct_params({})
def test_stmt_lambda_w_additional_hascachekey_variants(self):
def go(col_expr, q):
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt += lambda stmt: stmt.where(col_expr == q)
return stmt
c1 = column("x")
c2 = column("y")
s1 = go(c1, 5)
s2 = go(c2, 10)
s3 = go(c1, 8)
s4 = go(c2, 12)
self.assert_compile(
s1, "SELECT x WHERE x = :q_1", checkparams={"q_1": 5}
)
self.assert_compile(
s2, "SELECT y WHERE y = :q_1", checkparams={"q_1": 10}
)
self.assert_compile(
s3, "SELECT x WHERE x = :q_1", checkparams={"q_1": 8}
)
self.assert_compile(
s4, "SELECT y WHERE y = :q_1", checkparams={"q_1": 12}
)
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
s3key = s3._generate_cache_key()
s4key = s4._generate_cache_key()
eq_(s1key[0], s3key[0])
eq_(s2key[0], s4key[0])
ne_(s1key[0], s2key[0])
def test_stmt_lambda_w_atonce_whereclause_values_notrack(self):
def go(col_expr, whereclause):
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt = stmt.add_criteria(
lambda stmt: stmt.where(whereclause), enable_tracking=False
)
return stmt
c1 = column("x")
s1 = go(c1, c1 == 5)
s2 = go(c1, c1 == 10)
self.assert_compile(
s1, "SELECT x WHERE x = :x_1", checkparams={"x_1": 5}
)
# and as we see, this is wrong. Because whereclause
# is fixed for the lambda and we do not re-evaluate the closure
# for this value changing. this can't be passed unless
# enable_tracking=False.
self.assert_compile(
s2, "SELECT x WHERE x = :x_1", checkparams={"x_1": 5}
)
def test_stmt_lambda_w_atonce_whereclause_values(self):
c2 = column("y")
def go(col_expr, whereclause, x):
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt = stmt.add_criteria(
lambda stmt: stmt.where(whereclause).order_by(c2 > x),
)
return stmt
c1 = column("x")
s1 = go(c1, c1 == 5, 9)
s2 = go(c1, c1 == 10, 15)
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
eq_([b.value for b in s1key.bindparams], [5, 9])
eq_([b.value for b in s2key.bindparams], [10, 15])
self.assert_compile(
s1,
"SELECT x WHERE x = :x_1 ORDER BY y > :x_2",
checkparams={"x_1": 5, "x_2": 9},
)
self.assert_compile(
s2,
"SELECT x WHERE x = :x_1 ORDER BY y > :x_2",
checkparams={"x_1": 10, "x_2": 15},
)
def test_conditional_must_be_tracked(self):
tab = table("foo", column("id"), column("col"))
def run_my_statement(parameter, add_criteria=False):
stmt = lambda_stmt(lambda: select(tab))
stmt = stmt.add_criteria(
lambda s: (
s.where(tab.c.col > parameter)
if add_criteria
else s.where(tab.c.col == parameter)
),
)
stmt += lambda s: s.order_by(tab.c.id)
return stmt
assert_raises_message(
exc.InvalidRequestError,
"Closure variable named 'add_criteria' inside of lambda callable",
run_my_statement,
5,
False,
)
def test_boolean_conditionals(self):
tab = table("foo", column("id"), column("col"))
def run_my_statement(parameter, add_criteria=False):
stmt = lambda_stmt(lambda: select(tab))
stmt = stmt.add_criteria(
lambda s: (
s.where(tab.c.col > parameter)
if add_criteria
else s.where(tab.c.col == parameter)
),
track_on=[add_criteria],
)
stmt += lambda s: s.order_by(tab.c.id)
return stmt
c1 = run_my_statement(5, False)
c2 = run_my_statement(10, True)
c3 = run_my_statement(18, False)
ck1 = c1._generate_cache_key()
ck2 = c2._generate_cache_key()
ck3 = c3._generate_cache_key()
eq_(ck1[0], ck3[0])
ne_(ck1[0], ck2[0])
self.assert_compile(
c1,
"SELECT foo.id, foo.col FROM foo WHERE "
"foo.col = :parameter_1 ORDER BY foo.id",
)
self.assert_compile(
c2,
"SELECT foo.id, foo.col FROM foo "
"WHERE foo.col > :parameter_1 ORDER BY foo.id",
)
self.assert_compile(
c3,
"SELECT foo.id, foo.col FROM foo WHERE "
"foo.col = :parameter_1 ORDER BY foo.id",
)
def test_stmt_lambda_plain_customtrack(self):
c2 = column("y")
def go(col_expr, whereclause, p):
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt = stmt.add_criteria(lambda stmt: stmt.where(whereclause))
stmt = stmt.add_criteria(
lambda stmt: stmt.order_by(col_expr), track_on=(col_expr,)
)
stmt = stmt.add_criteria(lambda stmt: stmt.where(col_expr == p))
return stmt
c1 = column("x")
c2 = column("y")
s1 = go(c1, c1 == 5, 9)
s2 = go(c1, c1 == 10, 15)
s3 = go(c2, c2 == 18, 12)
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
s3key = s3._generate_cache_key()
eq_([b.value for b in s1key.bindparams], [5, 9])
eq_([b.value for b in s2key.bindparams], [10, 15])
eq_([b.value for b in s3key.bindparams], [18, 12])
self.assert_compile(
s1,
"SELECT x WHERE x = :x_1 AND x = :p_1 ORDER BY x",
checkparams={"x_1": 5, "p_1": 9},
)
self.assert_compile(
s2,
"SELECT x WHERE x = :x_1 AND x = :p_1 ORDER BY x",
checkparams={"x_1": 10, "p_1": 15},
)
self.assert_compile(
s3,
"SELECT y WHERE y = :y_1 AND y = :p_1 ORDER BY y",
checkparams={"y_1": 18, "p_1": 12},
)
@testing.combinations(
(True,),
(False,),
)
def test_stmt_lambda_w_atonce_whereclause_customtrack_binds(
self, use_tuple
):
c2 = column("y")
# this pattern is *completely unnecessary*, and I would prefer
# if we can detect this and just raise, because when it is not done
# correctly, it is *extremely* difficult to catch it failing.
# however I also can't come up with a reliable way to catch it.
# so we will keep the use of "track_on" to be internal.
if use_tuple:
def go(col_expr, whereclause, p):
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt = stmt.add_criteria(
lambda stmt: stmt.where(whereclause).order_by(
col_expr > p
),
track_on=((whereclause,), whereclause.right.value),
)
return stmt
else:
def go(col_expr, whereclause, p):
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt = stmt.add_criteria(
lambda stmt: stmt.where(whereclause).order_by(
col_expr > p
),
track_on=(whereclause, whereclause.right.value),
)
return stmt
c1 = column("x")
c2 = column("y")
s1 = go(c1, c1 == 5, 9)
s2 = go(c1, c1 == 10, 15)
s3 = go(c2, c2 == 18, 12)
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
s3key = s3._generate_cache_key()
eq_([b.value for b in s1key.bindparams], [5, 9])
eq_([b.value for b in s2key.bindparams], [10, 15])
eq_([b.value for b in s3key.bindparams], [18, 12])
self.assert_compile(
s1,
"SELECT x WHERE x = :x_1 ORDER BY x > :p_1",
checkparams={"x_1": 5, "p_1": 9},
)
self.assert_compile(
s2,
"SELECT x WHERE x = :x_1 ORDER BY x > :p_1",
checkparams={"x_1": 10, "p_1": 15},
)
self.assert_compile(
s3,
"SELECT y WHERE y = :y_1 ORDER BY y > :p_1",
checkparams={"y_1": 18, "p_1": 12},
)
def test_stmt_lambda_track_closure_binds_one(self):
def go(col_expr, whereclause):
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt += lambda stmt: stmt.where(whereclause)
return stmt
c1 = column("x")
s1 = go(c1, c1 == 5)
s2 = go(c1, c1 == 10)
self.assert_compile(
s1, "SELECT x WHERE x = :x_1", checkparams={"x_1": 5}
)
self.assert_compile(
s2, "SELECT x WHERE x = :x_1", checkparams={"x_1": 10}
)
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
eq_(s1key.key, s2key.key)
eq_([b.value for b in s1key.bindparams], [5])
eq_([b.value for b in s2key.bindparams], [10])
def test_stmt_lambda_track_closure_binds_two(self):
def go(col_expr, whereclause, x, y):
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt += lambda stmt: stmt.where(whereclause).where(
and_(c1 == x, c1 < y)
)
return stmt
c1 = column("x")
s1 = go(c1, c1 == 5, 8, 9)
s2 = go(c1, c1 == 10, 12, 14)
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
self.assert_compile(
s1,
"SELECT x WHERE x = :x_1 AND x = :x_2 AND x < :y_1",
checkparams={"x_1": 5, "x_2": 8, "y_1": 9},
)
self.assert_compile(
s2,
"SELECT x WHERE x = :x_1 AND x = :x_2 AND x < :y_1",
checkparams={"x_1": 10, "x_2": 12, "y_1": 14},
)
eq_([b.value for b in s1key.bindparams], [5, 8, 9])
eq_([b.value for b in s2key.bindparams], [10, 12, 14])
s1_compiled_cached = s1.compile(cache_key=s1key)
params = s1_compiled_cached.construct_params(
extracted_parameters=s2key[1]
)
eq_(params, {"x_1": 10, "x_2": 12, "y_1": 14})
def test_stmt_lambda_track_closure_binds_three(self):
def go(col_expr, whereclause, x, y):
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt += lambda stmt: stmt.where(whereclause)
stmt += lambda stmt: stmt.where(and_(c1 == x, c1 < y))
return stmt
c1 = column("x")
s1 = go(c1, c1 == 5, 8, 9)
s2 = go(c1, c1 == 10, 12, 14)
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
self.assert_compile(
s1,
"SELECT x WHERE x = :x_1 AND x = :x_2 AND x < :y_1",
checkparams={"x_1": 5, "x_2": 8, "y_1": 9},
)
self.assert_compile(
s2,
"SELECT x WHERE x = :x_1 AND x = :x_2 AND x < :y_1",
checkparams={"x_1": 10, "x_2": 12, "y_1": 14},
)
eq_([b.value for b in s1key.bindparams], [5, 8, 9])
eq_([b.value for b in s2key.bindparams], [10, 12, 14])
s1_compiled_cached = s1.compile(cache_key=s1key)
params = s1_compiled_cached.construct_params(
extracted_parameters=s2key[1]
)
eq_(params, {"x_1": 10, "x_2": 12, "y_1": 14})
def test_stmt_lambda_w_atonce_whereclause_novalue(self):
def go(col_expr, whereclause):
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt += lambda stmt: stmt.where(whereclause)
return stmt
c1 = column("x")
s1 = go(c1, bindparam("x"))
self.assert_compile(s1, "SELECT x WHERE :x")
def test_reject_plain_object(self):
# with #5765 we move to no longer allow closure variables that
# refer to unknown types of objects inside the lambda. these have
# to be resolved outside of the lambda because we otherwise can't
# be sure they can be safely used as cache keys.
class Thing:
def __init__(self, col_expr):
self.col_expr = col_expr
def go(thing, q):
stmt = lambdas.lambda_stmt(lambda: select(thing.col_expr))
stmt += lambda stmt: stmt.where(thing.col_expr == q)
return stmt
c1 = Thing(column("x"))
assert_raises_message(
exc.InvalidRequestError,
"Closure variable named 'thing' inside of lambda callable",
go,
c1,
5,
)
def test_plain_object_ok_w_tracking_disabled(self):
# with #5765 we move to no longer allow closure variables that
# refer to unknown types of objects inside the lambda. these have
# to be resolved outside of the lambda because we otherwise can't
# be sure they can be safely used as cache keys.
class Thing:
def __init__(self, col_expr):
self.col_expr = col_expr
def go(thing, q):
stmt = lambdas.lambda_stmt(
lambda: select(thing.col_expr), track_closure_variables=False
)
stmt = stmt.add_criteria(
lambda stmt: stmt.where(thing.col_expr == q),
track_closure_variables=False,
)
return stmt
c1 = Thing(column("x"))
c2 = Thing(column("y"))
s1 = go(c1, 5)
s2 = go(c2, 10)
s3 = go(c1, 8)
s4 = go(c2, 12)
self.assert_compile(
s1, "SELECT x WHERE x = :q_1", checkparams={"q_1": 5}
)
# note this is wrong, because no tracking
self.assert_compile(
s2, "SELECT x WHERE x = :q_1", checkparams={"q_1": 10}
)
self.assert_compile(
s3, "SELECT x WHERE x = :q_1", checkparams={"q_1": 8}
)
# also wrong
self.assert_compile(
s4, "SELECT x WHERE x = :q_1", checkparams={"q_1": 12}
)
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
s3key = s3._generate_cache_key()
s4key = s4._generate_cache_key()
# all one cache key
eq_(s1key[0], s3key[0])
eq_(s2key[0], s4key[0])
eq_(s1key[0], s2key[0])
def test_plain_object_used_outside_lambda(self):
# test the above 'test_reject_plain_object' with the expected
# workaround
class Thing:
def __init__(self, col_expr):
self.col_expr = col_expr
def go(thing, q):
col_expr = thing.col_expr
stmt = lambdas.lambda_stmt(lambda: select(col_expr))
stmt += lambda stmt: stmt.where(col_expr == q)
return stmt
c1 = Thing(column("x"))
c2 = Thing(column("y"))
s1 = go(c1, 5)
s2 = go(c2, 10)
s3 = go(c1, 8)
s4 = go(c2, 12)
self.assert_compile(
s1, "SELECT x WHERE x = :q_1", checkparams={"q_1": 5}
)
self.assert_compile(
s2, "SELECT y WHERE y = :q_1", checkparams={"q_1": 10}
)
self.assert_compile(
s3, "SELECT x WHERE x = :q_1", checkparams={"q_1": 8}
)
self.assert_compile(
s4, "SELECT y WHERE y = :q_1", checkparams={"q_1": 12}
)
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
s3key = s3._generate_cache_key()
s4key = s4._generate_cache_key()
eq_(s1key[0], s3key[0])
eq_(s2key[0], s4key[0])
ne_(s1key[0], s2key[0])
def test_stmt_lambda_w_set_of_opts(self):
stmt = lambdas.lambda_stmt(lambda: select(column("x")))
class MyUncacheable(ExecutableOption):
pass
opts = {MyUncacheable()}
assert_raises_message(
exc.InvalidRequestError,
"Closure variable named 'opts' inside of lambda callable ",
stmt.__add__,
lambda stmt: stmt.options(*opts),
)
def test_detect_embedded_callables_one(self):
t1 = table("t1", column("q"))
x = 1
def go():
def foo():
return x
stmt = select(t1).where(lambda: t1.c.q == foo())
return stmt
assert_raises_message(
exc.InvalidRequestError,
r"Can't invoke Python callable foo\(\) inside of lambda "
"expression ",
go,
)
def test_detect_embedded_callables_two(self):
t1 = table("t1", column("q"), column("y"))
def go():
def foo():
return t1.c.y
stmt = select(t1).where(lambda: t1.c.q == foo())
return stmt
self.assert_compile(
go(), "SELECT t1.q, t1.y FROM t1 WHERE t1.q = t1.y"
)
def test_detect_embedded_callables_three(self):
t1 = table("t1", column("q"), column("y"))
def go():
def foo():
t1.c.y
stmt = select(t1).where(lambda: t1.c.q == getattr(t1.c, "y"))
return stmt
self.assert_compile(
go(), "SELECT t1.q, t1.y FROM t1 WHERE t1.q = t1.y"
)
def test_detect_embedded_callables_four(self):
t1 = table("t1", column("q"))
x = 1
def go():
def foo():
return x
stmt = select(t1).where(
lambdas.LambdaElement(
lambda: t1.c.q == foo(),
roles.WhereHavingRole,
lambdas.LambdaOptions(track_bound_values=False),
)
)
return stmt
self.assert_compile(
go(),
"SELECT t1.q FROM t1 WHERE t1.q = :q_1",
checkparams={"q_1": 1},
)
# we're not tracking it
x = 2
self.assert_compile(
go(),
"SELECT t1.q FROM t1 WHERE t1.q = :q_1",
checkparams={"q_1": 1},
)
def test_offline_cache_key_no_paramtrack(self):
def go():
stmt = lambdas.lambda_stmt(
lambda: select(column("x")).where(
column("y") == bindparam("q")
),
global_track_bound_values=False,
)
return stmt
s1 = go()
eq_(
s1._generate_cache_key().to_offline_string({}, s1, {"q": 5}),
"('SELECT x \\nWHERE y = :q', (5,))",
)
def test_offline_cache_key_paramtrack(self):
def go(param):
stmt = lambdas.lambda_stmt(
lambda: select(column("x")).where(column("y") == param),
)
return stmt
s1 = go(5)
param_key = s1._resolved._where_criteria[0].right.key
eq_(
s1._generate_cache_key().to_offline_string(
{}, s1, {param_key: 10}
),
"('SELECT x \\nWHERE y = :param_1', (10,))",
)
def test_stmt_lambda_w_list_of_opts(self):
def go(opts):
stmt = lambdas.lambda_stmt(lambda: select(column("x")))
stmt += lambda stmt: stmt.options(*opts)
return stmt
class SomeOpt(HasCacheKey, ExecutableOption):
def __init__(self, x):
self.x = x
def _gen_cache_key(self, anon_map, bindparams):
return (SomeOpt, self.x)
s1 = go([SomeOpt("a"), SomeOpt("b")])
s2 = go([SomeOpt("a"), SomeOpt("b")])
s3 = go([SomeOpt("q"), SomeOpt("b")])
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
s3key = s3._generate_cache_key()
eq_(s1key.key, s2key.key)
ne_(s1key.key, s3key.key)
def test_stmt_lambda_opt_w_key(self):
"""test issue related to #6887"""
def go(opts):
stmt = lambdas.lambda_stmt(lambda: select(column("x")))
stmt += lambda stmt: stmt.options(*opts)
return stmt
class SomeOpt(HasCacheKey, ExecutableOption):
def _gen_cache_key(self, anon_map, bindparams):
return ("fixed_key",)
# generates no key, will not be cached
eq_(SomeOpt()._generate_cache_key().key, ("fixed_key",))
s1o, s2o = SomeOpt(), SomeOpt()
s1 = go([s1o])
s2 = go([s2o])
s1key = s1._generate_cache_key()
s2key = s2._generate_cache_key()
eq_(s1key.key[-1], (("fixed_key",),))
eq_(s1key.key, s2key.key)
eq_(s1._resolved._with_options, (s1o,))
eq_(s2._resolved._with_options, (s1o,))
ne_(s2._resolved._with_options, (s2o,))
def test_stmt_lambda_opt_w_no_key(self):
"""test issue related to #6887"""
def go(opts):
stmt = lambdas.lambda_stmt(lambda: select(column("x")))
stmt += lambda stmt: stmt.options(*opts)
return stmt
class SomeOpt(HasCacheKey, ExecutableOption):
inherit_cache = False
# generates no key, will not be cached
eq_(SomeOpt()._generate_cache_key(), None)
s1o, s2o = SomeOpt(), SomeOpt()
s1 = go([s1o])
s2 = go([s2o])
s1key = s1._generate_cache_key()
eq_(s1key, None)
eq_(s1._resolved._with_options, (s1o,))
eq_(s2._resolved._with_options, (s2o,))
ne_(s2._resolved._with_options, (s1o,))
def test_stmt_lambda_hey_theres_multiple_paths(self):
def go(x, y):
stmt = lambdas.lambda_stmt(lambda: select(column("x")))
if x > 5:
stmt += lambda stmt: stmt.where(column("x") == x)
else:
stmt += lambda stmt: stmt.where(column("y") == y)
stmt += lambda stmt: stmt.order_by(column("q"))
# TODO: need more path variety here to exercise
# using a full path key
return stmt
s1 = go(2, 5)
s2 = go(8, 7)
s3 = go(4, 9)
s4 = go(10, 1)
self.assert_compile(s1, "SELECT x WHERE y = :y_1 ORDER BY q")
self.assert_compile(s2, "SELECT x WHERE x = :x_1 ORDER BY q")
self.assert_compile(s3, "SELECT x WHERE y = :y_1 ORDER BY q")
self.assert_compile(s4, "SELECT x WHERE x = :x_1 ORDER BY q")
def test_coercion_cols_clause(self):
    # A lambda returning a plain string gets the same coercion error as
    # passing the string directly: textual columns must be declared
    # explicitly (e.g. via text() / literal_column()).
    assert_raises_message(
        exc.ArgumentError,
        "Textual column expression 'f' should be explicitly declared",
        select,
        lambda: "foo",
    )
def test_coercion_where_clause(self):
    # A bare non-SQL scalar passed to .where() raises the standard
    # WHERE/HAVING coercion error; lambda support doesn't change that.
    assert_raises_message(
        exc.ArgumentError,
        "SQL expression for WHERE/HAVING role expected, got 5",
        select(column("q")).where,
        5,
    )
def test_propagate_attrs_full_stmt(self):
col = column("q")
col._propagate_attrs = col._propagate_attrs.union(
{"compile_state_plugin": "x", "plugin_subject": "y"}
)
stmt = lambdas.lambda_stmt(lambda: select(col))
eq_(
stmt._propagate_attrs,
{"compile_state_plugin": "x", "plugin_subject": "y"},
)
def test_propagate_attrs_cols_clause(self):
col = column("q")
col._propagate_attrs = col._propagate_attrs.union(
{"compile_state_plugin": "x", "plugin_subject": "y"}
)
stmt = select(lambda: col)
eq_(
stmt._propagate_attrs,
{"compile_state_plugin": "x", "plugin_subject": "y"},
)
def test_propagate_attrs_from_clause(self):
col = column("q")
t = table("t", column("y"))
t._propagate_attrs = t._propagate_attrs.union(
{"compile_state_plugin": "x", "plugin_subject": "y"}
)
stmt = future_select(lambda: col).join(t)
eq_(
stmt._propagate_attrs,
{"compile_state_plugin": "x", "plugin_subject": "y"},
)
def test_select_legacy_expanding_columns(self):
q, p, r = column("q"), column("p"), column("r")
stmt = select(lambda: (q, p, r))
self.assert_compile(stmt, "SELECT q, p, r")
def test_select_future_expanding_columns(self):
q, p, r = column("q"), column("p"), column("r")
stmt = future_select(lambda: (q, p, r))
self.assert_compile(stmt, "SELECT q, p, r")
def test_select_fromclause(self):
t1 = table("t1", column("q"), column("p"))
t2 = table("t2", column("y"))
def go():
return select(t1).select_from(
lambda: join(t1, t2, lambda: t1.c.q == t2.c.y)
)
self.assert_compile(
go(), "SELECT t1.q, t1.p FROM t1 JOIN t2 ON t1.q = t2.y"
)
self.assert_compile(
go(), "SELECT t1.q, t1.p FROM t1 JOIN t2 ON t1.q = t2.y"
)
def test_in_parameters_one(self):
expr1 = select(1).where(column("q").in_(["a", "b", "c"]))
self.assert_compile(expr1, "SELECT 1 WHERE q IN (__[POSTCOMPILE_q_1])")
self.assert_compile(
expr1,
"SELECT 1 WHERE q IN (:q_1_1, :q_1_2, :q_1_3)",
render_postcompile=True,
checkparams={"q_1_1": "a", "q_1_2": "b", "q_1_3": "c"},
)
def test_in_parameters_two(self):
expr2 = select(1).where(lambda: column("q").in_(["a", "b", "c"]))
self.assert_compile(expr2, "SELECT 1 WHERE q IN (__[POSTCOMPILE_q_1])")
self.assert_compile(
expr2,
"SELECT 1 WHERE q IN (:q_1_1, :q_1_2, :q_1_3)",
render_postcompile=True,
checkparams={"q_1_1": "a", "q_1_2": "b", "q_1_3": "c"},
)
def test_in_parameters_three(self):
expr3 = lambdas.lambda_stmt(
lambda: select(1).where(column("q").in_(["a", "b", "c"]))
)
self.assert_compile(expr3, "SELECT 1 WHERE q IN (__[POSTCOMPILE_q_1])")
self.assert_compile(
expr3,
"SELECT 1 WHERE q IN (:q_1_1, :q_1_2, :q_1_3)",
render_postcompile=True,
checkparams={"q_1_1": "a", "q_1_2": "b", "q_1_3": "c"},
)
def test_in_parameters_four(self):
def go(names):
return lambdas.lambda_stmt(
lambda: select(1).where(column("q").in_(names))
)
expr4 = go(["a", "b", "c"])
self.assert_compile(
expr4, "SELECT 1 WHERE q IN (__[POSTCOMPILE_names_1])"
)
self.assert_compile(
expr4,
"SELECT 1 WHERE q IN (:names_1_1, :names_1_2, :names_1_3)",
render_postcompile=True,
checkparams={"names_1_1": "a", "names_1_2": "b", "names_1_3": "c"},
)
def test_in_parameters_five(self):
def go(n1, n2):
stmt = lambdas.lambda_stmt(
lambda: select(1).where(column("q", ARRAY(String)).in_(n1))
)
stmt += lambda s: s.where(column("y", ARRAY(String)).in_(n2))
return stmt
expr = go(["a", "b", "c"], ["d", "e", "f"])
self.assert_compile(
expr,
"SELECT 1 WHERE q IN (:n1_1_1, :n1_1_2, :n1_1_3) "
"AND y IN (:n2_1_1, :n2_1_2, :n2_1_3)",
render_postcompile=True,
checkparams={
"n1_1_1": "a",
"n1_1_2": "b",
"n1_1_3": "c",
"n2_1_1": "d",
"n2_1_2": "e",
"n2_1_3": "f",
},
)
def test_in_columnelement(self):
# test issue #5768
def go():
v = [literal("a"), literal("b")]
expr1 = select(1).where(lambda: column("q").in_(v))
return expr1
self.assert_compile(go(), "SELECT 1 WHERE q IN (:param_1, :param_2)")
self.assert_compile(
go(),
"SELECT 1 WHERE q IN (:param_1, :param_2)",
render_postcompile=True,
checkparams={"param_1": "a", "param_2": "b"},
)
def test_select_columns_clause(self):
t1 = table("t1", column("q"), column("p"))
g = 5
def go():
return select(lambda: t1.c.q, lambda: t1.c.p + g)
stmt = go()
self.assert_compile(
stmt,
"SELECT t1.q, t1.p + :g_1 AS anon_1 FROM t1",
checkparams={"g_1": 5},
)
eq_(stmt._generate_cache_key()._generate_param_dict(), {"g_1": 5})
g = 10
stmt = go()
self.assert_compile(
stmt,
"SELECT t1.q, t1.p + :g_1 AS anon_1 FROM t1",
checkparams={"g_1": 10},
)
eq_(stmt._generate_cache_key()._generate_param_dict(), {"g_1": 10})
@testing.metadata_fixture()
def user_address_fixture(self, metadata):
users = Table(
"users",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
)
addresses = Table(
"addresses",
metadata,
Column("id", Integer),
Column("user_id", ForeignKey("users.id")),
Column("email", String(50)),
)
return users, addresses
@testing.metadata_fixture()
def boolean_table_fixture(self, metadata):
return Table(
"boolean_data",
metadata,
Column("id", Integer, primary_key=True),
Column("data", Boolean),
)
def test_adapt_select(self, user_address_fixture):
users, addresses = user_address_fixture
stmt = (
select(users)
.select_from(
users.join(
addresses, lambda: users.c.id == addresses.c.user_id
)
)
.where(lambda: users.c.name == "ed")
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id "
"WHERE users.name = :name_1",
)
u1 = users.alias()
adapter = sql_util.ClauseAdapter(u1)
s2 = adapter.traverse(stmt)
self.assert_compile(
s2,
"SELECT users_1.id, users_1.name FROM users AS users_1 "
"JOIN addresses ON users_1.id = addresses.user_id "
"WHERE users_1.name = :name_1",
)
def test_no_var_dict_keys(self, user_address_fixture):
users, addresses = user_address_fixture
names = {"x": "some name"}
foo = "x"
expr = lambda: users.c.name == names[foo] # noqa
assert_raises_message(
exc.InvalidRequestError,
"Dictionary keys / list indexes inside of a cached "
"lambda must be Python literals only",
coercions.expect,
roles.WhereHavingRole,
expr,
)
def test_reject_dict_literal_keys(self, user_address_fixture):
users, addresses = user_address_fixture
names = {"x": "some name"}
lmb = lambda: users.c.name == names["x"] # noqa
assert_raises_message(
exc.InvalidRequestError,
"Closure variable named 'names' inside of lambda callable",
coercions.expect,
roles.WhereHavingRole,
lmb,
)
def test_dict_literal_keys_proper_use(self, user_address_fixture):
users, addresses = user_address_fixture
names = {"x": "some name"}
x = names["x"]
lmb = lambda: users.c.name == x # noqa
expr = coercions.expect(roles.WhereHavingRole, lmb)
self.assert_compile(
expr,
"users.name = :x_1",
params=expr._param_dict(),
checkparams={"x_1": "some name"},
)
def test_assignment_one(self, user_address_fixture):
users, addresses = user_address_fixture
x = 5
def my_lambda():
y = 10
z = y + 18
expr1 = users.c.name > x
expr2 = users.c.name < z
return and_(expr1, expr2)
expr = coercions.expect(roles.WhereHavingRole, my_lambda)
self.assert_compile(
expr,
"users.name > :x_1 AND users.name < :name_1",
params=expr._param_dict(),
checkparams={"name_1": 28, "x_1": 5},
)
expr = coercions.expect(roles.WhereHavingRole, my_lambda)
self.assert_compile(
expr,
"users.name > :x_1 AND users.name < :name_1",
params=expr._param_dict(),
checkparams={"name_1": 28, "x_1": 5},
)
def test_assignment_two(self, user_address_fixture):
users, addresses = user_address_fixture
x = 5
z = 10
def my_lambda():
y = x + z
expr1 = users.c.name > x
expr2 = users.c.name < y
return and_(expr1, expr2)
expr = coercions.expect(roles.WhereHavingRole, my_lambda)
self.assert_compile(
expr,
"users.name > :x_1 AND users.name < :x_1 + :z_1",
params=expr._param_dict(),
checkparams={"x_1": 5, "z_1": 10},
)
x = 15
z = 18
expr = coercions.expect(roles.WhereHavingRole, my_lambda)
self.assert_compile(
expr,
"users.name > :x_1 AND users.name < :x_1 + :z_1",
params=expr._param_dict(),
checkparams={"x_1": 15, "z_1": 18},
)
def test_assignment_three(self, user_address_fixture):
users, addresses = user_address_fixture
x = 5
z = 10
def my_lambda():
y = 10 + z
expr1 = users.c.name > x
expr2 = users.c.name < y
return and_(expr1, expr2)
expr = coercions.expect(roles.WhereHavingRole, my_lambda)
self.assert_compile(
expr,
"users.name > :x_1 AND users.name < :param_1 + :z_1",
params=expr._param_dict(),
checkparams={"x_1": 5, "z_1": 10, "param_1": 10},
)
x = 15
z = 18
expr = coercions.expect(roles.WhereHavingRole, my_lambda)
self.assert_compile(
expr,
"users.name > :x_1 AND users.name < :param_1 + :z_1",
params=expr._param_dict(),
checkparams={"x_1": 15, "z_1": 18, "param_1": 10},
)
def test_op_reverse(self, user_address_fixture):
user, addresses = user_address_fixture
x = "foo"
def mylambda():
return x + user.c.name
expr = coercions.expect(roles.WhereHavingRole, mylambda)
self.assert_compile(
expr, ":x_1 || users.name", checkparams={"x_1": "foo"}
)
x = "bar"
expr = coercions.expect(roles.WhereHavingRole, mylambda)
self.assert_compile(
expr, ":x_1 || users.name", checkparams={"x_1": "bar"}
)
def test_op_forwards(self, user_address_fixture):
    # Closure value on the right-hand side of the operator: the lambda
    # system names the bound parameter after the closure variable
    # ("x" -> :x_1) and re-extracts its current value on each coercion.
    user, addresses = user_address_fixture
    x = "foo"
    def mylambda():
        return user.c.name + x
    expr = coercions.expect(roles.WhereHavingRole, mylambda)
    self.assert_compile(
        expr, "users.name || :x_1", checkparams={"x_1": "foo"}
    )
    # Rebind the closure variable; a fresh coercion must pick up "bar".
    x = "bar"
    expr = coercions.expect(roles.WhereHavingRole, mylambda)
    self.assert_compile(
        expr, "users.name || :x_1", checkparams={"x_1": "bar"}
    )
def test_rhs_type_detection_from_left(self):
"""test #9029"""
tt = table("tt", column("q", JSON))
x = {"foo": "bar"}
def mylambda():
return tt.c.q._null_operate(x)
expr = coercions.expect(roles.WhereHavingRole, mylambda)
is_(expr._resolved.right.type._type_affinity, JSON)
def test_rhs_type_detection_standalone(self):
"""test related to #9029, as type coercion rule was changed"""
x = 5
def mylambda():
return x
expr = coercions.expect(roles.OrderByRole, mylambda)
is_(expr._resolved.type._type_affinity, Integer)
x = "now im a string"
# stays as int b.c. _resolved is cached
is_(expr._resolved.type._type_affinity, Integer)
# make a new one! now it will be string
expr = coercions.expect(roles.OrderByRole, mylambda)
is_(expr._resolved.type._type_affinity, String)
@testing.only_on("sqlite")
@testing.variation("stmt_type", ["lambda_stmt", "lambda_crit"])
@testing.variation("callable_type", ["none", "closure", "parameter"])
def test_9029_integration(
self, metadata, connection, stmt_type, callable_type
):
t = Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("data", JSON),
)
t.create(connection)
connection.execute(
t.insert(),
{
"id": 12,
"data": {"key": "value", "key2": {"subkey": [1, 2, 3]}},
},
)
d = {"key": "value", "key2": {"subkey": [1, 2, 3]}}
if callable_type.none:
if stmt_type.lambda_stmt:
stmt = lambda_stmt(lambda: select(t).filter(t.c.data == d))
elif stmt_type.lambda_crit:
stmt = select(t).filter(lambda: t.c.data == d)
else:
stmt_type.fail()
to_run = stmt
elif callable_type.closure:
def go():
if stmt_type.lambda_stmt:
stmt = lambda_stmt(lambda: select(t).filter(t.c.data == d))
elif stmt_type.lambda_crit:
stmt = select(t).filter(lambda: t.c.data == d)
else:
stmt_type.fail()
return stmt
to_run = go()
elif callable_type.parameter:
def go(data):
if stmt_type.lambda_stmt:
stmt = lambda_stmt(
lambda: select(t).filter(t.c.data == data)
)
elif stmt_type.lambda_crit:
stmt = select(t).filter(lambda: t.c.data == data)
else:
stmt_type.fail()
return stmt
to_run = go(d)
eq_(
connection.execute(to_run).first(),
(12, {"key": "value", "key2": {"subkey": [1, 2, 3]}}),
)
def test_execute_constructed_uncached(self, user_address_fixture):
users, addresses = user_address_fixture
def go(name):
stmt = select(lambda: users.c.id).where(
lambda: users.c.name == name
)
with testing.db.connect().execution_options(
compiled_cache=None
) as conn:
conn.execute(stmt)
with self.sql_execution_asserter(testing.db) as asserter:
go("name1")
go("name2")
go("name1")
go("name3")
asserter.assert_(
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name1"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name2"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name1"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name3"}],
),
)
def test_execute_full_uncached(self, user_address_fixture):
users, addresses = user_address_fixture
def go(name):
stmt = lambda_stmt(
lambda: select(users.c.id).where(users.c.name == name) # noqa
)
with testing.db.connect().execution_options(
compiled_cache=None
) as conn:
conn.execute(stmt)
with self.sql_execution_asserter(testing.db) as asserter:
go("name1")
go("name2")
go("name1")
go("name3")
asserter.assert_(
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name1"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name2"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name1"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name3"}],
),
)
def test_execute_constructed_cached(self, user_address_fixture):
users, addresses = user_address_fixture
cache = {}
def go(name):
stmt = select(lambda: users.c.id).where(
lambda: users.c.name == name
)
with testing.db.connect().execution_options(
compiled_cache=cache
) as conn:
conn.execute(stmt)
with self.sql_execution_asserter(testing.db) as asserter:
go("name1")
go("name2")
go("name1")
go("name3")
asserter.assert_(
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name1"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name2"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name1"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name3"}],
),
)
def test_execute_full_cached(self, user_address_fixture):
users, addresses = user_address_fixture
cache = {}
def go(name):
stmt = lambda_stmt(
lambda: select(users.c.id).where(users.c.name == name) # noqa
)
with testing.db.connect().execution_options(
compiled_cache=cache
) as conn:
conn.execute(stmt)
with self.sql_execution_asserter(testing.db) as asserter:
go("name1")
go("name2")
go("name1")
go("name3")
asserter.assert_(
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name1"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name2"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name1"}],
),
CompiledSQL(
"SELECT users.id FROM users WHERE users.name = :name_1",
lambda ctx: [{"name_1": "name3"}],
),
)
def test_cache_key_bindparam_matches(self):
    # The bindparam captured in the generated cache key must be the very
    # same object as the resolved expression's right-hand side, so that
    # per-invocation parameter values track correctly.
    t1 = table("t1", column("q"), column("p"))
    def go(x):
        return coercions.expect(roles.WhereHavingRole, lambda: t1.c.q == x)
    expr1 = go(5)
    expr2 = go(10)
    is_(expr1._generate_cache_key().bindparams[0], expr1._resolved.right)
    is_(expr2._generate_cache_key().bindparams[0], expr2._resolved.right)
def test_cache_key_bindparam_matches_annotations(self):
t1 = table("t1", column("q"), column("p"))
def go():
expr = sql_util._deep_annotate((t1.c.q == 5), {"foo": "bar"})
stmt = coercions.expect(roles.WhereHavingRole, lambda: expr)
return stmt
self.assert_compile(go(), "t1.q = :q_1", checkparams={"q_1": 5})
self.assert_compile(go(), "t1.q = :q_1", checkparams={"q_1": 5})
def test_cache_key_instance_variable_issue_incorrect(self):
t1 = table("t1", column("q"), column("p"))
class Foo:
def __init__(self, value):
self.value = value
def go(foo):
return coercions.expect(
roles.WhereHavingRole, lambda: t1.c.q == foo.value
)
assert_raises_message(
exc.InvalidRequestError,
"Closure variable named 'foo' inside of lambda callable",
go,
Foo(5),
)
def test_cache_key_instance_variable_issue_correct_one(self):
t1 = table("t1", column("q"), column("p"))
class Foo:
def __init__(self, value):
self.value = value
def go(foo):
value = foo.value
return coercions.expect(
roles.WhereHavingRole, lambda: t1.c.q == value
)
expr1 = go(Foo(5))
expr2 = go(Foo(10))
c1 = expr1._generate_cache_key()
c2 = expr2._generate_cache_key()
eq_(c1, c2)
def test_cache_key_instance_variable_issue_correct_two(self):
t1 = table("t1", column("q"), column("p"))
class Foo:
def __init__(self, value):
self.value = value
def go(foo):
return coercions.expect(
roles.WhereHavingRole,
lambda: t1.c.q == foo.value,
track_on=[self],
)
expr1 = go(Foo(5))
expr2 = go(Foo(10))
c1 = expr1._generate_cache_key()
c2 = expr2._generate_cache_key()
eq_(c1, c2)
def test_insert_statement(self, user_address_fixture):
users, addresses = user_address_fixture
def ins(id_, name):
stmt = lambda_stmt(lambda: users.insert())
stmt += lambda s: s.values(id=id_, name=name)
return stmt
with testing.db.begin() as conn:
conn.execute(ins(12, "foo"))
eq_(
conn.execute(select(users).where(users.c.id == 12)).first(),
(12, "foo"),
)
def test_update_statement(self, user_address_fixture):
users, addresses = user_address_fixture
def upd(id_, newname):
stmt = lambda_stmt(lambda: users.update())
stmt += lambda s: s.values(name=newname)
stmt += lambda s: s.where(users.c.id == id_)
return stmt
with testing.db.begin() as conn:
conn.execute(users.insert().values(id=7, name="bar"))
conn.execute(upd(7, "foo"))
eq_(
conn.execute(select(users).where(users.c.id == 7)).first(),
(7, "foo"),
)
def test_bindparam_not_cached(self, user_address_fixture, testing_engine):
"""test #12084"""
users, addresses = user_address_fixture
engine = testing_engine(
options={"query_cache_size": 0, "sqlite_share_pool": True}
)
with engine.begin() as conn:
conn.execute(
users.insert(),
[{"id": 7, "name": "bar"}, {"id": 8, "name": "foo"}],
)
def make_query(stmt, *criteria):
for crit in criteria:
stmt += lambda s: s.where(crit)
return stmt
for i in range(2):
with engine.connect() as conn:
stmt = lambda_stmt(lambda: select(users))
# create a filter criterion that will never match anything
stmt1 = make_query(
stmt,
users.c.name == "bar",
users.c.name == "foo",
)
assert len(conn.scalars(stmt1).all()) == 0
stmt2 = make_query(
stmt,
users.c.name == "bar",
users.c.name == "bar",
users.c.name == "foo",
)
assert len(conn.scalars(stmt2).all()) == 0
|
LambdaElementTest
|
python
|
getsentry__sentry
|
tests/sentry/integrations/api/endpoints/test_organization_integration_channel_validate.py
|
{
"start": 4836,
"end": 5856
}
|
class ____(BaseChannelValidateTest):
    """Error paths for the channel-validation endpoint: missing request
    parameter, unknown integration id, and providers without channel
    support."""

    def test_missing_channel_param(self):
        slack = self.create_integration(
            organization=self.organization,
            provider="slack",
            name="Slack",
            external_id="slack:1",
        )
        response = self.get_error_response(
            self.organization.slug, slack.id, status_code=400
        )
        assert "channel" in response.data

    def test_integration_not_found(self):
        response = self.get_error_response(
            self.organization.slug, 99999, status_code=404, channel="#x"
        )
        assert response.status_code == 404

    def test_unsupported_provider(self):
        gh = self.create_integration(
            organization=self.organization,
            provider="github",
            name="GitHub",
            external_id="github:1",
        )
        response = self.get_error_response(
            self.organization.slug, gh.id, status_code=400, channel="#x"
        )
        assert response.data["valid"] is False
        assert "Unsupported provider" in response.data.get("detail", "")
|
ChannelValidateErrorCasesTest
|
python
|
scrapy__scrapy
|
tests/test_exporters.py
|
{
"start": 3694,
"end": 5809
}
|
class ____(TestBaseItemExporter):
    """Exercise PythonItemExporter: option validation, nested items,
    lists of items, dict/item mixes, and non-string field types."""

    def _get_exporter(self, **kwargs):
        return PythonItemExporter(**kwargs)

    def test_invalid_option(self):
        with pytest.raises(TypeError, match="Unexpected options: invalid_option"):
            PythonItemExporter(invalid_option="something")

    def test_nested_item(self):
        innermost = self.item_class(name="Joseph", age="22")
        middle = {"name": "Maria", "age": innermost}
        outer = self.item_class(name="Jesus", age=middle)
        exported = self._get_exporter().export_item(outer)
        assert isinstance(exported, dict)
        assert exported == {
            "age": {"age": {"age": "22", "name": "Joseph"}, "name": "Maria"},
            "name": "Jesus",
        }
        # Nested items come back as plain dicts at every depth.
        assert isinstance(exported["age"], dict)
        assert isinstance(exported["age"]["age"], dict)

    def test_export_list(self):
        innermost = self.item_class(name="Joseph", age="22")
        middle = self.item_class(name="Maria", age=[innermost])
        outer = self.item_class(name="Jesus", age=[middle])
        exported = self._get_exporter().export_item(outer)
        assert exported == {
            "age": [{"age": [{"age": "22", "name": "Joseph"}], "name": "Maria"}],
            "name": "Jesus",
        }
        assert isinstance(exported["age"][0], dict)
        assert isinstance(exported["age"][0]["age"][0], dict)

    def test_export_item_dict_list(self):
        innermost = self.item_class(name="Joseph", age="22")
        middle = {"name": "Maria", "age": [innermost]}
        outer = self.item_class(name="Jesus", age=[middle])
        exported = self._get_exporter().export_item(outer)
        assert exported == {
            "age": [{"age": [{"age": "22", "name": "Joseph"}], "name": "Maria"}],
            "name": "Jesus",
        }
        assert isinstance(exported["age"][0], dict)
        assert isinstance(exported["age"][0]["age"][0], dict)

    def test_nonstring_types_item(self):
        item = self._get_nonstring_types_item()
        assert self._get_exporter().export_item(item) == item
|
TestPythonItemExporter
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/tracers/memory_stream.py
|
{
"start": 3406,
"end": 4995
}
|
class ____(Generic[T]):
    """Stream data from a writer to a reader even if they are in different threads.

    Uses asyncio queues to communicate between two co-routines. This implementation
    should work even if the writer and reader co-routines belong to two different
    event loops (e.g. one running from an event loop in the main thread
    and the other running in an event loop in a background thread).

    This implementation is meant to be used with a single writer and a single reader.

    This is an internal implementation to LangChain. Please do not use it directly.
    """

    def __init__(self, loop: AbstractEventLoop) -> None:
        """Create a channel for the given loop.

        Args:
            loop: The event loop to use for the channel. The reader is assumed
                to be running in the same loop as the one passed to this constructor.
                This will NOT be validated at run time.
        """
        self._loop = loop
        # Unbounded queue (maxsize=0): sending never blocks the writer.
        self._queue: asyncio.Queue = asyncio.Queue(maxsize=0)
        # Unique sentinel object that signals end-of-stream to the reader.
        self._done = object()

    def get_send_stream(self) -> _SendStream[T]:
        """Get a writer for the channel.

        Returns:
            The writer for the channel.
        """
        # The writer gets the reader's loop so it can schedule puts there
        # even when called from a different thread/loop.
        return _SendStream[T](
            reader_loop=self._loop, queue=self._queue, done=self._done
        )

    def get_receive_stream(self) -> _ReceiveStream[T]:
        """Get a reader for the channel.

        Returns:
            The reader for the channel.
        """
        return _ReceiveStream[T](queue=self._queue, done=self._done)
|
_MemoryStream
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/pynvml/pynvml.py
|
{
"start": 261483,
"end": 262409
}
|
class ____(_PrintableStructure):
    # ctypes layout for nvmlPowerSmoothingProfile_v1_t: selects a
    # power-smoothing profile/parameter pair and carries its value.
    _fields_ = [
        ('version', c_uint),
        ('profileId', c_uint),
        ('paramId', c_uint),
        ('value', c_double),
    ]

    def __init__(self):
        # Pre-populate the struct's version field, as required by the
        # versioned v1 NVML API.
        super(c_nvmlPowerSmoothingProfile_v1_t, self).__init__(version=nvmlPowerSmoothingProfile_v1)
def nvmlDevicePowerSmoothingActivatePresetProfile(device, profile):
    """Activate a preset power-smoothing profile on the given device.

    Raises an NVML error via _nvmlCheckReturn on failure.
    """
    func = _nvmlGetFunctionPointer("nvmlDevicePowerSmoothingActivatePresetProfile")
    _nvmlCheckReturn(func(device, profile))
def nvmlDevicePowerSmoothingUpdatePresetProfileParam(device, profile):
    """Update a single parameter of a preset power-smoothing profile.

    Raises an NVML error via _nvmlCheckReturn on failure.
    """
    func = _nvmlGetFunctionPointer("nvmlDevicePowerSmoothingUpdatePresetProfileParam")
    _nvmlCheckReturn(func(device, profile))
def nvmlDevicePowerSmoothingSetState(device, state):
    """Enable or disable the power-smoothing feature on the given device.

    Raises an NVML error via _nvmlCheckReturn on failure.
    """
    func = _nvmlGetFunctionPointer("nvmlDevicePowerSmoothingSetState")
    _nvmlCheckReturn(func(device, state))
|
c_nvmlPowerSmoothingProfile_v1_t
|
python
|
dask__dask
|
dask/dataframe/dask_expr/io/io.py
|
{
"start": 17158,
"end": 18339
}
|
class ____(FromPandas):
    # Variant of FromPandas where the output divisions are supplied
    # explicitly by the caller instead of being derived from npartitions.
    _parameters = [
        "frame",
        "divisions",
        "columns",
        "pyarrow_strings_enabled",
        "_partitions",
        "_series",
        "_pd_length_stats",
    ]
    _defaults = {
        "columns": None,
        "_partitions": None,
        "_series": False,
        "_pd_length_stats": None,
    }
    # Explicit divisions imply the frame is (assumed) sorted on its index.
    sort = True

    @functools.cached_property
    def _name(self):
        # Distinct name prefix from plain FromPandas so the two expression
        # types never collide in the task graph.
        return "from_pd_divs" + "-" + self.deterministic_token

    @property
    def _divisions_and_locations(self):
        # Map the requested divisions to integer row offsets into the
        # wrapped pandas frame, caching the result on the frame wrapper.
        assert isinstance(self.frame, _BackendData)
        key = tuple(self.operand("divisions"))
        _division_info_cache = self.frame._division_info
        if key not in _division_info_cache:
            data = self.frame._data
            if data.index.is_unique:
                indexer = data.index.get_indexer(key, method="bfill")
            else:
                # get_indexer for doesn't support method
                indexer = np.searchsorted(data.index.values, key, side="left")
            # The final division is inclusive: force it to the frame's end.
            indexer[-1] = len(data)
            _division_info_cache[key] = key, indexer
        return _division_info_cache[key]
|
FromPandasDivisions
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-number-of-removable-characters.py
|
{
"start": 918,
"end": 1758
}
|
class ____(object):
def maximumRemovals(self, s, p, removable):
"""
:type s: str
:type p: str
:type removable: List[int]
:rtype: int
"""
def check(s, p, lookup, x):
j = 0
for i in xrange(len(s)):
if lookup[i] <= x or s[i] != p[j]:
continue
j += 1
if j == len(p):
return True
return False
lookup = [float("inf")]*len(s)
for i, r in enumerate(removable):
lookup[r] = i+1
left, right = 0, len(removable)
while left <= right:
mid = left + (right-left)//2
if not check(s, p, lookup, mid):
right = mid-1
else:
left = mid+1
return right
|
Solution2
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/dataviews.py
|
{
"start": 30229,
"end": 33119
}
|
class ____(NonStrictDataModel):
    """
    :param cls: Augmentation class
    :type cls: str
    :param types: Augmentation type
    :type types: Sequence[str]
    :param strength: Augmentation strength. Range [0,).
    :type strength: float
    :param arguments: Arguments dictionary per custom augmentation type.
    :type arguments: dict
    """

    # JSON schema backing this data model; every property below is
    # validated on assignment against its corresponding entry here.
    _schema = {
        "properties": {
            "arguments": {
                "additionalProperties": {
                    "additionalProperties": True,
                    "type": "object",
                },
                "description": "Arguments dictionary per custom augmentation type.",
                "type": ["object", "null"],
            },
            "cls": {"description": "Augmentation class", "type": ["string", "null"]},
            "strength": {
                "description": "Augmentation strength. Range [0,).",
                "minimum": 0,
                "type": ["number", "null"],
            },
            "types": {
                "description": "Augmentation type",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, cls=None, types=None, strength=None, arguments=None, **kwargs):
        super(AugmentationSet, self).__init__(**kwargs)
        self.cls = cls
        self.types = types
        self.strength = strength
        self.arguments = arguments

    @schema_property("cls")
    def cls(self):
        return self._property_cls

    @cls.setter
    def cls(self, value):
        # None clears the property; otherwise only strings are accepted.
        if value is None:
            self._property_cls = None
            return
        self.assert_isinstance(value, "cls", six.string_types)
        self._property_cls = value

    @schema_property("types")
    def types(self):
        return self._property_types

    @types.setter
    def types(self, value):
        # Accepts a list/tuple of strings (validated element-wise), or None.
        if value is None:
            self._property_types = None
            return
        self.assert_isinstance(value, "types", (list, tuple))
        self.assert_isinstance(value, "types", six.string_types, is_array=True)
        self._property_types = value

    @schema_property("strength")
    def strength(self):
        return self._property_strength

    @strength.setter
    def strength(self, value):
        # Accepts int or float (the schema declares "number"), or None.
        if value is None:
            self._property_strength = None
            return
        self.assert_isinstance(value, "strength", six.integer_types + (float,))
        self._property_strength = value

    @schema_property("arguments")
    def arguments(self):
        return self._property_arguments

    @arguments.setter
    def arguments(self, value):
        # Accepts a dict of per-augmentation-type argument dicts, or None.
        if value is None:
            self._property_arguments = None
            return
        self.assert_isinstance(value, "arguments", (dict,))
        self._property_arguments = value
|
AugmentationSet
|
python
|
plotly__plotly.py
|
plotly/graph_objs/densitymapbox/hoverlabel/_font.py
|
{
"start": 233,
"end": 17174
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "densitymapbox.hoverlabel"
_path_str = "densitymapbox.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.densitymapbox.
hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.densitymapbox.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.densitymapbox.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Font
|
python
|
docker__docker-py
|
docker/models/containers.py
|
{
"start": 18559,
"end": 46782
}
|
class ____(Collection):
model = Container
def run(self, image, command=None, stdout=True, stderr=False,
remove=False, **kwargs):
"""
Run a container. By default, it will wait for the container to finish
and return its logs, similar to ``docker run``.
If the ``detach`` argument is ``True``, it will start the container
and immediately return a :py:class:`Container` object, similar to
``docker run -d``.
Example:
Run a container and get its output:
>>> import docker
>>> client = docker.from_env()
>>> client.containers.run('alpine', 'echo hello world')
b'hello world\\n'
Run a container and detach:
>>> container = client.containers.run('bfirsh/reticulate-splines',
detach=True)
>>> container.logs()
'Reticulating spline 1...\\nReticulating spline 2...\\n'
Args:
image (str): The image to run.
command (str or list): The command to run in the container.
auto_remove (bool): enable auto-removal of the container on daemon
side when the container's process exits.
blkio_weight_device: Block IO weight (relative device weight) in
the form of: ``[{"Path": "device_path", "Weight": weight}]``.
blkio_weight: Block IO weight (relative weight), accepts a weight
value between 10 and 1000.
cap_add (list of str): Add kernel capabilities. For example,
``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities.
cgroup_parent (str): Override the default parent cgroup.
cgroupns (str): Override the default cgroup namespace mode for the
container. One of:
- ``private`` the container runs in its own private cgroup
namespace.
- ``host`` use the host system's cgroup namespace.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs
(Windows only).
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can
get in a CPU period.
cpu_rt_period (int): Limit CPU real-time period in microseconds.
cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
``0,1``).
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
(``0-3``, ``0,1``). Only effective on NUMA systems.
detach (bool): Run container in the background and return a
:py:class:`Container` object.
device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device
in the form of: `[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
device_write_bps: Limit write rate (bytes per second) from a
device.
device_write_iops: Limit write rate (IO per second) from a device.
devices (:py:class:`list`): Expose host devices to the container,
as a list of strings in the form
``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
device_requests (:py:class:`list`): Expose host resources such as
GPUs to the container, as a list of
:py:class:`docker.types.DeviceRequest` instances.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file.
dns_search (:py:class:`list`): DNS search domains.
domainname (str or list): Set custom DNS search domains.
entrypoint (str or list): The entrypoint for the container.
environment (dict or list): Environment variables to set inside
the container, as a dictionary or a list of strings in the
format ``["SOMEVARIABLE=xxx"]``.
extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
healthcheck (dict): Specify a test to perform to check that the
container is healthy. The dict takes the following keys:
- test (:py:class:`list` or str): Test to perform to determine
container health. Possible values:
- Empty list: Inherit healthcheck from parent image
- ``["NONE"]``: Disable healthcheck
- ``["CMD", args...]``: exec arguments directly.
- ``["CMD-SHELL", command]``: Run command in the system's
default shell.
If a string is provided, it will be used as a ``CMD-SHELL``
command.
- interval (int): The time to wait between checks in
nanoseconds. It should be 0 or at least 1000000 (1 ms).
- timeout (int): The time to wait before considering the check
to have hung. It should be 0 or at least 1000000 (1 ms).
- retries (int): The number of consecutive failures needed to
consider a container as unhealthy.
- start_period (int): Start period for the container to
initialize before starting health-retries countdown in
nanoseconds. It should be 0 or at least 1000000 (1 ms).
hostname (str): Optional hostname for the container.
init (bool): Run an init inside the container that forwards
signals and reaps processes
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
kernel_memory (int or str): Kernel memory limit
labels (dict or list): A dictionary of name-value labels (e.g.
``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g.
``["label1", "label2"]``)
links (dict): Mapping of links using the
``{'container': 'alias'}`` format. The alias is optional.
Containers declared in this dict will be linked to the new
container using the provided alias. Default: ``None``.
log_config (LogConfig): Logging configuration.
lxc_conf (dict): LXC config.
mac_address (str): MAC address to assign to the container.
mem_limit (int or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
intended unit.
mem_reservation (int or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
container is allowed to consume.
mounts (:py:class:`list`): Specification for mounts to be added to
the container. More powerful alternative to ``volumes``. Each
item in the list is expected to be a
:py:class:`docker.types.Mount` object.
name (str): The name for this container.
nano_cpus (int): CPU quota in units of 1e-9 CPUs.
network (str): Name of the network this container will be connected
to at creation time. You can connect to additional networks
using :py:meth:`Network.connect`. Incompatible with
``network_mode``.
network_disabled (bool): Disable networking.
network_mode (str): One of:
- ``bridge`` Create a new network stack for the container on
the bridge network.
- ``none`` No networking for this container.
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
This mode is incompatible with ``ports``.
Incompatible with ``network``.
networking_config (Dict[str, EndpointConfig]):
Dictionary of EndpointConfig objects for each container network.
The key is the name of the network.
Defaults to ``None``.
Used in conjuction with ``network``.
Incompatible with ``network_mode``.
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences.
pid_mode (str): If set to ``host``, use the host PID namespace
inside the container.
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
platform (str): Platform in the format ``os[/arch[/variant]]``.
Only used if the method needs to pull the requested image.
ports (dict): Ports to bind inside the container.
The keys of the dictionary are the ports to bind inside the
container, either as an integer or a string in the form
``port/protocol``, where the protocol is either ``tcp``,
``udp``, or ``sctp``.
The values of the dictionary are the corresponding ports to
open on the host, which can be either:
- The port number, as an integer. For example,
``{'2222/tcp': 3333}`` will expose port 2222 inside the
container as port 3333 on the host.
- ``None``, to assign a random host port. For example,
``{'2222/tcp': None}``.
- A tuple of ``(address, port)`` if you want to specify the
host interface. For example,
``{'1111/tcp': ('127.0.0.1', 1111)}``.
- A list of integers, if you want to bind multiple host ports
to a single container port. For example,
``{'1111/tcp': [1234, 4567]}``.
Incompatible with ``host`` network mode.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
only.
remove (bool): Remove the container when it has finished running.
Default: ``False``.
restart_policy (dict): Restart the container when it exits.
Configured as a dictionary with keys:
- ``Name`` One of ``on-failure``, or ``always``.
- ``MaximumRetryCount`` Number of times to restart the
container on failure.
For example:
``{"Name": "on-failure", "MaximumRetryCount": 5}``
runtime (str): Runtime to use with this container.
security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
stdin_open (bool): Keep ``STDIN`` open even if not attached.
stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
Default: ``True``.
stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
Default: ``False``.
stop_signal (str): The stop signal to use to stop the container
(e.g. ``SIGINT``).
storage_opt (dict): Storage driver options per container as a
key-value mapping.
stream (bool): If true and ``detach`` is false, return a log
generator instead of a string. Ignored if ``detach`` is true.
Default: ``False``.
sysctls (dict): Kernel parameters to set in the container.
tmpfs (dict): Temporary filesystems to mount, as a dictionary
mapping a path inside the container to options for that path.
For example:
.. code-block:: python
{
'/mnt/vol2': '',
'/mnt/vol1': 'size=3G,uid=1000'
}
tty (bool): Allocate a pseudo-TTY.
ulimits (:py:class:`list`): Ulimits to set inside the container,
as a list of :py:class:`docker.types.Ulimit` instances.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
user (str or int): Username or UID to run commands as inside the
container.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
uts_mode (str): Sets the UTS namespace mode for the container.
Supported values are: ``host``
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35``
volume_driver (str): The name of a volume driver/plugin.
volumes (dict or list): A dictionary to configure volumes mounted
inside the container. The key is either the host path or a
volume name, and the value is a dictionary with the keys:
- ``bind`` The path to mount the volume inside the container
- ``mode`` Either ``rw`` to mount the volume read/write, or
``ro`` to mount it read-only.
For example:
.. code-block:: python
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
Or a list of strings which each one of its elements specifies a
mount volume.
For example:
.. code-block:: python
['/home/user1/:/mnt/vol2','/var/www:/mnt/vol1']
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
Returns:
The container logs, either ``STDOUT``, ``STDERR``, or both,
depending on the value of the ``stdout`` and ``stderr`` arguments.
``STDOUT`` and ``STDERR`` may be read only if either ``json-file``
or ``journald`` logging driver used. Thus, if you are using none of
these drivers, a ``None`` object is returned instead. See the
`Engine API documentation
<https://docs.docker.com/engine/api/v1.30/#operation/ContainerLogs/>`_
for full details.
If ``detach`` is ``True``, a :py:class:`Container` object is
returned instead.
Raises:
:py:class:`docker.errors.ContainerError`
If the container exits with a non-zero exit code and
``detach`` is ``False``.
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(image, Image):
image = image.id
stream = kwargs.pop('stream', False)
detach = kwargs.pop('detach', False)
platform = kwargs.get('platform', None)
if detach and remove:
if version_gte(self.client.api._version, '1.25'):
kwargs["auto_remove"] = True
else:
raise RuntimeError("The options 'detach' and 'remove' cannot "
"be used together in api versions < 1.25.")
if kwargs.get('network') and kwargs.get('network_mode'):
raise RuntimeError(
'The options "network" and "network_mode" can not be used '
'together.'
)
if kwargs.get('networking_config') and not kwargs.get('network'):
raise RuntimeError(
'The option "networking_config" can not be used '
'without "network".'
)
try:
container = self.create(image=image, command=command,
detach=detach, **kwargs)
except ImageNotFound:
self.client.images.pull(image, platform=platform)
container = self.create(image=image, command=command,
detach=detach, **kwargs)
container.start()
if detach:
return container
logging_driver = container.attrs['HostConfig']['LogConfig']['Type']
out = None
if logging_driver == 'json-file' or logging_driver == 'journald':
out = container.logs(
stdout=stdout, stderr=stderr, stream=True, follow=True
)
exit_status = container.wait()['StatusCode']
if exit_status != 0:
out = None
if not kwargs.get('auto_remove'):
out = container.logs(stdout=False, stderr=True)
if remove:
container.remove()
if exit_status != 0:
raise ContainerError(
container, exit_status, command, image, out
)
if stream or out is None:
return out
return b''.join(out)
def create(self, image, command=None, **kwargs):
"""
Create a container without starting it. Similar to ``docker create``.
Takes the same arguments as :py:meth:`run`, except for ``stdout``,
``stderr``, and ``remove``.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(image, Image):
image = image.id
kwargs['image'] = image
kwargs['command'] = command
kwargs['version'] = self.client.api._version
create_kwargs = _create_container_args(kwargs)
resp = self.client.api.create_container(**create_kwargs)
return self.get(resp['Id'])
def get(self, container_id):
"""
Get a container by name or ID.
Args:
container_id (str): Container name or ID.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.NotFound`
If the container does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.inspect_container(container_id)
return self.prepare_model(resp)
def list(self, all=False, before=None, filters=None, limit=-1, since=None,
sparse=False, ignore_removed=False):
"""
List containers. Similar to the ``docker ps`` command.
Args:
all (bool): Show all containers. Only running containers are shown
by default
since (str): Show only containers created since Id or Name, include
non-running ones
before (str): Show only container created before Id or Name,
include non-running ones
limit (int): Show `limit` last created containers, include
non-running ones
filters (dict): Filters to be processed on the image list.
Available filters:
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- `label` (str|list): format either ``"key"``, ``"key=value"``
or a list of such.
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
``<image-name>[:tag]``, ``<image-id>``, or
``<image@digest>``.
- `before` (str): Only containers created before a particular
container. Give the container name or id.
- `since` (str): Only containers created after a particular
container. Give container name or id.
A comprehensive list can be found in the documentation for
`docker ps
<https://docs.docker.com/engine/reference/commandline/ps>`_.
sparse (bool): Do not inspect containers. Returns partial
information, but guaranteed not to block. Use
:py:meth:`Container.reload` on resulting objects to retrieve
all attributes. Default: ``False``
ignore_removed (bool): Ignore failures due to missing containers
when attempting to inspect containers from the original list.
Set to ``True`` if race conditions are likely. Has no effect
if ``sparse=True``. Default: ``False``
Returns:
(list of :py:class:`Container`)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.containers(all=all, before=before,
filters=filters, limit=limit,
since=since)
if sparse:
return [self.prepare_model(r) for r in resp]
else:
containers = []
for r in resp:
try:
containers.append(self.get(r['Id']))
# a container may have been removed while iterating
except NotFound:
if not ignore_removed:
raise
return containers
def prune(self, filters=None):
return self.client.api.prune_containers(filters=filters)
prune.__doc__ = APIClient.prune_containers.__doc__
# kwargs to copy straight from run to create
RUN_CREATE_KWARGS = [
'command',
'detach',
'domainname',
'entrypoint',
'environment',
'healthcheck',
'hostname',
'image',
'labels',
'mac_address',
'name',
'network_disabled',
'platform',
'stdin_open',
'stop_signal',
'tty',
'use_config_proxy',
'user',
'working_dir',
]
# kwargs to copy straight from run to host_config
RUN_HOST_CONFIG_KWARGS = [
'auto_remove',
'blkio_weight_device',
'blkio_weight',
'cap_add',
'cap_drop',
'cgroup_parent',
'cgroupns',
'cpu_count',
'cpu_percent',
'cpu_period',
'cpu_quota',
'cpu_shares',
'cpuset_cpus',
'cpuset_mems',
'cpu_rt_period',
'cpu_rt_runtime',
'device_cgroup_rules',
'device_read_bps',
'device_read_iops',
'device_write_bps',
'device_write_iops',
'devices',
'device_requests',
'dns_opt',
'dns_search',
'dns',
'extra_hosts',
'group_add',
'init',
'init_path',
'ipc_mode',
'isolation',
'kernel_memory',
'links',
'log_config',
'lxc_conf',
'mem_limit',
'mem_reservation',
'mem_swappiness',
'memswap_limit',
'mounts',
'nano_cpus',
'network_mode',
'oom_kill_disable',
'oom_score_adj',
'pid_mode',
'pids_limit',
'privileged',
'publish_all_ports',
'read_only',
'restart_policy',
'security_opt',
'shm_size',
'storage_opt',
'sysctls',
'tmpfs',
'ulimits',
'userns_mode',
'uts_mode',
'version',
'volume_driver',
'volumes_from',
'runtime'
]
def _create_container_args(kwargs):
    """
    Convert arguments to create() to arguments to create_container().

    Splits the flat ``kwargs`` mapping accepted by
    ``ContainerCollection.run``/``create`` into the keyword arguments
    that ``APIClient.create_container`` expects, building a ``HostConfig``
    from the host-level options along the way.

    Note: ``kwargs`` is consumed destructively (entries are popped as they
    are classified); anything left over at the end is treated as an
    unrecognized option and raises via ``create_unexpected_kwargs_error``.
    """
    # Copy over kwargs which can be copied directly
    # (iterate over a shallow copy because we pop from ``kwargs`` as we go)
    create_kwargs = {}
    for key in copy.copy(kwargs):
        if key in RUN_CREATE_KWARGS:
            create_kwargs[key] = kwargs.pop(key)
    host_config_kwargs = {}
    for key in copy.copy(kwargs):
        if key in RUN_HOST_CONFIG_KWARGS:
            host_config_kwargs[key] = kwargs.pop(key)
    # Process kwargs which are split over both create and host_config
    ports = kwargs.pop('ports', {})
    if ports:
        host_config_kwargs['port_bindings'] = ports
    volumes = kwargs.pop('volumes', {})
    if volumes:
        host_config_kwargs['binds'] = volumes
    network = kwargs.pop('network', None)
    networking_config = kwargs.pop('networking_config', None)
    if network:
        if networking_config:
            # Sanity check: check if the network is defined in the
            # networking config dict, otherwise switch to None
            if network not in networking_config:
                networking_config = None
        # Without a (valid) per-network config, fall back to a bare
        # ``{network: None}`` mapping so the daemon attaches with defaults.
        create_kwargs['networking_config'] = NetworkingConfig(
            networking_config
        ) if networking_config else {network: None}
        host_config_kwargs['network_mode'] = network
    # All kwargs should have been consumed by this point, so raise
    # error if any are left
    if kwargs:
        raise create_unexpected_kwargs_error('run', kwargs)
    create_kwargs['host_config'] = HostConfig(**host_config_kwargs)
    # Fill in any kwargs which need processing by create_host_config first
    port_bindings = create_kwargs['host_config'].get('PortBindings')
    if port_bindings:
        # sort to make consistent for tests
        create_kwargs['ports'] = [tuple(p.split('/', 1))
                                  for p in sorted(port_bindings.keys())]
    if volumes:
        if isinstance(volumes, dict):
            # dict form: {'host_path_or_name': {'bind': ..., 'mode': ...}};
            # expose the container-side paths to create_container
            create_kwargs['volumes'] = [
                v.get('bind') for v in volumes.values()
            ]
        else:
            # list form: ['host:container[:mode]', ...]
            create_kwargs['volumes'] = [
                _host_volume_from_bind(v) for v in volumes
            ]
    return create_kwargs
def _host_volume_from_bind(bind):
drive, rest = ntpath.splitdrive(bind)
bits = rest.split(':', 1)
if len(bits) == 1 or bits[1] in ('ro', 'rw'):
return drive + bits[0]
elif bits[1].endswith(':ro') or bits[1].endswith(':rw'):
return bits[1][:-3]
else:
return bits[1]
ExecResult = namedtuple('ExecResult', 'exit_code,output')
""" A result of Container.exec_run with the properties ``exit_code`` and
``output``. """
|
ContainerCollection
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/parallel_for/math_test.py
|
{
"start": 23409,
"end": 27654
}
|
class ____(PForTestCase):
def test_cholesky(self):
z = random_ops.random_normal([2, 3, 3])
x = (
math_ops.matmul(z, array_ops.matrix_transpose(z)) # Ensure pos. def.
+ linalg_ops.eye(3)) # Ensure well-conditioned.
def loop_fn(i):
return linalg_ops.cholesky(array_ops.gather(x, i))
self._test_loop_fn(loop_fn, 2)
def test_log_matrix_determinant(self):
for x_shape in ([3, 4, 2, 2], [3, 2, 2]):
x = random_ops.random_normal(x_shape)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return linalg_ops.log_matrix_determinant(array_ops.gather(x, i))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_matrix_inverse(self):
x = (random_ops.random_uniform([3, 4, 2, 2]) + 10 * linalg_ops.eye(2)
) # Ensure well-conditioned.
for adjoint in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return linalg_ops.matrix_inverse(
array_ops.gather(x, i), adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_matrix_solve(self):
for adjoint in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 4, 3, 3) if stack_a else (4, 3, 3)
shape_b = (2, 4, 3, 5) if stack_b else (4, 3, 5)
x = (random_ops.random_uniform(shape_a) + 10 * linalg_ops.eye(3)
) # Ensure well-conditioned.
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return linalg_ops.matrix_solve(a, b, adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_matrix_triangular_solve(self):
for lower in (True, False):
for adjoint in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 4, 3, 3) if stack_a else (4, 3, 3)
shape_b = (2, 4, 3, 5) if stack_b else (4, 3, 5)
x = array_ops.matrix_band_part(
random_ops.random_uniform(shape_a) +
linalg_ops.eye(3), # Ensure well-conditioned.
*((-1, 0) if lower else (0, -1))) # Ensure triangular.
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return linalg_ops.matrix_triangular_solve(
a, b, lower=lower, adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_self_adjoint_eig(self):
z = random_ops.random_normal([2, 3, 3])
x = z + array_ops.matrix_transpose(z) # Ensure self-adjoint.
def loop_fn(i):
return (linalg_ops.self_adjoint_eig(array_ops.gather(x, i)),
linalg_ops.self_adjoint_eigvals(array_ops.gather(x, i)))
self._test_loop_fn(loop_fn, 2)
@test_util.run_without_tensor_float_32(
"Calls einsum in parallel for-loop and compares result to calling einsum "
"in sequential for-loop")
def test_einsum(self):
b = 10
x_series = random_ops.random_uniform([b, 9, 9])
y_series = random_ops.random_uniform([b, 9, 1])
def loop_fn(i):
x = array_ops.gather(x_series, 0) # invariant.
y = array_ops.gather(y_series, 0) # invariant.
x_i = array_ops.gather(x_series, i)
y_i = array_ops.gather(y_series, i)
z0 = special_math_ops.einsum("ab->b", x_i)
z1 = special_math_ops.einsum("ab,bc->ac", x_i, y)
z2 = special_math_ops.einsum("ab,bc->ac", x, y_i)
z3 = special_math_ops.einsum("ab,bc->ac", x, y)
z4 = special_math_ops.einsum("ab,bc->ac", x_i, y_i)
z5 = special_math_ops.einsum("cd,ce->de", y_i, x_i) # Includes transpose.
outputs = [z0, z1, z2, z3, z4, z5]
return outputs
self._test_loop_fn(loop_fn, b)
if __name__ == "__main__":
test.main()
|
LinalgTest
|
python
|
facebook__pyre-check
|
stubs/integration_test/fixture_source/integration_test/cache.py
|
{
"start": 573,
"end": 625
}
|
class ____:
def method(self, x):
pass
|
Base
|
python
|
scipy__scipy
|
scipy/stats/_continuous_distns.py
|
{
"start": 356170,
"end": 366279
}
|
class ____(rv_continuous):
r"""A Von Mises continuous random variable.
%(before_notes)s
See Also
--------
scipy.stats.vonmises_fisher : Von-Mises Fisher distribution on a
hypersphere
Notes
-----
The probability density function for `vonmises` and `vonmises_line` is:
.. math::
f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I_0(\kappa) }
for :math:`-\pi \le x \le \pi`, :math:`\kappa \ge 0`. :math:`I_0` is the
modified Bessel function of order zero (`scipy.special.i0`).
`vonmises` is a circular distribution which does not restrict the
distribution to a fixed interval. Currently, there is no circular
distribution framework in SciPy. The ``cdf`` is implemented such that
``cdf(x + 2*np.pi) == cdf(x) + 1``.
`vonmises_line` is the same distribution, defined on :math:`[-\pi, \pi]`
on the real line. This is a regular (i.e. non-circular) distribution.
Note about distribution parameters: `vonmises` and `vonmises_line` take
``kappa`` as a shape parameter (concentration) and ``loc`` as the location
(circular mean). A ``scale`` parameter is accepted but does not have any
effect.
Examples
--------
Import the necessary modules.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import vonmises
Define distribution parameters.
>>> loc = 0.5 * np.pi # circular mean
>>> kappa = 1 # concentration
Compute the probability density at ``x=0`` via the ``pdf`` method.
>>> vonmises.pdf(0, loc=loc, kappa=kappa)
0.12570826359722018
Verify that the percentile function ``ppf`` inverts the cumulative
distribution function ``cdf`` up to floating point accuracy.
>>> x = 1
>>> cdf_value = vonmises.cdf(x, loc=loc, kappa=kappa)
>>> ppf_value = vonmises.ppf(cdf_value, loc=loc, kappa=kappa)
>>> x, cdf_value, ppf_value
(1, 0.31489339900904967, 1.0000000000000004)
Draw 1000 random variates by calling the ``rvs`` method.
>>> sample_size = 1000
>>> sample = vonmises(loc=loc, kappa=kappa).rvs(sample_size)
Plot the von Mises density on a Cartesian and polar grid to emphasize
that it is a circular distribution.
>>> fig = plt.figure(figsize=(12, 6))
>>> left = plt.subplot(121)
>>> right = plt.subplot(122, projection='polar')
>>> x = np.linspace(-np.pi, np.pi, 500)
>>> vonmises_pdf = vonmises.pdf(x, loc=loc, kappa=kappa)
>>> ticks = [0, 0.15, 0.3]
The left image contains the Cartesian plot.
>>> left.plot(x, vonmises_pdf)
>>> left.set_yticks(ticks)
>>> number_of_bins = int(np.sqrt(sample_size))
>>> left.hist(sample, density=True, bins=number_of_bins)
>>> left.set_title("Cartesian plot")
>>> left.set_xlim(-np.pi, np.pi)
>>> left.grid(True)
The right image contains the polar plot.
>>> right.plot(x, vonmises_pdf, label="PDF")
>>> right.set_yticks(ticks)
>>> right.hist(sample, density=True, bins=number_of_bins,
... label="Histogram")
>>> right.set_title("Polar plot")
>>> right.legend(bbox_to_anchor=(0.15, 1.06))
"""
def _shape_info(self):
return [_ShapeInfo("kappa", False, (0, np.inf), (True, False))]
def _argcheck(self, kappa):
return kappa >= 0
def _rvs(self, kappa, size=None, random_state=None):
return random_state.vonmises(0.0, kappa, size=size)
@inherit_docstring_from(rv_continuous)
def rvs(self, *args, **kwds):
rvs = super().rvs(*args, **kwds)
return np.mod(rvs + np.pi, 2*np.pi) - np.pi
def _pdf(self, x, kappa):
# vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
# = exp(kappa * (cos(x) - 1)) /
# (2*pi*exp(-kappa)*I[0](kappa))
# = exp(kappa * cosm1(x)) / (2*pi*i0e(kappa))
return np.exp(kappa*sc.cosm1(x)) / (2*np.pi*sc.i0e(kappa))
def _logpdf(self, x, kappa):
# vonmises.pdf(x, kappa) = exp(kappa * cosm1(x)) / (2*pi*i0e(kappa))
return kappa * sc.cosm1(x) - np.log(2*np.pi) - np.log(sc.i0e(kappa))
def _cdf(self, x, kappa):
return _stats.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
def _entropy(self, kappa):
# vonmises.entropy(kappa) = -kappa * I[1](kappa) / I[0](kappa) +
# log(2 * np.pi * I[0](kappa))
# = -kappa * I[1](kappa) * exp(-kappa) /
# (I[0](kappa) * exp(-kappa)) +
# log(2 * np.pi *
# I[0](kappa) * exp(-kappa) / exp(-kappa))
# = -kappa * sc.i1e(kappa) / sc.i0e(kappa) +
# log(2 * np.pi * i0e(kappa)) + kappa
return (-kappa * sc.i1e(kappa) / sc.i0e(kappa) +
np.log(2 * np.pi * sc.i0e(kappa)) + kappa)
@extend_notes_in_docstring(rv_continuous, notes="""\
The default limits of integration are endpoints of the interval
of width ``2*pi`` centered at `loc` (e.g. ``[-pi, pi]`` when
``loc=0``).\n\n""")
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
_a, _b = -np.pi, np.pi
if lb is None:
lb = loc + _a
if ub is None:
ub = loc + _b
return super().expect(func, args, loc,
scale, lb, ub, conditional, **kwds)
@_call_super_mom
@extend_notes_in_docstring(rv_continuous, notes="""\
Fit data is assumed to represent angles and will be wrapped onto the
unit circle. `f0` and `fscale` are ignored; the returned shape is
always the maximum likelihood estimate and the scale is always
1. Initial guesses are ignored.\n\n""")
def fit(self, data, *args, **kwds):
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
data, fshape, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
if self.a == -np.pi:
# vonmises line case, here the default fit method will be used
return super().fit(data, *args, **kwds)
# wrap data to interval [0, 2*pi]
data = np.mod(data, 2 * np.pi)
def find_mu(data):
return stats.circmean(data)
def find_kappa(data, loc):
# Usually, sources list the following as the equation to solve for
# the MLE of the shape parameter:
# r = I[1](kappa)/I[0](kappa), where r = mean resultant length
# This is valid when the location is the MLE of location.
# More generally, when the location may be fixed at an arbitrary
# value, r should be defined as follows:
r = np.sum(np.cos(loc - data))/len(data)
# See gh-18128 for more information.
# The function r[0](kappa) := I[1](kappa)/I[0](kappa) is monotonic
# increasing from r[0](0) = 0 to r[0](+inf) = 1. The partial
# derivative of the log likelihood function with respect to kappa
# is monotonic decreasing in kappa.
if r == 1:
# All observations are (almost) equal to the mean. Return
# some large kappa such that r[0](kappa) = 1.0 numerically.
return 1e16
elif r > 0:
def solve_for_kappa(kappa):
return sc.i1e(kappa)/sc.i0e(kappa) - r
# The bounds of the root of r[0](kappa) = r are derived from
# selected bounds of r[0](x) given in [1, Eq. 11 & 16]. See
# gh-20102 for details.
#
# [1] Amos, D. E. (1973). Computation of Modified Bessel
# Functions and Their Ratios. Mathematics of Computation,
# 28(125): 239-251.
lower_bound = r/(1-r)/(1+r)
upper_bound = 2*lower_bound
# The bounds are violated numerically for certain values of r,
# where solve_for_kappa evaluated at the bounds have the same
# sign. This indicates numerical imprecision of i1e()/i0e().
# Return the violated bound in this case as it's more accurate.
if solve_for_kappa(lower_bound) >= 0:
return lower_bound
elif solve_for_kappa(upper_bound) <= 0:
return upper_bound
else:
root_res = root_scalar(solve_for_kappa, method="brentq",
bracket=(lower_bound, upper_bound))
return root_res.root
else:
# if the provided floc is very far from the circular mean,
# the mean resultant length r can become negative.
# In that case, the equation
# I[1](kappa)/I[0](kappa) = r does not have a solution.
# The maximum likelihood kappa is then 0 which practically
# results in the uniform distribution on the circle. As
# vonmises is defined for kappa > 0, return instead the
# smallest floating point value.
# See gh-18190 for more information
return np.finfo(float).tiny
# location likelihood equation has a solution independent of kappa
loc = floc if floc is not None else find_mu(data)
# shape likelihood equation depends on location
shape = fshape if fshape is not None else find_kappa(data, loc)
loc = np.mod(loc + np.pi, 2 * np.pi) - np.pi # ensure in [-pi, pi]
return shape, loc, 1 # scale is not handled
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
|
vonmises_gen
|
python
|
doocs__leetcode
|
lcp/LCP 30. 魔塔游戏/Solution.py
|
{
"start": 0,
"end": 392
}
|
class ____:
def magicTower(self, nums: List[int]) -> int:
q = []
blood = 1
ans = v = 0
for x in nums:
if x < 0:
heappush(q, x)
blood += x
if blood <= 0:
ans += 1
v += q[0]
blood -= heappop(q)
blood += v
return -1 if blood <= 0 else ans
|
Solution
|
python
|
scipy__scipy
|
scipy/linalg/tests/test_decomp_update.py
|
{
"start": 24959,
"end": 25022
}
|
class ____(BaseQRdelete):
dtype = np.dtype('F')
|
TestQRdelete_F
|
python
|
Netflix__metaflow
|
metaflow/plugins/argo/argo_events.py
|
{
"start": 384,
"end": 6017
}
|
class ____(object):
"""
ArgoEvent is a small event, a message, that can be published to Argo Workflows. The
event will eventually start all flows which have been previously deployed with `@trigger`
to wait for this particular named event.
Parameters
----------
name : str,
Name of the event
url : str, optional
Override the event endpoint from `ARGO_EVENTS_WEBHOOK_URL`.
payload : Dict, optional
A set of key-value pairs delivered in this event. Used to set parameters of triggered flows.
"""
def __init__(
self, name, url=ARGO_EVENTS_WEBHOOK_URL, payload=None, access_token=None
):
# TODO: Introduce support for NATS
self._name = name
self._url = url
self._payload = payload or {}
self._access_token = access_token
def add_to_payload(self, key, value):
"""
Add a key-value pair in the payload. This is typically used to set parameters
of triggered flows. Often, `key` is the parameter name you want to set to
`value`. Overrides any existing value of `key`.
Parameters
----------
key : str
Key
value : str
Value
"""
self._payload[key] = str(value)
return self
def safe_publish(self, payload=None, ignore_errors=True):
"""
Publishes an event when called inside a deployed workflow. Outside a deployed workflow
this function does nothing.
Use this function inside flows to create events safely. As this function is a no-op
for local runs, you can safely call it during local development without causing unintended
side-effects. It takes effect only when deployed on Argo Workflows.
Parameters
----------
payload : dict
Additional key-value pairs to add to the payload.
ignore_errors : bool, default True
If True, events are created on a best effort basis - errors are silently ignored.
"""
return self.publish(payload=payload, force=False, ignore_errors=ignore_errors)
def publish(self, payload=None, force=True, ignore_errors=True):
"""
Publishes an event.
Note that the function returns immediately after the event has been sent. It
does not wait for flows to start, nor it guarantees that any flows will start.
Parameters
----------
payload : dict
Additional key-value pairs to add to the payload.
ignore_errors : bool, default True
If True, events are created on a best effort basis - errors are silently ignored.
"""
if payload == None:
payload = {}
# Publish event iff forced or running on Argo Workflows
if force or os.environ.get("ARGO_WORKFLOW_TEMPLATE"):
try:
headers = {}
if self._access_token:
# TODO: Test with bearer tokens
headers = {"Authorization": "Bearer {}".format(self._access_token)}
if ARGO_EVENTS_WEBHOOK_AUTH == "service":
headers.update(SERVICE_HEADERS)
# TODO: do we need to worry about certs?
# Use urllib to avoid introducing any dependency in Metaflow
data = {
"name": self._name,
"payload": {
# Add default fields here...
"name": self._name,
"id": str(uuid.uuid4()),
"timestamp": int(time.time()),
"utc_date": datetime.utcnow().strftime("%Y%m%d"),
"generated-by-metaflow": True,
**self._payload,
**payload,
},
}
request = urllib.request.Request(
self._url,
method="POST",
headers={"Content-Type": "application/json", **headers},
data=json.dumps(data).encode("utf-8"),
)
for i in range(SERVICE_RETRY_COUNT):
try:
# we do not want to wait indefinitely for a response on the event broadcast, as this will keep the task running.
urllib.request.urlopen(request, timeout=60)
print(
"Argo Event (%s) published." % self._name, file=sys.stderr
)
return data["payload"]["id"]
except urllib.error.HTTPError as e:
# TODO: Retry retryable HTTP error codes
raise e
except urllib.error.URLError as e:
if i == SERVICE_RETRY_COUNT - 1:
raise e
else:
time.sleep(2**i)
except Exception as e:
msg = "Unable to publish Argo Event (%s): %s" % (self._name, e)
if ignore_errors:
print(msg, file=sys.stderr)
else:
raise ArgoEventException(msg)
else:
msg = (
"Argo Event (%s) was not published. Use "
+ "ArgoEvent(...).publish(...) "
+ "to force publish."
) % self._name
if ignore_errors:
print(msg, file=sys.stderr)
else:
raise ArgoEventException(msg)
|
ArgoEvent
|
python
|
Farama-Foundation__Gymnasium
|
gymnasium/envs/classic_control/continuous_mountain_car.py
|
{
"start": 609,
"end": 11337
}
|
class ____(gym.Env):
"""
## Description
The Mountain Car MDP is a deterministic MDP that consists of a car placed stochastically
at the bottom of a sinusoidal valley, with the only possible actions being the accelerations
that can be applied to the car in either direction. The goal of the MDP is to strategically
accelerate the car to reach the goal state on top of the right hill. There are two versions
of the mountain car domain in gymnasium: one with discrete actions and one with continuous.
This version is the one with continuous actions.
This MDP first appeared in [Andrew Moore's PhD Thesis (1990)](https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-209.pdf)
```
@TECHREPORT{Moore90efficientmemory-based,
author = {Andrew William Moore},
title = {Efficient Memory-based Learning for Robot Control},
institution = {University of Cambridge},
year = {1990}
}
```
## Observation Space
The observation is a `ndarray` with shape `(2,)` where the elements correspond to the following:
| Num | Observation | Min | Max | Unit |
|-----|--------------------------------------|-------|------|---------------|
| 0 | position of the car along the x-axis | -1.2 | 0.6 | position (m) |
| 1 | velocity of the car | -0.07 | 0.07 | velocity (v) |
## Action Space
The action is a `ndarray` with shape `(1,)`, representing the directional force applied on the car.
The action is clipped in the range `[-1,1]` and multiplied by a power of 0.0015.
## Transition Dynamics:
Given an action, the mountain car follows the following transition dynamics:
*velocity<sub>t+1</sub> = velocity<sub>t</sub> + force * self.power - 0.0025 * cos(3 * position<sub>t</sub>)*
*position<sub>t+1</sub> = position<sub>t</sub> + velocity<sub>t+1</sub>*
where force is the action clipped to the range `[-1,1]` and power is a constant 0.0015.
The collisions at either end are inelastic with the velocity set to 0 upon collision with the wall.
The position is clipped to the range [-1.2, 0.6] and velocity is clipped to the range [-0.07, 0.07].
## Reward
A negative reward of *-0.1 * action<sup>2</sup>* is received at each timestep to penalise for
taking actions of large magnitude. If the mountain car reaches the goal then a positive reward of +100
is added to the negative reward for that timestep.
## Starting State
The position of the car is assigned a uniform random value in `[-0.6 , -0.4]`.
The starting velocity of the car is always assigned to 0.
## Episode End
The episode ends if either of the following happens:
1. Termination: The position of the car is greater than or equal to 0.45 (the goal position on top of the right hill)
2. Truncation: The length of the episode is 999.
## Arguments
Continuous Mountain Car has two parameters for `gymnasium.make` with `render_mode` and `goal_velocity`.
On reset, the `options` parameter allows the user to change the bounds used to determine the new random state.
```python
>>> import gymnasium as gym
>>> env = gym.make("MountainCarContinuous-v0", render_mode="rgb_array", goal_velocity=0.1) # default goal_velocity=0
>>> env
<TimeLimit<OrderEnforcing<PassiveEnvChecker<Continuous_MountainCarEnv<MountainCarContinuous-v0>>>>>
>>> env.reset(seed=123, options={"low": -0.7, "high": -0.5}) # default low=-0.6, high=-0.4
(array([-0.5635296, 0. ], dtype=float32), {})
```
## Version History
* v0: Initial versions release
"""
metadata = {
"render_modes": ["human", "rgb_array"],
"render_fps": 30,
}
def __init__(self, render_mode: str | None = None, goal_velocity=0):
self.min_action = -1.0
self.max_action = 1.0
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = (
0.45 # was 0.5 in gymnasium, 0.45 in Arnaud de Broissia's version
)
self.goal_velocity = goal_velocity
self.power = 0.0015
self.low_state = np.array(
[self.min_position, -self.max_speed], dtype=np.float32
)
self.high_state = np.array(
[self.max_position, self.max_speed], dtype=np.float32
)
self.render_mode = render_mode
self.screen_width = 600
self.screen_height = 400
self.screen = None
self.clock = None
self.isopen = True
self.action_space = spaces.Box(
low=self.min_action, high=self.max_action, shape=(1,), dtype=np.float32
)
self.observation_space = spaces.Box(
low=self.low_state, high=self.high_state, dtype=np.float32
)
def step(self, action: np.ndarray):
position = self.state[0]
velocity = self.state[1]
force = min(max(action[0], self.min_action), self.max_action)
velocity += force * self.power - 0.0025 * math.cos(3 * position)
if velocity > self.max_speed:
velocity = self.max_speed
if velocity < -self.max_speed:
velocity = -self.max_speed
position += velocity
if position > self.max_position:
position = self.max_position
if position < self.min_position:
position = self.min_position
if position == self.min_position and velocity < 0:
velocity = 0
# Convert a possible numpy bool to a Python bool.
terminated = bool(
position >= self.goal_position and velocity >= self.goal_velocity
)
reward = 0
if terminated:
reward = 100.0
reward -= math.pow(action[0], 2) * 0.1
self.state = np.array([position, velocity], dtype=np.float32)
if self.render_mode == "human":
self.render()
# truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
return self.state, reward, terminated, False, {}
def reset(self, *, seed: int | None = None, options: dict | None = None):
super().reset(seed=seed)
# Note that if you use custom reset bounds, it may lead to out-of-bound
# state/observations.
low, high = utils.maybe_parse_reset_bounds(options, -0.6, -0.4)
self.state = np.array([self.np_random.uniform(low=low, high=high), 0])
if self.render_mode == "human":
self.render()
return np.array(self.state, dtype=np.float32), {}
def _height(self, xs):
return np.sin(3 * xs) * 0.45 + 0.55
def render(self):
if self.render_mode is None:
assert self.spec is not None
gym.logger.warn(
"You are calling render method without specifying any render mode. "
"You can specify the render_mode at initialization, "
f'e.g. gym.make("{self.spec.id}", render_mode="rgb_array")'
)
return
try:
import pygame
from pygame import gfxdraw
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[classic_control]"`'
) from e
if self.screen is None:
pygame.init()
if self.render_mode == "human":
pygame.display.init()
self.screen = pygame.display.set_mode(
(self.screen_width, self.screen_height)
)
else: # mode == "rgb_array":
self.screen = pygame.Surface((self.screen_width, self.screen_height))
if self.clock is None:
self.clock = pygame.time.Clock()
world_width = self.max_position - self.min_position
scale = self.screen_width / world_width
carwidth = 40
carheight = 20
self.surf = pygame.Surface((self.screen_width, self.screen_height))
self.surf.fill((255, 255, 255))
pos = self.state[0]
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs - self.min_position) * scale, ys * scale))
pygame.draw.aalines(self.surf, points=xys, closed=False, color=(0, 0, 0))
clearance = 10
l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
coords = []
for c in [(l, b), (l, t), (r, t), (r, b)]:
c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos))
coords.append(
(
c[0] + (pos - self.min_position) * scale,
c[1] + clearance + self._height(pos) * scale,
)
)
gfxdraw.aapolygon(self.surf, coords, (0, 0, 0))
gfxdraw.filled_polygon(self.surf, coords, (0, 0, 0))
for c in [(carwidth / 4, 0), (-carwidth / 4, 0)]:
c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos))
wheel = (
int(c[0] + (pos - self.min_position) * scale),
int(c[1] + clearance + self._height(pos) * scale),
)
gfxdraw.aacircle(
self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128)
)
gfxdraw.filled_circle(
self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128)
)
flagx = int((self.goal_position - self.min_position) * scale)
flagy1 = int(self._height(self.goal_position) * scale)
flagy2 = flagy1 + 50
gfxdraw.vline(self.surf, flagx, flagy1, flagy2, (0, 0, 0))
gfxdraw.aapolygon(
self.surf,
[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)],
(204, 204, 0),
)
gfxdraw.filled_polygon(
self.surf,
[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)],
(204, 204, 0),
)
self.surf = pygame.transform.flip(self.surf, False, True)
self.screen.blit(self.surf, (0, 0))
if self.render_mode == "human":
pygame.event.pump()
self.clock.tick(self.metadata["render_fps"])
pygame.display.flip()
elif self.render_mode == "rgb_array":
return np.transpose(
np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2)
)
def close(self):
if self.screen is not None:
import pygame
pygame.display.quit()
pygame.quit()
self.isopen = False
|
Continuous_MountainCarEnv
|
python
|
altair-viz__altair
|
tools/generate_schema_wrapper.py
|
{
"start": 6995,
"end": 7562
}
|
class ____:
_encoding_name: str
def to_dict(
self,
validate: bool = True,
ignore: list[str] | None = None,
context: dict[str, Any] | None = None,
) -> dict:
context = context or {}
ignore = ignore or []
datum = self._get("datum", Undefined) # type: ignore[attr-defined] # noqa
copy = self # don't copy unless we need to
return super(DatumChannelMixin, copy).to_dict(
validate=validate, ignore=ignore, context=context
)
"""
MARK_MIXIN: Final = '''
|
DatumChannelMixin
|
python
|
getsentry__sentry
|
src/sentry/core/endpoints/scim/members.py
|
{
"start": 3529,
"end": 3996
}
|
class ____(serializers.Serializer):
op = serializers.CharField(required=True)
value = OperationValue()
path = serializers.CharField(required=False)
def validate_op(self, value: str) -> str:
value = value.lower()
if value in [MemberPatchOps.REPLACE]:
return value
raise serializers.ValidationError(f'"{value}" is not a valid choice')
@extend_schema_serializer(exclude_fields=("schemas",))
|
SCIMPatchOperationSerializer
|
python
|
scrapy__scrapy
|
scrapy/dupefilters.py
|
{
"start": 612,
"end": 1318
}
|
class ____:
"""Dummy duplicate request filtering class (:setting:`DUPEFILTER_CLASS`)
that does not filter out any request."""
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
return cls()
def request_seen(self, request: Request) -> bool:
return False
def open(self) -> Deferred[None] | None:
pass
def close(self, reason: str) -> Deferred[None] | None:
pass
def log(self, request: Request, spider: Spider) -> None:
"""Log that a request has been filtered"""
warn(
"Calling BaseDupeFilter.log() is deprecated.",
ScrapyDeprecationWarning,
stacklevel=2,
)
|
BaseDupeFilter
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_stackdriver.py
|
{
"start": 9882,
"end": 10426
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.stackdriver.StackdriverHook")
def test_execute(self, mock_hook):
operator = StackdriverDeleteNotificationChannelOperator(
task_id=TEST_TASK_ID,
name="test-channel",
)
operator.execute(context=mock.MagicMock())
mock_hook.return_value.delete_notification_channel.assert_called_once_with(
name="test-channel", retry=DEFAULT, timeout=None, metadata=()
)
|
TestStackdriverDeleteNotificationChannelOperator
|
python
|
davidhalter__parso
|
parso/python/tree.py
|
{
"start": 32644,
"end": 36241
}
|
class ____(PythonBaseNode):
"""
It's a helper class that makes business logic with params much easier. The
Python grammar defines no ``param`` node. It defines it in a different way
that is not really suited to working with parameters.
"""
type = 'param'
def __init__(self, children, parent=None):
super().__init__(children)
self.parent = parent
@property
def star_count(self):
"""
Is `0` in case of `foo`, `1` in case of `*foo` or `2` in case of
`**foo`.
"""
first = self.children[0]
if first in ('*', '**'):
return len(first.value)
return 0
@property
def default(self):
"""
The default is the test node that appears after the `=`. Is `None` in
case no default is present.
"""
has_comma = self.children[-1] == ','
try:
if self.children[-2 - int(has_comma)] == '=':
return self.children[-1 - int(has_comma)]
except IndexError:
return None
@property
def annotation(self):
"""
The default is the test node that appears after `:`. Is `None` in case
no annotation is present.
"""
tfpdef = self._tfpdef()
if tfpdef.type == 'tfpdef':
assert tfpdef.children[1] == ":"
assert len(tfpdef.children) == 3
annotation = tfpdef.children[2]
return annotation
else:
return None
def _tfpdef(self):
"""
tfpdef: see e.g. grammar36.txt.
"""
offset = int(self.children[0] in ('*', '**'))
return self.children[offset]
@property
def name(self):
"""
The `Name` leaf of the param.
"""
if self._tfpdef().type == 'tfpdef':
return self._tfpdef().children[0]
else:
return self._tfpdef()
def get_defined_names(self, include_setitem=False):
return [self.name]
@property
def position_index(self):
"""
Property for the positional index of a paramter.
"""
index = self.parent.children.index(self)
try:
keyword_only_index = self.parent.children.index('*')
if index > keyword_only_index:
# Skip the ` *, `
index -= 2
except ValueError:
pass
try:
keyword_only_index = self.parent.children.index('/')
if index > keyword_only_index:
# Skip the ` /, `
index -= 2
except ValueError:
pass
return index - 1
def get_parent_function(self):
"""
Returns the function/lambda of a parameter.
"""
return self.search_ancestor('funcdef', 'lambdef')
def get_code(self, include_prefix=True, include_comma=True):
"""
Like all the other get_code functions, but includes the param
`include_comma`.
:param include_comma bool: If enabled includes the comma in the string output.
"""
if include_comma:
return super().get_code(include_prefix)
children = self.children
if children[-1] == ',':
children = children[:-1]
return self._get_code_for_children(
children,
include_prefix=include_prefix
)
def __repr__(self):
default = '' if self.default is None else '=%s' % self.default.get_code()
return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default)
|
Param
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-artifacts-that-can-be-extracted.py
|
{
"start": 611,
"end": 1253
}
|
class ____(object):
def digArtifacts(self, n, artifacts, dig):
"""
:type n: int
:type artifacts: List[List[int]]
:type dig: List[List[int]]
:rtype: int
"""
lookup = {(i, j):idx for idx, (r1, c1, r2, c2) in enumerate(artifacts) for i in xrange(r1, r2+1) for j in xrange(c1, c2+1)}
cnt = [(r2-r1+1)*(c2-c1+1) for r1, c1, r2, c2 in artifacts]
result = 0
for i, j in dig:
if (i, j) not in lookup:
continue
cnt[lookup[i, j]] -= 1
if not cnt[lookup[i, j]]:
result += 1
return result
|
Solution2
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_image_anchor04.py
|
{
"start": 315,
"end": 948
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image_anchor04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"D7",
self.image_dir + "yellow.png",
{"x_offset": 1, "y_offset": 2, "positioning": 3},
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
huggingface__transformers
|
src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py
|
{
"start": 69641,
"end": 77168
}
|
class ____(Phi3ForCausalLM):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
def __init__(self, config):
super().__init__(config)
self.model = Phi4MultimodalModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_pixel_values: Optional[torch.FloatTensor] = None,
image_sizes: Optional[torch.LongTensor] = None,
image_attention_mask=None,
audio_input_features: Optional[torch.FloatTensor] = None,
audio_embed_sizes=None,
audio_attention_mask=None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> CausalLMOutputWithPast:
r"""
image_pixel_values (`torch.FloatTensor`, *optional*):
If the input contains images, these correspond to the pixel values after transformations (as returned by
the Processor)
image_sizes (`torch.LongTensor`, *optional*):
If the input contains images, these correspond to size of each image.
image_attention_mask (`torch.LongTensor`, *optional*):
Attention mask for the images.
audio_input_features (`torch.FloatTensor`, *optional*):
If the input contains audio samples, these correspond to the values after transformation (as returned by
the Processor).
audio_embed_sizes (`torch.Tensor`, *optional*):
Size of the audio inputs.
audio_attention_mask (`torch.Tensor, *optional*):
Attention mask for the audio inputs.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Phi4MultimodalForCausalLM
>>> model = Phi4MultimodalForCausalLM.from_pretrained("TBA")
>>> tokenizer = AutoTokenizer.from_pretrained("TBA")
>>> prompt = "This is an example script ."
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
'This is an example script .\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
image_pixel_values=image_pixel_values,
image_sizes=image_sizes,
image_attention_mask=image_attention_mask,
audio_input_features=audio_input_features,
audio_embed_sizes=audio_embed_sizes,
audio_attention_mask=audio_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
image_pixel_values=None,
image_sizes=None,
image_attention_mask=None,
audio_input_features=None,
audio_embed_sizes=None,
audio_attention_mask=None,
cache_position=None,
position_ids=None,
use_cache=True,
logits_to_keep=0,
**kwargs,
):
# Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
# process
# When the first time input length reached long and short factor switching point, enforce re-compute cache
# It will cause downside of slower at this single token position, however, better than current failure.
if (
past_key_values
and self.config.rope_parameters
and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
):
past_length = cache_position[0]
if past_length <= self.config.original_max_position_embeddings:
past_key_values = None
model_inputs = super().prepare_inputs_for_generation(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
image_pixel_values=image_pixel_values,
image_sizes=image_sizes,
image_attention_mask=image_attention_mask,
audio_input_features=audio_input_features,
audio_embed_sizes=audio_embed_sizes,
audio_attention_mask=audio_attention_mask,
cache_position=cache_position,
position_ids=position_ids,
use_cache=use_cache,
logits_to_keep=logits_to_keep,
**kwargs,
)
return model_inputs
__all__ = [
"Phi4MultimodalAudioPreTrainedModel",
"Phi4MultimodalAudioModel",
"Phi4MultimodalVisionPreTrainedModel",
"Phi4MultimodalVisionModel",
"Phi4MultimodalPreTrainedModel",
"Phi4MultimodalModel",
"Phi4MultimodalForCausalLM",
"Phi4MultimodalVisionConfig",
"Phi4MultimodalAudioConfig",
"Phi4MultimodalConfig",
]
|
Phi4MultimodalForCausalLM
|
python
|
pydantic__pydantic
|
pydantic/v1/errors.py
|
{
"start": 15355,
"end": 15466
}
|
class ____(PydanticValueError):
msg_template = 'value is not a valid IPv4 or IPv6 network'
|
IPvAnyNetworkError
|
python
|
ray-project__ray
|
python/ray/data/_internal/datasource/webdataset_datasource.py
|
{
"start": 9204,
"end": 12562
}
|
class ____(FileBasedDatasource):
"""A Datasource for WebDataset datasets (tar format with naming conventions)."""
_FILE_EXTENSIONS = ["tar"]
def __init__(
self,
paths: Union[str, List[str]],
decoder: Optional[Union[bool, str, callable, list]] = True,
fileselect: Optional[Union[bool, callable, list]] = None,
filerename: Optional[Union[bool, callable, list]] = None,
suffixes: Optional[Union[bool, callable, list]] = None,
verbose_open: bool = False,
expand_json: bool = False,
**file_based_datasource_kwargs,
):
super().__init__(paths, **file_based_datasource_kwargs)
self.decoder = decoder
self.fileselect = fileselect
self.filerename = filerename
self.suffixes = suffixes
self.verbose_open = verbose_open
self.expand_json = expand_json
def _read_stream(self, stream: "pyarrow.NativeFile", path: str):
"""Read and decode samples from a stream.
Note that fileselect selects files during reading, while suffixes
selects files during the grouping step.
Args:
stream: File descriptor to read from.
path: Path to the data.
decoder: decoder or list of decoders to be applied to samples
fileselect: Predicate for skipping files in tar decoder.
Defaults to lambda_:False.
suffixes: List of suffixes to be extracted. Defaults to None.
verbose_open: Print message when opening files. Defaults to False.
Yields:
List[Dict[str, Any]]: List of sample (list of length 1).
"""
import pandas as pd
def get_tar_file_iterator():
return _tar_file_iterator(
stream,
fileselect=self.fileselect,
filerename=self.filerename,
verbose_open=self.verbose_open,
)
# S3 can raise transient errors during iteration
files = iterate_with_retry(
get_tar_file_iterator,
"iterate tar file",
match=self._data_context.retried_io_errors,
)
samples = _group_by_keys(files, meta=dict(__url__=path), suffixes=self.suffixes)
for sample in samples:
if self.decoder is not None:
sample = _apply_list(self.decoder, sample, default=_default_decoder)
if self.expand_json:
if isinstance(sample["json"], bytes):
parsed_json = json.loads(sample["json"].decode("utf-8"))
elif isinstance(sample["json"], str):
parsed_json = json.loads(sample["json"])
elif isinstance(sample["json"], dict):
parsed_json = sample["json"]
else:
raise TypeError(
f"Unsupported data type" f" {type(sample['json'])} for sample"
)
for k, v in parsed_json.items():
if k not in sample:
sample[k] = []
sample[k].append(v)
yield pd.DataFrame(
{
k: v if isinstance(v, list) and len(v) == 1 else [v]
for k, v in sample.items()
}
)
|
WebDatasetDatasource
|
python
|
apache__airflow
|
task-sdk/tests/conftest.py
|
{
"start": 8512,
"end": 12362
}
|
class ____(Protocol):
def __call__(
self,
dag_id: str = ...,
run_id: str = ...,
logical_date: str = ...,
data_interval_start: str | datetime = ...,
data_interval_end: str | datetime = ...,
clear_number: int = ...,
start_date: str | datetime = ...,
run_after: str | datetime = ...,
run_type: str = ...,
task_reschedule_count: int = ...,
conf=None,
consumed_asset_events: Sequence[AssetEventDagRunReference] = ...,
) -> dict[str, Any]: ...
@pytest.fixture
def make_ti_context() -> MakeTIContextCallable:
"""Factory for creating TIRunContext objects."""
from airflow.sdk import DagRunState
from airflow.sdk.api.datamodels._generated import DagRun, TIRunContext
def _make_context(
dag_id: str = "test_dag",
run_id: str = "test_run",
logical_date: str | datetime = "2024-12-01T01:00:00Z",
data_interval_start: str | datetime = "2024-12-01T00:00:00Z",
data_interval_end: str | datetime = "2024-12-01T01:00:00Z",
clear_number: int = 0,
start_date: str | datetime = "2024-12-01T01:00:00Z",
run_after: str | datetime = "2024-12-01T01:00:00Z",
run_type: str = "manual",
task_reschedule_count: int = 0,
conf: dict[str, Any] | None = None,
should_retry: bool = False,
max_tries: int = 0,
consumed_asset_events: Sequence[AssetEventDagRunReference] = (),
) -> TIRunContext:
return TIRunContext(
dag_run=DagRun(
dag_id=dag_id,
run_id=run_id,
logical_date=logical_date, # type: ignore
data_interval_start=data_interval_start, # type: ignore
data_interval_end=data_interval_end, # type: ignore
clear_number=clear_number, # type: ignore
start_date=start_date, # type: ignore
run_type=run_type, # type: ignore
run_after=run_after, # type: ignore
state=DagRunState.RUNNING,
conf=conf, # type: ignore
consumed_asset_events=list(consumed_asset_events),
),
task_reschedule_count=task_reschedule_count,
max_tries=max_tries,
should_retry=should_retry,
)
return _make_context
@pytest.fixture
def make_ti_context_dict(make_ti_context: MakeTIContextCallable) -> MakeTIContextDictCallable:
"""Factory for creating context dictionaries suited for API Server response."""
def _make_context_dict(
dag_id: str = "test_dag",
run_id: str = "test_run",
logical_date: str | datetime = "2024-12-01T00:00:00Z",
data_interval_start: str | datetime = "2024-12-01T00:00:00Z",
data_interval_end: str | datetime = "2024-12-01T01:00:00Z",
clear_number: int = 0,
start_date: str | datetime = "2024-12-01T00:00:00Z",
run_after: str | datetime = "2024-12-01T00:00:00Z",
run_type: str = "manual",
task_reschedule_count: int = 0,
conf=None,
consumed_asset_events: Sequence[AssetEventDagRunReference] = (),
) -> dict[str, Any]:
context = make_ti_context(
dag_id=dag_id,
run_id=run_id,
logical_date=logical_date,
data_interval_start=data_interval_start,
data_interval_end=data_interval_end,
clear_number=clear_number,
start_date=start_date,
run_after=run_after,
run_type=run_type,
conf=conf,
task_reschedule_count=task_reschedule_count,
consumed_asset_events=consumed_asset_events,
)
return context.model_dump(exclude_unset=True, mode="json")
return _make_context_dict
|
MakeTIContextDictCallable
|
python
|
gevent__gevent
|
src/gevent/_threading.py
|
{
"start": 4261,
"end": 4374
}
|
class ____(Exception):
"""Raised from :meth:`Queue.get` if no item is available in the timeout."""
|
EmptyTimeout
|
python
|
graphql-python__graphene
|
graphene/types/base.py
|
{
"start": 766,
"end": 1386
}
|
class ____(SubclassWithMeta):
@classmethod
def create_type(cls, class_name, **options):
return type(class_name, (cls,), {"Meta": options})
@classmethod
def __init_subclass_with_meta__(
cls, name=None, description=None, _meta=None, **_kwargs
):
assert "_meta" not in cls.__dict__, "Can't assign meta directly"
if not _meta:
return
_meta.name = name or cls.__name__
_meta.description = description or trim_docstring(cls.__doc__)
_meta.freeze()
cls._meta = _meta
super(BaseType, cls).__init_subclass_with_meta__()
|
BaseType
|
python
|
getsentry__sentry
|
src/sentry/core/endpoints/scim/utils.py
|
{
"start": 4254,
"end": 4598
}
|
class ____(OrganizationSCIMPermission):
scope_map = {
"GET": ["member:read", "member:write", "member:admin"],
"POST": ["member:write", "member:admin"],
"PATCH": ["member:write", "member:admin"],
"PUT": ["member:write", "member:admin"],
"DELETE": ["member:admin"],
}
|
OrganizationSCIMMemberPermission
|
python
|
aio-libs__aiohttp
|
aiohttp/web_routedef.py
|
{
"start": 3973,
"end": 6113
}
|
class ____(Sequence[AbstractRouteDef]):
"""Route definition table"""
def __init__(self) -> None:
self._items: list[AbstractRouteDef] = []
def __repr__(self) -> str:
return f"<RouteTableDef count={len(self._items)}>"
@overload
def __getitem__(self, index: int) -> AbstractRouteDef: ...
@overload
def __getitem__(self, index: "slice[int, int, int]") -> list[AbstractRouteDef]: ...
def __getitem__(
self, index: Union[int, "slice[int, int, int]"]
) -> AbstractRouteDef | list[AbstractRouteDef]:
return self._items[index]
def __iter__(self) -> Iterator[AbstractRouteDef]:
return iter(self._items)
def __len__(self) -> int:
return len(self._items)
def __contains__(self, item: object) -> bool:
return item in self._items
def route(self, method: str, path: str, **kwargs: Any) -> _Deco:
def inner(handler: _HandlerType) -> _HandlerType:
self._items.append(RouteDef(method, path, handler, kwargs))
return handler
return inner
def head(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_HEAD, path, **kwargs)
def get(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_GET, path, **kwargs)
def post(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_POST, path, **kwargs)
def put(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_PUT, path, **kwargs)
def patch(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_PATCH, path, **kwargs)
def delete(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_DELETE, path, **kwargs)
def options(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_OPTIONS, path, **kwargs)
def view(self, path: str, **kwargs: Any) -> _Deco:
return self.route(hdrs.METH_ANY, path, **kwargs)
def static(self, prefix: str, path: PathLike, **kwargs: Any) -> None:
self._items.append(StaticDef(prefix, path, kwargs))
|
RouteTableDef
|
python
|
getsentry__sentry
|
src/sentry/backup/services/import_export/impl.py
|
{
"start": 5431,
"end": 31027
}
|
class ____(ImportExportService):
"""
This implementation is universal regardless of which mode (CONTROL, REGION, or MONOLITH) it is
run in. All import/export codepaths must be executed in REGION or MONOLITH instances only, so
the only case in which the caller should use the remote implementation are when trying to
import/export a CONTROL model from a REGION instance. In such cases, it is up to the caller to
manually select the correct remote/local instance based on the model being being
imported/exported with a block of code like:
if SiloMode.CONTROL in model._meta.silo_limit.modes:
import_export_service.export_by_model(...)
else:
ImportExportService.get_local_implementation().export_by_model(...)
"""
def import_by_model(
self,
*,
import_model_name: str = "",
scope: RpcImportScope | None = None,
flags: RpcImportFlags = DEFAULT_IMPORT_FLAGS,
filter_by: list[RpcFilter],
pk_map: RpcPrimaryKeyMap,
json_data: str = "",
min_ordinal: int,
) -> RpcImportResult:
if min_ordinal < 1:
return RpcImportError(
kind=RpcImportErrorKind.InvalidMinOrdinal,
on=InstanceID(import_model_name),
reason=f"The model `{import_model_name}` was offset with an invalid `min_ordinal` of `{min_ordinal}`",
)
batch_model_name = NormalizedModelName(import_model_name)
model = get_model(batch_model_name)
if model is None:
return RpcImportError(
kind=RpcImportErrorKind.UnknownModel,
on=InstanceID(import_model_name),
reason=f"The model `{import_model_name}` could not be found",
)
silo_mode = SiloMode.get_current_mode()
model_modes = model._meta.silo_limit.modes # type: ignore[attr-defined]
if silo_mode != SiloMode.MONOLITH and silo_mode not in model_modes:
return RpcImportError(
kind=RpcImportErrorKind.IncorrectSiloModeForModel,
on=InstanceID(import_model_name),
reason=f"The model `{import_model_name}` was forwarded to the incorrect silo (it cannot be imported from the {silo_mode} silo)",
)
if scope is None:
return RpcImportError(
kind=RpcImportErrorKind.UnspecifiedScope,
on=InstanceID(import_model_name),
reason="The RPC was called incorrectly, please set an `ImportScope` parameter",
)
import_flags = flags.from_rpc()
if import_flags.import_uuid is None:
return RpcImportError(
kind=RpcImportErrorKind.MissingImportUUID,
on=InstanceID(import_model_name),
reason="Must specify `import_uuid` when importing",
)
import_scope = scope.from_rpc()
in_pk_map = pk_map.from_rpc()
filters: list[Filter] = []
for fb in filter_by:
if NormalizedModelName(fb.on_model) == batch_model_name:
filters.append(fb.from_rpc())
import_chunk_type = (
ControlImportChunk
if SiloMode.CONTROL in dependencies()[batch_model_name].silos
else RegionImportChunk
)
extra = {
"model_name": batch_model_name,
"import_uuid": import_flags.import_uuid,
"min_ordinal": min_ordinal,
}
try:
# It's possible that this write has already occurred, and we are simply retrying
# because the response got lost in transit. If so, just re-use that reply. We do
# this in the transaction because, while `import_by_model` is generally called in a
# sequential manner, cases like timeouts or long queues may cause a previous call to
# still be active when the next one is made. We'll check once here for an existing
# copy of this (uniquely identifiable) import chunk here to short circuit and avoid
# doing frivolous work. However, this doesn't fully solve our data race error, as it
# is possible that another runaway process makes the colliding write while we're
# building our transaction. Thus, we'll check `get_existing_import_chunk()` again if
# we catch an `IntegrityError` below.
existing_import_chunk = get_existing_import_chunk(
batch_model_name, import_flags, import_chunk_type, min_ordinal
)
if existing_import_chunk is not None:
logger.info("import_by_model.already_imported", extra=extra)
return existing_import_chunk
# We don't need the control and region silo synced into the correct `*Replica` tables
# immediately. The locally silo-ed versions of the models are written by the scripts
# themselves, and the remote versions will be synced a few minutes later, well before
# any users are likely ot need to get ahold of them to view actual data in the UI.
using = router.db_for_write(model)
# HACK(azaslavsky): Need to figure out why `OrganizationMemberTeam` in particular is failing, but we can just use async outboxes for it for now.
with outbox_context(
transaction.atomic(using=using),
flush=import_model_name != "sentry.organizationmemberteam",
):
ok_relocation_scopes = import_scope.value
out_pk_map = PrimaryKeyMap()
min_old_pk = 0
max_old_pk = 0
min_inserted_pk: int | None = None
max_inserted_pk: int | None = None
last_seen_ordinal = min_ordinal - 1
json_data = fixup_array_fields(json_data)
json_data = fixup_json_fields(json_data)
for deserialized_object in deserialize(
"json", json_data, use_natural_keys=False, ignorenonexistent=True
):
model_instance = deserialized_object.object
inst_model_name = get_model_name(model_instance)
if not isinstance(model_instance, BaseModel):
return RpcImportError(
kind=RpcImportErrorKind.UnexpectedModel,
on=InstanceID(model=str(inst_model_name), ordinal=None),
left_pk=model_instance.pk,
reason=f"Received non-sentry model of kind `{inst_model_name}`",
)
if model_instance._meta.app_label not in EXCLUDED_APPS or model_instance:
if model_instance.get_possible_relocation_scopes() & ok_relocation_scopes:
if inst_model_name != batch_model_name:
return RpcImportError(
kind=RpcImportErrorKind.UnexpectedModel,
on=InstanceID(model=str(inst_model_name), ordinal=None),
left_pk=model_instance.pk,
reason=f"Received model of kind `{inst_model_name}` when `{batch_model_name}` was expected",
)
for f in filters:
if getattr(model_instance, f.field, None) not in f.values:
break
else:
try:
# We can only be sure `get_relocation_scope()` will be correct
# if it is fired AFTER normalization, as some
# `get_relocation_scope()` methods rely on being able to
# correctly resolve foreign keys, which is only possible after
# normalization.
old_pk = model_instance.normalize_before_relocation_import(
in_pk_map, import_scope, import_flags
)
if old_pk is None:
continue
# Now that the model has been normalized, we can ensure that
# this particular instance has a `RelocationScope` that permits
# importing.
if (
not model_instance.get_relocation_scope()
in ok_relocation_scopes
):
continue
# Perform the actual database write.
written = model_instance.write_relocation_import(
import_scope, import_flags
)
if written is None:
continue
# For models that may have circular references to themselves
# (unlikely), keep track of the new pk in the input map as well.
last_seen_ordinal += 1
new_pk, import_kind = written
slug = getattr(model_instance, "slug", None)
in_pk_map.insert(
inst_model_name, old_pk, new_pk, import_kind, slug
)
out_pk_map.insert(
inst_model_name, old_pk, new_pk, import_kind, slug
)
# Do a little bit of book-keeping for our future `ImportChunk`.
if min_old_pk == 0:
min_old_pk = old_pk
if old_pk > max_old_pk:
max_old_pk = old_pk
if import_kind == ImportKind.Inserted:
if min_inserted_pk is None:
min_inserted_pk = new_pk
if max_inserted_pk is None or new_pk > max_inserted_pk:
max_inserted_pk = new_pk
except DjangoValidationError as e:
errs = {field: error for field, error in e.message_dict.items()}
return RpcImportError(
kind=RpcImportErrorKind.ValidationError,
on=InstanceID(import_model_name, ordinal=last_seen_ordinal),
left_pk=model_instance.pk,
reason=f"Django validation error encountered: {errs}",
)
except DjangoRestFrameworkValidationError as e:
return RpcImportError(
kind=RpcImportErrorKind.ValidationError,
on=InstanceID(import_model_name, ordinal=last_seen_ordinal),
left_pk=model_instance.pk,
reason=str(e),
)
# If the `last_seen_ordinal` has not been incremented, no actual writes were done.
if last_seen_ordinal == min_ordinal - 1:
logger.info("import_by_model.none_imported", extra=extra)
return RpcImportOk(
mapped_pks=RpcPrimaryKeyMap.into_rpc(out_pk_map),
min_ordinal=None,
max_ordinal=None,
min_source_pk=None,
max_source_pk=None,
min_inserted_pk=None,
max_inserted_pk=None,
)
# We wrote at least one model, so make sure to write an appropriate `ImportChunk`
# and update the sequences too.
table = model_instance._meta.db_table
seq = f"{table}_id_seq"
with connections[using].cursor() as cursor:
cursor.execute(f"SELECT setval(%s, (SELECT MAX(id) FROM {table}))", [seq])
inserted = out_pk_map.partition({batch_model_name}, {ImportKind.Inserted}).mapping[
import_model_name
]
existing = out_pk_map.partition({batch_model_name}, {ImportKind.Existing}).mapping[
import_model_name
]
overwrite = out_pk_map.partition(
{batch_model_name}, {ImportKind.Overwrite}
).mapping[import_model_name]
import_chunk_args = {
"import_uuid": flags.import_uuid,
"model": import_model_name,
"min_ordinal": min_ordinal,
"max_ordinal": last_seen_ordinal,
"min_source_pk": min_old_pk,
"max_source_pk": max_old_pk,
"min_inserted_pk": min_inserted_pk,
"max_inserted_pk": max_inserted_pk,
"inserted_map": {k: v[0] for k, v in inserted.items()},
"existing_map": {k: v[0] for k, v in existing.items()},
"overwrite_map": {k: v[0] for k, v in overwrite.items()},
"inserted_identifiers": {
k: v[2] for k, v in inserted.items() if v[2] is not None
},
}
if import_chunk_type == ControlImportChunk:
ControlImportChunk(**import_chunk_args).save()
else:
# XXX: Monitors and Files are stored in non-default connections in saas.
with in_test_hide_transaction_boundary():
RegionImportChunk(**import_chunk_args).save()
logger.info("import_by_model.successfully_imported", extra=extra)
return RpcImportOk(
mapped_pks=RpcPrimaryKeyMap.into_rpc(out_pk_map),
min_ordinal=min_ordinal,
max_ordinal=last_seen_ordinal,
min_source_pk=min_old_pk,
max_source_pk=max_old_pk,
min_inserted_pk=min_inserted_pk,
max_inserted_pk=max_inserted_pk,
)
except DeserializationError as err:
sentry_sdk.capture_exception()
reason = str(err) or "No additional information"
if err.__cause__:
reason += f", {err.__cause__}"
return RpcImportError(
kind=RpcImportErrorKind.DeserializationFailed,
on=InstanceID(import_model_name),
reason=f"The submitted JSON could not be deserialized into Django model instances. {reason}",
)
except DatabaseError as e:
# Any `UniqueViolation` indicates the possibility that we've lost a race. Check for
# this explicitly by seeing if an `ImportChunk` with a matching unique signature has
# been written to the database already.
if isinstance(e.__cause__, psycopg2.errors.UniqueViolation):
try:
existing_import_chunk = get_existing_import_chunk(
batch_model_name, import_flags, import_chunk_type, min_ordinal
)
if existing_import_chunk is not None:
logger.warning("import_by_model.lost_import_race", extra=extra)
return existing_import_chunk
except Exception:
sentry_sdk.capture_exception()
return RpcImportError(
kind=RpcImportErrorKind.Unknown,
on=InstanceID(import_model_name),
reason=f"Unknown internal error occurred: {traceback.format_exc()}",
)
# All non-`ImportChunk`-related kinds of `IntegrityError` mean that the user's data was
# not properly sanitized against collision. This could be the fault of either the import
# logic, or the user's data itself.
if isinstance(e, IntegrityError):
sentry_sdk.capture_exception()
return RpcImportError(
kind=RpcImportErrorKind.IntegrityError,
on=InstanceID(import_model_name),
reason=str(e),
)
sentry_sdk.capture_exception()
return RpcImportError(
kind=RpcImportErrorKind.DatabaseError,
on=InstanceID(import_model_name),
reason=str(e),
)
except Exception:
sentry_sdk.capture_exception()
return RpcImportError(
kind=RpcImportErrorKind.Unknown,
on=InstanceID(import_model_name),
reason=f"Unknown internal error occurred: {traceback.format_exc()}",
)
def export_by_model(
self,
*,
export_model_name: str = "",
from_pk: int = 0,
scope: RpcExportScope | None = None,
filter_by: list[RpcFilter],
pk_map: RpcPrimaryKeyMap,
indent: int = 2,
) -> RpcExportResult:
try:
from sentry.db.models.base import BaseModel
deps = dependencies()
batch_model_name = NormalizedModelName(export_model_name)
model = get_model(batch_model_name)
if model is None or not issubclass(model, BaseModel):
return RpcExportError(
kind=RpcExportErrorKind.UnknownModel,
on=InstanceID(export_model_name),
reason=f"The model `{export_model_name}` could not be found",
)
silo_mode = SiloMode.get_current_mode()
model_modes = model._meta.silo_limit.modes # type: ignore[attr-defined]
if silo_mode != SiloMode.MONOLITH and silo_mode not in model_modes:
return RpcExportError(
kind=RpcExportErrorKind.IncorrectSiloModeForModel,
on=InstanceID(export_model_name),
reason=f"The model `{export_model_name}` was forwarded to the incorrect silo (it cannot be exported from the {silo_mode} silo)",
)
if scope is None:
return RpcExportError(
kind=RpcExportErrorKind.UnspecifiedScope,
on=InstanceID(export_model_name),
reason="The RPC was called incorrectly, please set an `ExportScope` parameter",
)
export_scope = scope.from_rpc()
in_pk_map = pk_map.from_rpc()
allowed_relocation_scopes = export_scope.value
possible_relocation_scopes = model.get_possible_relocation_scopes()
includable = possible_relocation_scopes & allowed_relocation_scopes
if not includable:
return RpcExportError(
kind=RpcExportErrorKind.UnexportableModel,
on=InstanceID(export_model_name),
reason=f"The model `{batch_model_name}` is not exportable",
)
max_pk = from_pk
out_pk_map = PrimaryKeyMap()
filters: list[Filter] = []
for fb in filter_by:
if NormalizedModelName(fb.on_model) == batch_model_name:
filters.append(fb.from_rpc())
def filter_objects(queryset_iterator):
# Intercept each value from the queryset iterator, ensure that it has the correct
# relocation scope and that all of its dependencies have already been exported. If
# they have, store it in the `pk_map`, and then yield it again. If they have not, we
# know that some upstream model was filtered out, so we ignore this one as well.
for item in queryset_iterator:
if not item.get_relocation_scope() in allowed_relocation_scopes:
continue
model = type(item)
model_name = get_model_name(model)
# Make sure this model is not explicitly being filtered.
for f in filters:
if f.model == model and getattr(item, f.field, None) not in f.values:
break
else:
# Now make sure its not transitively filtered either.
for field, foreign_field in deps[model_name].foreign_keys.items():
dependency_model_name = get_model_name(foreign_field.model)
field_id = field if field.endswith("_id") else f"{field}_id"
# Special case: We never want to filter on
# `OrganizationMember.inviter_id`, since the inviter could be the
# `user_id` of a `User` who is not in this `Organization`, and is
# therefore not being exported. There is probably a more generic and
# broadly applicable way to handle exceptional cases like this, but
# since it is a one off for now, it seems easiest to just handle it
# explicitly.
if model == OrganizationMember and field_id == "inviter_id":
continue
fk = getattr(item, field_id, None)
if fk is None:
# Null deps are allowed.
continue
if in_pk_map.get_pk(dependency_model_name, fk) is None:
# The foreign key value exists, but not found! An upstream model
# must have been filtered out, so we can filter this one out as
# well.
break
else:
nonlocal max_pk
if item.pk > max_pk:
max_pk = item.pk
# For models that may have circular references to themselves (unlikely),
# keep track of the new pk in the input map as well.
in_pk_map.insert(model_name, item.pk, item.pk, ImportKind.Inserted)
out_pk_map.insert(model_name, item.pk, item.pk, ImportKind.Inserted)
yield item
def yield_objects():
q = Q(pk__gt=from_pk)
# Only do database query filtering if this is a non-global export. If it is a
# global export, we want absolutely every relocatable model, so no need to
# filter.
if export_scope != ExportScope.Global:
# Create a Django filter from the relevant `filter_by` clauses.
query = dict()
for f in filters:
if f.model == model:
query[f.field + "__in"] = f.values
q &= Q(**query)
q = model.query_for_relocation_export(q, in_pk_map)
pk_name = model._meta.pk.name
queryset = model._base_manager.filter(q).order_by(pk_name)
return filter_objects(queryset.iterator())
json_data = serialize(
"json",
yield_objects(),
indent=indent,
use_natural_foreign_keys=False,
cls=DatetimeSafeDjangoJSONEncoder,
)
return RpcExportOk(
mapped_pks=RpcPrimaryKeyMap.into_rpc(out_pk_map), max_pk=max_pk, json_data=json_data
)
except Exception:
sentry_sdk.capture_exception()
return RpcExportError(
kind=RpcExportErrorKind.Unknown,
on=InstanceID(export_model_name),
reason=f"Unknown internal error occurred: {traceback.format_exc()}",
)
def get_all_globally_privileged_users(self) -> set[int]:
admin_user_pks: set[int] = set()
admin_user_pks.update(
User.objects.filter(Q(is_staff=True) | Q(is_superuser=True)).values_list(
"id", flat=True
)
)
admin_user_pks.update(UserPermission.objects.values_list("user_id", flat=True))
admin_user_pks.update(UserRoleUser.objects.values_list("user_id", flat=True))
return admin_user_pks
|
UniversalImportExportService
|
python
|
scipy__scipy
|
scipy/io/_harwell_boeing/hb.py
|
{
"start": 1177,
"end": 12957
}
|
class ____:
    """Parsed Harwell-Boeing (HB) header: section line counts, Fortran
    formats, dtypes and matrix dimensions needed to read or write an HB
    sparse-matrix file. Build via :meth:`from_data` or :meth:`from_file`.
    """
    @classmethod
    def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
        """Create a HBInfo instance from an existing sparse matrix.
        Parameters
        ----------
        m : sparse array or matrix
            the HBInfo instance will derive its parameters from m
        title : str
            Title to put in the HB header
        key : str
            Key
        mxtype : HBMatrixType
            type of the input matrix
        fmt : dict
            not implemented
        Returns
        -------
        hb_info : HBInfo instance
        """
        m = m.tocsc(copy=False)
        pointer = m.indptr
        indices = m.indices
        values = m.data
        nrows, ncols = m.shape
        nnon_zeros = m.nnz
        if fmt is None:
            # +1 because HB use one-based indexing (Fortran), and we will write
            # the indices /pointer as such
            pointer_fmt = IntFormat.from_number(np.max(pointer+1))
            indices_fmt = IntFormat.from_number(np.max(indices+1))
            # The maximum magnitude is negated so the derived format reserves
            # a column for a leading sign.
            if values.dtype.kind in np.typecodes["AllFloat"]:
                values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
            elif values.dtype.kind in np.typecodes["AllInteger"]:
                values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
            else:
                message = f"type {values.dtype.kind} not implemented yet"
                raise NotImplementedError(message)
        else:
            raise NotImplementedError("fmt argument not supported yet.")
        if mxtype is None:
            if not np.isrealobj(values):
                raise ValueError("Complex values not supported yet")
            if values.dtype.kind in np.typecodes["AllInteger"]:
                tp = "integer"
            elif values.dtype.kind in np.typecodes["AllFloat"]:
                tp = "real"
            else:
                raise NotImplementedError(
                    f"type {values.dtype} for values not implemented")
            mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
        else:
            raise ValueError("mxtype argument not handled yet.")
        def _nlines(fmt, size):
            # Number of data lines needed when fmt.repeat items fit per line
            # (i.e. ceil(size / fmt.repeat)).
            nlines = size // fmt.repeat
            if nlines * fmt.repeat != size:
                nlines += 1
            return nlines
        pointer_nlines = _nlines(pointer_fmt, pointer.size)
        indices_nlines = _nlines(indices_fmt, indices.size)
        values_nlines = _nlines(values_fmt, values.size)
        total_nlines = pointer_nlines + indices_nlines + values_nlines
        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   pointer_fmt.fortran_format, indices_fmt.fortran_format,
                   values_fmt.fortran_format)
    @classmethod
    def from_file(cls, fid):
        """Create a HBInfo instance from a file object containing a matrix in the
        HB format.
        Parameters
        ----------
        fid : file-like matrix
            File or file-like object containing a matrix in the HB format.
        Returns
        -------
        hb_info : HBInfo instance
        """
        # First line
        line = fid.readline().strip("\n")
        # NOTE(review): this rejects lines of exactly 72 characters (i.e. an
        # empty key field) although the message says "at least 72" — confirm
        # whether a blank key should be allowed.
        if not len(line) > 72:
            raise ValueError("Expected at least 72 characters for first line, "
                             f"got: \n{line}")
        title = line[:72]
        key = line[72:]
        # Second line
        line = fid.readline().strip("\n")
        if not len(line.rstrip()) >= 56:
            raise ValueError("Expected at least 56 characters for second line, "
                             f"got: \n{line}")
        total_nlines = _expect_int(line[:14])
        pointer_nlines = _expect_int(line[14:28])
        indices_nlines = _expect_int(line[28:42])
        values_nlines = _expect_int(line[42:56])
        # Optional right-hand-side line count; a blank field means none.
        rhs_nlines = line[56:72].strip()
        if rhs_nlines == '':
            rhs_nlines = 0
        else:
            rhs_nlines = _expect_int(rhs_nlines)
        if not rhs_nlines == 0:
            raise ValueError("Only files without right hand side supported for "
                             "now.")
        # Third line
        line = fid.readline().strip("\n")
        # NOTE(review): checks >= 70 while the message claims 72; the last
        # field parsed below ends at column 70, so 70 looks intentional and
        # only the message is off.
        if not len(line) >= 70:
            raise ValueError(f"Expected at least 72 character for third line, "
                             f"got:\n{line}")
        mxtype_s = line[:3].upper()
        if not len(mxtype_s) == 3:
            raise ValueError("mxtype expected to be 3 characters long")
        mxtype = HBMatrixType.from_fortran(mxtype_s)
        if mxtype.value_type not in ["real", "integer"]:
            raise ValueError("Only real or integer matrices supported for "
                             f"now (detected {mxtype})")
        if not mxtype.structure == "unsymmetric":
            raise ValueError("Only unsymmetric matrices supported for "
                             f"now (detected {mxtype})")
        if not mxtype.storage == "assembled":
            raise ValueError("Only assembled matrices supported for now")
        if not line[3:14] == " " * 11:
            raise ValueError(f"Malformed data for third line: {line}")
        nrows = _expect_int(line[14:28])
        ncols = _expect_int(line[28:42])
        nnon_zeros = _expect_int(line[42:56])
        nelementals = _expect_int(line[56:70])
        if not nelementals == 0:
            raise ValueError(
                f"Unexpected value {nelementals} for nltvl (last entry of line 3)"
            )
        # Fourth line: the three Fortran format descriptors.
        line = fid.readline().strip("\n")
        ct = line.split()
        if not len(ct) == 3:
            raise ValueError(f"Expected 3 formats, got {ct}")
        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   ct[0], ct[1], ct[2],
                   rhs_nlines, nelementals)
    def __init__(self, title, key,
                 total_nlines, pointer_nlines, indices_nlines, values_nlines,
                 mxtype, nrows, ncols, nnon_zeros,
                 pointer_format_str, indices_format_str, values_format_str,
                 right_hand_sides_nlines=0, nelementals=0):
        """Do not use this directly, but the class ctrs (from_* functions)."""
        # NOTE(review): right_hand_sides_nlines is accepted but never stored
        # or used (callers always pass 0).
        if title is None:
            title = "No Title"
        if len(title) > 72:
            raise ValueError("title cannot be > 72 characters")
        if key is None:
            key = "|No Key"
        if len(key) > 8:
            warnings.warn(f"key is > 8 characters (key is {key})",
                          LineOverflow, stacklevel=3)
        self.title = title
        self.key = key
        self.total_nlines = total_nlines
        self.pointer_nlines = pointer_nlines
        self.indices_nlines = indices_nlines
        self.values_nlines = values_nlines
        # Parse the Fortran format descriptors and cross-check them against
        # the declared matrix value type.
        parser = FortranFormatParser()
        pointer_format = parser.parse(pointer_format_str)
        if not isinstance(pointer_format, IntFormat):
            raise ValueError("Expected int format for pointer format, got "
                             f"{pointer_format}")
        indices_format = parser.parse(indices_format_str)
        if not isinstance(indices_format, IntFormat):
            raise ValueError("Expected int format for indices format, got "
                             f"{indices_format}")
        values_format = parser.parse(values_format_str)
        if isinstance(values_format, ExpFormat):
            if mxtype.value_type not in ["real", "complex"]:
                raise ValueError(f"Inconsistency between matrix type {mxtype} and "
                                 f"value type {values_format}")
            values_dtype = np.float64
        elif isinstance(values_format, IntFormat):
            if mxtype.value_type not in ["integer"]:
                raise ValueError(f"Inconsistency between matrix type {mxtype} and "
                                 f"value type {values_format}")
            # XXX: fortran int -> dtype association ?
            values_dtype = int
        else:
            raise ValueError(f"Unsupported format for values {values_format!r}")
        self.pointer_format = pointer_format
        self.indices_format = indices_format
        self.values_format = values_format
        self.pointer_dtype = np.int32
        self.indices_dtype = np.int32
        self.values_dtype = values_dtype
        # *_nbytes_full: byte count of the *full* lines of each section,
        # used by the reader to slurp a whole section with one read() call.
        self.pointer_nlines = pointer_nlines
        self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)
        self.indices_nlines = indices_nlines
        self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)
        self.values_nlines = values_nlines
        self.values_nbytes_full = _nbytes_full(values_format, values_nlines)
        self.nrows = nrows
        self.ncols = ncols
        self.nnon_zeros = nnon_zeros
        self.nelementals = nelementals
        self.mxtype = mxtype
    def dump(self):
        """Gives the header corresponding to this instance as a string."""
        header = [self.title.ljust(72) + self.key.ljust(8)]
        # Fixed-width columns (14/16/20 chars) as required by the HB layout.
        header.append(f"{self.total_nlines:14d}{self.pointer_nlines:14d}{self.indices_nlines:14d}{self.values_nlines:14d}")
        header.append(f"{self.mxtype.fortran_format.ljust(14):14s}{self.nrows:14d}{self.ncols:14d}{self.nnon_zeros:14d}{0:14d}")
        pffmt = self.pointer_format.fortran_format
        iffmt = self.indices_format.fortran_format
        vffmt = self.values_format.fortran_format
        header.append(f"{pffmt.ljust(16):16s}{iffmt.ljust(16):16s}{vffmt.ljust(20):20s}")
        return "\n".join(header)
def _expect_int(value, msg=None):
    """Parse *value* as an int; raise ValueError built from *msg* on failure.

    *msg* must contain a single ``%s`` placeholder for the offending value;
    when omitted, a generic message is used.
    """
    try:
        parsed = int(value)
    except ValueError as exc:
        template = "Expected an int, got %s" if msg is None else msg
        raise ValueError(template % value) from exc
    return parsed
def _read_hb_data(content, header):
    """Read the pointer/indices/values sections of an HB file body.

    Parameters
    ----------
    content : file-like
        Text stream positioned just after the header lines.
    header : HBInfo
        Parsed header describing the byte size of each section's full
        lines, the values dtype and the matrix shape.

    Returns
    -------
    csc_array
        The matrix, converted from HB's one-based CSC layout.
    """
    # XXX: look at a way to reduce memory here (big string creation)
    def _read_section(nbytes_full, dtype):
        # Each section is `nbytes_full` bytes of full lines plus one
        # (possibly partial) trailing line.
        chunk = content.read(nbytes_full) + content.readline()
        # np.fromstring's text mode is deprecated; splitting on whitespace
        # and building the array is the supported equivalent.
        return np.array(chunk.split(), dtype=dtype)

    ptr = _read_section(header.pointer_nbytes_full, int)
    ind = _read_section(header.indices_nbytes_full, int)
    val = _read_section(header.values_nbytes_full, header.values_dtype)
    # Shift the Fortran one-based pointers/indices to zero-based.
    return csc_array((val, ind - 1, ptr - 1), shape=(header.nrows, header.ncols))
def _write_data(m, fid, header):
    """Write the matrix *m* to *fid* in HB format: the dumped header
    followed by the one-based pointer, indices and values sections."""
    m = m.tocsc(copy=False)

    def _dump_array(stream, values, nlines, fmt):
        # All but the last (possibly partial) line carry exactly
        # ``fmt.repeat`` items rendered with the derived python format.
        item_fmt = fmt.python_format
        per_line = fmt.repeat
        nfull = (nlines - 1) * per_line
        line_fmt = item_fmt * per_line
        for start in range(0, nfull, per_line):
            stream.write(line_fmt % tuple(values[start:start + per_line]) + "\n")
        tail = values[nfull:]
        if len(tail) > 0:
            stream.write((item_fmt * len(tail)) % tuple(tail) + "\n")

    fid.write(header.dump())
    fid.write("\n")
    # +1: HB uses one-based (Fortran) pointers and indices.
    _dump_array(fid, m.indptr + 1, header.pointer_nlines, header.pointer_format)
    _dump_array(fid, m.indices + 1, header.indices_nlines, header.indices_format)
    _dump_array(fid, m.data, header.values_nlines, header.values_format)
|
HBInfo
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/lambda5.py
|
{
"start": 319,
"end": 473
}
|
class ____: ...
# Inference sample: ``check`` presumably solves MsgT from the declared type
# of ``notification`` below, typing the lambda's ``msg`` parameter — TODO
# confirm against the checker's expected-output for this sample.
def check(func: "Callable[[MsgT, int], object]") -> MsgT: ...
notification: Msg[Request] = check(lambda msg, foo: (msg.body, foo))
|
Request
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 16969,
"end": 17277
}
|
class ____(admin.ModelAdmin):
    """Admin with a read-only change page: records can be viewed and
    deleted, but neither created nor edited (all fields are read-only)."""
    readonly_fields = ("name", "toppings")

    def has_add_permission(self, request):
        # Creation through the admin is disabled entirely.
        return False

    def has_change_permission(self, request, obj=None):
        # The change view stays reachable; readonly_fields makes it inert.
        return True

    def has_delete_permission(self, request, obj=None):
        return True
ReadOnlyPizzaAdmin
|
python
|
google__pytype
|
pytype/rewrite/abstract/functions.py
|
{
"start": 21403,
"end": 21781
}
|
class ____(SimpleFunction[SimpleReturn]):
    def call_with_mapped_args(
        self, mapped_args: MappedArgs[FrameType]) -> SimpleReturn:
        """Return an instance of the signature's declared return annotation."""
        log.info('Calling function %s:\n Sig: %s\n Args: %s',
                 self.full_name, mapped_args.signature, mapped_args.argdict)
        return_annotation = mapped_args.signature.annotations['return']
        return SimpleReturn(return_annotation.instantiate())
|
PytdFunction
|
python
|
getsentry__sentry
|
src/sentry/plugins/migrator.py
|
{
"start": 675,
"end": 2593
}
|
class ____:
    """Disables a provider's legacy project plugins once every repository
    of that provider has been migrated onto the integration."""
    integration: RpcIntegration
    organization: RpcOrganization

    def run(self) -> None:
        for project in self.projects:
            for plugin in plugins.for_project(project):
                # Only the plugin matching this integration's provider matters.
                if plugin.slug != self.integration.provider:
                    continue
                # Leave the plugin alone until every repo has an integration.
                if not self.all_repos_migrated(plugin.slug):
                    continue
                try:
                    if plugin.is_enabled(project):
                        logger.info(
                            "plugin.disabled",
                            extra=self._logging_context(
                                {"project": project.slug, "plugin": plugin.slug}
                            ),
                        )
                        plugin.disable(project=project)
                except NotImplementedError:
                    # Some plugins do not implement enable/disable; skip them.
                    pass

    def all_repos_migrated(self, provider: str) -> bool:
        """True when every repo of *provider* is linked to an integration."""
        for repo in self.repos_for_provider(provider):
            if repo.integration_id is None:
                return False
        return True

    def repos_for_provider(self, provider: str) -> list[RpcRepository]:
        return [repo for repo in self.repositories if repo.provider == provider]

    @property
    def repositories(self) -> list[RpcRepository]:
        return repository_service.get_repositories(organization_id=self.organization.id)

    @cached_property
    def projects(self) -> list[RpcProject]:
        return list(self.organization.projects)

    @property
    def plugins(self) -> list[Plugin2 | Plugin]:
        return [plugins.configurable_for_project(p) for p in self.projects]

    def _logging_context(self, context: dict[str, Any]) -> dict[str, Any]:
        context.update(
            {
                "org": self.organization.slug,
                "integration_id": self.integration.id,
                "integration_provider": self.integration.provider,
            }
        )
        return context
|
Migrator
|
python
|
doocs__leetcode
|
solution/2700-2799/2799.Count Complete Subarrays in an Array/Solution.py
|
{
"start": 0,
"end": 324
}
|
class ____:
def countCompleteSubarrays(self, nums: List[int]) -> int:
cnt = len(set(nums))
ans, n = 0, len(nums)
for i in range(n):
s = set()
for x in nums[i:]:
s.add(x)
if len(s) == cnt:
ans += 1
return ans
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/models/rollbackorganization.py
|
{
"start": 284,
"end": 829
}
|
class ____(DefaultFieldsModel):
    """
    Stores a summary of every organization's year-in-review information to power the 2024 Sentry Rollback.
    """
    # Excluded: rollback summaries are never part of backup/relocation exports.
    __relocation_scope__ = RelocationScope.Excluded
    organization = FlexibleForeignKey("sentry.Organization")
    # JSON blob of precomputed year-in-review stats; defaults to null
    # (presumably until the summary is computed — confirm with the writer).
    data = models.JSONField(null=True, default=None)
    class Meta:
        app_label = "sentry"
        db_table = "sentry_rollbackorganization"
        # At most one rollback summary row per organization.
        constraints = [
            UniqueConstraint(fields=["organization_id"], name="unique_org"),
        ]
|
RollbackOrganization
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/categoricals.py
|
{
"start": 2028,
"end": 3264
}
|
class ____:
    """ASV benchmark: cost of ``astype`` conversions on categorical columns."""
    def setup(self):
        # 10**5 rows drawn (with replacement) from small fixed-size category
        # pools, one column per source dtype.
        N = 10**5
        random_pick = np.random.default_rng().choice
        categories = {
            "str": list(string.ascii_letters),
            "int": np.random.randint(2**16, size=154),
            "float": sys.maxsize * np.random.random((38,)),
            "timestamp": [
                pd.Timestamp(x, unit="s") for x in np.random.randint(2**18, size=578)
            ],
        }
        self.df = pd.DataFrame(
            {col: random_pick(cats, N) for col, cats in categories.items()}
        )
        # String mirrors of the non-string columns so the benchmarks below
        # can also measure parsing (str -> numeric/datetime) casts.
        for col in ("int", "float", "timestamp"):
            self.df[f"{col}_as_str"] = self.df[col].astype(str)
        # All columns are converted to categorical before timing.
        for col in self.df.columns:
            self.df[col] = self.df[col].astype("category")
    def astype_str(self):
        [self.df[col].astype("str") for col in "int float timestamp".split()]
    def astype_int(self):
        [self.df[col].astype("int") for col in "int_as_str timestamp".split()]
    def astype_float(self):
        [
            self.df[col].astype("float")
            for col in "float_as_str int int_as_str timestamp".split()
        ]
    def astype_datetime(self):
        # Interprets the float categories as epoch-like values in a
        # tz-aware dtype.
        self.df["float"].astype(pd.DatetimeTZDtype(tz="US/Pacific"))
|
AsType
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/dingtalk/provider.py
|
{
"start": 486,
"end": 941
}
|
class ____(OAuth2Provider):
    """DingTalk OAuth2 provider: users are identified by their ``openId``."""
    id = "dingtalk"
    name = "DingTalk"
    account_class = DingTalkAccount
    oauth2_adapter_class = DingTalkOAuth2Adapter

    def extract_uid(self, data):
        return data["openId"]

    def get_default_scope(self):
        # "openid" for the user identity, "corpid" for the corp-level id.
        return ["openid", "corpid"]

    def extract_common_fields(self, data):
        nick = data.get("nick")
        return dict(username=nick, name=nick)
# Providers exported by this module (presumably picked up by allauth's
# provider registry — confirm against the registry's discovery mechanism).
provider_classes = [DingTalkProvider]
|
DingTalkProvider
|
python
|
apache__airflow
|
providers/celery/src/airflow/providers/celery/sensors/celery_queue.py
|
{
"start": 1348,
"end": 3148
}
|
class ____(BaseSensorOperator):
    """
    Waits for a Celery queue to be empty.
    By default, in order to be considered empty, the queue must not have
    any tasks in the ``reserved``, ``scheduled`` or ``active`` states.
    :param celery_queue: The name of the Celery queue to wait for.
    :param target_task_id: Task id for checking
    """
    def __init__(self, *, celery_queue: str, target_task_id: str | None = None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.celery_queue = celery_queue
        self.target_task_id = target_task_id
    def _check_task_id(self, context: Context) -> bool:
        """
        Get the Celery result from the Airflow task ID and return True if the result has finished execution.
        :param context: Airflow's execution context
        :return: True if task has been executed, otherwise False
        """
        ti = context["ti"]
        # NOTE(review): assumes the upstream task pushed a Celery result
        # object exposing .ready(); anything else raises AttributeError here
        # — confirm against the intended upstream operator.
        celery_result = ti.xcom_pull(task_ids=self.target_task_id)
        return celery_result.ready()
    def poke(self, context: Context) -> bool:
        # When a target task id is configured, poke on that task's Celery
        # result instead of inspecting queue states.
        if self.target_task_id:
            return self._check_task_id(context)
        inspect_result = control.Inspect()
        reserved = inspect_result.reserved()
        scheduled = inspect_result.scheduled()
        active = inspect_result.active()
        try:
            # NOTE(review): Inspect() methods can return None when no worker
            # replies; that would raise TypeError here, which this except
            # does not catch — only a missing queue key maps to KeyError.
            reserved = len(reserved[self.celery_queue])
            scheduled = len(scheduled[self.celery_queue])
            active = len(active[self.celery_queue])
            self.log.info("Checking if celery queue %s is empty.", self.celery_queue)
            return reserved == 0 and scheduled == 0 and active == 0
        except KeyError:
            message = f"Could not locate Celery queue {self.celery_queue}"
            raise KeyError(message)
|
CeleryQueueSensor
|
python
|
doocs__leetcode
|
solution/1700-1799/1784.Check if Binary String Has at Most One Segment of Ones/Solution.py
|
{
"start": 0,
"end": 93
}
|
class ____:
def checkOnesSegment(self, s: str) -> bool:
return '01' not in s
|
Solution
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_dataproc.py
|
{
"start": 46416,
"end": 50451
}
|
class ____:
    """Unit tests for DataprocDeleteClusterOperator: the synchronous path,
    the deferrable path, and the finished-before-defer short-circuit."""
    @mock.patch(DATAPROC_PATH.format("DataprocHook"))
    def test_execute(self, mock_hook):
        # Synchronous delete: the hook is constructed once and delete_cluster
        # receives the operator's parameters verbatim (cluster_uuid unset).
        op = DataprocDeleteClusterOperator(
            task_id=TASK_ID,
            region=GCP_REGION,
            project_id=GCP_PROJECT,
            cluster_name=CLUSTER_NAME,
            request_id=REQUEST_ID,
            gcp_conn_id=GCP_CONN_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        op.execute(context={})
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
        mock_hook.return_value.delete_cluster.assert_called_once_with(
            region=GCP_REGION,
            project_id=GCP_PROJECT,
            cluster_name=CLUSTER_NAME,
            cluster_uuid=None,
            request_id=REQUEST_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
        )
    @mock.patch(DATAPROC_PATH.format("DataprocHook"))
    @mock.patch(DATAPROC_TRIGGERS_PATH.format("DataprocAsyncHook"))
    def test_create_execute_call_defer_method(self, mock_trigger_hook, mock_hook):
        # Deferrable mode: the delete is issued, then the task defers to
        # DataprocDeleteClusterTrigger rather than waiting on the operation.
        mock_hook.return_value.create_cluster.return_value = None
        operator = DataprocDeleteClusterOperator(
            task_id=TASK_ID,
            region=GCP_REGION,
            project_id=GCP_PROJECT,
            cluster_name=CLUSTER_NAME,
            request_id=REQUEST_ID,
            gcp_conn_id=GCP_CONN_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
            impersonation_chain=IMPERSONATION_CHAIN,
            deferrable=True,
        )
        with pytest.raises(TaskDeferred) as exc:
            operator.execute(mock.MagicMock())
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        mock_hook.return_value.delete_cluster.assert_called_once_with(
            project_id=GCP_PROJECT,
            region=GCP_REGION,
            cluster_name=CLUSTER_NAME,
            cluster_uuid=None,
            request_id=REQUEST_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
        )
        mock_hook.return_value.wait_for_operation.assert_not_called()
        assert isinstance(exc.value.trigger, DataprocDeleteClusterTrigger)
        assert exc.value.method_name == GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME
    @mock.patch(DATAPROC_PATH.format("DataprocDeleteClusterOperator.defer"))
    @mock.patch(DATAPROC_PATH.format("DataprocHook"))
    @mock.patch(DATAPROC_TRIGGERS_PATH.format("DataprocAsyncHook"))
    def test_create_execute_call_finished_before_defer(self, mock_trigger_hook, mock_hook, mock_defer):
        # If the cluster is already gone (get_cluster raises NotFound), the
        # operator finishes inline and never calls defer().
        mock_hook.return_value.create_cluster.return_value = None
        mock_hook.return_value.get_cluster.side_effect = NotFound("test")
        operator = DataprocDeleteClusterOperator(
            task_id=TASK_ID,
            region=GCP_REGION,
            project_id=GCP_PROJECT,
            cluster_name=CLUSTER_NAME,
            request_id=REQUEST_ID,
            gcp_conn_id=GCP_CONN_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
            impersonation_chain=IMPERSONATION_CHAIN,
            deferrable=True,
        )
        operator.execute(mock.MagicMock())
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        mock_hook.return_value.delete_cluster.assert_called_once_with(
            project_id=GCP_PROJECT,
            region=GCP_REGION,
            cluster_name=CLUSTER_NAME,
            cluster_uuid=None,
            request_id=REQUEST_ID,
            retry=RETRY,
            timeout=TIMEOUT,
            metadata=METADATA,
        )
        mock_hook.return_value.wait_for_operation.assert_not_called()
        assert not mock_defer.called
|
TestDataprocClusterDeleteOperator
|
python
|
run-llama__llama_index
|
llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py
|
{
"start": 1689,
"end": 1808
}
|
class ____(MessagesSnapshotEvent, Event):
    # ag-ui MESSAGES_SNAPSHOT protocol event that also subclasses the
    # llama-index workflow Event so it can flow through workflow streams.
    type: EventType = EventType.MESSAGES_SNAPSHOT
|
MessagesSnapshotWorkflowEvent
|
python
|
getsentry__sentry
|
tests/snuba/models/test_group.py
|
{
"start": 1179,
"end": 6425
}
|
class ____(TestCase, SnubaTestCase, PerformanceIssueTestCase, OccurrenceTestMixin):
    """Snuba-backed tests for Group's oldest/latest/recommended event
    lookups, with and without environment filters."""
    def test_get_oldest_latest_for_environments(self) -> None:
        project = self.create_project()
        # Three events in one group: "a" and "b" in production (oldest to
        # newest), "c" with no environment set.
        self.store_event(
            data={
                "event_id": "a" * 32,
                "environment": "production",
                "timestamp": before_now(minutes=3).isoformat(),
                "fingerprint": ["group-1"],
            },
            project_id=project.id,
        )
        self.store_event(
            data={
                "event_id": "b" * 32,
                "environment": "production",
                "timestamp": before_now(minutes=2).isoformat(),
                "fingerprint": ["group-1"],
            },
            project_id=project.id,
        )
        self.store_event(
            data={
                "event_id": "c" * 32,
                "timestamp": before_now(minutes=1).isoformat(),
                "fingerprint": ["group-1"],
            },
            project_id=project.id,
        )
        group = Group.objects.get()
        # Latest overall is "c"; environment filters narrow to "b"/"a", and
        # an environment with no events yields None.
        assert _get_latest_non_null(group).event_id == "c" * 32
        assert group.get_latest_event_for_environments(["staging"]) is None
        assert _get_latest_non_null(group, ["production"]).event_id == "b" * 32
        assert _get_oldest_non_null(group).event_id == "a" * 32
        assert _get_oldest_non_null(group, ["staging", "production"]).event_id == "a" * 32
        assert group.get_oldest_event_for_environments(["staging"]) is None
    def test_error_issue_get_helpful_for_environments(self) -> None:
        project = self.create_project()
        replay_id = uuid.uuid4().hex
        # "a": replay + sampled trace + no processing errors — the most
        # "helpful" candidate for the recommended-event heuristic.
        event_all_helpful_params = self.store_event(
            data={
                "event_id": "a" * 32,
                "timestamp": before_now(minutes=3).isoformat(),
                "fingerprint": ["group-1"],
                "contexts": {
                    "replay": {"replay_id": replay_id},
                    "trace": {
                        "sampled": True,
                        "span_id": "babaae0d4b7512d9",
                        "trace_id": "a7d67cf796774551a95be6543cacd459",
                    },
                },
                "errors": [],
            },
            project_id=project.id,
            assert_no_errors=False,
        )
        # "b": has a replay but also processing errors; pads the group.
        self.store_event(
            data={
                "event_id": "b" * 32,
                "timestamp": before_now(minutes=2).isoformat(),
                "fingerprint": ["group-1"],
                "contexts": {
                    "replay": {"replay_id": replay_id},
                },
                "errors": [{"type": "one"}, {"type": "two"}],
            },
            project_id=project.id,
            assert_no_errors=False,
        )
        # "c": the newest event, but with none of the helpful attributes.
        event_none_helpful_params = self.store_event(
            data={
                "event_id": "c" * 32,
                "timestamp": before_now(minutes=1).isoformat(),
                "fingerprint": ["group-1"],
            },
            project_id=project.id,
        )
        group = Group.objects.get()
        assert _get_recommended_non_null(group).event_id == event_all_helpful_params.event_id
        assert _get_latest_non_null(group).event_id == event_none_helpful_params.event_id
        assert _get_oldest_non_null(group).event_id == event_all_helpful_params.event_id
    @patch("sentry.quotas.backend.get_event_retention")
    def test_get_recommended_event_for_environments_retention_limit(
        self, mock_get_event_retention: MagicMock
    ) -> None:
        """
        If last_seen is outside of the retention limit, falls back to the latest event behavior.
        """
        mock_get_event_retention.return_value = 90
        project = self.create_project()
        outside_retention_date = before_now(days=91)
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "timestamp": outside_retention_date.isoformat(),
                "fingerprint": ["group-1"],
                "contexts": {},
                "errors": [],
            },
            project_id=project.id,
            assert_no_errors=False,
        )
        group = Group.objects.get()
        group.last_seen = before_now(days=91)
        assert _get_recommended_non_null(group).event_id == event.event_id
def _get_recommended(
    g: Group,
    conditions: Sequence[Condition] | None = None,
    start: datetime | None = None,
    end: datetime | None = None,
) -> GroupEvent:
    """Fetch the group's recommended event, asserting one exists."""
    event = g.get_recommended_event(conditions=conditions, start=start, end=end)
    assert event is not None
    return event
def _get_latest(
    g: Group,
    conditions: Sequence[Condition] | None = None,
    start: datetime | None = None,
    end: datetime | None = None,
) -> GroupEvent:
    """Fetch the group's latest event, asserting one exists."""
    event = g.get_latest_event(conditions=conditions, start=start, end=end)
    assert event is not None
    return event
def _get_oldest(
    g: Group,
    conditions: Sequence[Condition] | None = None,
    start: datetime | None = None,
    end: datetime | None = None,
) -> GroupEvent:
    """Fetch the group's oldest event, asserting one exists."""
    event = g.get_oldest_event(conditions=conditions, start=start, end=end)
    assert event is not None
    return event
@freeze_time()
|
GroupTestSnuba
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_joins.py
|
{
"start": 8181,
"end": 67103
}
|
class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
@testing.combinations_list(
set(
itertools.product(
[
"relationship",
"relationship_only",
"none",
"explicit",
"table_none",
"table_explicit",
],
[True, False],
)
),
argnames="onclause_type, use_legacy",
)
def test_filter_by_from_join(self, onclause_type, use_legacy):
User, Address = self.classes("User", "Address")
(address_table,) = self.tables("addresses")
(user_table,) = self.tables("users")
if use_legacy:
sess = fixture_session()
q = sess.query(User)
else:
q = select(User).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
if onclause_type == "relationship":
q = q.join(Address, User.addresses)
elif onclause_type == "relationship_only":
q = q.join(User.addresses)
elif onclause_type == "none":
q = q.join(Address)
elif onclause_type == "explicit":
q = q.join(Address, User.id == Address.user_id)
elif onclause_type == "table_none":
q = q.join(address_table)
elif onclause_type == "table_explicit":
q = q.join(
address_table, user_table.c.id == address_table.c.user_id
)
else:
assert False
q2 = q.filter_by(email_address="foo")
self.assert_compile(
q2,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"WHERE addresses.email_address = :email_address_1",
)
if use_legacy:
q2 = q.reset_joinpoint().filter_by(name="user")
self.assert_compile(
q2,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"WHERE users.name = :name_1",
)
def test_join_relationship_propagate_attrs(self):
"""test #6558"""
User = self.classes.User
users = self.tables.users
stmt = select(users).join(User.addresses)
eq_(
stmt._propagate_attrs,
{"compile_state_plugin": "orm", "plugin_subject": inspect(User)},
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id",
)
@testing.combinations((True,), (False,), argnames="legacy")
@testing.combinations((True,), (False,), argnames="threelevel")
def test_join_with_entities(self, legacy, threelevel):
"""test issue #6503"""
User, Address, Dingaling = self.classes("User", "Address", "Dingaling")
if legacy:
sess = fixture_session()
stmt = sess.query(User).join(Address).with_entities(Address.id)
else:
stmt = select(User).join(Address).with_only_columns(Address.id)
stmt = stmt.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
if threelevel:
if legacy:
stmt = stmt.join(Address.dingaling).with_entities(Dingaling.id)
else:
stmt = stmt.join(Address.dingaling).with_only_columns(
Dingaling.id
)
if threelevel:
self.assert_compile(
stmt,
"SELECT dingalings.id AS dingalings_id "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"JOIN dingalings ON addresses.id = dingalings.address_id",
)
else:
self.assert_compile(
stmt,
"SELECT addresses.id AS addresses_id FROM users "
"JOIN addresses ON users.id = addresses.user_id",
)
@testing.combinations((True,), (False,), argnames="legacy")
@testing.combinations((True,), (False,), argnames="threelevel")
def test_join_and_union_with_entities(self, legacy, threelevel):
"""test issue #6698, regression caused by #6503"""
User, Address, Dingaling = self.classes("User", "Address", "Dingaling")
if legacy:
sess = fixture_session()
stmt = sess.query(User).join(Address).with_entities(Address.id)
else:
stmt = select(User).join(Address).with_only_columns(Address.id)
stmt = stmt.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
if threelevel:
if legacy:
stmt = stmt.join(Address.dingaling).with_entities(Dingaling.id)
to_union = sess.query(Dingaling.id)
else:
stmt = stmt.join(Address.dingaling).with_only_columns(
Dingaling.id
)
to_union = select(Dingaling.id).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
else:
if legacy:
to_union = sess.query(Address.id)
else:
to_union = select(Address.id).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
if legacy:
stmt = stmt.union(to_union)
else:
stmt = (
union(stmt, to_union)
.subquery()
.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
)
if threelevel:
self.assert_compile(
stmt,
"SELECT anon_1.dingalings_id AS anon_1_dingalings_id FROM "
"(SELECT dingalings.id AS dingalings_id "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"JOIN dingalings ON addresses.id = dingalings.address_id "
"UNION "
"SELECT dingalings.id AS dingalings_id FROM dingalings) "
"AS anon_1",
)
else:
self.assert_compile(
stmt,
"SELECT anon_1.addresses_id AS anon_1_addresses_id FROM "
"(SELECT addresses.id AS addresses_id FROM users "
"JOIN addresses ON users.id = addresses.user_id "
"UNION "
"SELECT addresses.id AS addresses_id FROM addresses) "
"AS anon_1",
)
def test_invalid_kwarg_join(self):
User = self.classes.User
sess = fixture_session()
assert_raises_message(
TypeError,
r".*join\(\) .*unexpected .*keyword",
sess.query(User).join,
"address",
foob="bar",
bar="bat",
)
assert_raises_message(
TypeError,
r".*outerjoin\(\) .*unexpected .*keyword",
sess.query(User).outerjoin,
"address",
foob="bar",
bar="bat",
)
def test_left_w_no_entity(self):
User = self.classes.User
Address = self.classes.Address
sess = fixture_session()
self.assert_compile(
sess.query(User, literal_column("x")).join(Address),
"SELECT users.id AS users_id, users.name AS users_name, x "
"FROM users JOIN addresses ON users.id = addresses.user_id",
)
self.assert_compile(
sess.query(literal_column("x"), User).join(Address),
"SELECT x, users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id",
)
def test_left_is_none_and_query_has_no_entities(self):
Address = self.classes.Address
sess = fixture_session()
assert_raises_message(
sa_exc.InvalidRequestError,
r"No entities to join from; please use select_from\(\) to "
r"establish the left entity/selectable of this join",
sess.query().join(Address)._compile_context,
)
def test_isouter_flag(self):
User = self.classes.User
self.assert_compile(
fixture_session().query(User).join(User.orders, isouter=True),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users LEFT OUTER JOIN orders ON users.id = orders.user_id",
)
def test_full_flag(self):
User = self.classes.User
self.assert_compile(
fixture_session().query(User).outerjoin(User.orders, full=True),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users FULL OUTER JOIN orders ON users.id = orders.user_id",
)
def test_single_prop_1(self):
User = self.classes.User
sess = fixture_session()
self.assert_compile(
sess.query(User).join(User.orders),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN orders ON users.id = orders.user_id",
)
def test_single_prop_2(self):
Order, User = (self.classes.Order, self.classes.User)
sess = fixture_session()
self.assert_compile(
sess.query(User).join(Order.user),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM orders JOIN users ON users.id = orders.user_id",
)
def test_single_prop_3(self):
Order, User = (self.classes.Order, self.classes.User)
sess = fixture_session()
oalias1 = aliased(Order)
self.assert_compile(
sess.query(User).join(oalias1.user),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM orders AS orders_1 JOIN users "
"ON users.id = orders_1.user_id",
)
def test_single_prop_4(self):
(
Order,
User,
) = (self.classes.Order, self.classes.User)
sess = fixture_session()
oalias1 = aliased(Order)
oalias2 = aliased(Order)
# another nonsensical query. (from [ticket:1537]).
# in this case, the contract of "left to right" is honored
self.assert_compile(
sess.query(User).join(oalias1.user).join(oalias2.user),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM orders AS orders_1 JOIN users "
"ON users.id = orders_1.user_id, "
"orders AS orders_2 JOIN users ON users.id = orders_2.user_id",
)
def test_single_prop_6(self):
User = self.classes.User
sess = fixture_session()
ualias = aliased(User)
self.assert_compile(
sess.query(ualias).join(ualias.orders),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 JOIN orders ON users_1.id = orders.user_id",
)
    def test_single_prop_9(self):
        """An aliased() entity against a subquery joins from the subquery's
        columns rather than the base table.
        """
        User = self.classes.User
        sess = fixture_session()
        subq = (
            sess.query(User)
            .filter(User.name == "ed")
            .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
            .subquery()
        )
        ua = aliased(User, subq)
        self.assert_compile(
            sess.query(ua).join(ua.orders),
            "SELECT anon_1.users_id AS anon_1_users_id, "
            "anon_1.users_name AS anon_1_users_name "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "WHERE users.name = :name_1) AS anon_1 JOIN orders "
            "ON anon_1.users_id = orders.user_id",
        )
    def test_single_prop_12(self):
        """Explicit targets with alias-bound relationships; both joins chain
        off the aliased User entity.
        """
        Order, User, Address = (
            self.classes.Order,
            self.classes.User,
            self.classes.Address,
        )
        sess = fixture_session()
        oalias1 = aliased(Order)
        # test #1 for [ticket:1706]
        ualias = aliased(User)
        self.assert_compile(
            sess.query(ualias)
            .join(oalias1, ualias.orders)
            .join(Address, ualias.addresses),
            "SELECT users_1.id AS users_1_id, users_1.name AS "
            "users_1_name FROM users AS users_1 JOIN orders AS orders_1 "
            "ON users_1.id = orders_1.user_id JOIN addresses ON users_1.id "
            "= addresses.user_id",
        )
    def test_single_prop_13(self):
        """Mixing alias-bound joins: each ON clause resolves against the
        alias named in its relationship expression.
        """
        Order, User, Address = (
            self.classes.Order,
            self.classes.User,
            self.classes.Address,
        )
        sess = fixture_session()
        # test #2 for [ticket:1706]
        ualias = aliased(User)
        ualias2 = aliased(User)
        self.assert_compile(
            sess.query(ualias)
            .join(Address, ualias.addresses)
            .join(ualias2, Address.user)
            .join(Order, ualias.orders),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users "
            "AS users_1 JOIN addresses ON users_1.id = addresses.user_id "
            "JOIN users AS users_2 "
            "ON users_2.id = addresses.user_id JOIN orders "
            "ON users_1.id = orders.user_id",
        )
    def test_overlapping_paths_one_legacy(self):
        """Repeating the same relationship join in Query.join() renders it
        only once in the FROM clause.
        """
        User = self.classes.User
        Order = self.classes.Order
        sess = fixture_session()
        # test overlapping paths.   User->orders is used by both joins, but
        # rendered once.
        self.assert_compile(
            sess.query(User)
            .join(User.orders)
            .join(Order.items)
            .join(User.orders)
            .join(Order.address),
            "SELECT users.id AS users_id, users.name AS users_name FROM users "
            "JOIN orders "
            "ON users.id = orders.user_id "
            "JOIN order_items AS order_items_1 "
            "ON orders.id = order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id JOIN addresses "
            "ON addresses.id = orders.address_id",
        )
    def test_overlapping_paths_multilevel_legacy(self):
        """Repeated relationship joins across multiple levels are each
        de-duplicated in the legacy Query.join() codepath.
        """
        User = self.classes.User
        Order = self.classes.Order
        Address = self.classes.Address
        s = fixture_session()
        q = (
            s.query(User)
            .join(User.orders)
            .join(User.addresses)
            .join(User.orders)
            .join(Order.items)
            .join(User.addresses)
            .join(Address.dingaling)
        )
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN orders ON users.id = orders.user_id "
            "JOIN addresses ON users.id = addresses.user_id "
            "JOIN order_items AS order_items_1 ON orders.id = "
            "order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id "
            "JOIN dingalings ON addresses.id = dingalings.address_id",
        )
    def test_overlapping_paths_one_modern(self):
        """select().join() de-duplicates a repeated relationship join the
        same way the legacy Query.join() does.
        """
        User = self.classes.User
        Order = self.classes.Order
        # test overlapping paths.   User->orders is used by both joins, but
        # rendered once.
        # label style is for comparison to legacy version.  1.4 version
        # of select().join() did not behave the same as Query.join()
        self.assert_compile(
            select(User)
            .join(User.orders)
            .join(Order.items)
            .join(User.orders)
            .join(Order.address)
            .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
            "SELECT users.id AS users_id, users.name AS users_name FROM users "
            "JOIN orders "
            "ON users.id = orders.user_id "
            "JOIN order_items AS order_items_1 "
            "ON orders.id = order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id JOIN addresses "
            "ON addresses.id = orders.address_id",
        )
    def test_overlapping_paths_multilevel_modern(self):
        """Multilevel repeated relationship joins are de-duplicated in the
        modern select().join() codepath, matching the legacy test above.
        """
        User = self.classes.User
        Order = self.classes.Order
        Address = self.classes.Address
        # label style is for comparison to legacy version.  1.4 version
        # of select().join() did not behave the same as Query.join()
        q = (
            select(User)
            .join(User.orders)
            .join(User.addresses)
            .join(User.orders)
            .join(Order.items)
            .join(User.addresses)
            .join(Address.dingaling)
            .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
        )
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN orders ON users.id = orders.user_id "
            "JOIN addresses ON users.id = addresses.user_id "
            "JOIN order_items AS order_items_1 ON orders.id = "
            "order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id "
            "JOIN dingalings ON addresses.id = dingalings.address_id",
        )
    def test_join_nonmapped_column(self):
        """test that the search for a 'left' doesn't trip on non-mapped cols"""
        Order, User = self.classes.Order, self.classes.User
        sess = fixture_session()
        # intentionally join() with a non-existent "left" side
        self.assert_compile(
            sess.query(User.id, literal_column("foo")).join(Order.user),
            "SELECT users.id AS users_id, foo FROM "
            "orders JOIN users ON users.id = orders.user_id",
        )
    def test_backwards_join(self):
        """Joining "backwards" via Address.user is allowed when it can be
        interpreted as a multi-entity join; ambiguous forms raise.
        """
        User, Address = self.classes.User, self.classes.Address
        # a more controversial feature.  join from
        # User->Address, but the onclause is Address.user.
        sess = fixture_session()
        eq_(
            sess.query(User)
            .join(Address.user)
            .filter(Address.email_address == "ed@wood.com")
            .all(),
            [User(id=8, name="ed")],
        )
        # its actually not so controversial if you view it in terms
        # of multiple entities.
        eq_(
            sess.query(User, Address)
            .join(Address.user)
            .filter(Address.email_address == "ed@wood.com")
            .all(),
            [(User(id=8, name="ed"), Address(email_address="ed@wood.com"))],
        )
        # this was the controversial part.  now, raise an error if the feature
        # is abused.
        # before the error raise was added, this would silently work.....
        assert_raises(
            sa_exc.InvalidRequestError,
            sess.query(User).join(Address, Address.user)._compile_context,
        )
        # but this one would silently fail
        adalias = aliased(Address)
        assert_raises(
            sa_exc.InvalidRequestError,
            sess.query(User).join(adalias, Address.user)._compile_context,
        )
    def test_multiple_with_aliases(self):
        """Two alias-targeted joins against the same relationship each get
        their own aliased JOIN, usable independently in the WHERE clause.
        """
        Order, User = self.classes.Order, self.classes.User
        sess = fixture_session()
        ualias = aliased(User)
        oalias1 = aliased(Order)
        oalias2 = aliased(Order)
        self.assert_compile(
            sess.query(ualias)
            .join(oalias1, ualias.orders)
            .join(oalias2, ualias.orders)
            .filter(or_(oalias1.user_id == 9, oalias2.user_id == 7)),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users AS users_1 "
            "JOIN orders AS orders_1 ON users_1.id = orders_1.user_id "
            "JOIN orders AS orders_2 ON "
            "users_1.id = orders_2.user_id "
            "WHERE orders_1.user_id = :user_id_1 "
            "OR orders_2.user_id = :user_id_2",
            use_default_dialect=True,
        )
    def test_select_from_orm_joins(self):
        """Exercise standalone orm.join() with explicit ON clauses, the
        isouter/full flags, chained joins, and select_from() of one or more
        pre-built joins (including multiple joins at once).
        """
        User, Order = self.classes.User, self.classes.Order
        sess = fixture_session()
        ualias = aliased(User)
        oalias1 = aliased(Order)
        oalias2 = aliased(Order)
        # plain inner join with explicit ON clause
        self.assert_compile(
            join(User, oalias2, User.id == oalias2.user_id),
            "users JOIN orders AS orders_1 ON users.id = orders_1.user_id",
            use_default_dialect=True,
        )
        self.assert_compile(
            join(User, oalias2, User.id == oalias2.user_id, full=True),
            "users FULL OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id",
            use_default_dialect=True,
        )
        self.assert_compile(
            join(User, oalias2, User.id == oalias2.user_id, isouter=True),
            "users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id",
            use_default_dialect=True,
        )
        # full=True takes precedence over isouter=True
        self.assert_compile(
            join(
                User,
                oalias2,
                User.id == oalias2.user_id,
                isouter=True,
                full=True,
            ),
            "users FULL OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id",
            use_default_dialect=True,
        )
        # chained joins with inferred ON clauses
        self.assert_compile(
            join(User, oalias1).join(oalias2),
            "users JOIN orders AS orders_1 ON users.id = orders_1.user_id "
            "JOIN orders AS orders_2 ON users.id = orders_2.user_id",
            use_default_dialect=True,
        )
        self.assert_compile(
            join(User, oalias1).join(oalias2, isouter=True),
            "users JOIN orders AS orders_1 ON users.id = orders_1.user_id "
            "LEFT OUTER JOIN orders AS orders_2 "
            "ON users.id = orders_2.user_id",
            use_default_dialect=True,
        )
        self.assert_compile(
            join(User, oalias1).join(oalias2, full=True),
            "users JOIN orders AS orders_1 ON users.id = orders_1.user_id "
            "FULL OUTER JOIN orders AS orders_2 "
            "ON users.id = orders_2.user_id",
            use_default_dialect=True,
        )
        self.assert_compile(
            join(User, oalias1).join(oalias2, full=True, isouter=True),
            "users JOIN orders AS orders_1 ON users.id = orders_1.user_id "
            "FULL OUTER JOIN orders AS orders_2 "
            "ON users.id = orders_2.user_id",
            use_default_dialect=True,
        )
        # relationship attribute as the ON clause, left side aliased
        self.assert_compile(
            join(ualias, oalias1, ualias.orders),
            "users AS users_1 JOIN orders AS orders_1 "
            "ON users_1.id = orders_1.user_id",
            use_default_dialect=True,
        )
        self.assert_compile(
            sess.query(ualias).select_from(
                join(ualias, oalias1, ualias.orders)
            ),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users AS users_1 "
            "JOIN orders AS orders_1 ON users_1.id = orders_1.user_id",
            use_default_dialect=True,
        )
        self.assert_compile(
            sess.query(User, ualias).select_from(
                join(ualias, oalias1, ualias.orders)
            ),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "users_1.id AS users_1_id, "
            "users_1.name AS users_1_name FROM users AS users_1 "
            "JOIN orders AS orders_1 ON users_1.id = orders_1.user_id, users",
            use_default_dialect=True,
        )
        # this fails (and we can't quite fix right now).
        if False:
            self.assert_compile(
                sess.query(User, ualias)
                .join(oalias1, ualias.orders)
                .join(oalias2, User.id == oalias2.user_id)
                .filter(or_(oalias1.user_id == 9, oalias2.user_id == 7)),
                "SELECT users.id AS users_id, users.name AS users_name, "
                "users_1.id AS users_1_id, users_1.name AS "
                "users_1_name FROM users JOIN orders AS orders_2 "
                "ON users.id = orders_2.user_id, "
                "users AS users_1 JOIN orders AS orders_1 "
                "ON users_1.id = orders_1.user_id "
                "WHERE orders_1.user_id = :user_id_1 "
                "OR orders_2.user_id = :user_id_2",
                use_default_dialect=True,
            )
        # this is the same thing using explicit orm.join() (which now offers
        # multiple again)
        self.assert_compile(
            sess.query(User, ualias)
            .select_from(
                join(ualias, oalias1, ualias.orders),
                join(User, oalias2, User.id == oalias2.user_id),
            )
            .filter(or_(oalias1.user_id == 9, oalias2.user_id == 7)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "users_1.id AS users_1_id, users_1.name AS "
            "users_1_name FROM users AS users_1 JOIN orders AS orders_1 "
            "ON users_1.id = orders_1.user_id, "
            "users JOIN orders AS orders_2 ON users.id = orders_2.user_id "
            "WHERE orders_1.user_id = :user_id_1 "
            "OR orders_2.user_id = :user_id_2",
            use_default_dialect=True,
        )
    def test_overlapping_backwards_joins(self):
        """Two backwards joins from different Order aliases each render
        their own JOIN to users (invalid SQL, but as literally requested).
        """
        User, Order = self.classes.User, self.classes.Order
        sess = fixture_session()
        oalias1 = aliased(Order)
        oalias2 = aliased(Order)
        # this is invalid SQL - joins from orders_1/orders_2 to User twice.
        # but that is what was asked for so they get it !
        self.assert_compile(
            sess.query(User).join(oalias1.user).join(oalias2.user),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM orders AS orders_1 "
            "JOIN users ON users.id = orders_1.user_id, orders AS orders_2 "
            "JOIN users ON users.id = orders_2.user_id",
            use_default_dialect=True,
        )
    def test_replace_multiple_from_clause(self):
        """test adding joins onto multiple FROM clauses"""
        User, Order, Address = (
            self.classes.User,
            self.classes.Order,
            self.classes.Address,
        )
        sess = fixture_session()
        # each join attaches to the FROM element owning its left entity:
        # Address.dingaling extends the addresses FROM; User.orders and
        # Order.items extend the users FROM.
        self.assert_compile(
            sess.query(Address, User)
            .join(Address.dingaling)
            .join(User.orders)
            .join(Order.items),
            "SELECT addresses.id AS addresses_id, "
            "addresses.user_id AS addresses_user_id, "
            "addresses.email_address AS addresses_email_address, "
            "users.id AS users_id, "
            "users.name AS users_name FROM addresses JOIN dingalings "
            "ON addresses.id = dingalings.address_id, "
            "users JOIN orders ON users.id = orders.user_id "
            "JOIN order_items AS order_items_1 "
            "ON orders.id = order_items_1.order_id JOIN items "
            "ON items.id = order_items_1.item_id",
            use_default_dialect=True,
        )
    def test_invalid_join_entity_from_single_from_clause(self):
        """Joining to an unrelated entity with one explicit FROM raises
        InvalidRequestError suggesting select_from().
        """
        Address, Item = (self.classes.Address, self.classes.Item)
        sess = fixture_session()
        q = sess.query(Address).select_from(Address)
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "Don't know how to join to .*Item.*. "
            r"Please use the .select_from\(\) "
            "method to establish an explicit left side, as well as",
            q.join(Item)._compile_context,
        )
    def test_invalid_join_entity_from_no_from_clause(self):
        """Joining to an unrelated entity with no explicit FROM raises the
        same InvalidRequestError as the explicit-FROM case.
        """
        Address, Item = (self.classes.Address, self.classes.Item)
        sess = fixture_session()
        q = sess.query(Address)
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "Don't know how to join to .*Item.*. "
            r"Please use the .select_from\(\) "
            "method to establish an explicit left side, as well as",
            q.join(Item)._compile_context,
        )
    def test_invalid_join_entity_from_multiple_from_clause(self):
        """test adding joins onto multiple FROM clauses where
        we still need to say there's nothing to JOIN from"""
        User, Address, Item = (
            self.classes.User,
            self.classes.Address,
            self.classes.Item,
        )
        sess = fixture_session()
        q = sess.query(Address, User).join(Address.dingaling).join(User.orders)
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "Don't know how to join to .*Item.*. "
            r"Please use the .select_from\(\) "
            "method to establish an explicit left side, as well as",
            q.join(Item)._compile_context,
        )
    def test_join_explicit_left_multiple_from_clause(self):
        """test adding joins onto multiple FROM clauses where
        it is ambiguous which FROM should be used when an
        ON clause is given"""
        User = self.classes.User
        sess = fixture_session()
        u1 = aliased(User)
        # in this case, two FROM objects, one
        # is users, the other is u1_alias.
        # User.addresses looks for the "users" table and can match
        # to both u1_alias and users if the match is not specific enough
        q = sess.query(User, u1).select_from(User, u1).join(User.addresses)
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM "
            "users JOIN addresses ON users.id = addresses.user_id, "
            "users AS users_1",
        )
        # u1.addresses resolves to the aliased FROM instead
        q = sess.query(User, u1).select_from(User, u1).join(u1.addresses)
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users, "
            "users AS users_1 JOIN addresses "
            "ON users_1.id = addresses.user_id",
        )
    def test_join_explicit_left_multiple_adapted(self):
        """test adding joins onto multiple FROM clauses where
        it is ambiguous which FROM should be used when an
        ON clause is given"""
        User = self.classes.User
        sess = fixture_session()
        u1 = aliased(User)
        u2 = aliased(User)
        # in this case, two FROM objects, one
        # is users, the other is u1_alias.
        # User.addresses looks for the "users" table and can match
        # to both u1_alias and users if the match is not specific enough
        assert_raises_message(
            sa_exc.InvalidRequestError,
            "Can't identify which entity in which to assign the "
            "left side of this join.",
            sess.query(u1, u2)
            .select_from(u1, u2)
            .join(User.addresses)
            ._compile_context,
        )
        # more specific ON clause
        self.assert_compile(
            sess.query(u1, u2).select_from(u1, u2).join(u2.addresses),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name, "
            "users_2.id AS users_2_id, users_2.name AS users_2_name "
            "FROM users AS users_1, "
            "users AS users_2 JOIN addresses "
            "ON users_2.id = addresses.user_id",
        )
    def test_join_entity_from_multiple_from_clause(self):
        """test adding joins onto multiple FROM clauses where
        it is ambiguous which FROM should be used"""
        User, Order, Address, Dingaling = (
            self.classes.User,
            self.classes.Order,
            self.classes.Address,
            self.classes.Dingaling,
        )
        sess = fixture_session()
        q = sess.query(Address, User).join(Address.dingaling).join(User.orders)
        a1 = aliased(Address)
        # with no ON clause both existing FROMs could host the join
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "Can't determine which FROM clause to join from, there are "
            "multiple FROMS which can join to this entity. "
            r"Please use the .select_from\(\) "
            "method to establish an explicit left side, as well as",
            q.join(a1)._compile_context,
        )
        # to resolve, add an ON clause
        # the user->orders join is chosen to join to a1
        self.assert_compile(
            q.join(a1, Order.address_id == a1.id),
            "SELECT addresses.id AS addresses_id, "
            "addresses.user_id AS addresses_user_id, "
            "addresses.email_address AS addresses_email_address, "
            "users.id AS users_id, users.name AS users_name "
            "FROM addresses JOIN dingalings "
            "ON addresses.id = dingalings.address_id, "
            "users JOIN orders "
            "ON users.id = orders.user_id "
            "JOIN addresses AS addresses_1 "
            "ON orders.address_id = addresses_1.id",
        )
        # the address->dingalings join is chosen to join to a1
        self.assert_compile(
            q.join(a1, Dingaling.address_id == a1.id),
            "SELECT addresses.id AS addresses_id, "
            "addresses.user_id AS addresses_user_id, "
            "addresses.email_address AS addresses_email_address, "
            "users.id AS users_id, users.name AS users_name "
            "FROM addresses JOIN dingalings "
            "ON addresses.id = dingalings.address_id "
            "JOIN addresses AS addresses_1 "
            "ON dingalings.address_id = addresses_1.id, "
            "users JOIN orders ON users.id = orders.user_id",
        )
    def test_join_entity_from_multiple_entities(self):
        """test adding joins onto multiple FROM clauses where
        it is ambiguous which FROM should be used"""
        Order, Address, Dingaling = (
            self.classes.Order,
            self.classes.Address,
            self.classes.Dingaling,
        )
        sess = fixture_session()
        q = sess.query(Order, Dingaling)
        a1 = aliased(Address)
        # with no ON clause both query entities could host the join
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "Can't determine which FROM clause to join from, there are "
            "multiple FROMS which can join to this entity. "
            r"Please use the .select_from\(\) "
            "method to establish an explicit left side, as well as",
            q.join(a1)._compile_context,
        )
        # to resolve, add an ON clause
        # Order is chosen to join to a1
        self.assert_compile(
            q.join(a1, Order.address_id == a1.id),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS orders_address_id, "
            "orders.description AS orders_description, "
            "orders.isopen AS orders_isopen, dingalings.id AS dingalings_id, "
            "dingalings.address_id AS dingalings_address_id, "
            "dingalings.data AS dingalings_data "
            "FROM orders "
            "JOIN addresses AS addresses_1 "
            "ON orders.address_id = addresses_1.id, dingalings",
        )
        # Dingaling is chosen to join to a1
        self.assert_compile(
            q.join(a1, Dingaling.address_id == a1.id),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS orders_address_id, "
            "orders.description AS orders_description, "
            "orders.isopen AS orders_isopen, dingalings.id AS dingalings_id, "
            "dingalings.address_id AS dingalings_address_id, "
            "dingalings.data AS dingalings_data "
            "FROM dingalings JOIN addresses AS addresses_1 "
            "ON dingalings.address_id = addresses_1.id, orders",
        )
    def test_clause_present_in_froms_twice_w_onclause(self):
        """An alias present both standalone and inside a join in
        select_from() is resolved once ON clauses disambiguate.
        """
        # test [ticket:4584]
        Order, Address, User = (
            self.classes.Order,
            self.classes.Address,
            self.classes.User,
        )
        sess = fixture_session()
        a1 = aliased(Address)
        q = sess.query(Order).select_from(Order, a1, User)
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "Can't determine which FROM clause to join from, there are "
            "multiple FROMS which can join to this entity. "
            r"Please use the .select_from\(\) "
            "method to establish an explicit left side, as well as",
            q.outerjoin(a1)._compile_context,
        )
        # the condition which occurs here is: Query._from_obj contains both
        # "a1" by itself as well as a join that "a1" is part of.
        # find_left_clause_to_join_from() needs to include removal of froms
        # that are in the _hide_froms of joins the same way
        # Selectable._get_display_froms does.
        q = sess.query(Order).select_from(Order, a1, User)
        q = q.outerjoin(a1, a1.id == Order.address_id)
        q = q.outerjoin(User, a1.user_id == User.id)
        self.assert_compile(
            q,
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS orders_address_id, "
            "orders.description AS orders_description, "
            "orders.isopen AS orders_isopen "
            "FROM orders "
            "LEFT OUTER JOIN addresses AS addresses_1 "
            "ON addresses_1.id = orders.address_id "
            "LEFT OUTER JOIN users ON addresses_1.user_id = users.id",
        )
    def test_clause_present_in_froms_twice_wo_onclause(self):
        """Same duplicated-FROM condition as the w_onclause test, but the
        final outerjoin infers its ON clause from the relationship.
        """
        # test [ticket:4584]
        Address, Dingaling, User = (
            self.classes.Address,
            self.classes.Dingaling,
            self.classes.User,
        )
        sess = fixture_session()
        a1 = aliased(Address)
        # the condition which occurs here is: Query._from_obj contains both
        # "a1" by itself as well as a join that "a1" is part of.
        # find_left_clause_to_join_from() needs to include removal of froms
        # that are in the _hide_froms of joins the same way
        # Selectable._get_display_froms does.
        q = sess.query(User).select_from(Dingaling, a1, User)
        q = q.outerjoin(a1, User.id == a1.user_id)
        q = q.outerjoin(Dingaling)
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users LEFT OUTER JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id "
            "LEFT OUTER JOIN dingalings "
            "ON addresses_1.id = dingalings.address_id",
        )
    def test_pure_expression(self):
        """Core Table objects (not mapped classes) also work in
        Query.join() with an inferred ON clause.
        """
        # this was actually false-passing due to the assertions
        # fixture not following the regular codepath for Query
        addresses, users = self.tables.addresses, self.tables.users
        sess = fixture_session()
        self.assert_compile(
            sess.query(users).join(addresses),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN addresses ON users.id = addresses.user_id",
        )
    def test_no_onclause(self):
        """Joins with ON clauses inferred from foreign keys or
        relationships produce the same rows via select_from() or join().
        """
        Item, User, Order = (
            self.classes.Item,
            self.classes.User,
            self.classes.Order,
        )
        sess = fixture_session()
        eq_(
            sess.query(User)
            .select_from(join(User, Order).join(Item, Order.items))
            .filter(Item.description == "item 4")
            .all(),
            [User(name="jack")],
        )
        eq_(
            sess.query(User.name)
            .select_from(join(User, Order).join(Item, Order.items))
            .filter(Item.description == "item 4")
            .all(),
            [("jack",)],
        )
        eq_(
            sess.query(User)
            .join(Order)
            .join(Item, Order.items)
            .filter(Item.description == "item 4")
            .all(),
            [User(name="jack")],
        )
    def test_clause_onclause(self):
        """Explicit SQL-expression ON clauses work against mapped classes,
        plain association tables, aliases, and subquery-bound aliases.
        """
        Item, Order, order_items, User = (
            self.classes.Item,
            self.classes.Order,
            self.tables.order_items,
            self.classes.User,
        )
        sess = fixture_session()
        eq_(
            sess.query(User)
            .join(Order, User.id == Order.user_id)
            .join(order_items, Order.id == order_items.c.order_id)
            .join(Item, order_items.c.item_id == Item.id)
            .filter(Item.description == "item 4")
            .all(),
            [User(name="jack")],
        )
        eq_(
            sess.query(User.name)
            .join(Order, User.id == Order.user_id)
            .join(order_items, Order.id == order_items.c.order_id)
            .join(Item, order_items.c.item_id == Item.id)
            .filter(Item.description == "item 4")
            .all(),
            [("jack",)],
        )
        ualias = aliased(User)
        eq_(
            sess.query(ualias.name)
            .join(Order, ualias.id == Order.user_id)
            .join(order_items, Order.id == order_items.c.order_id)
            .join(Item, order_items.c.item_id == Item.id)
            .filter(Item.description == "item 4")
            .all(),
            [("jack",)],
        )
        # explicit onclause with from_self(), means
        # the onclause must be aliased against the query's custom
        # FROM object
        subq = sess.query(User).order_by(User.id).offset(2).subquery()
        ua = aliased(User, subq)
        eq_(
            sess.query(ua).join(Order, ua.id == Order.user_id).all(),
            [User(name="fred")],
        )
    def test_str_not_accepted_orm_join(self):
        """A string relationship name is rejected as an ON clause."""
        User, Address = self.classes.User, self.classes.Address
        with expect_raises_message(
            sa.exc.ArgumentError,
            "ON clause, typically a SQL expression or ORM "
            "relationship attribute expected, got 'addresses'.",
        ):
            outerjoin(User, Address, "addresses")
def test_aliased_classes(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
(user7, user8, user9, user10) = sess.query(User).all()
(address1, address2, address3, address4, address5) = sess.query(
Address
).all()
expected = [
(user7, address1),
(user8, address2),
(user8, address3),
(user8, address4),
(user9, address5),
(user10, None),
]
q = sess.query(User)
AdAlias = aliased(Address)
q = q.add_entity(AdAlias).select_from(outerjoin(User, AdAlias))
result = q.order_by(User.id, AdAlias.id).all()
eq_(result, expected)
sess.expunge_all()
q = sess.query(User).add_entity(AdAlias)
result = (
q.select_from(outerjoin(User, AdAlias))
.filter(AdAlias.email_address == "ed@bettyboop.com")
.all()
)
eq_(result, [(user8, address3)])
result = (
q.select_from(outerjoin(User, AdAlias, User.addresses))
.filter(AdAlias.email_address == "ed@bettyboop.com")
.all()
)
eq_(result, [(user8, address3)])
result = (
q.select_from(outerjoin(User, AdAlias, User.id == AdAlias.user_id))
.filter(AdAlias.email_address == "ed@bettyboop.com")
.all()
)
eq_(result, [(user8, address3)])
# this is the first test where we are joining "backwards" - from
# AdAlias to User even though
# the query is against User
q = sess.query(User, AdAlias)
result = (
q.join(AdAlias.user)
.filter(User.name == "ed")
.order_by(User.id, AdAlias.id)
)
eq_(
result.all(),
[(user8, address2), (user8, address3), (user8, address4)],
)
q = (
sess.query(User, AdAlias)
.select_from(join(AdAlias, User, AdAlias.user))
.filter(User.name == "ed")
)
eq_(
result.all(),
[(user8, address2), (user8, address3), (user8, address4)],
)
    def test_expression_onclauses(self):
        """Arbitrary SQL expressions serve as ON clauses against both
        subqueries and mapped classes.
        """
        Order, User = self.classes.Order, self.classes.User
        sess = fixture_session()
        subq = sess.query(User).subquery()
        self.assert_compile(
            sess.query(User).join(subq, User.name == subq.c.name),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN (SELECT users.id AS id, users.name "
            "AS name FROM users) AS anon_1 ON users.name = anon_1.name",
            use_default_dialect=True,
        )
        subq = sess.query(Order).subquery()
        self.assert_compile(
            sess.query(User).join(subq, User.id == subq.c.user_id),
            "SELECT users.id AS users_id, users.name AS users_name FROM "
            "users JOIN (SELECT orders.id AS id, orders.user_id AS user_id, "
            "orders.address_id AS address_id, orders.description AS "
            "description, orders.isopen AS isopen FROM orders) AS "
            "anon_1 ON users.id = anon_1.user_id",
            use_default_dialect=True,
        )
        self.assert_compile(
            sess.query(User).join(Order, User.id == Order.user_id),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN orders ON users.id = orders.user_id",
            use_default_dialect=True,
        )
    def test_aliased_classes_m2m(self):
        """Many-to-many joins through orm.join() work with both the plain
        and the aliased Item entity.
        """
        Item, Order = self.classes.Item, self.classes.Order
        sess = fixture_session()
        (order1, order2, order3, order4, order5) = sess.query(Order).all()
        (item1, item2, item3, item4, item5) = sess.query(Item).all()
        expected = [
            (order1, item1),
            (order1, item2),
            (order1, item3),
            (order2, item1),
            (order2, item2),
            (order2, item3),
            (order3, item3),
            (order3, item4),
            (order3, item5),
            (order4, item1),
            (order4, item5),
            (order5, item5),
        ]
        q = sess.query(Order)
        q = (
            q.add_entity(Item)
            .select_from(join(Order, Item, Order.items))
            .order_by(Order.id, Item.id)
        )
        result = q.all()
        eq_(result, expected)
        IAlias = aliased(Item)
        q = (
            sess.query(Order, IAlias)
            .select_from(join(Order, IAlias, Order.items))
            .filter(IAlias.description == "item 3")
        )
        result = q.all()
        eq_(result, [(order1, item3), (order2, item3), (order3, item3)])
    def test_joins_from_adapted_entities(self):
        """outerjoin() against a UNION query adapts User columns to the
        union's anon alias in the ON clause.
        """
        User = self.classes.User
        # test for #1853
        session = fixture_session()
        first = session.query(User)
        second = session.query(User)
        unioned = first.union(second)
        subquery = session.query(User.id).subquery()
        join = subquery, subquery.c.id == User.id
        joined = unioned.outerjoin(*join)
        self.assert_compile(
            joined,
            "SELECT anon_1.users_id AS "
            "anon_1_users_id, anon_1.users_name AS "
            "anon_1_users_name FROM (SELECT users.id "
            "AS users_id, users.name AS users_name "
            "FROM users UNION SELECT users.id AS "
            "users_id, users.name AS users_name FROM "
            "users) AS anon_1 LEFT OUTER JOIN (SELECT "
            "users.id AS id FROM users) AS anon_2 ON "
            "anon_2.id = anon_1.users_id",
            use_default_dialect=True,
        )
        # same pattern with single-column entities
        first = session.query(User.id)
        second = session.query(User.id)
        unioned = first.union(second)
        subquery = session.query(User.id).subquery()
        join = subquery, subquery.c.id == User.id
        joined = unioned.outerjoin(*join)
        self.assert_compile(
            joined,
            "SELECT anon_1.users_id AS anon_1_users_id "
            "FROM (SELECT users.id AS users_id FROM "
            "users UNION SELECT users.id AS users_id "
            "FROM users) AS anon_1 LEFT OUTER JOIN "
            "(SELECT users.id AS id FROM users) AS "
            "anon_2 ON anon_2.id = anon_1.users_id",
            use_default_dialect=True,
        )
    def test_joins_from_adapted_entities_isouter(self):
        """Same as test_joins_from_adapted_entities but spelled as
        join(..., isouter=True); output is identical.
        """
        User = self.classes.User
        # test for #1853
        session = fixture_session()
        first = session.query(User)
        second = session.query(User)
        unioned = first.union(second)
        subquery = session.query(User.id).subquery()
        join = subquery, subquery.c.id == User.id
        joined = unioned.join(*join, isouter=True)
        self.assert_compile(
            joined,
            "SELECT anon_1.users_id AS "
            "anon_1_users_id, anon_1.users_name AS "
            "anon_1_users_name FROM (SELECT users.id "
            "AS users_id, users.name AS users_name "
            "FROM users UNION SELECT users.id AS "
            "users_id, users.name AS users_name FROM "
            "users) AS anon_1 LEFT OUTER JOIN (SELECT "
            "users.id AS id FROM users) AS anon_2 ON "
            "anon_2.id = anon_1.users_id",
            use_default_dialect=True,
        )
        first = session.query(User.id)
        second = session.query(User.id)
        unioned = first.union(second)
        subquery = session.query(User.id).subquery()
        join = subquery, subquery.c.id == User.id
        joined = unioned.join(*join, isouter=True)
        self.assert_compile(
            joined,
            "SELECT anon_1.users_id AS anon_1_users_id "
            "FROM (SELECT users.id AS users_id FROM "
            "users UNION SELECT users.id AS users_id "
            "FROM users) AS anon_1 LEFT OUTER JOIN "
            "(SELECT users.id AS id FROM users) AS "
            "anon_2 ON anon_2.id = anon_1.users_id",
            use_default_dialect=True,
        )
    def test_overlap_with_aliases(self):
        """A Core table alias in select_from() coexists with relationship
        joins against the unaliased orders table.
        """
        orders, User, users = (
            self.tables.orders,
            self.classes.User,
            self.tables.users,
        )
        Order = self.classes.Order
        oalias = orders.alias("oalias")
        result = (
            fixture_session()
            .query(User)
            .select_from(users.join(oalias))
            .filter(
                oalias.c.description.in_(["order 1", "order 2", "order 3"])
            )
            .join(User.orders)
            .join(Order.items)
            .order_by(User.id)
            .all()
        )
        assert [User(id=7, name="jack"), User(id=9, name="fred")] == result
        # narrow further with filter_by against the joined Item
        result = (
            fixture_session()
            .query(User)
            .select_from(users.join(oalias))
            .filter(
                oalias.c.description.in_(["order 1", "order 2", "order 3"])
            )
            .join(User.orders)
            .join(Order.items)
            .filter_by(id=4)
            .all()
        )
        assert [User(id=7, name="jack")] == result
    def test_aliased_order_by(self):
        """ORDER BY columns of an aliased entity in a self-join."""
        User = self.classes.User
        sess = fixture_session()
        ualias = aliased(User)
        eq_(
            sess.query(User, ualias)
            .filter(User.id > ualias.id)
            .order_by(desc(ualias.id), User.name)
            .all(),
            [
                (User(id=10, name="chuck"), User(id=9, name="fred")),
                (User(id=10, name="chuck"), User(id=8, name="ed")),
                (User(id=9, name="fred"), User(id=8, name="ed")),
                (User(id=10, name="chuck"), User(id=7, name="jack")),
                (User(id=8, name="ed"), User(id=7, name="jack")),
                (User(id=9, name="fred"), User(id=7, name="jack")),
            ],
        )
    def test_plain_table(self):
        """A plain Core Table is a valid join target with an explicit ON."""
        addresses, User = self.tables.addresses, self.classes.User
        sess = fixture_session()
        eq_(
            sess.query(User.name)
            .join(addresses, User.id == addresses.c.user_id)
            .order_by(User.id)
            .all(),
            [("jack",), ("ed",), ("ed",), ("ed",), ("fred",)],
        )
    def test_no_joinpoint_expr(self):
        """Joining from a plain column entity to a mapped class raises,
        whether or not select_from() is given.
        """
        User, users = self.classes.User, self.tables.users
        sess = fixture_session()
        # these are consistent regardless of
        # select_from() being present.
        assert_raises_message(
            sa_exc.InvalidRequestError,
            "Don't know how to join to .*User.*. "
            r"Please use the .select_from\(\) "
            "method to establish an explicit left side, as well as",
            sess.query(users.c.id).join(User)._compile_context,
        )
        assert_raises_message(
            sa_exc.InvalidRequestError,
            "Don't know how to join to .*User.* "
            r"Please use the .select_from\(\) "
            "method to establish an explicit left side, as well as",
            sess.query(users.c.id)
            .select_from(users)
            .join(User)
            ._compile_context,
        )
    def test_on_clause_no_right_side_one(self):
        """Passing only a comparison expression (no target) to legacy
        Query.join() raises ArgumentError.
        """
        User = self.classes.User
        Address = self.classes.Address
        sess = fixture_session()
        # coercions does not catch this due to the
        # legacy=True flag for JoinTargetRole
        with expect_raises_message(
            sa_exc.ArgumentError,
            "Join target, typically a FROM expression, or ORM relationship "
            "attribute expected, got",
        ):
            sess.query(User).join(User.id == Address.user_id)
def test_on_clause_no_right_side_one_future(self):
User = self.classes.User
Address = self.classes.Address
# future mode can raise a more specific error at the coercions level
assert_raises_message(
sa_exc.ArgumentError,
"Join target, typically a FROM expression, "
"or ORM relationship attribute expected",
select(User).join,
User.id == Address.user_id,
)
def test_no_legacy_multi_join_two_element(self):
User = self.classes.User
Order = self.classes.Order
sess = fixture_session()
with expect_raises_message(
sa_exc.InvalidRequestError,
"No 'on clause' argument may be passed when joining to a "
"relationship path as a target",
):
sess.query(User).join(User.orders, Order.items)._compile_context()
def test_no_modern_multi_join_two_element(self):
User = self.classes.User
Order = self.classes.Order
sess = fixture_session()
with expect_raises_message(
sa_exc.InvalidRequestError,
"No 'on clause' argument may be passed when joining to a "
"relationship path as a target",
):
sess.execute(select(User).join(User.orders, Order.items))
def test_kw_only_blocks_legacy_multi_join(self):
User = self.classes.User
Order = self.classes.Order
Item = self.classes.Item
sess = fixture_session()
with expect_raises_message(
TypeError,
r".*join\(\) takes from 2 to 3 positional arguments but "
"4 were given",
):
sess.query(User).join(User.orders, Order.items, Item.keywords)
def test_on_clause_no_right_side_two(self):
User = self.classes.User
Address = self.classes.Address
sess = fixture_session()
assert_raises_message(
sa_exc.ArgumentError,
"Join target Address.user_id does not refer to a mapped entity",
sess.query(User).join(Address.user_id)._compile_context,
)
def test_on_clause_no_right_side_two_future(self):
User = self.classes.User
Address = self.classes.Address
stmt = select(User).join(Address.user_id)
assert_raises_message(
sa_exc.ArgumentError,
"Join target Address.user_id does not refer to a mapped entity",
stmt.compile,
)
def test_no_strings_for_single_onclause_legacy_query(self):
User = self.classes.User
sess = fixture_session()
with expect_raises_message(
sa_exc.ArgumentError,
"Join target, typically a FROM expression, or ORM relationship "
"attribute expected, got 'addresses'",
):
sess.query(User).join("addresses")
def test_no_strings_for_single_onclause_newstyle(self):
User = self.classes.User
with expect_raises_message(
sa_exc.ArgumentError,
"Join target, typically a FROM expression, or ORM relationship "
"attribute expected, got 'addresses'",
):
select(User).join("addresses")
def test_no_strings_for_dual_onclause_legacy_query(self):
User = self.classes.User
Address = self.classes.Address
sess = fixture_session()
with expect_raises_message(
sa_exc.ArgumentError,
"ON clause, typically a SQL expression or ORM relationship "
"attribute expected, got 'addresses'",
):
sess.query(User).join(Address, "addresses")
def test_no_strings_for_dual_onclause_newstyle(self):
User = self.classes.User
Address = self.classes.Address
with expect_raises_message(
sa_exc.ArgumentError,
"ON clause, typically a SQL expression or ORM relationship "
"attribute expected, got 'addresses'.",
):
select(User).join(Address, "addresses")
def test_select_from(self):
"""Test that the left edge of the join can be set reliably with
select_from()."""
Item, Order, User = (
self.classes.Item,
self.classes.Order,
self.classes.User,
)
sess = fixture_session()
self.assert_compile(
sess.query(Item.id)
.select_from(User)
.join(User.orders)
.join(Order.items),
"SELECT items.id AS items_id FROM users JOIN orders ON "
"users.id = orders.user_id JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id JOIN items ON items.id = "
"order_items_1.item_id",
use_default_dialect=True,
)
# here, the join really wants to add a second FROM clause
# for "Item". but select_from disallows that
self.assert_compile(
sess.query(Item.id)
.select_from(User)
.join(Item, User.id == Item.id),
"SELECT items.id AS items_id FROM users JOIN items "
"ON users.id = items.id",
use_default_dialect=True,
)
|
JoinTest
|
python
|
Farama-Foundation__Gymnasium
|
gymnasium/spaces/multi_discrete.py
|
{
"start": 389,
"end": 11922
}
|
class ____(Space[NDArray[np.integer]]):
"""This represents the cartesian product of arbitrary :class:`Discrete` spaces.
It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space.
Note:
Some environment wrappers assume a value of 0 always represents the NOOP action.
e.g. Nintendo Game Controller - Can be conceptualized as 3 discrete action spaces:
1. Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2. Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3. Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
It can be initialized as ``MultiDiscrete([ 5, 2, 2 ])`` such that a sample might be ``array([3, 1, 0])``.
Although this feature is rarely used, :class:`MultiDiscrete` spaces may also have several axes
if ``nvec`` has several axes:
Example:
>>> from gymnasium.spaces import MultiDiscrete
>>> import numpy as np
>>> observation_space = MultiDiscrete(np.array([[1, 2], [3, 4]]), seed=42)
>>> observation_space.sample()
array([[0, 0],
[2, 2]])
"""
def __init__(
self,
nvec: NDArray[np.integer[Any]] | list[int],
dtype: str | type[np.integer[Any]] = np.int64,
seed: int | np.random.Generator | None = None,
start: NDArray[np.integer[Any]] | list[int] | None = None,
):
"""Constructor of :class:`MultiDiscrete` space.
The argument ``nvec`` will determine the number of values each categorical variable can take. If
``start`` is provided, it will define the minimal values corresponding to each categorical variable.
Args:
nvec: vector of counts of each categorical variable. This will usually be a list of integers. However,
you may also pass a more complicated numpy array if you'd like the space to have several axes.
dtype: This should be some kind of integer type.
seed: Optionally, you can use this argument to seed the RNG that is used to sample from the space.
start: Optionally, the starting value the element of each class will take (defaults to 0).
"""
# determine dtype
if dtype is None:
raise ValueError(
"MultiDiscrete dtype must be explicitly provided, cannot be None."
)
self.dtype = np.dtype(dtype)
# * check that dtype is an accepted dtype
if not (np.issubdtype(self.dtype, np.integer)):
raise ValueError(
f"Invalid MultiDiscrete dtype ({self.dtype}), must be an integer dtype"
)
self.nvec = np.array(nvec, dtype=dtype, copy=True)
if start is not None:
self.start = np.array(start, dtype=dtype, copy=True)
else:
self.start = np.zeros(self.nvec.shape, dtype=dtype)
assert (
self.start.shape == self.nvec.shape
), "start and nvec (counts) should have the same shape"
assert (self.nvec > 0).all(), "nvec (counts) have to be positive"
super().__init__(self.nvec.shape, self.dtype, seed)
@property
def shape(self) -> tuple[int, ...]:
"""Has stricter type than :class:`gym.Space` - never None."""
return self._shape # type: ignore
@property
def is_np_flattenable(self):
"""Checks whether this space can be flattened to a :class:`spaces.Box`."""
return True
def sample(
self,
mask: tuple[MaskNDArray, ...] | None = None,
probability: tuple[MaskNDArray, ...] | None = None,
) -> NDArray[np.integer[Any]]:
"""Generates a single random sample from this space.
Args:
mask: An optional mask for multi-discrete, expects tuples with a ``np.ndarray`` mask in the position of each
action with shape ``(n,)`` where ``n`` is the number of actions and ``dtype=np.int8``.
Only ``mask values == 1`` are possible to sample unless all mask values for an action are ``0`` then the default action ``self.start`` (the smallest element) is sampled.
probability: An optional probability mask for multi-discrete, expects tuples with a ``np.ndarray`` probability mask in the position of each
action with shape ``(n,)`` where ``n`` is the number of actions and ``dtype=np.float64``.
Only probability mask values within ``[0,1]`` are possible to sample as long as the sum of all values is ``1``.
Returns:
An ``np.ndarray`` of :meth:`Space.shape`
"""
if mask is not None and probability is not None:
raise ValueError(
f"Only one of `mask` or `probability` can be provided, actual values: mask={mask}, probability={probability}"
)
elif mask is not None:
return np.array(
self._apply_mask(mask, self.nvec, self.start, "mask"),
dtype=self.dtype,
)
elif probability is not None:
return np.array(
self._apply_mask(probability, self.nvec, self.start, "probability"),
dtype=self.dtype,
)
else:
return (self.np_random.random(self.nvec.shape) * self.nvec).astype(
self.dtype
) + self.start
def _apply_mask(
self,
sub_mask: MaskNDArray | tuple[MaskNDArray, ...],
sub_nvec: MaskNDArray | np.integer[Any],
sub_start: MaskNDArray | np.integer[Any],
mask_type: str,
) -> int | list[Any]:
"""Returns a sample using the provided mask or probability mask."""
if isinstance(sub_nvec, np.ndarray):
assert isinstance(
sub_mask, tuple
), f"Expects the mask to be a tuple for sub_nvec ({sub_nvec}), actual type: {type(sub_mask)}"
assert len(sub_mask) == len(
sub_nvec
), f"Expects the mask length to be equal to the number of actions, mask length: {len(sub_mask)}, nvec length: {len(sub_nvec)}"
return [
self._apply_mask(new_mask, new_nvec, new_start, mask_type)
for new_mask, new_nvec, new_start in zip(sub_mask, sub_nvec, sub_start)
]
assert np.issubdtype(
type(sub_nvec), np.integer
), f"Expects the sub_nvec to be an action, actually: {sub_nvec}, {type(sub_nvec)}"
assert isinstance(
sub_mask, np.ndarray
), f"Expects the sub mask to be np.ndarray, actual type: {type(sub_mask)}"
assert (
len(sub_mask) == sub_nvec
), f"Expects the mask length to be equal to the number of actions, mask length: {len(sub_mask)}, action: {sub_nvec}"
if mask_type == "mask":
assert (
sub_mask.dtype == np.int8
), f"Expects the mask dtype to be np.int8, actual dtype: {sub_mask.dtype}"
valid_action_mask = sub_mask == 1
assert np.all(
np.logical_or(sub_mask == 0, valid_action_mask)
), f"Expects all masks values to 0 or 1, actual values: {sub_mask}"
if np.any(valid_action_mask):
return self.np_random.choice(np.where(valid_action_mask)[0]) + sub_start
else:
return sub_start
elif mask_type == "probability":
assert (
sub_mask.dtype == np.float64
), f"Expects the mask dtype to be np.float64, actual dtype: {sub_mask.dtype}"
valid_action_mask = np.logical_and(sub_mask > 0, sub_mask <= 1)
assert np.all(
np.logical_or(sub_mask == 0, valid_action_mask)
), f"Expects all masks values to be between 0 and 1, actual values: {sub_mask}"
assert np.isclose(
np.sum(sub_mask), 1
), f"Expects the sum of all mask values to be 1, actual sum: {np.sum(sub_mask)}"
normalized_sub_mask = sub_mask / np.sum(sub_mask)
return (
self.np_random.choice(
np.where(valid_action_mask)[0],
p=normalized_sub_mask[valid_action_mask],
)
+ sub_start
)
raise ValueError(f"Unsupported mask type: {mask_type}")
def contains(self, x: Any) -> bool:
"""Return boolean specifying if x is a valid member of this space."""
if isinstance(x, Sequence):
x = np.array(x) # Promote list to array for contains check
# if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x
# is within correct bounds for space dtype (even though x does not have to be unsigned)
return bool(
isinstance(x, np.ndarray)
and x.shape == self.shape
and np.can_cast(x.dtype, self.dtype)
and np.all(self.start <= x)
and np.all(x - self.start < self.nvec)
)
def to_jsonable(
self, sample_n: Sequence[NDArray[np.integer[Any]]]
) -> list[Sequence[int]]:
"""Convert a batch of samples from this space to a JSONable data type."""
return [sample.tolist() for sample in sample_n]
def from_jsonable(
self, sample_n: list[Sequence[int]]
) -> list[NDArray[np.integer[Any]]]:
"""Convert a JSONable data type to a batch of samples from this space."""
return [np.array(sample, dtype=self.dtype) for sample in sample_n]
def __repr__(self):
"""Gives a string representation of this space."""
if np.any(self.start != 0):
return f"MultiDiscrete({self.nvec}, start={self.start})"
return f"MultiDiscrete({self.nvec})"
def __getitem__(self, index: int | tuple[int, ...]):
"""Extract a subspace from this ``MultiDiscrete`` space."""
nvec = self.nvec[index]
start = self.start[index]
if nvec.ndim == 0:
subspace = Discrete(nvec, start=start, dtype=self.dtype)
else:
subspace = MultiDiscrete(nvec, start=start, dtype=self.dtype)
# you don't need to deepcopy as np random generator call replaces the state not the data
subspace.np_random.bit_generator.state = self.np_random.bit_generator.state
return subspace
def __len__(self):
"""Gives the ``len`` of samples from this space."""
if self.nvec.ndim >= 2:
gym.logger.warn(
"Getting the length of a multi-dimensional MultiDiscrete space."
)
return len(self.nvec)
def __eq__(self, other: Any) -> bool:
"""Check whether ``other`` is equivalent to this instance."""
return bool(
isinstance(other, MultiDiscrete)
and self.dtype == other.dtype
and self.shape == other.shape
and np.all(self.nvec == other.nvec)
and np.all(self.start == other.start)
)
def __setstate__(self, state: Iterable[tuple[str, Any]] | Mapping[str, Any]):
"""Used when loading a pickled space.
This method has to be implemented explicitly to allow for loading of legacy states.
Args:
state: The new state
"""
state = dict(state)
if "start" not in state:
state["start"] = np.zeros(state["_shape"], dtype=state["dtype"])
super().__setstate__(state)
|
MultiDiscrete
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/descriptor1.py
|
{
"start": 1735,
"end": 2041
}
|
class ____:
@overload
def __get__(self, instance: None, owner: Any) -> int: ...
@overload
def __get__(self, instance: Any, owner: Any) -> str: ...
def __get__(self, instance: Any, owner: Any) -> int | str: ...
def __set__(self, owner: Any, value: int | None) -> None: ...
|
Descriptor4
|
python
|
rushter__MLAlgorithms
|
mla/ensemble/random_forest.py
|
{
"start": 169,
"end": 1687
}
|
class ____(BaseEstimator):
def __init__(
self,
n_estimators=10,
max_features=None,
min_samples_split=10,
max_depth=None,
criterion=None,
):
"""Base class for RandomForest.
Parameters
----------
n_estimators : int
The number of decision tree.
max_features : int
The number of features to consider when looking for the best split.
min_samples_split : int
The minimum number of samples required to split an internal node.
max_depth : int
Maximum depth of the tree.
criterion : str
The function to measure the quality of a split.
"""
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.max_features = max_features
self.n_estimators = n_estimators
self.trees = []
def fit(self, X, y):
self._setup_input(X, y)
if self.max_features is None:
self.max_features = int(np.sqrt(X.shape[1]))
else:
assert X.shape[1] > self.max_features
self._train()
def _train(self):
for tree in self.trees:
tree.train(
self.X,
self.y,
max_features=self.max_features,
min_samples_split=self.min_samples_split,
max_depth=self.max_depth,
)
def _predict(self, X=None):
raise NotImplementedError()
|
RandomForest
|
python
|
chardet__chardet
|
chardet/resultdict.py
|
{
"start": 41,
"end": 148
}
|
class ____(TypedDict):
encoding: Optional[str]
confidence: float
language: Optional[str]
|
ResultDict
|
python
|
more-itertools__more-itertools
|
tests/test_more.py
|
{
"start": 25419,
"end": 27563
}
|
class ____(TestCase):
def test_basic(self):
iterable = [1, 2, 3, 4, 5]
for n, expected in (
(6, [(1, 2, 3, 4, 5, None)]),
(5, [(1, 2, 3, 4, 5)]),
(4, [(1, 2, 3, 4), (2, 3, 4, 5)]),
(3, [(1, 2, 3), (2, 3, 4), (3, 4, 5)]),
(2, [(1, 2), (2, 3), (3, 4), (4, 5)]),
(1, [(1,), (2,), (3,), (4,), (5,)]),
(0, [()]),
):
with self.subTest(n=n):
actual = list(mi.windowed(iterable, n))
self.assertEqual(actual, expected)
def test_fillvalue(self):
actual = list(mi.windowed([1, 2, 3, 4, 5], 6, fillvalue='!'))
expected = [(1, 2, 3, 4, 5, '!')]
self.assertEqual(actual, expected)
def test_step(self):
iterable = [1, 2, 3, 4, 5, 6, 7]
for n, step, expected in [
(3, 2, [(1, 2, 3), (3, 4, 5), (5, 6, 7)]), # n > step
(3, 3, [(1, 2, 3), (4, 5, 6), (7, None, None)]), # n == step
(3, 4, [(1, 2, 3), (5, 6, 7)]), # lines up nicely
(3, 5, [(1, 2, 3), (6, 7, None)]), # off by one
(3, 6, [(1, 2, 3), (7, None, None)]), # off by two
(3, 7, [(1, 2, 3)]), # step past the end
(7, 8, [(1, 2, 3, 4, 5, 6, 7)]), # step > len(iterable)
]:
with self.subTest(n=n, step=step):
actual = list(mi.windowed(iterable, n, step=step))
self.assertEqual(actual, expected)
def test_invalid_step(self):
# Step must be greater than or equal to 1
with self.assertRaises(ValueError):
list(mi.windowed([1, 2, 3, 4, 5], 3, step=0))
def test_fillvalue_step(self):
actual = list(mi.windowed([1, 2, 3, 4, 5], 3, fillvalue='!', step=3))
expected = [(1, 2, 3), (4, 5, '!')]
self.assertEqual(actual, expected)
def test_negative(self):
with self.assertRaises(ValueError):
list(mi.windowed([1, 2, 3, 4, 5], -1))
def test_empty_seq(self):
actual = list(mi.windowed([], 3))
expected = []
self.assertEqual(actual, expected)
|
WindowedTests
|
python
|
apache__airflow
|
shared/secrets_masker/tests/secrets_masker/test_secrets_masker.py
|
{
"start": 2943,
"end": 15055
}
|
class ____:
def test_message(self, logger, caplog):
logger.info("XpasswordY")
assert caplog.text == "INFO X***Y\n"
def test_args(self, logger, caplog):
logger.info("Cannot connect to %s", "user:password")
assert caplog.text == "INFO Cannot connect to user:***\n"
def test_extra(self, logger, caplog):
with patch.object(
logger.handlers[0], "formatter", ShortExcFormatter("%(levelname)s %(message)s %(conn)s")
):
logger.info("Cannot connect", extra={"conn": "user:password"})
assert caplog.text == "INFO Cannot connect user:***\n"
def test_exception(self, logger, caplog):
try:
conn = "user:password"
raise RuntimeError("Cannot connect to " + conn)
except RuntimeError:
logger.exception("Err")
line = lineno() - 4
assert caplog.text == textwrap.dedent(
f"""\
ERROR Err
Traceback (most recent call last):
File ".../test_secrets_masker.py", line {line}, in test_exception
raise RuntimeError("Cannot connect to " + conn)
RuntimeError: Cannot connect to user:***
"""
)
def test_exception_not_raised(self, logger, caplog):
"""
Test that when ``logger.exception`` is called when there is no current exception we still log.
(This is a "bug" in user code, but we shouldn't die because of it!)
"""
logger.exception("Err")
assert caplog.text == textwrap.dedent(
"""\
ERROR Err
NoneType: None
"""
)
@pytest.mark.xfail(reason="Cannot filter secrets in traceback source")
def test_exc_tb(self, logger, caplog):
"""
Show it is not possible to filter secrets in the source.
It is not possible to (regularly/reliably) filter out secrets that
appear directly in the source code. This is because the formatting of
exc_info is not done in the filter, it is done after the filter is
called, and fixing this "properly" is hard/impossible.
(It would likely need to construct a custom traceback that changed the
source. I have no idea if that is even possible)
This test illustrates that, but ix marked xfail in case someone wants to
fix this later.
"""
try:
raise RuntimeError("Cannot connect to user:password")
except RuntimeError:
logger.exception("Err")
line = lineno() - 4
assert caplog.text == textwrap.dedent(
f"""\
ERROR Err
Traceback (most recent call last):
File ".../test_secrets_masker.py", line {line}, in test_exc_tb
raise RuntimeError("Cannot connect to user:***)
RuntimeError: Cannot connect to user:***
"""
)
def test_masking_in_implicit_context_exceptions(self, logger, caplog):
"""
Show that redacting password works in context exceptions.
"""
try:
try:
try:
raise RuntimeError(f"Cannot connect to user:{PASSWORD}")
except RuntimeError as ex1:
raise RuntimeError(f"Exception: {ex1}")
except RuntimeError as ex2:
raise RuntimeError(f"Exception: {ex2}")
except RuntimeError:
logger.exception("Err")
assert "user:password" not in caplog.text
assert caplog.text.count("user:***") >= 2
def test_masking_in_explicit_context_exceptions(self, logger, caplog):
"""
Show that redacting password works in context exceptions.
"""
exception = None
try:
raise RuntimeError(f"Cannot connect to user:{PASSWORD}")
except RuntimeError as ex:
exception = ex
try:
raise RuntimeError(f"Exception: {exception}") from exception
except RuntimeError:
logger.exception("Err")
line = lineno() - 8
assert caplog.text == textwrap.dedent(
f"""\
ERROR Err
Traceback (most recent call last):
File ".../test_secrets_masker.py", line {line}, in test_masking_in_explicit_context_exceptions
raise RuntimeError(f"Cannot connect to user:{{PASSWORD}}")
RuntimeError: Cannot connect to user:***
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File ".../test_secrets_masker.py", line {line + 4}, in test_masking_in_explicit_context_exceptions
raise RuntimeError(f"Exception: {{exception}}") from exception
RuntimeError: Exception: Cannot connect to user:***
"""
)
@pytest.mark.parametrize(
("name", "value", "expected_mask"),
[
(None, "secret", {"secret"}),
("apikey", "secret", {"secret"}),
# the value for "apikey", and "password" should end up masked
(None, {"apikey": "secret", "other": {"val": "innocent", "password": "foo"}}, {"secret"}),
(None, ["secret", "other"], {"secret", "other"}),
# When the "sensitive value" is a dict, don't mask anything
# (Or should this be mask _everything_ under it ?
("api_key", {"other": "innoent"}, set()),
(None, {"password": ""}, set()),
(None, "", set()),
],
)
def test_mask_secret(self, name, value, expected_mask):
filt = SecretsMasker()
configure_secrets_masker_for_test(filt)
filt.add_mask(value, name)
assert filt.patterns == expected_mask
@pytest.mark.parametrize(
("patterns", "name", "value", "expected"),
[
({"secret"}, None, "secret", "***"),
(
{"secret", "foo"},
None,
{"apikey": "secret", "other": {"val": "innocent", "password": "foo"}},
{"apikey": "***", "other": {"val": "innocent", "password": "***"}},
),
({"secret", "other"}, None, ["secret", "other"], ["***", "***"]),
# We don't mask dict _keys_.
({"secret", "other"}, None, {"data": {"secret": "secret"}}, {"data": {"secret": "***"}}),
# Non string dict keys
({"secret", "other"}, None, {1: {"secret": "secret"}}, {1: {"secret": "***"}}),
(
# Since this is a sensitive name, all the values should be redacted!
{"secret"},
"api_key",
{"other": "innoent", "nested": ["x", "y"]},
{"other": "***", "nested": ["***", "***"]},
),
(
# Test that masking still works based on name even when no patterns given
set(),
"env",
{"api_key": "masked based on key name", "other": "foo"},
{"api_key": "***", "other": "foo"},
),
],
)
def test_redact(self, patterns, name, value, expected):
filt = SecretsMasker()
configure_secrets_masker_for_test(filt)
for val in patterns:
filt.add_mask(val)
assert filt.redact(value, name) == expected
@pytest.mark.parametrize(
("name", "value", "expected"),
[
("api_key", "pass", "*️⃣*️⃣*️⃣"),
("api_key", ("pass",), ("*️⃣*️⃣*️⃣",)),
(None, {"data": {"secret": "secret"}}, {"data": {"secret": "*️⃣*️⃣*️⃣"}}),
# Non string dict keys
(None, {1: {"secret": "secret"}}, {1: {"secret": "*️⃣*️⃣*️⃣"}}),
(
"api_key",
{"other": "innoent", "nested": ["x", "y"]},
{"other": "*️⃣*️⃣*️⃣", "nested": ["*️⃣*️⃣*️⃣", "*️⃣*️⃣*️⃣"]},
),
],
)
def test_redact_replacement(self, name, value, expected):
filt = SecretsMasker()
configure_secrets_masker_for_test(filt)
assert filt.redact(value, name, replacement="*️⃣*️⃣*️⃣") == expected
def test_redact_filehandles(self, caplog):
filt = SecretsMasker()
configure_secrets_masker_for_test(filt)
with open("/dev/null", "w") as handle:
assert filt.redact(handle, None) == handle
# We shouldn't have logged a warning here
assert caplog.messages == []
@pytest.mark.parametrize(
("val", "expected", "max_depth"),
[
(["abcdef"], ["***"], None),
(["abcdef"], ["***"], 1),
([[[["abcdef"]]]], [[[["***"]]]], None),
([[[[["abcdef"]]]]], [[[[["***"]]]]], None),
# Items below max depth aren't redacted
([[[[[["abcdef"]]]]]], [[[[[["abcdef"]]]]]], None),
([["abcdef"]], [["abcdef"]], 1),
],
)
def test_redact_max_depth(self, val, expected, max_depth):
secrets_masker = SecretsMasker()
configure_secrets_masker_for_test(secrets_masker)
secrets_masker.add_mask("abcdef")
with patch(
"airflow_shared.secrets_masker.secrets_masker._secrets_masker", return_value=secrets_masker
):
got = redact(val, max_depth=max_depth)
assert got == expected
def test_redact_with_str_type(self, logger, caplog):
"""
SecretsMasker's re replacer has issues handling a redactable item of type
`str` with required constructor args. This test ensures there is a shim in
place that avoids any issues.
See: https://github.com/apache/airflow/issues/19816#issuecomment-983311373
"""
class StrLikeClassWithRequiredConstructorArg(str):
def __init__(self, required_arg):
pass
text = StrLikeClassWithRequiredConstructorArg("password")
logger.info("redacted: %s", text)
# we expect the object's __str__() output to be logged (no warnings due to a failed masking)
assert caplog.messages == ["redacted: ***"]
@pytest.mark.parametrize(
("state", "expected"),
[
(MyEnum.testname, "testvalue"),
],
)
def test_redact_state_enum(self, logger, caplog, state, expected):
logger.info("State: %s", state)
assert caplog.text == f"INFO State: {expected}\n"
assert "TypeError" not in caplog.text
def test_reset_secrets_masker(
self,
):
secrets_masker = SecretsMasker()
configure_secrets_masker_for_test(secrets_masker)
secrets_masker.add_mask("mask_this")
secrets_masker.add_mask("and_this")
secrets_masker.add_mask("maybe_this_too")
val = ["mask_this", "and_this", "maybe_this_too"]
with patch(
"airflow_shared.secrets_masker.secrets_masker._secrets_masker", return_value=secrets_masker
):
got = redact(val)
assert got == ["***"] * 3
reset_secrets_masker()
got = redact(val)
assert got == val
def test_property_for_log_masking(self, monkeypatch):
"""Test that log masking enable/disable methods."""
# store the original state before any patching
state = SecretsMasker.mask_secrets_in_logs
with monkeypatch.context() as mp:
mp.setattr(SecretsMasker, "mask_secrets_in_logs", True)
masker1 = SecretsMasker()
masker2 = SecretsMasker()
assert masker1.is_log_masking_enabled()
assert masker2.is_log_masking_enabled()
masker2.disable_log_masking()
assert not masker1.is_log_masking_enabled()
assert not masker2.is_log_masking_enabled()
masker1.enable_log_masking()
assert masker1.is_log_masking_enabled()
assert masker2.is_log_masking_enabled()
# assert we restored the original state
assert SecretsMasker.mask_secrets_in_logs == state
|
TestSecretsMasker
|
python
|
facebookresearch__faiss
|
tests/test_index_accuracy.py
|
{
"start": 21222,
"end": 21974
}
|
class ____(unittest.TestCase):
def test_roundoff(self):
# params that force use of BLAS implementation
nb = 100
nq = 25
d = 4
xb = np.zeros((nb, d), dtype="float32")
xb[:, 0] = np.arange(nb) + 12345
xq = xb[:nq] + 0.3
index = faiss.IndexFlat(d)
index.add(xb)
D, I = index.search(xq, 1)
# this does not work
assert not np.all(I.ravel() == np.arange(nq))
index = faiss.IndexPreTransform(faiss.CenteringTransform(d),
faiss.IndexFlat(d))
index.train(xb)
index.add(xb)
D, I = index.search(xq, 1)
# this works
assert np.all(I.ravel() == np.arange(nq))
|
TestRoundoff
|
python
|
pytorch__pytorch
|
test/quantization/core/test_workflow_module.py
|
{
"start": 21472,
"end": 27066
}
|
class ____(HistogramObserver):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@torch.jit.ignore
def _non_linear_param_search(self):
r"""Non-linear parameter search.
An approximation for L2 error minimization for selecting min/max.
By selecting new min/max, we filter out outliers in input distribution.
This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in
caffe2/quantization/server/norm_minimization.cc
"""
def _get_norm(delta_begin, delta_end, density, norm_type):
r"""
Compute the norm of the values uniformaly distributed between
delta_begin and delta_end.
norm = density * (integral_{begin, end} x^2)
= density * (end^3 - begin^3) / 3
"""
assert norm_type == "L2", "Only L2 norms are currently supported"
norm = 0.0
if norm_type == "L2":
norm = (
delta_end * delta_end * delta_end
- delta_begin * delta_begin * delta_begin
) / 3
return density * norm
def _compute_quantization_error(next_start_bin, next_end_bin, norm_type):
r"""
Compute the quantization error if we use start_bin to end_bin as the
min and max to do the quantization.
"""
bin_width = (self.max_val.item() - self.min_val.item()) / self.bins
norm = 0.0
dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins
if dst_bin_width == 0.0:
return 0.0
for src_bin in range(self.bins):
# distances from the beginning of first dst_bin to the beginning and
# end of src_bin
src_bin_begin = (src_bin - next_start_bin) * bin_width
src_bin_end = src_bin_begin + bin_width
# which dst_bins the beginning and end of src_bin belong to?
dst_bin_of_begin = min(
self.dst_nbins - 1, max(0.0, math.floor(src_bin_begin / dst_bin_width))
)
dst_bin_of_end = min(
self.dst_nbins - 1, max(0.0, math.floor(src_bin_end / dst_bin_width))
)
dst_bin_of_begin_center = (
dst_bin_of_begin * dst_bin_width + dst_bin_width / 2
)
density = self.histogram[src_bin] / bin_width
if dst_bin_of_begin == dst_bin_of_end:
# if src_bin is entirely within 1 dst_bin
delta_begin = src_bin_begin - dst_bin_of_begin_center
delta_end = src_bin_end - dst_bin_of_begin_center
norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
else:
delta_begin = src_bin_begin - dst_bin_of_begin_center
delta_end = dst_bin_width / 2
norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
norm = norm + (dst_bin_of_end - dst_bin_of_begin - 1) * _get_norm(
-dst_bin_width / 2, dst_bin_width / 2, density, norm_type
)
dst_bin_of_end_center = (
dst_bin_of_end * dst_bin_width + dst_bin_width / 2
)
delta_begin = -dst_bin_width / 2
delta_end = src_bin_end - dst_bin_of_end_center
norm = norm + _get_norm(delta_begin, delta_end, density, norm_type)
return norm
assert self.histogram.size()[0] == self.bins, "bins mismatch"
bin_width = (self.max_val - self.min_val) / self.bins
# cumulative sum
total = torch.sum(self.histogram).item()
cSum = torch.cumsum(self.histogram, dim=0)
stepsize = 1e-5 # granularity
alpha = 0.0 # lower bound
beta = 1.0 # upper bound
start_bin = 0
end_bin = self.bins - 1
norm_min = float("inf")
while alpha < beta:
# Find the next step
next_alpha = alpha + stepsize
next_beta = beta - stepsize
# find the left and right bins between the quantile bounds
l = start_bin
r = end_bin
while l < end_bin and cSum[l] < next_alpha * total:
l = l + 1
while r > start_bin and cSum[r] > next_beta * total:
r = r - 1
# decide the next move
next_start_bin = start_bin
next_end_bin = end_bin
if (l - start_bin) > (end_bin - r):
# move the start bin
next_start_bin = l
alpha = next_alpha
else:
# move the end bin
next_end_bin = r
beta = next_beta
if next_start_bin == start_bin and next_end_bin == end_bin:
continue
# calculate the quantization error using next_start_bin and next_end_bin
norm = _compute_quantization_error(next_start_bin, next_end_bin, "L2")
if norm > norm_min:
break
norm_min = norm
start_bin = next_start_bin
end_bin = next_end_bin
new_min = self.min_val + bin_width * start_bin
new_max = self.min_val + bin_width * (end_bin + 1)
return new_min, new_max
|
_ReferenceHistogramObserver
|
python
|
pytorch__pytorch
|
test/distributed/_shard/sharded_tensor/test_sharded_tensor.py
|
{
"start": 14633,
"end": 48410
}
|
class ____(ShardedTensorTestBase):
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_metadata(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
st_metadata = st.metadata()
self.assertEqual(torch.Size([10, 20]), st_metadata.size)
self.assertEqual(torch.Size([10, 20]), st.size())
self.assertEqual(torch.float, st.dtype)
self.assertEqual(torch.strided, st.layout)
self.assertEqual(False, st.requires_grad)
self.assertTrue(st.is_contiguous())
self.assertFalse(st.is_pinned())
st = sharded_tensor.empty(spec, 10, 20, requires_grad=True, init_rrefs=True)
self.assertEqual(True, st.requires_grad)
st = sharded_tensor.empty(spec, 10, 20, dtype=torch.double, init_rrefs=True)
self.assertEqual(torch.double, st.dtype)
# Need CPU for pin_memory
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cpu",
"rank:1/cpu",
"rank:2/cpu",
"rank:3/cpu",
],
)
st = sharded_tensor.empty(spec, 10, 20, pin_memory=True, init_rrefs=True)
self.assertEqual(True, st.is_pinned())
# test read only properties, they're read only as we can't simply change
# the global metadata without changing the underlying shard's properties
with self.assertRaisesRegex(RuntimeError, "torch function '__set__'"):
st.requires_grad = True
@skipIfRocm
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_complete_world_size(self):
for dim in [0, -2]:
spec = ChunkShardingSpec(
dim=dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
# Validate local shard.
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
if self.rank == 3:
self.assertEqual((1, 20), local_shard.size())
else:
self.assertEqual((3, 20), local_shard.size())
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([rank * 3, 0], shard_metadata.shard_offsets)
if rank == 3:
self.assertEqual([1, 20], shard_metadata.shard_sizes)
else:
self.assertEqual([3, 20], shard_metadata.shard_sizes)
self.assertEqual(
f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement)
)
# Validate remote shards.
remote_shards = st.remote_shards()
self.assertEqual(3, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual(
f"rank:{rpc_rank}/cuda:{rpc_rank}",
str(shard.metadata.placement),
)
if rpc_rank == 3:
self.assertEqual((1, 20), shard.tensor.size())
else:
self.assertEqual((3, 20), shard.tensor.size())
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_ones(self):
"""Test sharded_tensor.ones(...)"""
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
st = sharded_tensor.ones(spec, h, w)
# Validate local shard is initialized with torch.ones
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
# The split: for rank!=3 ceil(h/4)=3 for rank=3 1
expected_h = 1 if self.rank == 3 else math.ceil(h / 4)
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(local_shard, torch.ones(expected_h, w))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_even(self) -> None:
"""Test _sharded_tensor.gather(...) with evenly distributed._shards"""
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
st = sharded_tensor.ones(spec, h, w)
full_tensor = None
dst = 1
if self.rank == dst:
full_tensor = torch.zeros(
h,
w,
device=torch.device(f"cuda:{dst}"),
)
st.gather(dst, full_tensor)
if self.rank == dst:
self.assertEqual(full_tensor, torch.ones(h, w))
else:
self.assertIsNone(full_tensor)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_gather_uneven(self) -> None:
"""Test _sharded_tensor.gather(...) with unevenly distributed._shards"""
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:1/cuda:1",
"rank:2/cuda:2",
],
)
h, w = 10, 20
st = sharded_tensor.ones(spec, h, w)
full_tensor = None
dst = 1
if self.rank == dst:
full_tensor = torch.zeros(
h,
w,
device=torch.device(f"cuda:{dst}"),
)
st.gather(dst, full_tensor)
if self.rank == dst:
self.assertEqual(full_tensor, torch.ones(h, w))
else:
self.assertIsNone(full_tensor)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_zeros(self):
"""Test sharded_tensor.zeros(...)"""
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
st = sharded_tensor.zeros(spec, h, w)
# Validate local shard is initialized with torch.zeros
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
# The split: for rank!=3 ceil(h/4)=3 for rank=3 1
expected_h = 1 if self.rank == 3 else math.ceil(h / 4)
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(local_shard, torch.zeros(expected_h, w))
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_rand(self):
"""Test sharded_tensor.rand(...)/randn(...)"""
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 8, 2
seed = 1234
expected_h = 2
expected_device = torch.device(f"cuda:{self.rank}")
dtype = torch.double
torch.manual_seed(seed)
# Test sharded_tensor.rand creation
expected = torch.rand(expected_h, w, device=expected_device, dtype=dtype)
# reset seed to ensure the same random numbers are generated
torch.manual_seed(seed)
st = sharded_tensor.rand(spec, h, w, dtype=dtype)
# Validate local shard is initialized with torch.rand
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(expected_device, local_shard.device)
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(expected, local_shard)
# Test sharded_tensor.randn creation
torch.manual_seed(seed)
expected_randn = torch.randn(expected_h, w, device=expected_device, dtype=dtype)
# reset seed to ensure the same random numbers are generated
torch.manual_seed(seed)
st_randn = sharded_tensor.randn(spec, h, w, dtype=dtype)
# Validate local shard is initialized with torch.randn
local_shards = st_randn.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(expected_device, local_shard.device)
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(expected_randn, local_shard)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_with_full(self):
"""Test sharded_tensor.full(...)"""
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 10, 20
fill_value = 1234
st = sharded_tensor.full(
spec, size=(h, w), fill_value=fill_value, dtype=torch.int32
)
# Validate local shard is initialized with torch.full
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
# The split: for rank!=3 ceil(h/4)=3 for rank=3 1
expected_h = 1 if self.rank == 3 else math.ceil(h / 4)
self.assertEqual((expected_h, w), local_shard.size())
self.assertEqual(
local_shard,
torch.full(size=(expected_h, w), fill_value=fill_value, dtype=torch.int32),
)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_create_sharded_tensor_like(self):
"""Test tensor like methods, i.e. torch.zeros_like(...), torch.full_like, etc."""
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
h, w = 8, 8
expected_h = 2
seed = 1234
dtype = torch.double
expected_device = torch.device(f"cuda:{self.rank}")
st = sharded_tensor.rand(spec, (h, w), dtype=dtype)
tensor_like_ops = {
torch.zeros_like: torch.zeros,
torch.ones_like: torch.ones,
torch.rand_like: torch.rand,
torch.randn_like: torch.randn,
torch.empty_like: torch.empty,
torch.full_like: torch.full,
}
for op, expect_local_op in tensor_like_ops.items():
if op == torch.full_like:
# special handle full/full_like as it needs to have additional fill_value arg
expect_tensor = expect_local_op(
(expected_h, w), 8.8, device=expected_device, dtype=dtype
)
new_op_st = op(st, 8.8, dtype=dtype)
self.assertEqual(new_op_st.local_tensor(), expect_tensor)
elif op == torch.empty_like:
# empty/empty_like we only compare the shape
expect_tensor = expect_local_op(
expected_h, w, device=expected_device, dtype=dtype
)
new_op_st = op(st, dtype=dtype)
self.assertEqual(new_op_st.local_tensor().shape, expect_tensor.shape)
else:
torch.manual_seed(seed)
expect_tensor = expect_local_op(
expected_h, w, device=expected_device, dtype=dtype
)
torch.manual_seed(seed)
new_op_st = op(st, dtype=dtype)
self.assertEqual(new_op_st.local_tensor(), expect_tensor)
@skipIfRocm
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_partial_world_size(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
# Validate local shard.
local_shards = st.local_shards()
if self.rank >= 2:
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
self.assertEqual((5, 20), local_shard.size())
else:
self.assertEqual(0, len(local_shards))
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(2, len(shards_metadata))
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank * 5, 0], shard_metadata.shard_offsets)
self.assertEqual([5, 20], shard_metadata.shard_sizes)
self.assertEqual(
f"rank:{shard_rank + 2}/cuda:{shard_rank + 2}",
str(shard_metadata.placement),
)
# Validate remote shards.
remote_shards = st.remote_shards()
if self.rank >= 2:
self.assertEqual(1, len(remote_shards))
else:
self.assertEqual(2, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
self.assertEqual(rpc_rank, remote_shard.owner().id)
shard = remote_shard.to_here()
self.assertEqual(
f"rank:{rpc_rank}/cuda:{rpc_rank}", str(shard.metadata.placement)
)
self.assertEqual((5, 20), shard.tensor.size())
@skipIfRocm
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_new_group(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
pg = dist.new_group(ranks=[1, 2, 3])
st = sharded_tensor.empty(spec, 10, 20, process_group=pg, init_rrefs=True)
# Validate local shard.
local_shards = st.local_shards()
if self.rank >= 2:
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
self.assertEqual((5, 20), local_shard.size())
else:
self.assertEqual(0, len(local_shards))
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(2, len(shards_metadata))
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank * 5, 0], shard_metadata.shard_offsets)
self.assertEqual([5, 20], shard_metadata.shard_sizes)
self.assertEqual(
f"rank:{shard_rank + 2}/cuda:{shard_rank + 2}",
str(shard_metadata.placement),
)
# Validate remote shards.
remote_shards = st.remote_shards()
if self.rank >= 2:
self.assertEqual(1, len(remote_shards))
else:
self.assertEqual(2, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(1, len(shards))
for remote_shard in shards:
shard = remote_shard.to_here()
self.assertEqual(rpc_rank, remote_shard.owner().id)
self.assertEqual(
f"rank:{rpc_rank}/cuda:{rpc_rank}", str(shard.metadata.placement)
)
self.assertEqual((5, 20), shard.tensor.size())
@skipIfRocm
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_multiple_local_shards(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 16, 20, init_rrefs=True)
# Validate local shards.
local_shards = st.local_shards()
self.assertEqual(2, len(local_shards))
for local_shard in local_shards:
self.assertEqual(
torch.device(f"cuda:{self.rank}"), local_shard.tensor.device
)
self.assertEqual((2, 20), local_shard.tensor.size())
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(8, len(shards_metadata))
for shard_idx, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_idx * 2, 0], shard_metadata.shard_offsets)
self.assertEqual([2, 20], shard_metadata.shard_sizes)
self.assertEqual(
f"rank:{shard_idx % 4}/cuda:{shard_idx % 4}",
str(shard_metadata.placement),
)
# Validate remote shards.
remote_shards = st.remote_shards()
self.assertEqual(3, len(remote_shards))
for rpc_rank, shards in remote_shards.items():
self.assertEqual(2, len(shards))
for remote_shard in shards:
shard = remote_shard.to_here()
self.assertEqual((2, 20), shard.tensor.size())
self.assertEqual(rpc_rank, remote_shard.owner().id)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharding_columns(self):
self.init_pg()
for dim in [1, -1]:
spec = ChunkShardingSpec(
dim=dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 10, 32)
# Validate local shard.
local_shards = st.local_shards()
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
self.assertEqual((10, 8), local_shard.size())
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([0, rank * 8], shard_metadata.shard_offsets)
self.assertEqual([10, 8], shard_metadata.shard_sizes)
self.assertEqual(
f"rank:{rank}/cuda:{rank}", str(shard_metadata.placement)
)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_invalid_sharding(self):
self.init_pg()
with self.assertRaisesRegex(
NotImplementedError, "does not support named dimension"
):
spec = ChunkShardingSpec(dim="H", placements=["rank:1/cuda:1"])
sharded_tensor.empty(spec, 10, 20)
for dim in [2, 3, 4, -3, -4, -5]:
spec = ChunkShardingSpec(dim=dim, placements=["rank:1/cuda:1"])
with self.assertRaisesRegex(ValueError, "Invalid sharding dim"):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:5/cuda:1"])
with self.assertRaisesRegex(
ValueError, "Global rank 5 does not exist in input process group"
):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
st = sharded_tensor.empty(spec, 10, 20)
tensor = torch.empty(10, 20)
with self.assertRaisesRegex(
RuntimeError, r".*not supported for ShardedTensor!$"
):
torch.add(st, tensor)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
with self.assertRaisesRegex(
ValueError, "Only torch.strided layout is currently supported"
):
sharded_tensor.empty(spec, 10, 20, layout=torch.sparse_coo)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
with self.assertRaisesRegex(
ValueError,
"Only torch.contiguous_format memory_format is currently supported",
):
sharded_tensor.empty(spec, 10, 20, memory_format=torch.channels_last)
spec = ChunkShardingSpec(dim=0, placements=["worker0/cuda:1"])
with self.assertRaisesRegex(
RuntimeError, "RPC framework needs to be initialized"
):
sharded_tensor.empty(spec, 10, 20)
spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:1"])
with self.assertRaisesRegex(
RuntimeError, "RPC Framework needs to be initialized"
):
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
with self.assertRaisesRegex(
RuntimeError, "ShardedTensor created with init_rrefs=False"
):
st = sharded_tensor.empty(spec, 10, 20)
st.remote_shards()
self.init_rpc()
spec = ChunkShardingSpec(dim=0, placements=["workerfoo/cuda:1"])
with self.assertRaisesRegex(ValueError, "Invalid worker name"):
sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_invalid_pg_rpc_ranks(self):
self.init_pg()
# Init RPC with different ranks.
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
_transports=tp_transports()
)
rpc_backend_options.init_method = f"file://{self.file_name}"
rank = (self.rank + 1) % self.world_size
rpc.init_rpc(
name=f"worker{rank}",
rank=rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
spec = ChunkShardingSpec(dim=0, placements=["rank:1/cuda:1"])
with self.assertRaisesRegex(
ValueError, "Default ProcessGroup and RPC ranks must be the same"
):
sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_insufficient_sharding_dims(self):
self.init_pg()
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
st = sharded_tensor.empty(spec, 2, 20)
# Validate local shard.
local_shards = st.local_shards()
if self.rank <= 1:
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
self.assertEqual((1, 20), local_shard.size())
else:
self.assertEqual(1, len(local_shards))
local_shard = local_shards[0].tensor
self.assertEqual(torch.device(f"cuda:{self.rank}"), local_shard.device)
self.assertEqual(local_shard.numel(), 0)
# Validate global metadata.
st_metadata = st.metadata()
shards_metadata = st_metadata.shards_metadata
self.assertEqual(4, len(shards_metadata))
for shard_rank, shard_metadata in enumerate(shards_metadata):
self.assertEqual([shard_rank, 0], shard_metadata.shard_offsets)
self.assertEqual(
f"rank:{shard_rank}/cuda:{shard_rank}", str(shard_metadata.placement)
)
if shard_rank <= 1:
self.assertEqual([1, 20], shard_metadata.shard_sizes)
else:
self.assertEqual([0, 20], shard_metadata.shard_sizes)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sharded_tensor_sizes(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
# Test with *args
st = sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
self.assertEqual(torch.Size([10, 20]), st.size())
# Test with single *args
st = sharded_tensor.empty(spec, 10, init_rrefs=True)
self.assertEqual(torch.Size([10]), st.size())
# Test with list
st = sharded_tensor.empty(spec, [10, 20], init_rrefs=True)
self.assertEqual(torch.Size([10, 20]), st.size())
# Test with tuple
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
self.assertEqual(torch.Size([10, 20]), st.size())
# Test with row size
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
self.assertEqual(st.size(0), 10)
# Test with col size
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
self.assertEqual(st.size(1), 20)
# Test with negative indexed size
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
self.assertEqual(st.size(-1), 20)
# Test with dim/ndim
self.assertEqual(st.dim(), 2)
self.assertEqual(st.ndim, 2)
# Test with invalid input
st = sharded_tensor.empty(spec, (10, 20), init_rrefs=True)
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
st.size(-3)
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
st.size(2)
with self.assertRaises(TypeError):
st = sharded_tensor.empty(spec, "foo")
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_state_dict(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
m = MyShardedModel1(spec)
# Test save
m._register_state_dict_hook(state_dict_hook)
buffer = io.BytesIO()
mod_state_dict = m.state_dict()
mod_state_keys = mod_state_dict.keys()
self.assertTrue("sharded_tensor1" in mod_state_keys)
self.assertTrue("submodule.sharded_tensor2" in mod_state_keys)
torch.save(mod_state_dict, buffer)
# Test load.
module_load = MyShardedModel1()
module_load._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
buffer.seek(0)
# weights_only=False as ShardedTensor weights_only is already tested in TestFSDPStateDict.test_torch_save_load
state_dict_deser = torch.load(buffer, weights_only=False)
module_load.load_state_dict(state_dict_deser, strict=False)
module_load._register_state_dict_hook(state_dict_hook)
loaded_dict_keys = module_load.state_dict().keys()
self.assertTrue("sharded_tensor1" in loaded_dict_keys)
self.assertTrue("submodule.sharded_tensor2" in loaded_dict_keys)
# Verify after load.
self.assertTrue(torch.equal(m.sharded_tensor1, module_load.sharded_tensor1))
self.assertTrue(
torch.equal(
m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2
)
)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_state_dict_new_group(self):
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:2/cuda:0",
"rank:3/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
pg = dist.new_group([2, 3])
m = MyShardedModel1(spec, pg)
# Test save
m._register_state_dict_hook(state_dict_hook)
buffer = io.BytesIO()
torch.save(m.state_dict(), buffer)
# Test load.
module_load = MyShardedModel1(spec=None, group=pg)
module_load._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
buffer.seek(0)
with load_with_process_group(pg):
# ShardedTensor weights_only is already tested in TestFSDPStateDict.test_torch_save_load
state_dict_deser = torch.load(buffer, weights_only=False)
module_load.load_state_dict(state_dict_deser, strict=False)
# Verify after load.
self.assertTrue(torch.equal(m.sharded_tensor1, module_load.sharded_tensor1))
self.assertTrue(
torch.equal(
m.submodule.sharded_tensor2, module_load.submodule.sharded_tensor2
)
)
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_state_dict_no_sharded_tensors(self):
# Verify hooks don't affect modules with no ShardedTensors.
m = torch.nn.Linear(10, 10)
# Test save
state_dict_before = m.state_dict()
m._register_state_dict_hook(state_dict_hook)
buffer = io.BytesIO()
torch.save(m.state_dict(), buffer)
self.assertEqual(state_dict_before, m.state_dict())
# Test load.
module_load = torch.nn.Linear(10, 10)
module_load._register_load_state_dict_pre_hook(pre_load_state_dict_hook, True)
buffer.seek(0)
state_dict_deser = torch.load(buffer)
module_load.load_state_dict(state_dict_deser, strict=False)
# Verify after load.
self.assertEqual(m.weight, module_load.weight)
self.assertEqual(m.bias, module_load.bias)
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_load_state_dict_errors(self):
self.init_rpc()
dist.init_process_group(
backend="nccl",
world_size=self.world_size,
rank=self.rank,
init_method=f"file://{self.file_name}",
)
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
m = MyShardedModel1(spec)
# Test save
m._register_state_dict_hook(state_dict_hook)
buffer = io.BytesIO()
torch.save(m.state_dict(), buffer)
pg = dist.new_group(ranks=[0, 2, 3])
buffer.seek(0)
if self.rank != 0:
with self.assertRaisesRegex(RuntimeError, "Local rank at save time was"):
with load_with_process_group(pg):
# ShardedTensor weights_only is already tested in TestFSDPStateDict.test_torch_save_load
torch.load(buffer, weights_only=False)
else:
with self.assertRaisesRegex(
RuntimeError, "Local world size at save time was"
):
with load_with_process_group(pg):
# ShardedTensor weights_only is already tested in TestFSDPStateDict.test_torch_save_load
torch.load(buffer, weights_only=False)
dist.destroy_process_group()
buffer.seek(0)
with self.assertRaisesRegex(
RuntimeError, "Need to initialize default process group"
):
# ShardedTensor weights_only is already tested in TestFSDPStateDict.test_torch_save_load
torch.load(buffer, weights_only=False)
rpc.shutdown()
@with_comms
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_cleanup(self):
def create_tensors():
spec = ChunkShardingSpec(
dim=0,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
sharded_tensor.empty(spec, 10, 20, init_rrefs=True)
sharded_tensor.empty(spec, 10, 20)
create_tensors()
self.assertEqual(0, len(sharded_tensor.api._sharded_tensor_map))
|
TestShardedTensorChunked
|
python
|
pytorch__pytorch
|
test/distributed/fsdp/test_fsdp_state_dict.py
|
{
"start": 50917,
"end": 53610
}
|
class ____(FSDPTest):
@property
def world_size(self):
return torch.accelerator.device_count()
@skip_if_lt_x_gpu(4)
def test_local_state_dict_reshard(self):
"""
This test demonstrates the ability to do resharding when using
local_state_dict. Although we do not recommend users to use
local_state_dict, there are still some corner cases that
using local_state_dict is a better solution.
"""
model = FSDP(Model(wrap_fsdp=True)).to(device_type)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
batch = torch.randn(4, 4, device=torch.accelerator.current_device_index())
output = model(batch)
loss = output.sum()
loss.backward()
optim.step()
with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT):
state_dict = model.state_dict()
rank = dist.get_rank()
new_pg = dist.new_group(ranks=[0, 1])
resharded_state_dict = {}
# Mimic resharding from 4 GPUs to 2 GPUs
for key, value in state_dict.items():
if isinstance(value, ShardedTensor):
full_flat_param = _all_gather_sharded_tensor(value)
if rank < 2:
full_numel = full_flat_param.size()
chunks = full_flat_param.chunk(2)
flat_param = chunks[rank]
shard_offset = 0 if rank == 0 else chunks[0].numel()
local_shards = [
Shard.from_tensor_and_offsets(flat_param, [shard_offset], rank)
]
sharded_tensor = init_from_local_shards(
local_shards, full_numel, process_group=new_pg
)
resharded_state_dict[key] = sharded_tensor
else:
if rank < 2:
resharded_state_dict[key] = value
if rank < 2:
model2 = FSDP(
Model(wrap_fsdp=True, process_group=new_pg), process_group=new_pg
).to(device_type)
with FSDP.state_dict_type(model2, StateDictType.LOCAL_STATE_DICT):
model2.load_state_dict(resharded_state_dict)
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT):
full_state_dict1 = model.state_dict()
if rank < 2:
with FSDP.state_dict_type(model2, StateDictType.FULL_STATE_DICT):
full_state_dict2 = model2.state_dict()
self.assertEqual(full_state_dict1, full_state_dict2)
instantiate_parametrized_tests(TestFSDPStateDict)
if __name__ == "__main__":
run_tests()
|
TestFSDPStateDict4GPUs
|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_blocks/test_general_blocks.py
|
{
"start": 12227,
"end": 13531
}
|
class ____(util.MdCase):
"""Test blocks with `md_in_html`."""
extension = ['pymdownx.blocks.tab', 'pymdownx.blocks.html', 'markdown.extensions.md_in_html']
extension_configs = {
'pymdownx.blocks.tab': {'alternate_style': True}
}
def test_md_in_html_inserted_correctly(self):
"""Test that `md_in_html` inserts under the correct target."""
self.check_markdown(
R"""
//// html | div.my-div
/// tab | TEST
<div class="mf-generated" markdown>
Hello I'm in a div which can contain **markdown**!
</div>
///
////
""",
"""
<div class="my-div">
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">TEST</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<div class="mf-generated">
<p>Hello I'm in a div which can contain <strong>markdown</strong>!</p>
</div>
</div>
</div>
</div>
</div>
""", # noqa: E501
True
)
|
TestBlocksMdInHTML
|
python
|
PyCQA__pylint
|
pylint/reporters/json_reporter.py
|
{
"start": 3269,
"end": 6395
}
|
class ____(BaseReporter):
name = "json2"
extension = "json2"
def display_reports(self, layout: Section) -> None:
"""Don't do anything in this reporter."""
def _display(self, layout: Section) -> None:
"""Do nothing."""
def display_messages(self, layout: Section | None) -> None:
"""Launch layouts display."""
output = {
"messages": [self.serialize(message) for message in self.messages],
"statistics": self.serialize_stats(),
}
print(json.dumps(output, indent=4), file=self.out)
@staticmethod
def serialize(message: Message) -> JSONMessage:
return JSONMessage(
type=message.category,
symbol=message.symbol,
message=message.msg or "",
messageId=message.msg_id,
confidence=message.confidence.name,
module=message.module,
obj=message.obj,
line=message.line,
column=message.column,
endLine=message.end_line,
endColumn=message.end_column,
path=message.path,
absolutePath=message.abspath,
)
@staticmethod
def deserialize(message_as_json: JSONMessage) -> Message:
return Message(
msg_id=message_as_json["messageId"],
symbol=message_as_json["symbol"],
msg=message_as_json["message"],
location=MessageLocationTuple(
abspath=message_as_json["absolutePath"],
path=message_as_json["path"],
module=message_as_json["module"],
obj=message_as_json["obj"],
line=message_as_json["line"],
column=message_as_json["column"],
end_line=message_as_json["endLine"],
end_column=message_as_json["endColumn"],
),
confidence=CONFIDENCE_MAP[message_as_json["confidence"]],
)
def serialize_stats(self) -> dict[str, str | int | dict[str, int]]:
"""Serialize the linter stats into something JSON dumpable."""
stats = self.linter.stats
counts_dict = {
"fatal": stats.fatal,
"error": stats.error,
"warning": stats.warning,
"refactor": stats.refactor,
"convention": stats.convention,
"info": stats.info,
}
# Calculate score based on the evaluation option
evaluation = self.linter.config.evaluation
try:
note: int = eval( # pylint: disable=eval-used
evaluation, {}, {**counts_dict, "statement": stats.statement or 1}
)
except Exception as ex: # pylint: disable=broad-except
score: str | int = f"An exception occurred while rating: {ex}"
else:
score = note
return {
"messageTypeCount": counts_dict,
"modulesLinted": len(stats.by_module),
"score": score,
}
def register(linter: PyLinter) -> None:
linter.register_reporter(JSONReporter)
linter.register_reporter(JSON2Reporter)
|
JSON2Reporter
|
python
|
pytorch__pytorch
|
test/quantization/jit/test_quantize_jit.py
|
{
"start": 1864,
"end": 59080
}
|
class ____(QuantizationTestCase):
"""Test graph mode quantization passes used by quantize_jit"""
def test_skip_dequant_constant_prop(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3).float()
def forward(self, x):
return self.conv(x)
m = torch.jit.script(M())
observer = default_per_channel_weight_observer.with_args(ch_axis=1)
qconfig_dict = {"": QConfig(activation=default_observer, weight=observer)}
m = prepare_jit(m, qconfig_dict)
data = torch.randn(1, 3, 10, 10, dtype=torch.float)
m(data)
m = convert_jit(m, debug=True)
freezed = torch.jit.freeze(m)
freezed(data)
# After freezing, weight becomes Constant.
# We have this pattern in the original graph: Constant f32_weight -> quant -> dequant
# After skipping dequant during Constant Propagation, the resulting graph will be:
# Constant int8_weight -> dequant
FileCheck().check_count("aten::quantize_per_tensor", 2, exactly=True).run(
freezed.graph
)
FileCheck().check_count("aten::quantize_per_channel", 0, exactly=True).run(
freezed.graph
)
FileCheck().check_count("aten::dequantize", 3, exactly=True).run(freezed.graph)
FileCheck().check("aten::quantize_per_tensor").check_next(
"aten::dequantize"
).check_not("aten::quantize_per_channel").check("aten::dequantize").check_next(
"aten::conv2d"
).check_next("aten::quantize_per_tensor").check_next("aten::dequantize").run(
freezed.graph
)
    def test_foldbn_trivial(self):
        """Fold a single Conv-BN pair (2d and 3d, traced and scripted) without changing numerics."""
        bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
        conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
        # Test trivial case
        class TestModule(torch.nn.Module):
            def __init__(self, dim):
                super().__init__()
                self.conv = conv_module[dim](1, 20, 5, 1)
                self.bn = bn_module[dim](num_features=20)
                self.bn.eps = 0.0023
            def forward(self, x):
                x = self.conv(x)
                x = self.bn(x)
                return x
        options = itertools.product([True, False], [2, 3])
        data = {2: torch.rand(1, 1, 6, 6), 3: torch.rand(1, 1, 6, 6, 6)}
        # Check that the transformation doesn't change numerics
        for tracing, dim in options:
            eager = TestModule(dim).eval()
            x = data[dim]
            scripted_or_traced = get_script_module(eager, tracing, x).eval()
            # Check that in the original script module's forward we have two
            # CallMethod nodes. One of them should be for conv.forward and the other
            # for bn.forward.
            FileCheck().check_count(
                'prim::CallMethod[name="forward"]', 2, exactly=True
            ).run(str(get_forward(scripted_or_traced._c).graph))
            # Run FoldConvBatchnorm pass.
            scripted_or_traced = fuse_conv_bn_jit(scripted_or_traced)
            # Check that after the pass one of the CallMethods is gone (supposedly,
            # the bn.forward).
            FileCheck().check_count(
                'prim::CallMethod[name="forward"]', 1, exactly=True
            ).run(str(get_forward_graph(scripted_or_traced._c)))
            # Check that the transformation doesn't change numerics
            self.assertEqual(eager(x), scripted_or_traced(x))
    def test_foldbn_trivial_nobias(self):
        """Fold Conv-BN where the conv has no bias; folding must synthesize a correct bias."""
        bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
        conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
        # Test trivial case
        class TestModule(torch.nn.Module):
            def __init__(self, dim):
                super().__init__()
                self.conv = conv_module[dim](1, 20, 5, 1, bias=False)
                self.bn = bn_module[dim](num_features=20)
                # to make sure new bias is not zero
                self.bn.eps = 0.0027
                self.bn.bias = torch.nn.Parameter(torch.rand([20]))
            def forward(self, x):
                x = self.conv(x)
                x = self.bn(x)
                return x
        options = itertools.product([True, False], [2, 3])
        data = {2: torch.rand(1, 1, 6, 6), 3: torch.rand(1, 1, 6, 6, 6)}
        for tracing, dim in options:
            eager = TestModule(dim).eval()
            x = data[dim]
            scripted_or_traced = get_script_module(eager, tracing, x).eval()
            # Check that in the original script module's forward we have two
            # CallMethod nodes. One of them should be for conv.forward and the other
            # for bn.forward.
            FileCheck().check_count(
                'prim::CallMethod[name="forward"]', 2, exactly=True
            ).run(str(get_forward_graph(scripted_or_traced._c)))
            # Run FoldConvBatchnorm pass.
            scripted_or_traced = fuse_conv_bn_jit(scripted_or_traced)
            # Check that after the pass one of the CallMethods is gone (supposedly,
            # the bn.forward).
            FileCheck().check_count(
                'prim::CallMethod[name="forward"]', 1, exactly=True
            ).run(str(get_forward_graph(scripted_or_traced._c)))
            # Check that the transformation doesn't change numerics
            self.assertEqual(eager(x), scripted_or_traced(x))
    def test_foldbn_in_submodule(self):
        """Conv-BN patterns inside nested submodules must also be found and folded."""
        bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
        conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
        # Test that we find Conv-BN patterns in submodules
        class SubModule(torch.nn.Module):
            def __init__(self, dim):
                super().__init__()
                self.conv = conv_module[dim](1, 20, 5, 1)
                self.bn = bn_module[dim](num_features=20)
            def forward(self, x):
                x = self.conv(x)
                x = self.bn(x)
                return x
        class TestModule(torch.nn.Module):
            def __init__(self, dim):
                super().__init__()
                self.sub = SubModule(dim)
            def forward(self, x):
                x = self.sub(x)
                return x
        options = itertools.product([True, False], [2, 3])
        data = {2: torch.rand(1, 1, 10, 10), 3: torch.rand(1, 1, 10, 10, 10)}
        for tracing, dim in options:
            eager = TestModule(dim).eval()
            x = data[dim]
            scripted_or_traced = get_script_module(eager, tracing, x).eval()
            # before folding: conv.forward and bn.forward inside the submodule
            FileCheck().check_count(
                'prim::CallMethod[name="forward"]', 2, exactly=True
            ).run(str(get_forward_graph(scripted_or_traced.sub._c)))
            scripted_or_traced = fuse_conv_bn_jit(scripted_or_traced)
            # after folding: only one call remains and numerics are unchanged
            FileCheck().check_count(
                'prim::CallMethod[name="forward"]', 1, exactly=True
            ).run(str(get_forward_graph(scripted_or_traced.sub._c)))
            self.assertEqual(eager(x), scripted_or_traced(x))
def test_foldbn_shared_classtype(self):
bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
class TestModule(torch.nn.Module):
def __init__(self, dim, bias=False):
super().__init__()
self.conv1 = conv_module[dim](5, 5, 3, bias=bias)
self.bn1 = bn_module[dim](num_features=5)
self.bn1.running_mean.fill_(-0.2)
self.bn1.bias = torch.nn.Parameter(torch.rand([5]))
# to make sure new bias is not zero
self.bn1.eps = 0.0023
self.conv2 = conv_module[dim](5, 5, 3, bias=bias)
self.bn2 = bn_module[dim](num_features=5)
self.bn2.eps = 0.0029
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
return x
options = itertools.product([True, False], [2, 2], [True, False])
data = {2: torch.rand(1, 5, 6, 6), 3: torch.rand(1, 5, 6, 6, 6)}
for tracing, dim, bias in options:
eager = TestModule(dim, bias).eval()
x = data[dim]
scripted_or_traced = get_script_module(eager, tracing, x)
folded = fuse_conv_bn_jit(scripted_or_traced)
self.assertEqual(eager(x), scripted_or_traced(x))
    def test_foldbn_no_fusion(self):
        """Test that we don't fuse the cases when module type does not match"""
        class CustomConv(torch.nn.Module):
            def forward(self, x):
                return x
        class CustomBn(torch.nn.Module):
            def forward(self, x):
                return x
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = CustomConv()
                self.bn = CustomBn()
            def forward(self, x):
                return self.bn(self.conv(x))
        m = torch.jit.script(M())
        m = fuse_conv_bn_jit(m)
        # both submodule calls must survive: nothing should have been fused
        FileCheck().check_count("prim::CallMethod", 2, exactly=True).run(m.graph)
    @set_default_dtype(torch.double)
    def test_foldbn_complex_cases(self):
        """Fold Conv-BN across stacked layers with bias/affine/depth variations."""
        # This test case attempt to try combinations of conv2d/conv3d with bias/nobias
        # as well as BatchNorm with affine/no-affine along with varying the
        # number of layers.
        # this only works when default dtype is double
        bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
        conv_module = {2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
        class SubModule(torch.nn.Module):
            def __init__(self, dim, num_blocks, enable_bias, enable_affine):
                super().__init__()
                layers = []
                for _ in range(num_blocks):
                    layers.append(conv_module[dim](20, 20, 5, 1, bias=enable_bias))
                    bn_obj = bn_module[dim](num_features=20, affine=enable_affine)
                    if enable_affine:
                        bn_obj.weight = torch.nn.Parameter(
                            torch.rand_like(bn_obj.weight)
                        )
                        bn_obj.bias = torch.nn.Parameter(torch.rand_like(bn_obj.bias))
                    bn_obj.running_mean = torch.rand_like(bn_obj.running_mean)
                    bn_obj.running_var = torch.rand_like(bn_obj.running_var)
                    layers.append(bn_obj)
                self.layers = nn.Sequential(*layers)
            def forward(self, x):
                return self.layers(x)
        class TestModule(torch.nn.Module):
            def __init__(self, dim, num_blocks, enable_bias, enable_affine):
                super().__init__()
                self.sub = SubModule(dim, num_blocks, enable_bias, enable_affine)
            def forward(self, x):
                x = self.sub(x)
                return x
        options = itertools.product(
            [True, False], [2, 3], [True, False], [True, False], [1, 2]
        )
        data = {2: torch.rand(1, 20, 10, 10), 3: torch.rand(1, 20, 10, 10, 10)}
        for tracing, dim, enable_bias, enable_bn_affine, num_layers in options:
            eager = TestModule(dim, num_layers, enable_bias, enable_bn_affine).eval()
            x = data[dim]
            scripted_or_traced = get_script_module(eager, tracing, x).eval()
            # each conv+bn pair contributes two forward calls before folding
            FileCheck().check_count(
                'prim::CallMethod[name="forward"]', num_layers * 2, exactly=True
            ).run(str(get_forward_graph(scripted_or_traced.sub.layers._c)))
            scripted_or_traced = fuse_conv_bn_jit(scripted_or_traced)
            # and exactly one call per pair after folding
            FileCheck().check_count(
                'prim::CallMethod[name="forward"]', num_layers, exactly=True
            ).run(str(get_forward_graph(scripted_or_traced.sub.layers._c)))
            self.assertEqual(eager(x), scripted_or_traced(x))
    def test_fuse_linear(self):
        """Fuse matmul(+add) into aten::linear, preserving source ranges; 3-d weights stay unfused."""
        class FunctionalLinear(torch.nn.Module):
            def __init__(self, weight, bias):
                super().__init__()
                self.weight = weight
                self.bias = bias
            def forward(self, x):
                res = torch.matmul(x, self.weight.t())
                if self.bias is not None:
                    res.add_(self.bias)
                return res
        x1 = torch.rand(3)
        w1 = torch.rand(5, 3)
        b1 = torch.rand(5)
        x2 = torch.rand(5, 5)
        w2 = torch.rand(5, 5)
        b2 = torch.rand(5)
        x3 = torch.rand(5, 5, 5)
        w3 = torch.rand(5, 5)
        b3 = torch.rand(5)
        for has_bias, (x, weight, b) in itertools.product(
            [True, False], [(x1, w1, b1), (x2, w2, b2), (x3, w3, b3)]
        ):
            bias = b if has_bias else None
            model = torch.jit.trace(FunctionalLinear(weight, bias), [x])
            # record the source range of the matmul so we can check the fused
            # linear node keeps it
            for node in model.graph.nodes():
                if node.kind() == "aten::matmul":
                    source_range_1 = node.sourceRange()
            torch._C._jit_pass_fuse_linear(model.graph)
            for node in model.graph.nodes():
                if node.kind() == "aten::linear":
                    source_range_2 = node.sourceRange()
            FileCheck().check("aten::linear").run(model.graph)
            check_not = ["aten::matmul", "aten::addmm", "aten::add_", "aten::t("]
            for cn in check_not:
                FileCheck().check_not(cn).run(model.graph)
            # make sure it runs
            self.assertTrue(source_range_1 == source_range_2)
            model(x)
        # check matmuls are not fused
        class Matmul(torch.nn.Module):
            def __init__(self, weight):
                super().__init__()
                self.weight = weight
            def forward(self, x):
                return torch.matmul(x, self.weight)
        x = torch.rand(5, 6, 5)
        w = torch.rand(5, 5, 100)
        model = torch.jit.trace(Matmul(w), [x])
        torch._C._jit_pass_fuse_linear(model.graph)
        # check 3d matmul is not fused
        FileCheck().check("aten::matmul").run(model.graph)
        FileCheck().check_not("aten::linear").run(model.graph)
        # make sure it runs
        model(x)
    def test_insert_observers(self):
        """prepare_jit inserts observers for conv input/output and its weight."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 5, 3)
            def forward(self, x):
                return self.conv(x)
        m = torch.jit.script(M())
        qconfig_dict = {"": default_qconfig}
        m = prepare_jit(m, qconfig_dict)
        # for input and output of conv
        assert len(attrs_with_prefix(m, "_observer_")) == 2
        # for weight
        assert len(attrs_with_prefix(m.conv, "_observer_")) == 1
def test_insert_observers_interface(self):
@torch.jit.interface
class SubInterface(torch.nn.Module):
def addOne(self, inp) -> torch.Tensor:
pass
class Sub(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = torch.nn.Linear(5, 5)
def addOne(self, inp):
return self.fc(inp) + 1
def forward(self, x):
return self.addOne(x)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
self.sub = Sub()
def forward(self, x):
return self.sub(self.conv(x))
m = torch.jit.script(M())
qconfig_dict = {"sub.conv": default_qconfig}
m = prepare_jit(m, qconfig_dict)
    def test_insert_observers_interface_unshare_type(self):
        """Smoke test: prepare_jit with two interface-typed submodules that must not share types."""
        @torch.jit.interface
        class OperatorIf(nn.Module):
            def forward(self, inp: torch.Tensor) -> torch.Tensor:
                pass
        class Operator(nn.Module):
            def __init__(self, a):
                super().__init__()
                self.a = a
            def forward(self, inp: torch.Tensor) -> torch.Tensor:
                return self.a * (inp + self.a)
        class Inner(nn.Module):
            op: OperatorIf
            def __init__(self, op):
                super().__init__()
                self.op = op
            def forward(self, inp):
                return self.op(inp)
        class Outer(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                # int vs float `a` forces the two Inner instances apart
                self.inner_a = Inner(Operator(1))
                self.inner_b = Inner(Operator(3.0))
            def forward(self, inp):
                return self.inner_a(inp) + self.inner_b(inp)
        qconfig_dict = {"inner_a": default_qconfig, "inner_b": default_qconfig}
        eager_model = Outer()
        for tracing in [True, False]:
            x = torch.rand(3)
            script_model = get_script_module(eager_model, tracing, x)
            # make sure it runs
            prepare_jit(script_model, qconfig_dict)
    def test_insert_observers_child_qconfig(self):
        """Only the submodule named in qconfig_dict gets observed; siblings do not."""
        class Sub(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.fc = torch.nn.Linear(5, 5)
            def forward(self, x):
                return self.fc(x)
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 5, 3)
                self.sub = Sub()
            def forward(self, x):
                return self.sub(self.conv(x))
        m = torch.jit.script(M())
        qconfig_dict = {"sub.fc": default_qconfig}
        m = prepare_jit(m, qconfig_dict)
        # input and output of sub
        assert len(attrs_with_prefix(m, "_observer_")) == 2
        # not quantized
        assert len(attrs_with_prefix(m.conv, "_observer_")) == 0
        # no observers since we observe in the outer most call site
        assert len(attrs_with_prefix(m.sub, "_observer_")) == 0
        # weight of linear
        assert len(attrs_with_prefix(m.sub.fc, "_observer_")) == 1
    @unittest.skipUnless(
        "fbgemm" in torch.backends.quantized.supported_engines,
        " Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
        " with instruction set support avx2 or newer.",
    )
    def test_insert_observers_skip_values(self):
        """No observers between fusable pairs (conv-relu, add-relu); only pattern boundaries observed."""
        class ConvFunctionalReLU(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 5, 3)
            def forward(self, x):
                return F.relu(self.conv(x))
        class ConvReLUModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 5, 3)
                self.relu = torch.nn.ReLU()
            def forward(self, x):
                return self.relu(self.conv(x))
        class AddReLUModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.relu = torch.nn.ReLU()
                self.conv = torch.nn.Conv2d(3, 3, 3).float()
            def forward(self, x):
                out = self.conv(x)
                out += x
                return self.relu(out)
        class AddFunctionalReLU(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 3).float()
            def forward(self, x):
                out = self.conv(x)
                out += x
                return F.relu(out)
        # local helper; shadows the module-level attrs_with_prefix on purpose
        def attrs_with_prefix(module, prefix):
            return [x for x, _ in module._modules._c.items() if x.startswith(prefix)]
        qconfig_dict = {"": default_qconfig}
        m = torch.jit.script(ConvFunctionalReLU())
        m = prepare_jit(m, qconfig_dict)
        # observer for weight of conv
        assert len(attrs_with_prefix(m.conv, "_observer_")) == 1
        # observer for input of conv and output of relu
        assert len(attrs_with_prefix(m, "_observer_")) == 2
        m = torch.jit.script(ConvReLUModule())
        m = prepare_jit(m, qconfig_dict)
        # observer for input of conv and output of relu
        assert len(attrs_with_prefix(m, "_observer_")) == 2
        # observer for weight of conv
        assert len(attrs_with_prefix(m.conv, "_observer_")) == 1
        # observer for output of relu
        assert len(attrs_with_prefix(m.relu, "_observer_")) == 0
        m = torch.jit.script(AddReLUModule())
        qconfig_dict = {"": default_qconfig}
        m = prepare_jit(m, qconfig_dict)
        assert len(attrs_with_prefix(m, "_observer")) == 3
        assert len(attrs_with_prefix(m.relu, "_observer")) == 0
        # the in-place add result itself is not observed; relu output is
        FileCheck().check("aten::add_").check_not(
            'Observer = prim::GetAttr[name="_observer_'
        ).check("ReLU = prim::GetAttr").run(str(get_forward_graph(m._c)))
        m = torch.jit.script(AddFunctionalReLU())
        qconfig_dict = {"": default_qconfig}
        m = prepare_jit(m, qconfig_dict)
        assert len(attrs_with_prefix(m, "_observer")) == 3
        FileCheck().check("aten::add_").check_not(
            'Observer = prim::GetAttr[name="_observer_'
        ).check("CallFunction").check('Observer = prim::GetAttr[name="_observer_').run(
            str(get_forward_graph(m._c))
        )
def test_insert_observers_weight_dtype(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
def forward(self, x):
return F.relu(self.conv(x))
m = torch.jit.script(M())
qconfig_dict = {"": default_qconfig}
m = prepare_jit(m, qconfig_dict)
activation_dtypes = {
obs.getattr("dtype")
for x, obs in m._modules._c.items()
if x.startswith("_observer_")
}
weight_dtypes = {
obs.getattr("dtype")
for x, obs in m.conv._modules._c.items()
if x.startswith("_observer_")
}
assert len(activation_dtypes) == 1, "Expected to have 1 activation dtype"
assert len(weight_dtypes) == 1, "Expected to have 1 weight dtype"
assert next(iter(activation_dtypes)) != next(iter(weight_dtypes)), (
"Expected activation dtype to "
)
" be different from wegiht dtype"
    def test_insert_observers_for_reused_weight(self):
        """A weight shared by two F.conv2d calls still gets correctly observed."""
        class M(torch.nn.Module):
            def forward(self, x, y, weight):
                x = F.conv2d(x, weight)
                y = F.conv2d(y, weight)
                return x + y
        m = torch.jit.script(M()).eval()
        m = prepare_jit(m, {"": default_qconfig})
        # 3 for x, y, weight, one for output of each F.conv2d and one for output of add
        assert len(attrs_with_prefix(m, "_observer")) == 6
    def test_insert_observers_shared_class_type(self):
        """Two convs sharing one class type must not be instrumented twice."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 5, 3).float()
                self.conv2 = torch.nn.Conv2d(3, 5, 3).float()
            def forward(self, x):
                return self.conv2(self.conv1(x))
        m = torch.jit.script(M())
        qconfig_dict = {"": default_qconfig}
        m = prepare_jit(m, qconfig_dict)
        # conv1 and conv2 shares the same type, we need to
        # make sure we didn't quantize the type twice
        conv1_observers = attrs_with_prefix(m.conv1, "_observer_")
        conv2_observers = attrs_with_prefix(m.conv2, "_observer_")
        assert len(conv1_observers) == 1, "Expected to have 1 observer submodules"
        assert len(conv2_observers) == 1, "Expected to have 1 observer submodules"
        assert conv1_observers == conv2_observers, (
            "Expect conv1 and conv2 to have same observers since the class type is shared"
        )
    def test_insert_observers_for_general_ops(self):
        """Make sure we skip observers for ops that doesn't require
        observation, e.g. flatten
        """
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 3).float()
            def forward(self, x):
                x = self.conv(x)
                x = torch.flatten(x)
                return x
        m = torch.jit.script(M())
        qconfig_dict = {"": default_qconfig}
        m = prepare_jit(m, qconfig_dict)
        # input and output of conv
        assert len(attrs_with_prefix(m, "_observer_")) == 2
        # observer before conv, observer after conv, but none after flatten
        FileCheck().check('Observer = prim::GetAttr[name="_observer_').check(
            'prim::GetAttr[name="conv"]'
        ).check("prim::CallMethod").check(
            'Observer = prim::GetAttr[name="_observer_'
        ).check("aten::flatten").check_not(
            'Observer = prim::GetAttr[name="_observer_'
        ).run(m.graph)
    # TODO: this is too long, split this to test_insert_observers.py and remove
    # insrt_observers prefix
    def test_insert_observers_propagate_observed(self):
        """Make sure we propagate observed property through general ops"""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 3, 3).float()
                self.conv2 = torch.nn.Conv2d(3, 3, 3).float()
            def forward(self, x):
                x = self.conv1(x)
                x = torch.flatten(x)
                # we don't want to insert observer for input of self.conv2
                # because output of self.conv1 is already observed
                x = self.conv2(x)
                return x
        m = torch.jit.script(M())
        qconfig_dict = {"": default_qconfig}
        m = prepare_jit(m, qconfig_dict)
        # input and output of conv
        assert len(attrs_with_prefix(m, "_observer_")) == 3
        # observer order: before conv1, after conv1, none after flatten,
        # then only after conv2 (its input is already observed)
        FileCheck().check('Observer = prim::GetAttr[name="_observer_').check(
            'prim::GetAttr[name="conv1"]'
        ).check("prim::CallMethod").check(
            'Observer = prim::GetAttr[name="_observer_'
        ).check("aten::flatten").check_not(
            'Observer = prim::GetAttr[name="_observer_'
        ).check('prim::GetAttr[name="conv2"]').check(
            'Observer = prim::GetAttr[name="_observer_'
        ).run(m.graph)
    def test_insert_observers_propagate_observed_in_submodule(self):
        """Make sure we propagate observed property through general ops"""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 3, 3).float()
                self.conv2 = torch.nn.Conv2d(3, 3, 3).float()
                self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
            def forward(self, x):
                x = self.conv1(x)
                x = self.avgpool(x)
                # we don't want to insert observer for input of self.conv2
                # because output of self.conv1 is already observed
                x = self.conv2(x)
                return x
        m = torch.jit.script(M())
        qconfig_dict = {"": default_qconfig}
        m = prepare_jit(m, qconfig_dict)
        # input and output of conv
        assert len(attrs_with_prefix(m, "_observer_")) == 3
        # the avgpool submodule call must not be followed by an observer
        FileCheck().check('Observer = prim::GetAttr[name="_observer_').check(
            'prim::GetAttr[name="conv1"]'
        ).check("prim::CallMethod").check(
            'Observer = prim::GetAttr[name="_observer_'
        ).check("prim::CallMethod").check_not(
            'Observer = prim::GetAttr[name="_observer_'
        ).check('prim::GetAttr[name="conv2"]').check(
            'Observer = prim::GetAttr[name="_observer_'
        ).run(m.graph)
    def test_insert_observers_propagate_observed_for_function(self):
        """Observed property propagates through a scripted free function (channel_shuffle)."""
        def channel_shuffle(x: torch.Tensor, groups: int) -> torch.Tensor:
            batchsize, num_channels, height, width = x.data.size()
            channels_per_group = num_channels // groups
            # reshape
            x = x.view(batchsize, groups, channels_per_group, height, width)
            x = torch.transpose(x, 1, 2).contiguous()
            # flatten
            x = x.view(batchsize, -1, height, width)
            return x
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 3, 1).float()
                self.conv2 = torch.nn.Conv2d(3, 3, 1).float()
            def forward(self, x):
                x = self.conv1(x)
                x = channel_shuffle(x, 1)
                x = self.conv2(x)
                return x
        # NOTE(review): `data` is built but never used in this test
        data = [
            (
                torch.rand((1, 3, 10, 10), dtype=torch.float),
                torch.randint(0, 1, (1,), dtype=torch.long),
            )
            for _ in range(2)
        ]
        m = torch.jit.script(M()).eval()
        m = prepare_jit(m, {"": default_qconfig})
        # we want to test that channel_shuffle is going to pass
        # the observed property from the output of conv1 to input of conv2
        # so that we don't insert observers for input of conv2
        assert (
            len(
                attrs_with_prefix(
                    m,
                    "_observer_",
                )
            )
            == 3
        )
    def test_insert_observers_for_if(self):
        """Observer placement for prim::If blocks, under both tracing and scripting."""
        class QuantProp(torch.nn.Module):
            def __init__(self, use_skip):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 1).float()
                self.use_skip = use_skip
            def forward(self, x):
                if self.use_skip:
                    x = self.conv(x)
                    return torch.reshape(x, x.shape)
                else:
                    x = self.conv(x)
                    return torch.reshape(x, x.shape)
        class Res(torch.nn.Module):
            def __init__(self, use_skip):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 1).float()
                self.use_skip = use_skip
            def forward(self, x):
                if self.use_skip:
                    return self.conv(x)
                else:
                    return self.conv(x)
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.quant_prop = QuantProp(True)
                self.res = Res(False)
            def forward(self, x):
                x = self.quant_prop(x)
                x = self.res(x)
                return x
        data = [torch.rand(1, 3, 10, 10, dtype=torch.float)]
        # expected observer counts per (tracing) mode: [top, quant_prop, res]
        result = {False: [1, 2, 2], True: [2, 1, 0]}
        for tracing in [True, False]:
            if tracing:
                m = torch.jit.trace(M(), data).eval()
            else:
                m = torch.jit.script(M()).eval()
            m = prepare_jit(m, {"": default_qconfig})
            assert (
                len(
                    attrs_with_prefix(
                        m,
                        "_observer_",
                    )
                )
                == result[tracing][0]
            )
            assert (
                len(
                    attrs_with_prefix(
                        m.quant_prop,
                        "_observer_",
                    )
                )
                == result[tracing][1]
            )
            assert (
                len(
                    attrs_with_prefix(
                        m.res,
                        "_observer_",
                    )
                )
                == result[tracing][2]
            )
    def test_insert_observers_for_nested_if(self):
        """Observer placement for a prim::If nested inside another prim::If."""
        class Res(torch.nn.Module):
            def __init__(self, use_skip):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 1).float()
                self.cond = use_skip
                self.use_skip = use_skip
            def forward(self, x):
                if self.use_skip:
                    if self.cond:
                        return self.conv(x)
                    else:
                        return self.conv(x)
                else:
                    return self.conv(x)
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.res1 = Res(True)
                self.res2 = Res(False)
            def forward(self, x):
                x = self.res1(x)
                x = self.res2(x)
                return x
        data = torch.rand((1, 3, 10, 10), dtype=torch.float)
        # expected top-level observer count keyed by tracing mode
        result = {True: 3, False: 1}
        for tracing in [True, False]:
            if tracing:
                m = torch.jit.trace(M(), data).eval()
            else:
                m = torch.jit.script(M()).eval()
            m = prepare_jit(m, {"": default_qconfig})
            assert len(attrs_with_prefix(m, "_observer_")) == result[tracing]
    def test_insert_observers_for_if_consistent_observation(self):
        """check quantization for if works as long as
        output of all branches are quantized/observed consistently
        """
        class M(torch.nn.Module):
            def __init__(self, cond):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 3).float()
                self.cond = cond
            def forward(self, x):
                x = self.conv(x)
                # x is already observed
                if self.cond:
                    x = torch.flatten(x)
                return x
        class M2(torch.nn.Module):
            def __init__(self, cond):
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 3, 3).float()
                self.conv2 = torch.nn.Conv2d(3, 3, 3).float()
                self.cond = cond
            def forward(self, x):
                x = self.conv1(x)
                if self.cond:
                    x = self.conv2(x)
                    # x will be observed in the branch
                else:
                    x = torch.flatten(x)
                # since output for both branch are quantized
                # the if node is quantized consistently
                return x
        data = torch.rand((1, 3, 5, 5), dtype=torch.float)
        options = list(itertools.product([True, False], [True, False]))
        for cond, tracing in options:
            if tracing:
                m = torch.jit.trace(M(cond), data)
            else:
                m = torch.jit.script(M(cond))
            m = prepare_jit(m, {"": default_qconfig})
            assert len(attrs_with_prefix(m, "_observer_")) == 2
        for cond, tracing in options:
            if tracing:
                m = torch.jit.trace(M2(cond), data)
            else:
                m = torch.jit.script(M2(cond))
            m = prepare_jit(m, {"": default_qconfig})
            # tracing with cond=False drops the conv2 branch entirely
            num_observers = 2 if tracing and not cond else 3
            assert len(attrs_with_prefix(m, "_observer_")) == num_observers
    def test_insert_quant_dequant(self):
        """Debug convert inserts quant/dequant pairs, per-tensor and per-channel."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 5, 3).float()
            def forward(self, x):
                return self.conv(x)
        for is_per_channel in [True, False]:
            m = torch.jit.script(M())
            observer = (
                default_per_channel_weight_observer.with_args(ch_axis=1)
                if is_per_channel
                else default_observer
            )
            qconfig_dict = {"": QConfig(activation=observer, weight=observer)}
            m = prepare_jit(m, qconfig_dict)
            data = torch.randn(1, 3, 10, 10, dtype=torch.float)
            m(data)
            m = convert_jit(m, debug=True)
            assert len(m._modules._c.items()) == 1, (
                "Expected to have single submodule of conv"
            )
            # make sure the quantized model is executable
            m(data)
            quant_func = (
                "aten::quantize_per_channel"
                if is_per_channel
                else "aten::quantize_per_tensor"
            )
            # input, weight and output are all quantized -> 3 quant calls
            FileCheck().check_count(quant_func, 3, exactly=True).run(m.graph)
    def test_insert_quant_dequant_shared_class_type(self):
        """Quant/dequant insertion and observer cleanup when convs share one class type."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 3, 3).float()
                self.conv2 = torch.nn.Conv2d(3, 3, 3).float()
            def forward(self, x):
                return self.conv2(self.conv1(x))
        for is_per_channel in [True, False]:
            m = torch.jit.script(M())
            observer = (
                default_per_channel_weight_observer.with_args(ch_axis=1)
                if is_per_channel
                else default_observer
            )
            qconfig = QConfig(activation=observer, weight=observer)
            qconfig_dict = {"": qconfig}
            m = prepare_jit(m, qconfig_dict)
            # observers for input, output and value between conv1/conv2
            assert len(attrs_with_prefix(m, "_observer_")) == 3, (
                "Expected to have 3 observers"
            )
            # observer for weight
            assert len(attrs_with_prefix(m.conv1, "_observer_")) == 1, (
                "Expected to have 1 observers"
            )
            # observer for weight
            assert len(attrs_with_prefix(m.conv2, "_observer_")) == 1, (
                "Expected to have 1 observers"
            )
            data = torch.randn(1, 3, 10, 10, dtype=torch.float)
            m(data)
            m = convert_jit(m, debug=True)
            m(data)
            # conversion must keep the shared class type shared
            assert m.conv1._c._type() == m.conv2._c._type()
            # check all observers have been removed
            assert len(attrs_with_prefix(m, "_observer_")) == 0, (
                "Expected to have 0 observers"
            )
            assert len(attrs_with_prefix(m.conv1, "_observer_")) == 0, (
                "Expected to have 0 observers"
            )
            assert len(attrs_with_prefix(m.conv2, "_observer_")) == 0, (
                "Expected to have 0 observers"
            )
            quant_func = (
                "aten::quantize_per_channel"
                if is_per_channel
                else "aten::quantize_per_tensor"
            )
            for module in ["conv1", "conv2"]:
                conv = m._c.getattr(module)
                # quantize weight
                FileCheck().check(quant_func).check_next("aten::dequantize").check(
                    'prim::CallMethod[name="_conv_forward"]'
                ).check("return").run(get_forward_graph(conv))
                # no quantize node in _conv_forward
                FileCheck().check_not(quant_func).check("aten::conv2d").check_not(
                    quant_func
                ).check("return").run(conv._get_method("_conv_forward").graph)
def test_dedup_module_uses(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.relu(x)
x -= 0.5
return self.relu(x)
data = torch.randn((2, 2))
m = torch.jit.script(M())
ref_res = m(data)
assert (
len([x for x, _ in m._modules._c.items() if x.startswith("relu")]) == 1
), "Expected to have 1 relu modules after dedup module uses"
torch._C._jit_pass_dedup_module_uses(m._c)
m = torch.jit._recursive.wrap_cpp_module(m._c)
res = m(data)
assert (
len([x for x, _ in m._modules._c.items() if x.startswith("relu")]) == 2
), "Expected to have 2 relu modules after dedup module uses"
self.assertEqual(res, ref_res)
    def test_replicate_dequantize(self):
        """A dequantize whose result has two uses is replicated once per use."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 1).float()
            def forward(self, x):
                x = torch.dequantize(x)
                r = self.conv(x)
                r += x
                return r
        x = torch.randn([1, 3, 10, 10], dtype=torch.float)
        x = torch.quantize_per_tensor(x, 0.5, 1, torch.quint8)
        m = torch.jit.script(M())
        ref_res = m(x)
        FileCheck().check_count("aten::dequantize", 1, exactly=True).run(m.graph)
        torch._C._jit_pass_replicate_dequantize(m.graph)
        # one dequantize per use after the pass; numerics unchanged
        FileCheck().check_count("aten::dequantize", 2, exactly=True).run(m.graph)
        res = get_forward(m._c)(x)
        self.assertEqual(res, ref_res)
    def test_replicate_dequantize_in_block(self):
        """Replicated dequantize nodes move into the prim::If branches that use them."""
        class M(torch.nn.Module):
            def __init__(self, cond):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 1).float()
                self.cond = cond
            def forward(self, x):
                x = torch.dequantize(x)
                if self.cond:
                    x = self.conv(x)
                else:
                    x = x + 3
                return x
        x = torch.randn([1, 3, 10, 10], dtype=torch.float)
        x = torch.quantize_per_tensor(x, 0.5, 1, torch.quint8)
        m = torch.jit.script(M(True))
        ref_res = m(x)
        FileCheck().check_count("aten::dequantize", 1, exactly=True).run(m.graph)
        torch._C._jit_pass_replicate_dequantize(m.graph)
        FileCheck().check_count("aten::dequantize", 2, exactly=True).run(m.graph)
        # check dequantize is right before CallMethod of conv
        FileCheck().check("aten::dequantize").check_next("CallMethod").run(m.graph)
        # check dequantize is right before add
        FileCheck().check("aten::dequantize").check("aten::dequantize").check_next(
            "aten::add"
        ).run(m.graph)
        res = get_forward(m._c)(x)
        self.assertEqual(res, ref_res)
    def test_swap_functional_linear(self):
        """The swap pass replaces a CallFunction named "linear" with aten::linear."""
        # TODO: This pass replaces any function called "linear" with "aten::linear"
        # No longer necessary, and also quite surprising
        def linear(input, weight, bias):
            return torch.nn.functional.linear(input, weight, bias)
        class M(torch.nn.Module):
            def forward(self, x, weight, bias):
                x = torch.dequantize(x)
                weight = torch.dequantize(weight)
                x = linear(x, weight, bias)
                x = torch.quantize_per_tensor(
                    x, scale=1.0, zero_point=0, dtype=torch.quint8
                )
                return x
        x = torch.rand((10, 5), dtype=torch.float)
        x = torch.quantize_per_tensor(x, scale=0.5, zero_point=1, dtype=torch.quint8)
        weight = torch.rand((5, 5), dtype=torch.float)
        weight = torch.quantize_per_tensor(
            weight, scale=0.5, zero_point=1, dtype=torch.qint8
        )
        bias = torch.rand((5), dtype=torch.float)
        m = torch.jit.script(M())
        ref_res = m(x, weight, bias)
        FileCheck().check("CallFunction").run(m.graph)
        torch._C._jit_pass_swap_functional_linear(m.graph)
        # the function call is gone, replaced by aten::linear; numerics unchanged
        FileCheck().check("aten::linear").check_not("CallFunction").run(m.graph)
        res = m(x, weight, bias)
        self.assertEqual(res, ref_res)
    def test_replicate_quantize_for_if(self):
        """We want to move quantize nodes for output of prim::If
        inside the prim::If blocks so that we can match quantization
        patterns.
        """
        class Res(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 1).float()
                self.conv2 = torch.nn.Conv2d(3, 3, 1).float()
                self.use_skip = True
            def forward(self, x: torch.Tensor, cond: bool) -> torch.Tensor:
                # to avoid being frozen
                self.use_skip = cond
                if self.use_skip:
                    return self.conv(x)
                else:
                    return self.conv2(x)
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.res1 = Res()
                self.res2 = Res()
            def forward(self, x):
                x = self.res1(x, True)
                x = self.res2(x, False)
                return x
        data = [[torch.rand((1, 3, 10, 10), dtype=torch.float)]]
        qconfig_dict = {"": default_qconfig}
        m = torch.jit.script(M()).eval()
        m = quantize_jit(m, qconfig_dict, test_only_eval_fn, [data])
        # make sure patterns in both branches are fused
        FileCheck().check_count("quantized::conv2d(", 4, exactly=True).run(m.graph)
    def test_finalize_for_linear(self):
        """Finalize folds linear_prepack and leaves a single input quantize."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.fc = torch.nn.Linear(5, 5).float()
            def forward(self, x):
                return self.fc(x)
        data = [[torch.rand((1, 5), dtype=torch.float)]]
        qconfig_dict = {"": default_qconfig}
        model = torch.jit.script(M()).eval()
        model = quantize_jit(model, qconfig_dict, test_only_eval_fn, [data])
        # make sure there is only one quantize_per_tensor for input
        # and linear_prepack is folded
        FileCheck().check_count("aten::quantize_per_tensor", 1, exactly=True).check_not(
            "quantized::linear_prepack"
        ).check("quantized::linear").run(model.graph)
def test_inplace_option(self):
for tracing in [True, False]:
model = get_script_module(
torch.nn.Conv2d(3, 3, 3).float(), tracing, self.img_data_2d[0][0]
)
qconfig_dict = {"": default_qconfig}
quantize_jit(
model, qconfig_dict, test_only_eval_fn, [self.img_data_2d], inplace=True
)
FileCheck().check("quantized::conv2d").run(model.graph)
FileCheck().check_not("aten::conv2d").run(model.graph)
def test_finalize_debug(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 3).float()
self.avgpool = torch.nn.AvgPool2d(3)
def forward(self, x):
x = self.conv(x)
x = self.avgpool(x)
return x
data = [[torch.rand((1, 3, 10, 10), dtype=torch.float)]]
qconfig_dict = {"": default_qconfig}
model = torch.jit.script(M()).eval()
model = quantize_jit(model, qconfig_dict, test_only_eval_fn, [data], debug=True)
FileCheck().check_not("quantized::conv2d").check("aten::conv2d").check(
"aten::avg_pool2d"
).check("aten::q_scale").check_next("aten::q_zero_point").check_next(
"prim::dtype"
).check_next("aten::quantize_per_tensor").check("aten::dequantize").run(
model.graph
)
def test_module_list(self):
class SimpleLinearLayer(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = torch.nn.Linear(5, 5).float()
def forward(self, x):
return self.fc(x)
class ComplexModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layers = torch.nn.ModuleList(
[SimpleLinearLayer() for i in range(2)]
)
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
states = []
for layer in self.layers:
val = layer(x)
states.append(val)
return states
data = torch.rand((1, 5), dtype=torch.float)
qconfig_dict = {"": default_qconfig}
model = torch.jit.script(ComplexModel()).eval()
model = prepare_jit(model, qconfig_dict)
assert len(attrs_with_prefix(model, "_observer")) == 3
model(data)
model = convert_jit(model, debug=False)
FileCheck().check("quantized::linear").check("quantized::linear").run(
model.graph
)
def test_conv_trace(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1d = torch.nn.Conv1d(3, 3, 3).float()
self.conv2d = torch.nn.Conv2d(3, 3, 3).float()
self.conv3d = torch.nn.Conv3d(3, 3, 3).float()
def forward(self, x, y, z):
a = self.conv1d(x)
b = self.conv2d(y)
c = self.conv3d(z)
return (a, b, c)
qconfig_dict = {"": default_qconfig}
inputs = (
torch.rand((1, 3, 10), dtype=torch.float),
torch.rand((1, 3, 10, 10), dtype=torch.float),
torch.rand((1, 3, 10, 10, 10), dtype=torch.float),
)
model = torch.jit.trace(M(), inputs).eval()
m = prepare_jit(model, qconfig_dict)
FileCheck().check("aten::conv1d").check_not("aten::_convolution").run(
str(get_forward_graph(m.conv1d._c))
)
FileCheck().check("aten::conv2d").check_not("aten::_convolution").run(
str(get_forward_graph(m.conv2d._c))
)
FileCheck().check("aten::conv3d").check_not("aten::_convolution").run(
str(get_forward_graph(m.conv3d._c))
)
def test_convtranspose_trace(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.convtranspose1d = torch.nn.ConvTranspose1d(3, 3, 3).float()
self.convtranspose2d = torch.nn.ConvTranspose2d(3, 3, 3).float()
self.convtranspose3d = torch.nn.ConvTranspose3d(3, 3, 3).float()
def forward(self, x, y, z):
a = self.convtranspose1d(x)
b = self.convtranspose2d(y)
c = self.convtranspose3d(z)
return (a, b, c)
qconfig_dict = {"": default_qconfig}
inputs = (
torch.rand((1, 3, 10), dtype=torch.float),
torch.rand((1, 3, 10, 10), dtype=torch.float),
torch.rand((1, 3, 10, 10, 10), dtype=torch.float),
)
model = torch.jit.trace(M(), inputs).eval()
m = prepare_jit(model, qconfig_dict)
FileCheck().check("aten::conv_transpose1d").check_not("aten::_convolution").run(
str(get_forward_graph(m.convtranspose1d._c))
)
FileCheck().check("aten::conv_transpose2d").check_not("aten::_convolution").run(
str(get_forward_graph(m.convtranspose2d._c))
)
FileCheck().check("aten::conv_transpose3d").check_not("aten::_convolution").run(
str(get_forward_graph(m.convtranspose3d._c))
)
@unittest.skipUnless(
"fbgemm" in torch.backends.quantized.supported_engines,
" Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs"
" with instruction set support avx2 or newer.",
)
def test_replicate_dequant_same_value(self):
class Mul(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 3).float()
def forward(self, x):
x = self.conv(x)
return x * x
data = [[torch.rand((1, 3, 10, 10), dtype=torch.float)]]
qconfig_dict = {"": default_qconfig}
model = torch.jit.script(Mul()).eval()
m = quantize_jit(model, qconfig_dict, test_only_eval_fn, [data])
FileCheck().check("quantized::mul(").check_not("aten::mul").run(m.graph)
def test_interface_with_fork(self):
class SubModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.embedding1 = torch.nn.EmbeddingBag(
num_embeddings=10,
embedding_dim=12,
include_last_offset=True,
sparse=False,
mode="sum",
)
def forward(self, x, y):
return self.embedding1(x, y)
class OrigMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.embedding1 = torch.nn.EmbeddingBag(
num_embeddings=10,
embedding_dim=12,
include_last_offset=True,
sparse=False,
mode="sum",
)
def forward(self, x, y):
return self.embedding1(x, y)
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
class TestModule(torch.nn.Module):
proxy_mod: ModInterface
def __init__(self) -> None:
super().__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule()
def forward(self, x, y):
a = self.proxy_mod(x, y)
b = self.sub(x, y)
return b
class MainModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.test = TestModule()
def forward(self, x, y):
fut = torch.jit._fork(self.test.forward, x, y)
z = torch.jit._wait(fut)
return z
indices = torch.tensor(
[
9,
6,
5,
7,
8,
8,
9,
2,
8,
6,
6,
9,
1,
6,
8,
8,
3,
2,
3,
6,
3,
6,
5,
7,
0,
8,
4,
6,
5,
8,
2,
3,
]
)
offsets = torch.tensor([0, 19, 20, 28, 28, 32])
m = torch.jit.trace(MainModule(), (indices, offsets))
m.eval()
int8_qconfig = QConfig(
activation=PlaceholderObserver.with_args(
dtype=torch.float, custom_op_name="embedding_bag_byte"
),
weight=PlaceholderObserver.with_args(custom_op_name="embedding_bag_byte"),
)
m = prepare_jit(m, {"": int8_qconfig})
m = convert_jit(m)
FileCheck().check("quantized::embedding_bag_byte_rowwise_offsets").run(m.graph)
@skipIfNoFBGEMM
def test_quantize_fork_wait(self):
"""Tests the case where fork and wait calls are in different subgraphs
Calling inline fork-wait only removes the fork call and leaves aten::wait
calls in the graph, with Tensor as input (instead of Future[Tensor])
"""
class MainModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fork_ops = ForkModule()
def init_values(self, x):
shared_module = self.fork_ops(x)
self.fork_dict = shared_module
def forward(self, x):
val = torch.jit._wait(self.fork_ops(x))
return val
class TestModule(torch.nn.Module):
def forward(self, x):
w = torch.ones(5, 5)
b = torch.zeros(5)
return torch.nn.functional.linear(x, w, b)
class ForkModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.test = TestModule()
def forward(self, x):
fut = torch.jit._fork(self.test.forward, x)
return fut
model = MainModule().eval()
traced = torch.jit.trace(model, (torch.randn(5, 5),))
model = prepare_dynamic_jit(traced, {"": default_qconfig})
model = convert_dynamic_jit(model)
FileCheck().check("quantized::linear_dynamic").run(model.graph)
# Make sure model save works
b = io.BytesIO()
torch.jit.save(model, b)
|
TestQuantizeJitPasses
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axisartist/grid_helper_curvelinear.py
|
{
"start": 10376,
"end": 13506
}
|
class ____(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
Parameters
----------
aux_trans : `.Transform` or tuple[Callable, Callable]
The transform from curved coordinates to rectilinear coordinate:
either a `.Transform` instance (which provides also its inverse),
or a pair of callables ``(trans, inv_trans)`` that define the
transform and its inverse. The callables should have signature::
x_rect, y_rect = trans(x_curved, y_curved)
x_curved, y_curved = inv_trans(x_rect, y_rect)
extreme_finder
grid_locator1, grid_locator2
Grid locators for each axis.
tick_formatter1, tick_formatter2
Tick formatters for each axis.
"""
super().__init__()
self._grid_info = None
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
def update_grid_finder(self, aux_trans=None, **kwargs):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kwargs)
self._old_limits = None # Force revalidation.
def new_fixed_axis(
self, loc, *, axis_direction=None, offset=None, axes=None, nth_coord=None
):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
helper = FixedAxisArtistHelper(self, loc, nth_coord_ticks=nth_coord)
axisline = AxisArtist(axes, helper, axis_direction=axis_direction)
# Why is clip not set on axisline, unlike in new_floating_axis or in
# the floating_axig.GridHelperCurveLinear subclass?
return axisline
def new_floating_axis(self, nth_coord, value, axes=None, axis_direction="bottom"):
if axes is None:
axes = self.axes
helper = FloatingAxisArtistHelper(
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, helper)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
# axisline.major_ticklabels.set_visible(True)
# axisline.minor_ticklabels.set_visible(False)
return axisline
def _update_grid(self, bbox):
self._grid_info = self.grid_finder.get_grid_info(bbox)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
grid_lines.extend([gl.T for gl in self._grid_info["lon"]["lines"]])
if axis in ["both", "y"]:
grid_lines.extend([gl.T for gl in self._grid_info["lat"]["lines"]])
return grid_lines
|
GridHelperCurveLinear
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/triggers/test_emr.py
|
{
"start": 9525,
"end": 10353
}
|
class ____:
def test_serialization(self):
application_id = "test_application_id"
waiter_delay = 30
waiter_max_attempts = 60
aws_conn_id = "aws_default"
trigger = EmrServerlessDeleteApplicationTrigger(
application_id=application_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.amazon.aws.triggers.emr.EmrServerlessDeleteApplicationTrigger"
assert kwargs == {
"application_id": "test_application_id",
"waiter_delay": 30,
"waiter_max_attempts": 60,
"aws_conn_id": "aws_default",
}
|
TestEmrServerlessDeleteApplicationTrigger
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/collective_all_reduce_strategy_test.py
|
{
"start": 17316,
"end": 18243
}
|
class ____(
CollectiveAllReduceStrategyTestBase, parameterized.TestCase):
@classmethod
def setUpClass(cls):
"""Create a local cluster with 3 workers and 1 chief."""
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=0, has_chief=True)
@combinations.generate(
combinations.combine(mode=['graph'], required_gpus=[0, 1, 2]))
def testMinimizeLossGraph(self, required_gpus):
self._run_between_graph_clients(self._test_minimize_loss_graph,
self._cluster_spec, required_gpus)
@combinations.generate(
combinations.combine(mode=['graph'], required_gpus=[0, 1, 2]))
def testVariableInitialization(self, required_gpus):
self._run_between_graph_clients(
self._test_variable_initialization,
self._cluster_spec,
num_gpus=required_gpus)
|
DistributedCollectiveAllReduceStrategyTestWithChief
|
python
|
ray-project__ray
|
python/ray/data/stats.py
|
{
"start": 2314,
"end": 4960
}
|
class ____:
"""Container for categorized columns and their aggregators."""
numerical_columns: List[str]
str_columns: List[str]
vector_columns: List[str]
aggregators: List[AggregateFnV2]
def feature_aggregators_for_dataset(
dataset: "Dataset", columns: Optional[List[str]] = None
) -> FeatureAggregators:
"""Generate aggregators for all columns in a dataset.
Args:
dataset: A Ray Dataset instance
columns: A list of columns to include in the summary. If None, all columns will be included.
Returns:
FeatureAggregators containing categorized column names and their aggregators
"""
schema = dataset.schema()
if not schema:
raise ValueError("Dataset must have a schema to determine numerical columns")
if columns is None:
columns = schema.names
# Validate columns exist in schema
missing_cols = set(columns) - set(schema.names)
if missing_cols:
raise ValueError(f"Columns {missing_cols} not found in dataset schema")
# Categorize columns and build aggregators
numerical_columns = []
str_columns = []
vector_columns = []
all_aggs = []
# Get column types - Ray's Schema provides names and types as lists
column_names = schema.names
column_types = schema.types
# Create a mapping of column names to types
name_to_type = dict(zip(column_names, column_types))
for name in columns:
if name not in name_to_type:
continue
ftype = name_to_type[name]
if not isinstance(ftype, pa.DataType):
logger.warning(
f"Skipping field {name}: type {ftype} is not a PyArrow DataType"
)
continue
# Check for numerical types (including boolean as numerical)
if (
pa.types.is_integer(ftype)
or pa.types.is_floating(ftype)
or pa.types.is_decimal(ftype)
or pa.types.is_boolean(ftype)
):
numerical_columns.append(name)
all_aggs.extend(numerical_aggregators(name))
elif pa.types.is_string(ftype):
str_columns.append(name)
all_aggs.extend(categorical_aggregators(name))
elif pa.types.is_list(ftype):
vector_columns.append(name)
all_aggs.extend(vector_aggregators(name))
else:
logger.warning(f"Skipping field {name}: type {ftype} not supported")
return FeatureAggregators(
numerical_columns=numerical_columns,
str_columns=str_columns,
vector_columns=vector_columns,
aggregators=all_aggs,
)
|
FeatureAggregators
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/distributions/bijector_test.py
|
{
"start": 1084,
"end": 3057
}
|
class ____(test.TestCase):
"""Tests properties of the Bijector base-class."""
def testIsAbstract(self):
# In Python 3.9, "abstract methods" become "abstract method"
with self.assertRaisesRegex(TypeError,
r"Can't instantiate abstract class Bijector "
r"with.* abstract method '?__init__'?"):
bijector.Bijector() # pylint: disable=abstract-class-instantiated
def testDefaults(self):
class _BareBonesBijector(bijector.Bijector):
"""Minimal specification of a `Bijector`."""
def __init__(self):
super().__init__(forward_min_event_ndims=0)
bij = _BareBonesBijector()
self.assertEqual([], bij.graph_parents)
self.assertEqual(False, bij.is_constant_jacobian)
self.assertEqual(False, bij.validate_args)
self.assertEqual(None, bij.dtype)
self.assertEqual("bare_bones_bijector", bij.name)
for shape in [[], [1, 2], [1, 2, 3]]:
forward_event_shape_ = self.evaluate(
bij.inverse_event_shape_tensor(shape))
inverse_event_shape_ = self.evaluate(
bij.forward_event_shape_tensor(shape))
self.assertAllEqual(shape, forward_event_shape_)
self.assertAllEqual(shape, bij.forward_event_shape(shape))
self.assertAllEqual(shape, inverse_event_shape_)
self.assertAllEqual(shape, bij.inverse_event_shape(shape))
with self.assertRaisesRegex(NotImplementedError, "inverse not implemented"):
bij.inverse(0)
with self.assertRaisesRegex(NotImplementedError, "forward not implemented"):
bij.forward(0)
with self.assertRaisesRegex(NotImplementedError,
"inverse_log_det_jacobian not implemented"):
bij.inverse_log_det_jacobian(0, event_ndims=0)
with self.assertRaisesRegex(NotImplementedError,
"forward_log_det_jacobian not implemented"):
bij.forward_log_det_jacobian(0, event_ndims=0)
|
BaseBijectorTest
|
python
|
google__pytype
|
pytype/rewrite/load_abstract_test.py
|
{
"start": 1851,
"end": 2478
}
|
class ____(test_utils.ContextfulTestBase):
def test_builtin_type(self):
t = self.ctx.abstract_loader.load_raw_type(int)
self.assertIsInstance(t, abstract.SimpleClass)
self.assertEqual(t.name, 'int')
self.assertEqual(t.module, 'builtins')
def test_stdlib_type(self):
t = self.ctx.abstract_loader.load_raw_type(numbers.Number)
self.assertIsInstance(t, abstract.SimpleClass)
self.assertEqual(t.name, 'Number')
self.assertEqual(t.module, 'numbers')
def test_nonetype(self):
t = self.ctx.abstract_loader.load_raw_type(type(None))
self.assertIs(t, self.ctx.consts[None])
|
LoadRawTypeTest
|
python
|
nedbat__coveragepy
|
tests/test_api.py
|
{
"start": 40067,
"end": 41557
}
|
class ____(CoverageTest):
"""Test the numerical analysis of results."""
def test_many_missing_branches(self) -> None:
cov = coverage.Coverage(branch=True)
self.make_file(
"missing.py",
"""\
def fun1(x):
if x == 1:
print("one")
else:
print("not one")
print("done") # pragma: nocover
def fun2(x):
if x:
print("x")
else:
print("not x")
fun2(3)
""",
)
# Import the Python file, executing it.
self.start_import_stop(cov, "missing")
nums = cov._analyze("missing.py").numbers
assert nums.n_files == 1
assert nums.n_statements == 9
assert nums.n_excluded == 1
assert nums.n_missing == 4
assert nums.n_branches == 4
assert nums.n_partial_branches == 1
assert nums.n_missing_branches == 3
filename, statements, excluded, missing, missing_formatted = cov.analysis2("missing.py")
assert os.path.relpath(filename) == "missing.py"
assert statements == [1, 2, 3, 5, 8, 9, 10, 12, 14]
assert excluded == [6]
assert missing == [2, 3, 5, 12]
assert missing_formatted == "2-5, 12"
branch_stats = cov.branch_stats("missing.py")
assert branch_stats == {2: (2, 0), 9: (2, 1)}
|
AnalysisTest
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/channels.py
|
{
"start": 856496,
"end": 877885
}
|
class ____(FieldChannelMixin, core.TimeDef):
r"""
Time schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
rescale : bool
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A string indicating an encoding channel name to sort by
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g.,
``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g.,
``"-x"`` to sort by x-field, descending). This channel string is short-form of `a
sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For
example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order":
"descending"}``.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` and sorting by another channel is not supported for ``row`` and
``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "time"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> Time: ...
@overload
def aggregate(self, *, argmax: Optional[str | SchemaBase] = Undefined) -> Time: ...
@overload
def aggregate(self, *, argmin: Optional[str | SchemaBase] = Undefined) -> Time: ...
@overload
def bandPosition(self, _: float, /) -> Time: ...
@overload
def bin(self, _: bool | Bin | None, /) -> Time: ...
@overload
def bin(
self,
*,
anchor: Optional[float] = Undefined,
base: Optional[float] = Undefined,
binned: Optional[bool] = Undefined,
divide: Optional[Sequence[float]] = Undefined,
extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
maxbins: Optional[float] = Undefined,
minstep: Optional[float] = Undefined,
nice: Optional[bool] = Undefined,
step: Optional[float] = Undefined,
steps: Optional[Sequence[float]] = Undefined,
) -> Time: ...
@overload
def field(self, _: str | RepeatRef, /) -> Time: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> Time: ...
@overload
def rescale(self, _: bool, /) -> Time: ...
@overload
def scale(self, _: Scale | None, /) -> Time: ...
@overload
def scale(
self,
*,
align: Optional[float | Parameter | SchemaBase | Map] = Undefined,
base: Optional[float | Parameter | SchemaBase | Map] = Undefined,
bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined,
clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
constant: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domain: Optional[
Parameter
| SchemaBase
| Literal["unaggregated"]
| Sequence[
str | bool | float | Temporal | Parameter | SchemaBase | Map | None
]
| Map
] = Undefined,
domainMax: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined,
domainMin: Optional[
float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined,
exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[
Parameter | SchemaBase | Map | ScaleInterpolateEnum_T
] = Undefined,
nice: Optional[
bool | float | Parameter | SchemaBase | Map | TimeInterval_T
] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined,
paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined,
range: Optional[
SchemaBase
| Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map]
| Map
| RangeEnum_T
] = Undefined,
rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
round: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined,
type: Optional[SchemaBase | ScaleType_T] = Undefined,
zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
) -> Time: ...
@overload
def sort(
self,
_: Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[DateTime | Temporal]
| AllSortString_T
| None,
/,
) -> Time: ...
@overload
def sort(
self,
*,
field: Optional[str | SchemaBase | Map] = Undefined,
op: Optional[SchemaBase | NonArgAggregateOp_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> Time: ...
@overload
def sort(
self,
*,
encoding: Optional[SchemaBase | SortByChannel_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> Time: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> Time: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> Time: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> Time: ...
@overload
def type(self, _: StandardType_T, /) -> Time: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
rescale: Optional[bool] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
rescale=rescale,
scale=scale,
sort=sort,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
@with_property_setters
# NOTE(review): the lines below are not source code — they are dataset-viewer
# UI residue accidentally captured with the file. Commented out here so the
# module stays parseable; they should simply be deleted. Original text:
#   |
#   Time
#   |
#   Subsets and Splits
#   No community queries yet
#   The top public SQL queries from the community will appear here once available.