language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver37.py | {
"start": 144,
"end": 409
} | class ____(Generic[A]): ...
def func1(x: A) -> A: ...
def func2(x: Gen[A], y: A) -> Gen[Gen[A]]: ...
def func3(x: Gen[Gen[A]]) -> Gen[A]:
return func4(x, func1, func2)
def func4(x: Gen[A], id_: Callable[[B], B], step: Callable[[A, B], Gen[A]]) -> A: ...
| Gen |
python | walkccc__LeetCode | solutions/3185. Count Pairs That Form a Complete Day II/3185.py | {
"start": 0,
"end": 276
} | class ____:
# Same as 3184. Count Pairs That Form a Complete Day I
def countCompleteDayPairs(self, hours: list[int]) -> int:
ans = 0
count = [0] * 24
for hour in hours:
ans += count[(24 - hour % 24) % 24]
count[hour % 24] += 1
return ans
| Solution |
python | pytorch__pytorch | test/export/test_passes.py | {
"start": 2403,
"end": 3047
} | class ____(OperatorSupport):
def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
return node.op == "call_function" and node.target in {torch.ops.aten.add.Tensor}
def _to_partition_names(partitions: list[Partition]) -> list[set[str]]:
return [{n.name for n in p.nodes} for p in partitions]
def _get_output_names(gm: torch.fx.GraphModule) -> list[str]:
output_node = next(n for n in gm.graph.nodes if n.op == "output")
args = pytree.tree_leaves(output_node.args)
# if isinstance(args, tuple) and len(args) == 1:
# args = args[0]
return [str(arg) for arg in args]
| _AtenAddOperatorSupport |
python | django-import-export__django-import-export | tests/core/tests/test_widgets.py | {
"start": 14492,
"end": 16297
} | class ____(TestCase, RowDeprecationTestMixin):
def setUp(self):
self.value = 11.111
self.widget = widgets.FloatWidget()
self.widget_coerce_to_string = widgets.FloatWidget(coerce_to_string=True)
def test_clean(self):
self.assertEqual(self.widget.clean(11.111), self.value)
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_clean_numeric_separators(self):
self.assertEqual(self.widget.clean("1,234.5"), 1234.5)
@override_settings(LANGUAGE_CODE="ar", USE_THOUSAND_SEPARATOR=True)
def test_clean_numeric_separators_arabic(self):
self.assertEqual(self.widget.clean("1.234,5"), 1234.5)
@override_settings(LANGUAGE_CODE="zh-hans", USE_THOUSAND_SEPARATOR=True)
def test_clean_numeric_separators_chinese_simplified(self):
self.assertEqual(self.widget.clean("1234.5"), 1234.5)
@override_settings(LANGUAGE_CODE="fr", USE_THOUSAND_SEPARATOR=True)
def test_clean_numeric_separators_french(self):
self.assertEqual(self.widget.clean("1\xa0234,5"), 1234.5)
def test_render(self):
self.assertEqual(self.widget.render(self.value), "11.111")
def test_render_invalid_type(self):
self.assertEqual(self.widget.render("a"), "")
def test_clean_string_zero(self):
self.assertEqual(self.widget.clean("0"), 0.0)
self.assertEqual(self.widget.clean("0.0"), 0.0)
def test_clean_empty_string(self):
self.assertEqual(self.widget.clean(""), None)
self.assertEqual(self.widget.clean(" "), None)
self.assertEqual(self.widget.clean("\r\n\t"), None)
@override_settings(LANGUAGE_CODE="fr-fr")
def test_locale_render_coerce_to_string_gte4(self):
self.assertEqual(self.widget_coerce_to_string.render(self.value), "11,111")
| FloatWidgetTest |
python | django__django | django/utils/xmlutils.py | {
"start": 158,
"end": 1172
} | class ____(XMLGenerator):
def addQuickElement(self, name, contents=None, attrs=None):
"Convenience method for adding an element with no children"
if attrs is None:
attrs = {}
self.startElement(name, attrs)
if contents is not None:
self.characters(contents)
self.endElement(name)
def characters(self, content):
if content and re.search(r"[\x00-\x08\x0B-\x0C\x0E-\x1F]", content):
# Fail loudly when content has control chars (unsupported in XML
# 1.0) See https://www.w3.org/International/questions/qa-controls
raise UnserializableContentError(
"Control characters are not supported in XML 1.0"
)
XMLGenerator.characters(self, content)
def startElement(self, name, attrs):
# Sort attrs for a deterministic output.
sorted_attrs = dict(sorted(attrs.items())) if attrs else attrs
super().startElement(name, sorted_attrs)
| SimplerXMLGenerator |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/auto_width_input.py | {
"start": 95,
"end": 439
} | class ____(App[None]):
CSS = """
Input.auto {
width: auto;
max-width: 100%;
}
"""
def compose(self) -> ComposeResult:
yield Header()
yield Input(placeholder="This has auto width", classes="auto")
yield Footer()
if __name__ == "__main__":
InputWidthAutoApp().run()
| InputWidthAutoApp |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels20.py | {
"start": 315,
"end": 1472
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels20.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [45740032, 45743104]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {"value": 1, "legend_key": True},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/plugins/providers/base.py | {
"start": 595,
"end": 3045
} | class ____:
auth_provider: str | None = None
logger = logging.getLogger(__name__)
def link_auth(self, user, organization, data):
usa = usersocialauth_service.get_one_or_none(
filter={
"id": data["default_auth_id"],
"user_id": user.id,
"provider": self.auth_provider,
}
)
if not usa:
raise PluginError
rpc_organization = serialize_rpc_organization(org=organization)
usersocialauth_service.link_auth(usa=usa, organization=rpc_organization)
def get_auth(self, user: RpcUser | User, **kwargs) -> RpcUserSocialAuth | None:
if self.auth_provider is None:
return None
organization = kwargs.get("organization")
if organization:
ois = integration_service.get_organization_integrations(
providers=[self.auth_provider], organization_id=organization.id
)
if len(ois) > 0 and ois[0].default_auth_id is not None:
auth = usersocialauth_service.get_one_or_none(filter={"id": ois[0].default_auth_id})
if auth:
return auth
if not user.is_authenticated:
return None
return usersocialauth_service.get_one_or_none(
filter={"user_id": user.id, "provider": self.auth_provider}
)
def handle_api_error(self, e: Exception) -> Response:
context: dict[str, object] = {"error_type": "unknown"}
if isinstance(e, InvalidIdentity):
if self.auth_provider is None:
context.update(
{
"message": "Your authentication credentials are invalid. Please check your project settings."
}
)
else:
context.update(
{
"error_type": "auth",
"auth_url": reverse("socialauth_associate", args=[self.auth_provider]),
}
)
status = 400
elif isinstance(e, PluginError):
# TODO(dcramer): we should have a proper validation error
context.update({"error_type": "validation", "errors": {"__all__": str(e)}})
status = 400
else:
self.logger.exception(str(e))
status = 500
return Response(context, status=status)
| ProviderMixin |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/sensors/test_spark_kubernetes.py | {
"start": 20522,
"end": 33567
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(conn_id="kubernetes_default", conn_type="kubernetes", extra=json.dumps({}))
)
create_connection_without_db(
Connection(
conn_id="kubernetes_with_namespace",
conn_type="kubernetes",
extra=json.dumps({"namespace": "mock_namespace"}),
)
)
args = {"owner": "airflow", "start_date": timezone.datetime(2020, 2, 1)}
self.dag = DAG("test_dag_id", schedule=None, default_args=args)
def test_init(self, mock_kubernetes_hook):
sensor = SparkKubernetesSensor(task_id="task", application_name="application")
assert sensor.task_id == "task"
assert sensor.application_name == "application"
assert sensor.attach_log is False
assert sensor.namespace is None
assert sensor.container_name == "spark-kubernetes-driver"
assert sensor.kubernetes_conn_id == "kubernetes_default"
assert sensor.api_group == "sparkoperator.k8s.io"
assert sensor.api_version == "v1beta2"
assert "hook" not in sensor.__dict__ # Cached property has not been accessed as part of construction.
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_COMPLETED_APPLICATION,
)
def test_completed_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
assert sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="sparkoperator.k8s.io",
name="spark_pi",
namespace="default",
plural="sparkapplications",
version="v1beta2",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_FAILED_APPLICATION,
)
def test_failed_application(
self,
mock_get_namespaced_crd,
mock_kubernetes_hook,
):
sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
with pytest.raises(AirflowException):
sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="sparkoperator.k8s.io",
name="spark_pi",
namespace="default",
plural="sparkapplications",
version="v1beta2",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_NOT_PROCESSED_APPLICATION,
)
def test_not_processed_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
assert not sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="sparkoperator.k8s.io",
name="spark_pi",
namespace="default",
plural="sparkapplications",
version="v1beta2",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_NEW_APPLICATION,
)
def test_new_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
assert not sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="sparkoperator.k8s.io",
name="spark_pi",
namespace="default",
plural="sparkapplications",
version="v1beta2",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_RUNNING_APPLICATION,
)
def test_running_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
assert not sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="sparkoperator.k8s.io",
name="spark_pi",
namespace="default",
plural="sparkapplications",
version="v1beta2",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_SUBMITTED_APPLICATION,
)
def test_submitted_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
assert not sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="sparkoperator.k8s.io",
name="spark_pi",
namespace="default",
plural="sparkapplications",
version="v1beta2",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_PENDING_RERUN_APPLICATION,
)
def test_pending_rerun_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
assert not sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="sparkoperator.k8s.io",
name="spark_pi",
namespace="default",
plural="sparkapplications",
version="v1beta2",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_UNKNOWN_APPLICATION,
)
def test_unknown_application(
self,
mock_get_namespaced_crd,
mock_kubernetes_hook,
):
sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
with pytest.raises(AirflowException):
sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="sparkoperator.k8s.io",
name="spark_pi",
namespace="default",
plural="sparkapplications",
version="v1beta2",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_COMPLETED_APPLICATION,
)
def test_namespace_from_sensor(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = SparkKubernetesSensor(
application_name="spark_pi",
dag=self.dag,
kubernetes_conn_id="kubernetes_with_namespace",
namespace="sensor_namespace",
task_id="test_task_id",
)
sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="sparkoperator.k8s.io",
name="spark_pi",
namespace="sensor_namespace",
plural="sparkapplications",
version="v1beta2",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_COMPLETED_APPLICATION,
)
def test_api_group_and_version_from_sensor(self, mock_get_namespaced_crd, mock_kubernetes_hook):
api_group = "sparkoperator.example.com"
api_version = "v1alpha1"
sensor = SparkKubernetesSensor(
application_name="spark_pi",
dag=self.dag,
kubernetes_conn_id="kubernetes_with_namespace",
task_id="test_task_id",
api_group=api_group,
api_version=api_version,
)
sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group=api_group,
name="spark_pi",
namespace="mock_namespace",
plural="sparkapplications",
version=api_version,
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_COMPLETED_APPLICATION,
)
def test_namespace_from_connection(self, mock_get_namespaced_crd, mock_kubernetes_hook):
sensor = SparkKubernetesSensor(
application_name="spark_pi",
dag=self.dag,
kubernetes_conn_id="kubernetes_with_namespace",
task_id="test_task_id",
)
sensor.poke({})
mock_kubernetes_hook.assert_called_once_with()
mock_get_namespaced_crd.assert_called_once_with(
group="sparkoperator.k8s.io",
name="spark_pi",
namespace="mock_namespace",
plural="sparkapplications",
version="v1beta2",
)
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_FAILED_APPLICATION,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
return_value=TEST_POD_LOGS,
)
def test_driver_logging_failure(
self,
mock_log_call,
mock_get_namespaced_crd,
mock_kube_conn,
caplog,
):
sensor = SparkKubernetesSensor(
application_name="spark_pi",
attach_log=True,
dag=self.dag,
task_id="test_task_id",
)
with pytest.raises(AirflowException):
sensor.poke({})
mock_log_call.assert_called_once_with(
"spark-pi-driver", namespace="default", container="spark-kubernetes-driver"
)
assert (ANY, logging.ERROR, TEST_POD_LOG_RESULT) in caplog.record_tuples
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_COMPLETED_APPLICATION,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
return_value=TEST_POD_LOGS,
)
def test_driver_logging_completed(self, mock_log_call, mock_get_namespaced_crd, mock_kube_conn, caplog):
sensor = SparkKubernetesSensor(
application_name="spark_pi",
attach_log=True,
dag=self.dag,
task_id="test_task_id",
)
sensor.poke({})
mock_log_call.assert_called_once_with(
"spark-pi-2020-02-24-1-driver", namespace="default", container="spark-kubernetes-driver"
)
assert (ANY, logging.INFO, TEST_POD_LOG_RESULT) in caplog.record_tuples
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_COMPLETED_APPLICATION,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
side_effect=ApiException("Test api exception"),
)
def test_driver_logging_error(self, mock_log_call, mock_get_namespaced_crd, mock_kube_conn, caplog):
sensor = SparkKubernetesSensor(
application_name="spark_pi",
attach_log=True,
dag=self.dag,
task_id="test_task_id",
)
sensor.poke({})
assert (ANY, logging.WARNING, ANY) in caplog.record_tuples, "Expected something logged at warning"
@patch(
"kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
return_value=TEST_DRIVER_WITH_SIDECAR_APPLICATION,
)
@patch(
"airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
return_value=TEST_POD_LOGS,
)
def test_sidecar_driver_logging_completed(
self, mock_log_call, mock_get_namespaced_crd, mock_kube_conn, caplog
):
sensor = SparkKubernetesSensor(
application_name="spark_pi",
attach_log=True,
dag=self.dag,
task_id="test_task_id",
)
sensor.poke({})
mock_log_call.assert_called_once_with(
"spark-pi-2020-02-24-1-driver", namespace="default", container="spark-kubernetes-driver"
)
assert (ANY, logging.INFO, TEST_POD_LOG_RESULT) in caplog.record_tuples
| TestSparkKubernetesSensor |
python | mlflow__mlflow | dev/update_ml_package_versions.py | {
"start": 901,
"end": 11032
} | class ____:
version: str
upload_time: datetime
def get_package_version_infos(package_name: str) -> list[VersionInfo]:
url = f"https://pypi.python.org/pypi/{package_name}/json"
for _ in range(5): # Retry up to 5 times
try:
with urllib.request.urlopen(url) as res:
data = json.load(res)
except ConnectionResetError as e:
sys.stderr.write(f"Retrying {url} due to {e}\n")
time.sleep(1)
else:
break
else:
raise Exception(f"Failed to fetch {url}")
def is_dev_or_pre_release(version_str):
v = Version(version_str)
return v.is_devrelease or v.is_prerelease
return [
VersionInfo(
version=version,
upload_time=datetime.fromisoformat(dist_files[0]["upload_time"]),
)
for version, dist_files in data["releases"].items()
if (
len(dist_files) > 0
and not is_dev_or_pre_release(version)
and not any(uploaded_recently(dist) for dist in dist_files)
and not any(dist.get("yanked", False) for dist in dist_files)
)
]
def get_latest_version(candidates):
return max(candidates, key=Version)
def update_version(src, key, new_version, category, update_max):
"""
Examples
========
>>> src = '''
... sklearn:
... ...
... models:
... minimum: "0.0.0"
... maximum: "0.0.0"
... xgboost:
... ...
... autologging:
... minimum: "1.1.1"
... maximum: "1.1.1"
... '''.strip()
>>> new_src = update_version(src, "sklearn", "0.1.0", "models", update_max=True)
>>> new_src = update_version(new_src, "xgboost", "1.2.1", "autologging", update_max=True)
>>> print(new_src)
sklearn:
...
models:
minimum: "0.0.0"
maximum: "0.1.0"
xgboost:
...
autologging:
minimum: "1.1.1"
maximum: "1.2.1"
"""
match = "maximum" if update_max else "minimum"
pattern = r"((^|\n){key}:.+?{category}:.+?{match}: )\".+?\"".format(
key=re.escape(key), category=category, match=match
)
# Matches the following pattern:
#
# <key>:
# ...
# <category>:
# ...
# maximum: "1.2.3"
return re.sub(pattern, rf'\g<1>"{new_version}"', src, flags=re.DOTALL)
def extract_field(d, keys):
for key in keys:
if key in d:
d = d[key]
else:
return None
return d
def _get_autolog_flavor_module_map(config):
"""
Parse _ML_PACKAGE_VERSIONS to get the mapping of flavor name to
the module name to be imported for autologging.
"""
autolog_flavor_module_map = {}
for flavor, config in config.items():
if "autologging" not in config:
continue
module_name = config["package_info"].get("module_name", flavor)
autolog_flavor_module_map[flavor] = module_name
return autolog_flavor_module_map
def update_ml_package_versions_py(config_path):
with open(config_path) as f:
genai_config = {}
non_genai_config = {}
for name, cfg in yaml.load(f, Loader=yaml.SafeLoader).items():
# Extract required fields
pip_release = extract_field(cfg, ("package_info", "pip_release"))
module_name = extract_field(cfg, ("package_info", "module_name"))
min_version = extract_field(cfg, ("models", "minimum"))
max_version = extract_field(cfg, ("models", "maximum"))
genai = extract_field(cfg, ("package_info", "genai"))
config_to_update = genai_config if genai else non_genai_config
if min_version:
config_to_update[name] = {
"package_info": {
"pip_release": pip_release,
},
"models": {
"minimum": min_version,
"maximum": max_version,
},
}
else:
config_to_update[name] = {
"package_info": {
"pip_release": pip_release,
}
}
if module_name:
config_to_update[name]["package_info"]["module_name"] = module_name
# Check for autologging configuration
autolog_min_version = extract_field(cfg, ("autologging", "minimum"))
autolog_max_version = extract_field(cfg, ("autologging", "maximum"))
if (pip_release, autolog_min_version, autolog_max_version).count(None) > 0:
continue
config_to_update[name].update(
{
"autologging": {
"minimum": autolog_min_version,
"maximum": autolog_max_version,
}
},
)
genai_flavor_module_mapping = _get_autolog_flavor_module_map(genai_config)
# We have "langgraph" entry in ml-package-versions.yml so that we can run test
# against multiple versions of langgraph. However, we don't have a flavor for
# langgraph and it is a part of the langchain flavor.
genai_flavor_module_mapping.pop("langgraph", None)
non_genai_flavor_module_mapping = _get_autolog_flavor_module_map(non_genai_config)
# Add special case for pyspark.ml (non-GenAI)
non_genai_flavor_module_mapping["pyspark.ml"] = "pyspark"
this_file = Path(__file__).name
dst = Path("mlflow", "ml_package_versions.py")
config_str = json.dumps(genai_config | non_genai_config, indent=4)
Path(dst).write_text(
f"""\
# This file was auto-generated by {this_file}.
# Please do not edit it manually.
_ML_PACKAGE_VERSIONS = {config_str}
# A mapping of flavor name to the module name to be imported for autologging.
# This is used for checking version compatibility in autologging.
# DO NOT EDIT MANUALLY
# GenAI packages
GENAI_FLAVOR_TO_MODULE_NAME = {json.dumps(genai_flavor_module_mapping, indent=4)}
# Non-GenAI packages
NON_GENAI_FLAVOR_TO_MODULE_NAME = {json.dumps(non_genai_flavor_module_mapping, indent=4)}
# Combined mapping for backward compatibility
FLAVOR_TO_MODULE_NAME = NON_GENAI_FLAVOR_TO_MODULE_NAME | GENAI_FLAVOR_TO_MODULE_NAME
"""
)
def parse_args():
parser = argparse.ArgumentParser(description="Update MLflow package versions")
parser.add_argument(
"--skip-yml", action="store_true", help="Skip updating ml-package-versions.yml"
)
return parser.parse_args()
def get_min_supported_version(versions_infos: list[VersionInfo], genai: bool = False) -> str | None:
"""
Get the minimum version that is released within the past two years
"""
years = 1 if genai else 2
min_support_date = datetime.now() - timedelta(days=years * 365)
min_support_date = min_support_date.replace(tzinfo=None)
# Extract versions that were released in the past two years
recent_versions = [v for v in versions_infos if v.upload_time > min_support_date]
if not recent_versions:
return None
# Get minimum version according to upload date
return min(recent_versions, key=lambda v: v.upload_time).version
def update(skip_yml=False):
yml_path = "mlflow/ml-package-versions.yml"
if not skip_yml:
old_src = read_file(yml_path)
new_src = old_src
config_dict = yaml.load(old_src, Loader=yaml.SafeLoader)
for flavor_key, config in config_dict.items():
# We currently don't have bandwidth to support newer versions of these flavors.
if flavor_key in ["litellm"]:
continue
package_name = config["package_info"]["pip_release"]
genai = config["package_info"].get("genai", False)
versions_and_upload_times = get_package_version_infos(package_name)
min_supported_version = get_min_supported_version(
versions_and_upload_times, genai=genai
)
for category in ["autologging", "models"]:
print("Processing", flavor_key, category)
if category in config and "minimum" in config[category]:
old_min_version = config[category]["minimum"]
if flavor_key == "spark":
# We should support pyspark versions that are older than the cut off date.
pass
elif min_supported_version is None:
# The latest release version was 2 years ago.
# set the min version to be the same with the max version.
max_ver = config[category]["maximum"]
new_src = update_version(
new_src, flavor_key, max_ver, category, update_max=False
)
elif Version(min_supported_version) > Version(old_min_version):
new_src = update_version(
new_src, flavor_key, min_supported_version, category, update_max=False
)
if (category not in config) or config[category].get("pin_maximum", False):
continue
max_ver = config[category]["maximum"]
versions = [v.version for v in versions_and_upload_times]
unsupported = config[category].get("unsupported", [])
versions = set(versions).difference(unsupported) # exclude unsupported versions
latest_version = get_latest_version(versions)
if Version(latest_version) <= Version(max_ver):
continue
new_src = update_version(
new_src, flavor_key, latest_version, category, update_max=True
)
save_file(new_src, yml_path)
update_ml_package_versions_py(yml_path)
def main():
args = parse_args()
update(args.skip_yml)
if __name__ == "__main__":
main()
| VersionInfo |
python | rq__rq | tests/test_cli.py | {
"start": 34857,
"end": 38688
} | class ____(CLITestCase):
"""Tests the `rq cron` CLI command."""
def setUp(self):
# Call parent setUp first to initialize self.connection, self.redis_url etc.
super().setUp()
# Path to the existing cron config file
current_dir = os.path.dirname(__file__)
self.cron_config_path = os.path.abspath(os.path.join(current_dir, 'cron_config.py'))
self.assertTrue(os.path.exists(self.cron_config_path), f'Config file not found at {self.cron_config_path}')
def test_cron_execution(self):
"""rq cron <config_path> -u <url>"""
runner = CliRunner()
mock_cron = MagicMock()
# Mock the Cron class instead of load_config
with patch('rq.cli.cli_cron.CronScheduler', return_value=mock_cron) as mock_cron_class:
# Make the start method just return to avoid infinite loop
mock_cron.start.side_effect = lambda: None
result = runner.invoke(main, ['cron', self.cron_config_path, '-u', self.redis_url])
self.assert_normal_execution(result)
# Verify Cron was constructed with correct parameters
mock_cron_class.assert_called_once()
# Verify load_config_from_file was called with correct path
mock_cron.load_config_from_file.assert_called_once_with(self.cron_config_path)
# Verify start was called
mock_cron.start.assert_called_once()
def test_cron_execution_log_level(self):
"""rq cron <config_path> -u <url> --logging-level DEBUG"""
runner = CliRunner()
mock_cron = MagicMock()
# Mock the Cron class
with patch('rq.cli.cli_cron.CronScheduler', return_value=mock_cron) as mock_cron_class:
mock_cron.start.side_effect = lambda: None
result = runner.invoke(
main, ['cron', '--logging-level', 'DEBUG', self.cron_config_path, '-u', self.redis_url]
)
self.assert_normal_execution(result)
# Verify Cron was constructed with correct parameters
mock_cron_class.assert_called_once()
# Verify all logging parameters
call_kwargs = mock_cron_class.call_args[1]
self.assertEqual(call_kwargs['logging_level'], 'DEBUG')
# Verify config loading and start were called
mock_cron.load_config_from_file.assert_called_once_with(self.cron_config_path)
mock_cron.start.assert_called_once()
def test_cron_execution_with_url(self):
"""Verify that the Redis URL (-u option) is correctly parsed and used"""
runner = CliRunner()
mock_cron = MagicMock()
# Use a distinctive non-default URL with different host, port, and DB
test_url = 'redis://test-host:7777/5'
with patch('rq.cli.cli_cron.CronScheduler', return_value=mock_cron) as mock_cron_class:
mock_cron.start.side_effect = lambda: None
result = runner.invoke(main, ['cron', self.cron_config_path, '-u', test_url])
self.assert_normal_execution(result)
# Verify that Cron was called with a connection from the provided URL
mock_cron_class.assert_called_once()
connection = mock_cron_class.call_args[1]['connection']
# Verify all connection parameters match our custom URL
connection_kwargs = connection.connection_pool.connection_kwargs
self.assertEqual(connection_kwargs['host'], 'test-host')
self.assertEqual(connection_kwargs['port'], 7777)
self.assertEqual(connection_kwargs['db'], 5)
# Verify config loading and start were called
mock_cron.load_config_from_file.assert_called_once_with(self.cron_config_path)
mock_cron.start.assert_called_once()
| CronCLITestCase |
python | realpython__materials | inheritance-and-composition/inheritance/disgruntled.py | {
"start": 0,
"end": 166
} | class ____:
def __init__(self, id, name):
self.id = id
self.name = name
def calculate_payroll(self):
return 1_000_000
| DisgruntledEmployee |
python | realpython__materials | python-basic-data-types/point.py | {
"start": 0,
"end": 267
} | class ____:
def __init__(self, x, y):
self.x = x
self.y = y
def __bool__(self):
if self.x == self.y == 0:
return False
return True
origin = Point(0, 0)
print(bool(origin))
point = Point(2, 4)
print(bool(point))
| Point |
python | astropy__astropy | astropy/coordinates/baseframe.py | {
"start": 15069,
"end": 91942
} | class ____(MaskableShapedLikeNDArray):
"""
The base class for coordinate frames.
This class is intended to be subclassed to create instances of specific
systems. Subclasses can implement the following attributes:
* `default_representation`
A subclass of `~astropy.coordinates.BaseRepresentation` that will be
treated as the default representation of this frame. This is the
representation assumed by default when the frame is created.
* `default_differential`
A subclass of `~astropy.coordinates.BaseDifferential` that will be
treated as the default differential class of this frame. This is the
differential class assumed by default when the frame is created.
* `~astropy.coordinates.Attribute` class attributes
Frame attributes such as ``FK4.equinox`` or ``FK4.obstime`` are defined
using a descriptor class. See the narrative documentation or
built-in classes code for details.
* `frame_specific_representation_info`
A dictionary mapping the name or class of a representation to a list of
`~astropy.coordinates.RepresentationMapping` objects that tell what
names and default units should be used on this frame for the components
of that representation.
Unless overridden via `frame_specific_representation_info`, velocity name
defaults are:
* ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for `~astropy.coordinates.SphericalCosLatDifferential` velocity components
* ``pm_{lon}``, ``pm_{lat}`` for `~astropy.coordinates.SphericalDifferential` velocity components
* ``radial_velocity`` for any ``d_distance`` component
* ``v_{x,y,z}`` for `~astropy.coordinates.CartesianDifferential` velocity components
where ``{lon}`` and ``{lat}`` are the frame names of the angular components.
"""
default_representation = None
default_differential = None
# Specifies special names and units for representation and differential
# attributes.
frame_specific_representation_info = {}
frame_attributes = {}
# Default empty frame_attributes dict
# Declare that BaseCoordinateFrame can be used as a Table column by defining
# the info property.
info = CoordinateFrameInfo()
    def __init_subclass__(cls, **kwargs):
        """Finalize a newly defined frame subclass.

        Freezes ``default_representation``, ``default_differential`` and
        ``frame_specific_representation_info`` into read-only properties,
        collects ``frame_attributes`` from all bases plus this class, derives
        the frame ``name`` when needed, and initializes the per-class cache.
        """
        # We first check for explicitly set values for these:
        default_repr = getattr(cls, "default_representation", None)
        default_diff = getattr(cls, "default_differential", None)
        repr_info = getattr(cls, "frame_specific_representation_info", None)
        # Then, to make sure this works for subclasses-of-subclasses, we also
        # have to check for cases where the attribute names have already been
        # replaced by underscore-prefaced equivalents by the logic below:
        if default_repr is None or isinstance(default_repr, property):
            default_repr = getattr(cls, "_default_representation", None)
        if default_diff is None or isinstance(default_diff, property):
            default_diff = getattr(cls, "_default_differential", None)
        if repr_info is None or isinstance(repr_info, property):
            repr_info = getattr(cls, "_frame_specific_representation_info", None)
        # Fill in default name/unit mappings (see _infer_repr_info).
        repr_info = cls._infer_repr_info(repr_info)
        # Make read-only properties for the frame class attributes that should
        # be read-only to make them immutable after creation.
        # We copy attributes instead of linking to make sure there's no
        # accidental cross-talk between classes
        cls._create_readonly_property(
            "default_representation",
            default_repr,
            "Default representation for position data",
        )
        cls._create_readonly_property(
            "default_differential",
            default_diff,
            "Default representation for differential data (e.g., velocity)",
        )
        cls._create_readonly_property(
            "frame_specific_representation_info",
            copy.deepcopy(repr_info),
            "Mapping for frame-specific component names",
        )
        # Set the frame attributes. We first construct the attributes from
        # superclasses, going in reverse order to keep insertion order,
        # and then add any attributes from the frame now being defined
        # (if any old definitions are overridden, this keeps the order).
        # Note that we cannot simply start with the inherited frame_attributes
        # since we could be a mixin between multiple coordinate frames.
        # TODO: Should this be made to use readonly_prop_factory as well or
        # would it be inconvenient for getting the frame_attributes from
        # classes?
        frame_attrs = {}
        for basecls in reversed(cls.__bases__):
            if issubclass(basecls, BaseCoordinateFrame):
                frame_attrs.update(basecls.frame_attributes)
        frame_attrs |= {k: v for k, v in vars(cls).items() if isinstance(v, Attribute)}
        cls.frame_attributes = frame_attrs
        # Deal with setting the name of the frame:
        if not hasattr(cls, "name"):
            cls.name = cls.__name__.lower()
        elif BaseCoordinateFrame not in cls.__bases__ and cls.name in [
            getattr(base, "name", None) for base in cls.__bases__
        ]:
            # This may be a subclass of a subclass of BaseCoordinateFrame,
            # like ICRS(BaseRADecFrame). In this case, cls.name will have been
            # set by init_subclass
            cls.name = cls.__name__.lower()
        # A cache that *must be unique to each frame class* - it is
        # insufficient to share them with superclasses, hence the need to put
        # them in the meta
        cls._frame_class_cache = {}
        super().__init_subclass__(**kwargs)
        # call this once here to initialize defaults
        # (via FrameAttribute.__get__/convert_input)
        cls.get_frame_attr_defaults()
    def __init__(
        self,
        *args,
        copy=True,
        representation_type=None,
        differential_type=None,
        **kwargs,
    ):
        """Initialize the frame from data and frame-attribute keywords.

        Parameters
        ----------
        *args
            Either a single representation instance (or `None`), or positional
            component values; parsed by ``_infer_data``.
        copy : bool, optional
            If `True` (default), copy the input data and attributes.
        representation_type, differential_type : class or str, optional
            Override the representation/differential classes used for the
            data (resolved by ``_infer_representation``).
        **kwargs
            Frame attribute values and/or representation components by name.
            Unrecognized keywords raise `TypeError`.
        """
        self._attr_names_with_defaults = []
        self._representation = self._infer_representation(
            representation_type, differential_type
        )
        data = self._infer_data(args, copy, kwargs)  # possibly None.
        shapes = [] if data is None else [data.shape]
        # Set frame attributes, if any.
        # Keep track of their shapes, but do not broadcast them yet.
        values = {}
        for fnm, fdefault in self.get_frame_attr_defaults().items():
            # Read-only frame attributes are defined as FrameAttribute
            # descriptors which are not settable, so set 'real' attributes as
            # the name prefaced with an underscore.
            if fnm in kwargs:
                value = kwargs.pop(fnm)
                setattr(self, "_" + fnm, value)
                # Validate attribute by getting it.
                values[fnm] = value = getattr(self, fnm)
                shapes.append(getattr(value, "shape", ()))
            else:
                setattr(self, "_" + fnm, fdefault)
                self._attr_names_with_defaults.append(fnm)
        if kwargs:
            raise TypeError(
                f"Coordinate frame {self.__class__.__name__} got unexpected "
                f"keywords: {list(kwargs)}"
            )
        # Determine the overall shape of the frame.
        try:
            self._shape = np.broadcast_shapes(*shapes)
        except ValueError as exc:
            exc.add_note(
                f"non-scalar data and/or attributes with inconsistent shapes: {shapes}",
            )
            raise exc
        # Broadcast the data if necessary and set it
        if data is not None and data.shape != self._shape:
            try:
                # if broadcasting isn't strictly needed, avoid it
                # see https://github.com/astropy/astropy/issues/16219
                data = data.reshape(self._shape)
            except Exception:
                data = data._apply(np.broadcast_to, shape=self._shape, subok=True)
            if copy:
                data = data.copy()
        self._data = data
        # Broadcast the attributes if necessary by getting them again
        # (we now know the shapes will be OK).
        for key in values:
            getattr(self, key)
        # The logic of this block is not related to the previous one
        if self.has_data:
            # This makes the cache keys backwards-compatible, but also adds
            # support for having differentials attached to the frame data
            # representation object.
            if "s" in self._data.differentials:
                # TODO: assumes a velocity unit differential
                key = (
                    self._data.__class__.__name__,
                    self._data.differentials["s"].__class__.__name__,
                    False,
                )
            else:
                key = (self._data.__class__.__name__, False)
            # Set up representation cache.
            self.cache["representation"][key] = self._data
def _infer_representation(self, representation_type, differential_type):
if representation_type is None and differential_type is None:
return {"base": self.default_representation, "s": self.default_differential}
if representation_type is None:
representation_type = self.default_representation
if isinstance(differential_type, type) and issubclass(
differential_type, r.BaseDifferential
):
# TODO: assumes the differential class is for the velocity
# differential
differential_type = {"s": differential_type}
elif isinstance(differential_type, str):
# TODO: assumes the differential class is for the velocity
# differential
diff_cls = r.DIFFERENTIAL_CLASSES[differential_type]
differential_type = {"s": diff_cls}
elif differential_type is None:
if representation_type == self.default_representation:
differential_type = {"s": self.default_differential}
else:
differential_type = {"s": "base"} # see set_representation_cls()
return _get_repr_classes(representation_type, **differential_type)
    def _infer_data(self, args, copy, kwargs):
        """Parse positional/keyword component input into representation data.

        Consumes recognized entries from ``args``/``kwargs`` and returns a
        representation instance (with any velocity differential attached via
        ``with_differentials``), or `None` for a data-less frame.

        Parameters
        ----------
        args : sequence
            Positional arguments: either a single representation object (or
            `None`), or individual component values in representation order.
        copy : bool
            Passed through to the representation/differential constructors.
        kwargs : dict
            Keyword arguments; recognized component names are popped so the
            caller can validate what remains.
        """
        # if not set below, this is a frame with no data
        representation_data = None
        differential_data = None
        args = list(args)  # need to be able to pop them
        if args and (isinstance(args[0], r.BaseRepresentation) or args[0] is None):
            representation_data = args.pop(0)  # This can still be None
            if len(args) > 0:
                raise TypeError(
                    "Cannot create a frame with both a representation object "
                    "and other positional arguments"
                )
            if representation_data is not None:
                diffs = representation_data.differentials
                differential_data = diffs.get("s", None)
                # Only a single attached velocity differential (key "s") is
                # supported; anything else is an error.
                if (differential_data is None and len(diffs) > 0) or (
                    differential_data is not None and len(diffs) > 1
                ):
                    raise ValueError(
                        "Multiple differentials are associated with the representation"
                        " object passed in to the frame initializer. Only a single"
                        f" velocity differential is supported. Got: {diffs}"
                    )
        else:
            representation_cls = self.get_representation_cls()
            # Get any representation data passed in to the frame initializer
            # using keyword or positional arguments for the component names
            repr_kwargs = {}
            for nmkw, nmrep in self.representation_component_names.items():
                if len(args) > 0:
                    # first gather up positional args
                    repr_kwargs[nmrep] = args.pop(0)
                elif nmkw in kwargs:
                    repr_kwargs[nmrep] = kwargs.pop(nmkw)
            # special-case the Spherical->UnitSpherical if no `distance`
            if repr_kwargs:
                # TODO: determine how to get rid of the part before the "try" -
                # currently removing it has a performance regression for
                # unitspherical because of the try-related overhead.
                # Also frames have no way to indicate what the "distance" is
                if repr_kwargs.get("distance", True) is None:
                    del repr_kwargs["distance"]
                if (
                    issubclass(representation_cls, r.SphericalRepresentation)
                    and "distance" not in repr_kwargs
                ):
                    representation_cls = representation_cls._unit_representation
                try:
                    representation_data = representation_cls(copy=copy, **repr_kwargs)
                except TypeError as e:
                    # this except clause is here to make the names of the
                    # attributes more human-readable. Without this the names
                    # come from the representation instead of the frame's
                    # attribute names.
                    try:
                        representation_data = representation_cls._unit_representation(
                            copy=copy, **repr_kwargs
                        )
                    except Exception:
                        msg = str(e)
                        names = self.get_representation_component_names()
                        for frame_name, repr_name in names.items():
                            msg = msg.replace(repr_name, frame_name)
                        msg = msg.replace("__init__()", f"{self.__class__.__name__}()")
                        e.args = (msg,)
                        raise e
            # Now we handle the Differential data:
            # Get any differential data passed in to the frame initializer
            # using keyword or positional arguments for the component names
            differential_cls = self.get_representation_cls("s")
            diff_component_names = self.get_representation_component_names("s")
            diff_kwargs = {}
            for nmkw, nmrep in diff_component_names.items():
                if len(args) > 0:
                    # first gather up positional args
                    diff_kwargs[nmrep] = args.pop(0)
                elif nmkw in kwargs:
                    diff_kwargs[nmrep] = kwargs.pop(nmkw)
            if diff_kwargs:
                # Mirror the UnitSpherical special-casing above: without a
                # d_distance the unit variant applies; d_distance alone means
                # a pure radial velocity.
                if (
                    hasattr(differential_cls, "_unit_differential")
                    and "d_distance" not in diff_kwargs
                ):
                    differential_cls = differential_cls._unit_differential
                elif len(diff_kwargs) == 1 and "d_distance" in diff_kwargs:
                    differential_cls = r.RadialDifferential
                try:
                    differential_data = differential_cls(copy=copy, **diff_kwargs)
                except TypeError as e:
                    # this except clause is here to make the names of the
                    # attributes more human-readable. Without this the names
                    # come from the representation instead of the frame's
                    # attribute names.
                    msg = str(e)
                    names = self.get_representation_component_names("s")
                    for frame_name, repr_name in names.items():
                        msg = msg.replace(repr_name, frame_name)
                    msg = msg.replace("__init__()", f"{self.__class__.__name__}()")
                    e.args = (msg,)
                    raise
        if len(args) > 0:
            raise TypeError(
                f"{type(self).__name__}.__init__ had {len(args)} remaining "
                "unhandled arguments"
            )
        if representation_data is None and differential_data is not None:
            raise ValueError(
                "Cannot pass in differential component data "
                "without positional (representation) data."
            )
        if differential_data:
            # Check that differential data provided has units compatible
            # with time-derivative of representation data.
            # NOTE: there is no dimensionless time while lengths can be
            # dimensionless (u.dimensionless_unscaled).
            for comp in representation_data.components:
                if (diff_comp := f"d_{comp}") in differential_data.components:
                    current_repr_unit = representation_data._units[comp]
                    current_diff_unit = differential_data._units[diff_comp]
                    expected_unit = current_repr_unit / u.s
                    if not current_diff_unit.is_equivalent(expected_unit):
                        # Translate component names back to frame-level names
                        # for the error message.
                        for (
                            key,
                            val,
                        ) in self.get_representation_component_names().items():
                            if val == comp:
                                current_repr_name = key
                                break
                        for key, val in self.get_representation_component_names(
                            "s"
                        ).items():
                            if val == diff_comp:
                                current_diff_name = key
                                break
                        raise ValueError(
                            f'{current_repr_name} has unit "{current_repr_unit}" with'
                            f' physical type "{current_repr_unit.physical_type}", but'
                            f" {current_diff_name} has incompatible unit"
                            f' "{current_diff_unit}" with physical type'
                            f' "{current_diff_unit.physical_type}" instead of the'
                            f' expected "{(expected_unit).physical_type}".'
                        )
            representation_data = representation_data.with_differentials(
                {"s": differential_data}
            )
        return representation_data
    @classmethod
    def _infer_repr_info(cls, repr_info):
        """Fill in default component mappings for ``repr_info``.

        Normalizes string keys to representation classes and installs default
        name/unit mappings for the spherical, radial and cartesian
        differential classes, keeping any frame-specific overrides.

        Unless overridden, velocity name defaults are (see also the class
        docstring):

        * ``pm_{lon}_cos{lat}``, ``pm_{lat}`` for
          `SphericalCosLatDifferential` proper motion components
        * ``pm_{lon}``, ``pm_{lat}`` for `SphericalDifferential` proper
          motion components
        * ``radial_velocity`` for any `d_distance` component
        * ``v_{x,y,z}`` for `CartesianDifferential` velocity components

        where ``{lon}`` and ``{lat}`` are the frame names of the angular
        components.
        """
        if repr_info is None:
            repr_info = {}
        # the tuple() call below is necessary because if it is not there,
        # the iteration proceeds in a difficult-to-predict manner in the
        # case that one of the class objects hash is such that it gets
        # revisited by the iteration. The tuple() call prevents this by
        # making the items iterated over fixed regardless of how the dict
        # changes
        for cls_or_name in tuple(repr_info.keys()):
            if isinstance(cls_or_name, str):
                # TODO: this provides a layer of backwards compatibility in
                # case the key is a string, but now we want explicit classes.
                repr_info[_get_repr_cls(cls_or_name)] = repr_info.pop(cls_or_name)
        # The default spherical names are 'lon' and 'lat'
        sph_repr = repr_info.setdefault(
            r.SphericalRepresentation,
            [RepresentationMapping("lon", "lon"), RepresentationMapping("lat", "lat")],
        )
        sph_component_map = {m.reprname: m.framename for m in sph_repr}
        lon = sph_component_map["lon"]
        lat = sph_component_map["lat"]
        # Default units for proper motions and radial velocities.
        ang_v_unit = u.mas / u.yr
        lin_v_unit = u.km / u.s
        sph_coslat_diff = repr_info.setdefault(
            r.SphericalCosLatDifferential,
            [
                RepresentationMapping("d_lon_coslat", f"pm_{lon}_cos{lat}", ang_v_unit),
                RepresentationMapping("d_lat", f"pm_{lat}", ang_v_unit),
                RepresentationMapping("d_distance", "radial_velocity", lin_v_unit),
            ],
        )
        sph_diff = repr_info.setdefault(
            r.SphericalDifferential,
            [
                RepresentationMapping("d_lon", f"pm_{lon}", ang_v_unit),
                RepresentationMapping("d_lat", f"pm_{lat}", ang_v_unit),
                RepresentationMapping("d_distance", "radial_velocity", lin_v_unit),
            ],
        )
        repr_info.setdefault(
            r.RadialDifferential,
            [RepresentationMapping("d_distance", "radial_velocity", lin_v_unit)],
        )
        repr_info.setdefault(
            r.CartesianDifferential,
            [RepresentationMapping(f"d_{c}", f"v_{c}", lin_v_unit) for c in "xyz"],
        )
        # Unit* classes should follow the same naming conventions
        # TODO: this adds some unnecessary mappings for the Unit classes, so
        # this could be cleaned up, but in practice doesn't seem to have any
        # negative side effects
        repr_info.setdefault(r.UnitSphericalRepresentation, sph_repr)
        repr_info.setdefault(r.UnitSphericalCosLatDifferential, sph_coslat_diff)
        repr_info.setdefault(r.UnitSphericalDifferential, sph_diff)
        return repr_info
@classmethod
def _create_readonly_property(cls, attr_name, value, doc=None):
private_attr = "_" + attr_name
def getter(self):
return getattr(self, private_attr)
setattr(cls, private_attr, value)
setattr(cls, attr_name, property(getter, doc=doc))
    @functools.cached_property
    def cache(self):
        """Cache for this frame, a dict.

        It stores anything that should be computed from the coordinate data
        (*not* from the frame attributes). This can be used in functions to
        store anything that might be expensive to compute but might be reused
        by some other function. E.g.::

            if 'user_data' in myframe.cache:
                data = myframe.cache['user_data']
            else:
                myframe.cache['user_data'] = data = expensive_func(myframe.lat)

        If in-place modifications are made to the frame data, the cache should
        be cleared::

            myframe.cache.clear()
        """
        # Lazily-created mapping of category -> dict, e.g. the
        # 'representation' sub-cache filled by __init__ and represent_as.
        return defaultdict(dict)
@property
def data(self):
"""
The coordinate data for this object. If this frame has no data,
a `ValueError` will be raised. Use `has_data` to
check if data is present on this frame object.
"""
if self._data is None:
raise ValueError(
f'The frame object "{self!r}" does not have associated data'
)
return self._data
    @property
    def has_data(self):
        """
        True if this frame has `data`, False otherwise.
        """
        # Checks the private attribute directly; does not touch the ``data``
        # property (which would raise for data-less frames).
        return self._data is not None
    @property
    def shape(self):
        # Broadcast shape of the data and non-scalar frame attributes, as
        # computed in __init__ via np.broadcast_shapes.
        return self._shape
    def __bool__(self):
        # Truthy only when the frame carries data and that data is non-empty.
        return self.has_data and self.size > 0
    @property
    def size(self):
        # Number of elements in the data; the ``data`` property raises
        # ValueError when the frame has no associated data.
        return self.data.size
    @property
    def masked(self):
        """Whether the underlying data is masked.

        Raises
        ------
        ValueError
            If the frame has no associated data.
        """
        # Delegates to the data's own ``masked`` flag.
        return self.data.masked
    def get_mask(self, *attrs):
        """Get the mask associated with these coordinates.

        Parameters
        ----------
        *attrs : str
            Attributes from which to get the masks to combine. Items can be
            dotted, like ``"data.lon", "data.lat"``. By default, get the
            combined mask of all components (including from differentials),
            ignoring possible masks of attributes.

        Returns
        -------
        mask : ~numpy.ndarray of bool
            The combined, read-only mask. If the instance is not masked, it
            is an array of `False` with the correct shape.

        Raises
        ------
        ValueError
            If the coordinate frame has no associated data.
        """
        if attrs:
            # Collect the masks of the explicitly requested attributes
            # (attributes without a mask contribute None).
            values = operator.attrgetter(*attrs)(self)
            if not isinstance(values, tuple):
                values = (values,)
            masks = [getattr(v, "mask", None) for v in values]
        elif self.data.masked:
            # Default: combine the data mask with those of all differentials.
            masks = [diff.mask for diff in self.data.differentials.values()]
            masks.append(self.data.mask)
        else:
            # Short-cut if the data is not masked.
            masks = []
        # Broadcast makes it readonly too.
        return np.broadcast_to(combine_masks(masks), self.shape)
mask = property(
get_mask,
doc="""The mask associated with these coordinates.
Combines the masks of all components of the underlying representation,
including possible differentials.
""",
)
@classmethod
def get_frame_attr_defaults(cls):
"""Return a dict with the defaults for each frame attribute."""
return {name: getattr(cls, name).default for name in cls.frame_attributes}
def get_representation_cls(self, which="base"):
"""The class used for part of this frame's data.
Parameters
----------
which : ('base', 's', `None`)
The class of which part to return. 'base' means the class used to
represent the coordinates; 's' the first derivative to time, i.e.,
the class representing the proper motion and/or radial velocity.
If `None`, return a dict with both.
Returns
-------
representation : `~astropy.coordinates.BaseRepresentation` or `~astropy.coordinates.BaseDifferential`.
"""
return self._representation if which is None else self._representation[which]
def set_representation_cls(self, base=None, s="base"):
"""Set representation and/or differential class for this frame's data.
Parameters
----------
base : str, `~astropy.coordinates.BaseRepresentation` subclass, optional
The name or subclass to use to represent the coordinate data.
s : `~astropy.coordinates.BaseDifferential` subclass, optional
The differential subclass to use to represent any velocities,
such as proper motion and radial velocity. If equal to 'base',
which is the default, it will be inferred from the representation.
If `None`, the representation will drop any differentials.
"""
if base is None:
base = self._representation["base"]
self._representation = _get_repr_classes(base=base, s=s)
representation_type = property(
fget=get_representation_cls,
fset=set_representation_cls,
doc="""The representation class used for this frame's data.
This will be a subclass from `~astropy.coordinates.BaseRepresentation`.
Can also be *set* using the string name of the representation. If you
wish to set an explicit differential class (rather than have it be
inferred), use the ``set_representation_cls`` method.
""",
)
    @property
    def differential_type(self):
        """
        The differential used for this frame's data.

        This will be a subclass from `~astropy.coordinates.BaseDifferential`.
        For simultaneous setting of representation and differentials, see the
        ``set_representation_cls`` method.
        """
        return self.get_representation_cls("s")

    @differential_type.setter
    def differential_type(self, value):
        # Keep the base representation; swap only the differential ("s") part.
        self.set_representation_cls(s=value)
    @functools.cached_property
    def representation_info(self):
        """
        A dictionary with the information of what attribute names for this frame
        apply to particular representations.
        """
        cls = type(self)
        # Rebuild (and cache on the class) only when the registry of
        # representation/differential classes has changed since last time,
        # as detected via get_reprdiff_cls_hash().
        if (
            cls._frame_class_cache.get("last_reprdiff_hash", None)
            != r.get_reprdiff_cls_hash()
        ):
            repr_attrs = {}
            for repr_diff_cls in list(r.REPRESENTATION_CLASSES.values()) + list(
                r.DIFFERENTIAL_CLASSES.values()
            ):
                repr_attrs[repr_diff_cls] = {"names": [], "units": []}
                for c, c_cls in repr_diff_cls.attr_classes.items():
                    repr_attrs[repr_diff_cls]["names"].append(c)
                    # Angle components get degrees as recommended unit;
                    # everything else has no recommended unit (None).
                    rec_unit = u.deg if issubclass(c_cls, Angle) else None
                    repr_attrs[repr_diff_cls]["units"].append(rec_unit)
            for (
                repr_diff_cls,
                mappings,
            ) in cls._frame_specific_representation_info.items():
                # take the 'names' and 'units' tuples from repr_attrs,
                # and then use the RepresentationMapping objects
                # to update as needed for this frame.
                nms = repr_attrs[repr_diff_cls]["names"]
                uns = repr_attrs[repr_diff_cls]["units"]
                comptomap = {m.reprname: m for m in mappings}
                for i, c in enumerate(repr_diff_cls.attr_classes.keys()):
                    if (mapping := comptomap.get(c)) is not None:
                        nms[i] = mapping.framename
                        defaultunit = mapping.defaultunit
                        # need the isinstance because otherwise if it's a unit it
                        # will try to compare to the unit string representation
                        if not (
                            isinstance(defaultunit, str)
                            and defaultunit == "recommended"
                        ):
                            uns[i] = defaultunit
                        # else we just leave it as recommended_units says above
                # Convert to tuples so that this can't mess with frame internals
                repr_attrs[repr_diff_cls]["names"] = tuple(nms)
                repr_attrs[repr_diff_cls]["units"] = tuple(uns)
            cls._frame_class_cache["representation_info"] = repr_attrs
            cls._frame_class_cache["last_reprdiff_hash"] = r.get_reprdiff_cls_hash()
        return cls._frame_class_cache["representation_info"]
def get_representation_component_names(self, which="base"):
cls = self.get_representation_cls(which)
if cls is None:
return {}
return dict(zip(self.representation_info[cls]["names"], cls.attr_classes))
def get_representation_component_units(self, which="base"):
repr_or_diff_cls = self.get_representation_cls(which)
if repr_or_diff_cls is None:
return {}
repr_attrs = self.representation_info[repr_or_diff_cls]
return {k: v for k, v in zip(repr_attrs["names"], repr_attrs["units"]) if v}
representation_component_names = property(get_representation_component_names)
representation_component_units = property(get_representation_component_units)
def _replicate(self, data, copy=False, **kwargs):
"""Base for replicating a frame, with possibly different attributes.
Produces a new instance of the frame using the attributes of the old
frame (unless overridden) and with the data given.
Parameters
----------
data : `~astropy.coordinates.BaseRepresentation` or None
Data to use in the new frame instance. If `None`, it will be
a data-less frame.
copy : bool, optional
Whether data and the attributes on the old frame should be copied
(default), or passed on by reference.
**kwargs
Any attributes that should be overridden.
"""
# This is to provide a slightly nicer error message if the user tries
# to use frame_obj.representation instead of frame_obj.data to get the
# underlying representation object [e.g., #2890]
if isinstance(data, type):
raise TypeError(
"Class passed as data instead of a representation instance. If you"
" called frame.representation, this returns the representation class."
" frame.data returns the instantiated object - you may want to use"
" this instead."
)
if copy and data is not None:
data = data.copy()
for attr in self.frame_attributes:
if attr not in self._attr_names_with_defaults and attr not in kwargs:
value = getattr(self, attr)
kwargs[attr] = value.copy() if copy else value
return self.__class__(data, copy=False, **kwargs)
    def replicate(self, copy=False, **kwargs):
        """
        Return a replica of the frame, optionally with new frame attributes.

        The replica is a new frame object that has the same data as this frame
        object and with frame attributes overridden if they are provided as extra
        keyword arguments to this method. If ``copy`` is set to `True` then a
        copy of the internal arrays will be made.  Otherwise the replica will
        use a reference to the original arrays when possible to save memory. The
        internal arrays are normally not changeable by the user so in most cases
        it should not be necessary to set ``copy`` to `True`.

        Parameters
        ----------
        copy : bool, optional
            If True, the resulting object is a copy of the data. When False,
            references are used where possible. This rule also applies to the
            frame attributes.
        **kwargs
            Any additional keywords are treated as frame attributes to be set on the
            new frame object.

        Returns
        -------
        frameobj : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
            Replica of this object, but possibly with new frame attributes.
        """
        # Delegate to _replicate, keeping this frame's data.
        return self._replicate(self.data, copy=copy, **kwargs)
    def replicate_without_data(self, copy=False, **kwargs):
        """
        Return a replica without data, optionally with new frame attributes.

        The replica is a new frame object without data but with the same frame
        attributes as this object, except where overridden by extra keyword
        arguments to this method.  The ``copy`` keyword determines if the frame
        attributes are truly copied vs being references (which saves memory for
        cases where frame attributes are large).

        This method is essentially the converse of `realize_frame`.

        Parameters
        ----------
        copy : bool, optional
            If True, the resulting object has copies of the frame attributes.
            When False, references are used where possible.
        **kwargs
            Any additional keywords are treated as frame attributes to be set on the
            new frame object.

        Returns
        -------
        frameobj : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
            Replica of this object, but without data and possibly with new frame
            attributes.
        """
        # Delegate to _replicate with data=None, producing a data-less frame.
        return self._replicate(None, copy=copy, **kwargs)
    def realize_frame(self, data, **kwargs):
        """
        Generates a new frame with new data from another frame (which may or
        may not have data). Roughly speaking, the converse of
        `replicate_without_data`.

        Parameters
        ----------
        data : `~astropy.coordinates.BaseRepresentation`
            The representation to use as the data for the new frame.
        **kwargs
            Any additional keywords are treated as frame attributes to be set on the
            new frame object. In particular, `representation_type` can be specified.

        Returns
        -------
        frameobj : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
            A new object in *this* frame, with the same frame attributes as
            this one, but with the ``data`` as the coordinate data.
        """
        # Delegate to _replicate with the supplied data (no copy keyword:
        # _replicate's default of copy=False applies).
        return self._replicate(data, **kwargs)
    def represent_as(self, base, s="base", in_frame_units=False):
        """
        Generate and return a new representation of this frame's `data`
        as a Representation object.

        Note: In order to make an in-place change of the representation
        of a Frame or SkyCoord object, set the ``representation``
        attribute of that object to the desired new representation, or
        use the ``set_representation_cls`` method to also set the differential.

        Parameters
        ----------
        base : subclass of BaseRepresentation or string
            The type of representation to generate.  Must be a *class*
            (not an instance), or the string name of the representation
            class.
        s : subclass of `~astropy.coordinates.BaseDifferential`, str, optional
            Class in which any velocities should be represented. Must be
            a *class* (not an instance), or the string name of the
            differential class.  If equal to 'base' (default), inferred from
            the base class.  If `None`, all velocity information is dropped.
        in_frame_units : bool, keyword-only
            Force the representation units to match the specified units
            particular to this frame

        Returns
        -------
        newrep : BaseRepresentation-derived object
            A new representation object of this frame's `data`.

        Raises
        ------
        AttributeError
            If this object had no `data`

        Examples
        --------
        >>> from astropy import units as u
        >>> from astropy.coordinates import SkyCoord, CartesianRepresentation
        >>> coord = SkyCoord(0*u.deg, 0*u.deg)
        >>> coord.represent_as(CartesianRepresentation)  # doctest: +FLOAT_CMP
        <CartesianRepresentation (x, y, z) [dimensionless]
                (1., 0., 0.)>

        >>> coord.representation_type = CartesianRepresentation
        >>> coord  # doctest: +FLOAT_CMP
        <SkyCoord (ICRS): (x, y, z) [dimensionless]
            (1., 0., 0.)>
        """
        # In the future, we may want to support more differentials, in which
        # case one probably needs to define **kwargs above and use it here.
        # But for now, we only care about the velocity.
        repr_classes = _get_repr_classes(base=base, s=s)
        representation_cls = repr_classes["base"]
        # We only keep velocity information
        if "s" in self.data.differentials:
            # For the default 'base' option in which _get_repr_classes has
            # given us a best guess based on the representation class, we only
            # use it if the class we had already is incompatible.
            if s == "base" and (
                self.data.differentials["s"].__class__
                in representation_cls._compatible_differentials
            ):
                differential_cls = self.data.differentials["s"].__class__
            else:
                differential_cls = repr_classes["s"]
        elif s is None or s == "base":
            differential_cls = None
        else:
            raise TypeError(
                "Frame data has no associated differentials (i.e. the frame has no"
                " velocity data) - represent_as() only accepts a new representation."
            )
        # Cache key includes the differential class (when any) and whether
        # frame-specific units were requested.
        if differential_cls:
            cache_key = (
                representation_cls.__name__,
                differential_cls.__name__,
                in_frame_units,
            )
        else:
            cache_key = (representation_cls.__name__, in_frame_units)
        if cached_repr := self.cache["representation"].get(cache_key):
            return cached_repr
        if differential_cls:
            # Sanity check to ensure we do not just drop radial
            # velocity.  TODO: should Representation.represent_as
            # allow this transformation in the first place?
            if (
                isinstance(self.data, r.UnitSphericalRepresentation)
                and issubclass(representation_cls, r.CartesianRepresentation)
                and not isinstance(
                    self.data.differentials["s"],
                    (
                        r.UnitSphericalDifferential,
                        r.UnitSphericalCosLatDifferential,
                        r.RadialDifferential,
                    ),
                )
            ):
                raise u.UnitConversionError(
                    "need a distance to retrieve a cartesian representation "
                    "when both radial velocity and proper motion are present, "
                    "since otherwise the units cannot match."
                )
            # TODO NOTE: only supports a single differential
            data = self.data.represent_as(representation_cls, differential_cls)
            diff = data.differentials["s"]  # TODO: assumes velocity
        else:
            data = self.data.represent_as(representation_cls)
        # If the new representation is known to this frame and has a defined
        # set of names and units, then use that.
        if in_frame_units and (
            new_attrs := self.representation_info.get(representation_cls)
        ):
            datakwargs = {comp: getattr(data, comp) for comp in data.components}
            for comp, new_attr_unit in zip(data.components, new_attrs["units"]):
                if new_attr_unit:
                    datakwargs[comp] = datakwargs[comp].to(new_attr_unit)
            data = data.__class__(copy=False, **datakwargs)
        if differential_cls:
            # the original differential
            data_diff = self.data.differentials["s"]
            # If the new differential is known to this frame and has a
            # defined set of names and units, then use that.
            if in_frame_units and (
                new_attrs := self.representation_info.get(differential_cls)
            ):
                diffkwargs = {comp: getattr(diff, comp) for comp in diff.components}
                for comp, new_attr_unit in zip(diff.components, new_attrs["units"]):
                    # Some special-casing to treat a situation where the
                    # input data has a UnitSphericalDifferential or a
                    # RadialDifferential. It is re-represented to the
                    # frame's differential class (which might be, e.g., a
                    # dimensional Differential), so we don't want to try to
                    # convert the empty component units
                    if (
                        isinstance(
                            data_diff,
                            (
                                r.UnitSphericalDifferential,
                                r.UnitSphericalCosLatDifferential,
                                r.RadialDifferential,
                            ),
                        )
                        and comp not in data_diff.__class__.attr_classes
                    ):
                        continue
                    # Try to convert to requested units. Since that might
                    # not be possible (e.g., for a coordinate with proper
                    # motion but without distance, one cannot convert to a
                    # cartesian differential in km/s), we allow the unit
                    # conversion to fail.  See gh-7028 for discussion.
                    if new_attr_unit and hasattr(diff, comp):
                        try:
                            diffkwargs[comp] = diffkwargs[comp].to(new_attr_unit)
                        except Exception:
                            pass
                diff = diff.__class__(copy=False, **diffkwargs)
                # Here we have to bypass using with_differentials() because
                # it has a validation check. But because
                # .representation_type and .differential_type don't point to
                # the original classes, if the input differential is a
                # RadialDifferential, it usually gets turned into a
                # SphericalCosLatDifferential (or whatever the default is)
                # with strange units for the d_lon and d_lat attributes.
                # This then causes the dictionary key check to fail (i.e.
                # comparison against `diff._get_deriv_key()`)
                data._differentials.update({"s": diff})
        self.cache["representation"][cache_key] = data
        return data
    def transform_to(self, new_frame):
        """
        Transform this object's coordinate data to a new frame.

        Parameters
        ----------
        new_frame : coordinate-like
            The frame to transform this coordinate frame into.

        Returns
        -------
        transframe : coordinate-like
            A new object with the coordinate data represented in the
            ``newframe`` system.

        Raises
        ------
        ValueError
            If there is no possible transformation route.
        """
        if self._data is None:
            raise ValueError("Cannot transform a frame with no data")
        # Refuse to transform velocities between frames at different obstimes:
        # doing so would silently ignore the time difference (see gh-6280).
        if (
            getattr(self.data, "differentials", None)
            and hasattr(self, "obstime")
            and hasattr(new_frame, "obstime")
            and np.any(self.obstime != new_frame.obstime)
        ):
            raise NotImplementedError(
                "You cannot transform a frame that has velocities to another frame at a"
                " different obstime. If you think this should (or should not) be"
                " possible, please comment at"
                " https://github.com/astropy/astropy/issues/6280"
            )
        if hasattr(new_frame, "_sky_coord_frame"):
            # Input new_frame is not a frame instance or class and is most
            # likely a SkyCoord object.
            new_frame = new_frame._sky_coord_frame
        # Look up the (possibly composite) transform between the two classes.
        trans = frame_transform_graph.get_transform(self.__class__, new_frame.__class__)
        if trans is None:
            if new_frame is self.__class__:
                # no special transform needed, but should update frame info
                return new_frame.realize_frame(self.data)
            msg = "Cannot transform from {0} to {1}"
            raise ConvertError(msg.format(self.__class__, new_frame.__class__))
        return trans(self, new_frame)
def is_transformable_to(self, new_frame):
"""
Determines if this coordinate frame can be transformed to another
given frame.
Parameters
----------
new_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass or instance
The proposed frame to transform into.
Returns
-------
transformable : bool or str
`True` if this can be transformed to ``new_frame``, `False` if
not, or the string 'same' if ``new_frame`` is the same system as
this object but no transformation is defined.
Notes
-----
A return value of 'same' means the transformation will work, but it will
just give back a copy of this object. The intended usage is::
if coord.is_transformable_to(some_unknown_frame):
coord2 = coord.transform_to(some_unknown_frame)
This will work even if ``some_unknown_frame`` turns out to be the same
frame class as ``coord``. This is intended for cases where the frame
is the same regardless of the frame attributes (e.g. ICRS), but be
aware that it *might* also indicate that someone forgot to define the
transformation between two objects of the same frame class but with
different attributes.
"""
new_frame_cls = new_frame if isinstance(new_frame, type) else type(new_frame)
if frame_transform_graph.get_transform(type(self), new_frame_cls):
return True
return "same" if new_frame_cls is type(self) else False
def is_frame_attr_default(self, attrnm):
"""
Determine whether or not a frame attribute has its value because it's
the default value, or because this frame was created with that value
explicitly requested.
Parameters
----------
attrnm : str
The name of the attribute to check.
Returns
-------
isdefault : bool
True if the attribute ``attrnm`` has its value by default, False if
it was specified at creation of this frame.
"""
return attrnm in self._attr_names_with_defaults
    @staticmethod
    def _frameattr_equiv(left_fattr, right_fattr):  # noqa: PLR0911
        """
        Determine if two frame attributes are equivalent. Implemented as a
        staticmethod mainly as a convenient location, although conceivable it
        might be desirable for subclasses to override this behavior.

        Primary purpose is to check for equality of representations.
        Secondary purpose is to check for equality of coordinate attributes,
        which first checks whether they themselves are in equivalent frames
        before checking for equality in the normal fashion. This is because
        checking for equality with non-equivalent frames raises an error.
        """
        if left_fattr is right_fattr:
            # shortcut if it's exactly the same object
            return True
        elif left_fattr is None or right_fattr is None:
            # shortcut if one attribute is unspecified and the other isn't
            return False
        left_is_repr = isinstance(left_fattr, r.BaseRepresentationOrDifferential)
        # XOR: exactly one side is a representation/differential -> not equal.
        if left_is_repr ^ isinstance(right_fattr, r.BaseRepresentationOrDifferential):
            return False
        if left_is_repr:
            # both are representations.
            if getattr(left_fattr, "differentials", False) or getattr(
                right_fattr, "differentials", False
            ):
                warnings.warn(
                    "Two representation frame attributes were checked for equivalence"
                    " when at least one of them has differentials. This yields False"
                    " even if the underlying representations are equivalent (although"
                    " this may change in future versions of Astropy)",
                    AstropyWarning,
                )
                return False
            # Same class: compare components directly; otherwise compare via a
            # common cartesian form.
            return np.all(
                left_fattr == right_fattr
                if type(left_fattr) is type(right_fattr)
                else left_fattr.to_cartesian() == right_fattr.to_cartesian()
            )
        left_is_coord = isinstance(left_fattr, BaseCoordinateFrame)
        if left_is_coord ^ isinstance(right_fattr, BaseCoordinateFrame):
            return False
        if left_is_coord:
            # both are coordinates
            return left_fattr.is_equivalent_frame(right_fattr) and np.all(
                left_fattr == right_fattr
            )
        # Plain (possibly array-valued) attributes: elementwise equality.
        return np.all(left_fattr == right_fattr)
def is_equivalent_frame(self, other):
"""
Checks if this object is the same frame as the ``other`` object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. Note that it does *not* matter what, if any,
data either object has.
Parameters
----------
other : :class:`~astropy.coordinates.BaseCoordinateFrame`
the other frame to check
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `~astropy.coordinates.BaseCoordinateFrame` or subclass.
"""
if self.__class__ == other.__class__:
return all(
self._frameattr_equiv(getattr(self, attr), getattr(other, attr))
for attr in self.frame_attributes
)
elif not isinstance(other, BaseCoordinateFrame):
raise TypeError(
"Tried to do is_equivalent_frame on something that isn't a frame"
)
else:
return False
def __repr__(self):
frameattrs = self._frame_attrs_repr()
data_repr = self._data_repr()
if frameattrs:
frameattrs = f" ({frameattrs})"
if data_repr:
return f"<{self.__class__.__name__} Coordinate{frameattrs}: {data_repr}>"
else:
return f"<{self.__class__.__name__} Frame{frameattrs}>"
    def _data_repr(self):
        """Returns a string representation of the coordinate data."""
        if not self.has_data:
            return ""
        if rep_cls := self.representation_type:
            # If the data is actually a unit-norm variant of the requested
            # representation, keep the data's own class (no distance shown).
            if isinstance(self.data, getattr(rep_cls, "_unit_representation", ())):
                rep_cls = self.data.__class__
            dif_cls = None
            if "s" in self.data.differentials:
                dif_cls = self.get_representation_cls("s")
                # Unit-norm / radial differentials carry partial information,
                # so keep their own class instead of the frame's default.
                if isinstance(
                    dif_data := self.data.differentials["s"],
                    (
                        r.UnitSphericalDifferential,
                        r.UnitSphericalCosLatDifferential,
                        r.RadialDifferential,
                    ),
                ):
                    dif_cls = dif_data.__class__
            data = self.represent_as(rep_cls, dif_cls, in_frame_units=True)
            data_repr = repr(data)
            # Generate the list of component names out of the repr string
            part1, _, remainder = data_repr.partition("(")
            if remainder:
                comp_str, part2 = remainder.split(")", 1)
                # Swap in frame-specific component names
                invnames = {
                    nmrepr: nmpref
                    for nmpref, nmrepr in self.representation_component_names.items()
                }
                comp_names = (invnames.get(name, name) for name in comp_str.split(", "))
                # Reassemble the repr string
                data_repr = f"{part1}({', '.join(comp_names)}){part2}"
        else:
            data = self.data
            data_repr = repr(self.data)
        # Strip the representation's own "<ClassName ...>" wrapper if present.
        if data_repr.startswith(class_prefix := f"<{type(data).__name__} "):
            data_repr = data_repr.removeprefix(class_prefix).removesuffix(">")
        else:
            data_repr = "Data:\n" + data_repr
        if "s" not in self.data.differentials:
            return data_repr
        # Append the differential (velocity) repr, renaming components on its
        # header line to the frame-specific names.
        data_repr_spl = data_repr.split("\n")
        first, *middle, last = repr(data.differentials["s"]).split("\n")
        if first.startswith("<"):
            first = " " + first.split(" ", 1)[1]
        for frm_nm, rep_nm in self.get_representation_component_names("s").items():
            first = first.replace(rep_nm, frm_nm)
        data_repr_spl[-1] = "\n".join((first, *middle, last.removesuffix(">")))
        return "\n".join(data_repr_spl)
def _frame_attrs_repr(self):
"""
Returns a string representation of the frame's attributes, if any.
"""
attr_strs = []
for attribute_name in self.frame_attributes:
attr = getattr(self, attribute_name)
# Check to see if this object has a way of representing itself
# specific to being an attribute of a frame. (Note, this is not the
# Attribute class, it's the actual object).
if hasattr(attr, "_astropy_repr_in_frame"):
attrstr = attr._astropy_repr_in_frame()
else:
attrstr = str(attr)
attr_strs.append(f"{attribute_name}={attrstr}")
return ", ".join(attr_strs)
    def _apply(self, method, *args, **kwargs):
        """Create a new instance, applying a method to the underlying data.

        In typical usage, the method is any of the shape-changing methods for
        `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
        picking particular elements (``__getitem__``, ``take``, etc.), which
        are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
        applied to the underlying arrays in the representation (e.g., ``x``,
        ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
        as well as to any frame attributes that have a shape, with the results
        used to create a new instance.

        Internally, it is also used to apply functions to the above parts
        (in particular, `~numpy.broadcast_to`).

        Parameters
        ----------
        method : str or callable
            If str, it is the name of a method that is applied to the internal
            ``components``. If callable, the function is applied.
        *args : tuple
            Any positional arguments for ``method``.
        **kwargs : dict
            Any keyword arguments for ``method``.
        """
        def apply_method(value):
            # Shape-aware objects apply the method to themselves; otherwise
            # call the function directly or look the method up by name.
            if isinstance(value, ShapedLikeNDArray):
                return value._apply(method, *args, **kwargs)
            else:
                if callable(method):
                    return method(value, *args, **kwargs)
                else:
                    return getattr(value, method)(*args, **kwargs)
        # Build the new instance without invoking __init__ (no data checks).
        new = super().__new__(self.__class__)
        if hasattr(self, "_representation"):
            new._representation = self._representation.copy()
        new._attr_names_with_defaults = self._attr_names_with_defaults.copy()
        new_shape = ()
        for attr in self.frame_attributes:
            _attr = "_" + attr
            if attr in self._attr_names_with_defaults:
                # Defaulted attributes are shared, not transformed.
                setattr(new, _attr, getattr(self, _attr))
            else:
                value = getattr(self, _attr)
                if getattr(value, "shape", ()):
                    value = apply_method(value)
                    new_shape = new_shape or value.shape
                elif method == "copy" or method == "flatten":
                    # flatten should copy also for a single element array, but
                    # we cannot use it directly for array scalars, since it
                    # always returns a one-dimensional array. So, just copy.
                    value = copy.copy(value)
                setattr(new, _attr, value)
        if self.has_data:
            new._data = apply_method(self.data)
            new_shape = new_shape or new._data.shape
        else:
            new._data = None
        new._shape = new_shape
        # Copy other 'info' attr only if it has actually been defined.
        # See PR #3898 for further explanation and justification, along
        # with Quantity.__array_finalize__
        if "info" in self.__dict__:
            new.info = self.info
        return new
    def __setitem__(self, item, value):
        """Assign items of the underlying data in-place from an equivalent frame.

        ``value`` must be a frame of the same class with equivalent frame
        attributes, whose representation (and differential) classes match;
        the masked sentinels ``np.ma.masked``/``np.ma.nomask`` are forwarded
        straight to the representation data.
        """
        if value is np.ma.masked or value is np.ma.nomask:
            # Mask (un)setting is delegated to the representation data.
            self.data.__setitem__(item, value)
            self.cache.clear()
            return
        if self.__class__ is not value.__class__:
            raise TypeError(
                f"can only set from object of same class: {self.__class__.__name__} vs."
                f" {value.__class__.__name__}"
            )
        if not self.is_equivalent_frame(value):
            raise ValueError("can only set frame item from an equivalent frame")
        if value._data is None:
            raise ValueError("can only set frame with value that has data")
        if self._data is None:
            raise ValueError("cannot set frame which has no data")
        if self.shape == ():
            raise TypeError(
                f"scalar '{self.__class__.__name__}' frame object "
                "does not support item assignment"
            )
        if self._data.__class__ is not value._data.__class__:
            raise TypeError(
                "can only set from object of same class: "
                f"{self._data.__class__.__name__} vs. {value._data.__class__.__name__}"
            )
        if self._data._differentials:
            # Can this ever occur? (Same class but different differential keys).
            # This exception is not tested since it is not clear how to generate it.
            if self._data._differentials.keys() != value._data._differentials.keys():
                raise ValueError("setitem value must have same differentials")
            for key, self_diff in self._data._differentials.items():
                if self_diff.__class__ is not value._data._differentials[key].__class__:
                    raise TypeError(
                        "can only set from object of same class: "
                        f"{self_diff.__class__.__name__} vs. "
                        f"{value._data._differentials[key].__class__.__name__}"
                    )
        # Set representation data
        self._data[item] = value._data
        # Frame attributes required to be identical by is_equivalent_frame,
        # no need to set them here.
        self.cache.clear()
    def insert(self, obj, values, axis=0):
        # Thin delegation to the info descriptor; the full docstring is copied
        # from CoordinateFrameInfo._insert below so help() shows the contract.
        return self.info._insert(obj, values, axis)
    insert.__doc__ = CoordinateFrameInfo._insert.__doc__
def __dir__(self):
"""
Override the builtin `dir` behavior to include representation
names.
TODO: dynamic representation transforms (i.e. include cylindrical et al.).
"""
return sorted(
set(super().__dir__())
| set(self.representation_component_names)
| set(self.get_representation_component_names("s"))
)
    def __getattr__(self, attr):
        """
        Allow access to attributes on the representation and differential as
        found via ``self.get_representation_component_names``.

        TODO: We should handle dynamic representation transforms here (e.g.,
        `.cylindrical`) instead of defining properties as below.
        """
        # attr == '_representation' is likely from the hasattr() test in the
        # representation property which is used for
        # self.representation_component_names.
        #
        # Prevent infinite recursion here.
        if attr.startswith("_"):
            return self.__getattribute__(attr)  # Raise AttributeError.
        # Positional components (e.g. 'ra', 'dec') resolve via the frame's
        # representation class.
        repr_names = self.representation_component_names
        if attr in repr_names:
            if self._data is None:
                # this raises the "no data" error by design - doing it this way means we
                # don't have to replicate the error message here.
                self.data  # noqa: B018
            rep = self.represent_as(self.representation_type, in_frame_units=True)
            return getattr(rep, repr_names[attr])
        # Velocity components (e.g. 'pm_ra_cosdec') resolve via the 's'
        # differential class.
        diff_names = self.get_representation_component_names("s")
        if attr in diff_names:
            if self._data is None:
                self.data  # noqa: B018  # see above.
            # TODO: this doesn't work for the case when there is only
            # unitspherical information. The differential_type gets set to the
            # default_differential, which expects full information, so the
            # units don't work out
            rep = self.represent_as(
                in_frame_units=True, **self.get_representation_cls(None)
            )
            return getattr(rep.differentials["s"], diff_names[attr])
        return self.__getattribute__(attr)  # Raise AttributeError.
def __setattr__(self, attr, value):
# Don't slow down access of private attributes!
if not attr.startswith("_"):
if hasattr(self, "representation_info"):
repr_attr_names = set()
for representation_attr in self.representation_info.values():
repr_attr_names.update(representation_attr["names"])
if attr in repr_attr_names:
raise AttributeError(f"Cannot set any frame attribute {attr}")
super().__setattr__(attr, value)
def __eq__(self, value):
"""Equality operator for frame.
This implements strict equality and requires that the frames are
equivalent and that the representation data are exactly equal.
"""
if not isinstance(value, BaseCoordinateFrame):
return NotImplemented
is_equiv = self.is_equivalent_frame(value)
if self._data is None and value._data is None:
# For Frame with no data, == compare is same as is_equivalent_frame()
return is_equiv
if not is_equiv:
raise TypeError(
"cannot compare: objects must have equivalent frames: "
f"{self.replicate_without_data()} vs. {value.replicate_without_data()}"
)
if (value._data is None) != (self._data is None):
raise ValueError(
"cannot compare: one frame has data and the other does not"
)
return self._data == value._data
def __ne__(self, value):
return np.logical_not(self == value)
    def _prepare_unit_sphere_coords(
        self,
        other: Union["BaseCoordinateFrame", "SkyCoord"],
        origin_mismatch: Literal["ignore", "warn", "error"],
    ) -> tuple[Longitude, Latitude, Longitude, Latitude]:
        """Return ``(lon1, lat1, lon2, lat2)`` of self and *other* in this frame.

        ``origin_mismatch`` controls what happens when bringing *other* into
        this frame is not a pure rotation (so on-sky angles may differ from
        the ones measured in *other*'s own frame): ``"ignore"`` transforms
        silently, ``"warn"`` emits `NonRotationTransformationWarning`, and
        ``"error"`` raises `NonRotationTransformationError`.
        """
        other_frame = getattr(other, "frame", other)
        # Transforms composed only of (static/dynamic) rotation matrices
        # preserve angular separations, so no warning/error is needed then.
        if not (
            origin_mismatch == "ignore"
            or self.is_equivalent_frame(other_frame)
            or all(
                isinstance(comp, (StaticMatrixTransform, DynamicMatrixTransform))
                for comp in frame_transform_graph.get_transform(
                    type(self), type(other_frame)
                ).transforms
            )
        ):
            if origin_mismatch == "warn":
                warnings.warn(NonRotationTransformationWarning(self, other_frame))
            elif origin_mismatch == "error":
                raise NonRotationTransformationError(self, other_frame)
            else:
                raise ValueError(
                    f"{origin_mismatch=} is invalid. Allowed values are 'ignore', "
                    "'warn' or 'error'."
                )
        self_sph = self.represent_as(r.UnitSphericalRepresentation)
        other_sph = other_frame.transform_to(self).represent_as(
            r.UnitSphericalRepresentation
        )
        return self_sph.lon, self_sph.lat, other_sph.lon, other_sph.lat
def position_angle(self, other: Union["BaseCoordinateFrame", "SkyCoord"]) -> Angle:
"""Compute the on-sky position angle to another coordinate.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
`~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``, measured East from North. If either ``self`` or
``other`` contain arrays, this will be an array following the
appropriate `numpy` broadcasting rules.
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import ICRS, SkyCoord
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = ICRS(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).to(u.deg)
<Angle 90. deg>
>>> c2.position_angle(c1).to(u.deg)
<Angle 270. deg>
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).to(u.deg) # doctest: +FLOAT_CMP
<Angle 44.995636455344844 deg>
"""
return position_angle(*self._prepare_unit_sphere_coords(other, "ignore"))
def separation(
self,
other: Union["BaseCoordinateFrame", "SkyCoord"],
*,
origin_mismatch: Literal["ignore", "warn", "error"] = "warn",
) -> Angle:
"""
Computes on-sky separation between this coordinate and another.
For more on how to use this (and related) functionality, see the
examples in :ref:`astropy-coordinates-separations-matching`.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The coordinate to get the separation to.
origin_mismatch : {"warn", "ignore", "error"}, keyword-only
If the ``other`` coordinates are in a different frame then they
will have to be transformed, and if the transformation is not a
pure rotation then ``self.separation(other)`` can be
different from ``other.separation(self)``. With
``origin_mismatch="warn"`` (default) the transformation is
always performed, but a warning is emitted if it is not a
pure rotation. If ``origin_mismatch="ignore"`` then the
required transformation is always performed without warnings.
If ``origin_mismatch="error"`` then only transformations
that are pure rotations are allowed.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
return Angle(
angular_separation(
*self._prepare_unit_sphere_coords(other, origin_mismatch)
),
unit=u.degree,
)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :ref:`astropy-coordinates-separations-matching`.
Parameters
----------
other : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The coordinate system to get the distance to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
if isinstance(self.data, r.UnitSphericalRepresentation):
raise ValueError(
"This object does not have a distance; cannot compute 3d separation."
)
# do this first just in case the conversion somehow creates a distance
other = getattr(other, "frame", other).transform_to(self)
if isinstance(other, r.UnitSphericalRepresentation):
raise ValueError(
"The other object does not have a distance; "
"cannot compute 3d separation."
)
# drop the differentials to ensure they don't do anything odd in the
# subtraction
dist = (
self.data.without_differentials().represent_as(r.CartesianRepresentation)
- other.data.without_differentials().represent_as(r.CartesianRepresentation)
).norm()
return dist if dist.unit == u.one else Distance(dist)
@property
def cartesian(self):
"""
Shorthand for a cartesian representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("cartesian", in_frame_units=True)
@property
def cylindrical(self):
"""
Shorthand for a cylindrical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("cylindrical", in_frame_units=True)
@property
def spherical(self):
"""
Shorthand for a spherical representation of the coordinates in this
object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("spherical", in_frame_units=True)
@property
def sphericalcoslat(self):
"""
Shorthand for a spherical representation of the positional data and a
`~astropy.coordinates.SphericalCosLatDifferential` for the velocity
data in this object.
"""
# TODO: if representations are updated to use a full transform graph,
# the representation aliases should not be hard-coded like this
return self.represent_as("spherical", "sphericalcoslat", in_frame_units=True)
@property
def velocity(self):
"""
Shorthand for retrieving the Cartesian space-motion as a
`~astropy.coordinates.CartesianDifferential` object.
This is equivalent to calling ``self.cartesian.differentials['s']``.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
return self.cartesian.differentials["s"]
@property
def proper_motion(self):
"""
Shorthand for the two-dimensional proper motion as a
`~astropy.units.Quantity` object with angular velocity units. In the
returned `~astropy.units.Quantity`, ``axis=0`` is the longitude/latitude
dimension so that ``.proper_motion[0]`` is the longitudinal proper
motion and ``.proper_motion[1]`` is latitudinal. The longitudinal proper
motion already includes the cos(latitude) term.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
sph = self.represent_as("spherical", "sphericalcoslat", in_frame_units=True)
pm_lon = sph.differentials["s"].d_lon_coslat
pm_lat = sph.differentials["s"].d_lat
return (
np.stack((pm_lon.value, pm_lat.to(pm_lon.unit).value), axis=0) * pm_lon.unit
)
@property
def radial_velocity(self):
"""
Shorthand for the radial or line-of-sight velocity as a
`~astropy.units.Quantity` object.
"""
if "s" not in self.data.differentials:
raise ValueError(
"Frame has no associated velocity (Differential) data information."
)
sph = self.represent_as("spherical", in_frame_units=True)
return sph.differentials["s"].d_distance
def to_table(self) -> QTable:
"""
Convert this |BaseFrame| to a |QTable|.
Any attributes that have the same length as the |BaseFrame| will be
converted to columns of the |QTable|. All other attributes will be
recorded as metadata.
Returns
-------
`~astropy.table.QTable`
A |QTable| containing the data of this |BaseFrame|.
Examples
--------
>>> from astropy.coordinates import ICRS
>>> coord = ICRS(ra=[40, 70]*u.deg, dec=[0, -20]*u.deg)
>>> t = coord.to_table()
>>> t
<QTable length=2>
ra dec
deg deg
float64 float64
------- -------
40.0 0.0
70.0 -20.0
>>> t.meta
{'representation_type': 'spherical'}
"""
columns = {}
metadata = {}
# Record attributes that have the same length as self as columns in the
# table, and the other attributes as table metadata. This matches
# table.serialize._represent_mixin_as_column().
for key, value in self.info._represent_as_dict().items():
if getattr(value, "shape", ())[:1] == (len(self),):
columns[key] = value
else:
metadata[key] = value
return QTable(columns, meta=metadata)
| BaseCoordinateFrame |
python | python-poetry__poetry | src/poetry/inspection/info.py | {
"start": 1544,
"end": 19367
} | class ____:
    def __init__(
        self,
        *,
        name: str | None = None,
        version: str | None = None,
        summary: str | None = None,
        requires_dist: list[str] | None = None,
        requires_python: str | None = None,
        files: Sequence[Mapping[str, str]] | None = None,
        yanked: str | bool = False,
        cache_version: str | None = None,
    ) -> None:
        """Container for package metadata gathered from a distribution or cache.

        :param name: Distribution name.
        :param version: Distribution version string.
        :param summary: Short package description.
        :param requires_dist: PEP 508 requirement strings.
        :param requires_python: ``Requires-Python`` version specifier.
        :param files: File records (e.g. name/hash mappings) for the package.
        :param yanked: Yank reason string, or a plain boolean yank flag.
        :param cache_version: Version stamp of the cache format this record
            was loaded with, if any.
        """
        self.name = name
        self.version = version
        self.summary = summary
        self.requires_dist = requires_dist
        self.requires_python = requires_python
        self.files = files or []
        self.yanked = yanked
        self._cache_version = cache_version
        # Source information is filled in by the factory methods (e.g.
        # _from_distribution), not by callers.
        self._source_type: str | None = None
        self._source_url: str | None = None
        self._source_reference: str | None = None
@property
def cache_version(self) -> str | None:
return self._cache_version
def update(self, other: PackageInfo) -> PackageInfo:
self.name = other.name or self.name
self.version = other.version or self.version
self.summary = other.summary or self.summary
self.requires_dist = other.requires_dist or self.requires_dist
self.requires_python = other.requires_python or self.requires_python
self.files = other.files or self.files
self._cache_version = other.cache_version or self._cache_version
return self
def asdict(self) -> dict[str, Any]:
"""
Helper method to convert package info into a dictionary used for caching.
"""
return {
"name": self.name,
"version": self.version,
"summary": self.summary,
"requires_dist": self.requires_dist,
"requires_python": self.requires_python,
"files": self.files,
"yanked": self.yanked,
"_cache_version": self._cache_version,
}
@classmethod
def load(cls, data: dict[str, Any]) -> PackageInfo:
"""
Helper method to load data from a dictionary produced by `PackageInfo.asdict()`.
:param data: Data to load. This is expected to be a `dict` object output by
`asdict()`.
"""
cache_version = data.pop("_cache_version", None)
return cls(cache_version=cache_version, **data)
    def to_package(
        self, name: str | None = None, root_dir: Path | None = None
    ) -> Package:
        """
        Create a new `poetry.core.packages.package.Package` instance using metadata from
        this instance.

        :param name: Name to use for the package, if not specified name from this
            instance is used.
        :param root_dir: Optional root directory to use for the package. If set,
            dependency strings will be parsed relative to this directory.
        """
        name = name or self.name
        if not name:
            raise RuntimeError(f"Unable to create package with no name for {root_dir}")
        if not self.version:
            # The version could not be determined, so we raise an error since it is
            # mandatory.
            raise RuntimeError(f"Unable to retrieve the package version for {name}")
        package = Package(
            name=name,
            version=self.version,
            source_type=self._source_type,
            source_url=self._source_url,
            source_reference=self._source_reference,
            yanked=self.yanked,
        )
        if self.summary is not None:
            package.description = self.summary
        package.root_dir = root_dir
        package.python_versions = self.requires_python or "*"
        package.files = self.files
        # If this is a local poetry project, we can extract "richer" requirement
        # information, eg: development requirements etc.
        if root_dir is not None:
            path = root_dir
        elif self._source_type == "directory" and self._source_url is not None:
            path = Path(self._source_url)
        else:
            path = None
        if path is not None:
            poetry_package = self._get_poetry_package(path=path)
            if poetry_package:
                # Short-circuit: take extras and requirements straight from
                # the poetry project instead of parsing requires_dist.
                package.extras = poetry_package.extras
                for dependency in poetry_package.requires:
                    package.add_dependency(dependency)
                return package
        # Fall back to parsing the PEP 508 requirement strings, de-duplicating
        # and grouping extra-gated requirements as we go.
        seen_requirements = set()
        package_extras: dict[NormalizedName, list[Dependency]] = {}
        for req in self.requires_dist or []:
            try:
                # Attempt to parse the PEP-508 requirement string
                dependency = Dependency.create_from_pep_508(req, relative_to=root_dir)
            except InvalidMarkerError:
                # Invalid marker, We strip the markers hoping for the best
                logger.warning(
                    "Stripping invalid marker (%s) found in %s-%s dependencies",
                    req,
                    package.name,
                    package.version,
                )
                req = req.split(";")[0]
                dependency = Dependency.create_from_pep_508(req, relative_to=root_dir)
            except InvalidRequirementError:
                # Unable to parse requirement so we skip it
                logger.warning(
                    "Invalid requirement (%s) found in %s-%s dependencies, skipping",
                    req,
                    package.name,
                    package.version,
                )
                continue
            if dependency.in_extras:
                # this dependency is required by an extra package
                for extra in dependency.in_extras:
                    if extra not in package_extras:
                        # this is the first time we encounter this extra for this
                        # package
                        package_extras[extra] = []
                    package_extras[extra].append(dependency)
            req = dependency.to_pep_508(with_extras=True)
            if req not in seen_requirements:
                package.add_dependency(dependency)
                seen_requirements.add(req)
        package.extras = package_extras
        return package
@classmethod
def _requirements_from_distribution(
cls,
dist: pkginfo.BDist | pkginfo.SDist | pkginfo.Wheel,
) -> list[str] | None:
"""
Helper method to extract package requirements from a `pkginfo.Distribution`
instance.
:param dist: The distribution instance to extract requirements from.
"""
# If the distribution lists requirements, we use those.
#
# If the distribution does not list requirements, but the metadata is new enough
# to specify that this is because there definitely are none: then we return an
# empty list.
#
# If there is a requires.txt, we use that.
if dist.requires_dist:
return list(dist.requires_dist)
if dist.metadata_version is not None:
metadata_version = Version.parse(dist.metadata_version)
if (
metadata_version >= DYNAMIC_METADATA_VERSION
and "Requires-Dist" not in dist.dynamic
):
return []
requires = Path(dist.filename) / "requires.txt"
if requires.exists():
text = requires.read_text(encoding="utf-8")
requirements = parse_requires(text)
return requirements
return None
@classmethod
def _from_distribution(
cls, dist: pkginfo.BDist | pkginfo.SDist | pkginfo.Wheel
) -> PackageInfo:
"""
Helper method to parse package information from a `pkginfo.Distribution`
instance.
:param dist: The distribution instance to parse information from.
"""
# If the METADATA version is greater than the highest supported version,
# pkginfo prints a warning and tries to parse the fields from the highest
# known version. Assuming that METADATA versions adhere to semver,
# this should be safe for minor updates.
if not dist.metadata_version or dist.metadata_version.split(".")[0] not in {
v.split(".")[0] for v in pkginfo.distribution.HEADER_ATTRS
}:
raise ValueError(f"Unknown metadata version: {dist.metadata_version}")
requirements = cls._requirements_from_distribution(dist)
info = cls(
name=dist.name,
version=dist.version,
summary=dist.summary,
requires_dist=requirements,
requires_python=dist.requires_python,
)
info._source_type = "file"
info._source_url = Path(dist.filename).resolve().as_posix()
return info
@classmethod
def _from_sdist_file(cls, path: Path) -> PackageInfo:
"""
Helper method to parse package information from an sdist file. We attempt to
first inspect the file using `pkginfo.SDist`. If this does not provide us with
package requirements, we extract the source and handle it as a directory.
:param path: The sdist file to parse information from.
"""
info = None
with contextlib.suppress(ValueError):
sdist = pkginfo.SDist(str(path))
info = cls._from_distribution(sdist)
if info is not None and info.requires_dist is not None:
# we successfully retrieved dependencies from sdist metadata
return info
# Still not dependencies found
# So, we unpack and introspect
suffix = path.suffix
zip = suffix == ".zip"
if suffix == ".bz2":
suffixes = path.suffixes
if len(suffixes) > 1 and suffixes[-2] == ".tar":
suffix = ".tar.bz2"
elif not zip:
suffix = ".tar.gz"
with temporary_directory() as tmp_str:
tmp = Path(tmp_str)
extractall(source=path, dest=tmp, zip=zip)
# a little bit of guess work to determine the directory we care about
elements = list(tmp.glob("*"))
if len(elements) == 1 and elements[0].is_dir():
sdist_dir = elements[0]
else:
sdist_dir = tmp / path.name.rstrip(suffix)
if not sdist_dir.is_dir():
sdist_dir = tmp
# now this is an unpacked directory we know how to deal with
new_info = cls.from_directory(path=sdist_dir)
new_info._source_type = "file"
new_info._source_url = path.resolve().as_posix()
if not info:
return new_info
return info.update(new_info)
@staticmethod
def _find_dist_info(path: Path) -> Iterator[Path]:
"""
Discover all `*.*-info` directories in a given path.
:param path: Path to search.
"""
pattern = "**/*.*-info"
# Sometimes pathlib will fail on recursive symbolic links, so we need to work
# around it and use the glob module instead. Note that this does not happen with
# pathlib2 so it's safe to use it for Python < 3.4.
directories = glob.iglob(path.joinpath(pattern).as_posix(), recursive=True)
for d in directories:
yield Path(d)
@classmethod
def from_metadata(cls, metadata: RawMetadata) -> PackageInfo:
"""
Create package information from core metadata.
:param metadata: raw metadata
"""
return cls(
name=metadata.get("name"),
version=metadata.get("version"),
summary=metadata.get("summary"),
requires_dist=metadata.get("requires_dist"),
requires_python=metadata.get("requires_python"),
)
@classmethod
def from_metadata_directory(cls, path: Path) -> PackageInfo | None:
"""
Helper method to parse package information from an unpacked metadata directory.
:param path: The metadata directory to parse information from.
"""
if path.suffix in {".dist-info", ".egg-info"}:
directories = [path]
else:
directories = list(cls._find_dist_info(path=path))
dist: pkginfo.BDist | pkginfo.SDist | pkginfo.Wheel
for directory in directories:
try:
if directory.suffix == ".egg-info":
dist = pkginfo.UnpackedSDist(directory.as_posix())
elif directory.suffix == ".dist-info":
dist = pkginfo.Wheel(directory.as_posix())
else:
continue
break
except ValueError:
continue
else:
try:
# handle PKG-INFO in unpacked sdist root
dist = pkginfo.UnpackedSDist(path.as_posix())
except ValueError:
return None
return cls._from_distribution(dist=dist)
@classmethod
def from_package(cls, package: Package) -> PackageInfo:
"""
Helper method to inspect a `Package` object, in order to generate package info.
:param package: This must be a poetry package instance.
"""
requires = {dependency.to_pep_508() for dependency in package.requires}
for extra_requires in package.extras.values():
for dependency in extra_requires:
requires.add(dependency.to_pep_508())
return cls(
name=package.name,
version=str(package.version),
summary=package.description,
requires_dist=list(requires),
requires_python=package.python_versions,
files=package.files,
yanked=package.yanked_reason if package.yanked else False,
)
@staticmethod
def _get_poetry_package(path: Path) -> ProjectPackage | None:
# Note: we ignore any setup.py file at this step
# TODO: add support for handling non-poetry PEP-517 builds
if PyProjectTOML(path.joinpath("pyproject.toml")).is_poetry_project():
with contextlib.suppress(RuntimeError):
return Factory().create_poetry(path).package
return None
@classmethod
def from_directory(cls, path: Path) -> PackageInfo:
"""
Generate package information from a package source directory. If introspection
of all available metadata fails, the package is attempted to be built in an
isolated environment so as to generate required metadata.
:param path: Path to generate package information from.
"""
project_package = cls._get_poetry_package(path)
info: PackageInfo | None
if project_package:
info = cls.from_package(project_package)
else:
info = cls.from_metadata_directory(path)
if not info or info.requires_dist is None:
try:
info = get_pep517_metadata(path)
except PackageInfoError:
if not info:
raise
# we discovered PkgInfo but no requirements were listed
info._source_type = "directory"
info._source_url = path.as_posix()
return info
@classmethod
def from_sdist(cls, path: Path) -> PackageInfo:
"""
Gather package information from an sdist file, packed or unpacked.
:param path: Path to an sdist file or unpacked directory.
"""
if path.is_file():
return cls._from_sdist_file(path=path)
# if we get here then it is neither an sdist instance nor a file
# so, we assume this is an directory
return cls.from_directory(path=path)
@classmethod
def from_wheel(cls, path: Path) -> PackageInfo:
"""
Gather package information from a wheel.
:param path: Path to wheel.
"""
try:
wheel = pkginfo.Wheel(str(path))
return cls._from_distribution(wheel)
except ValueError as e:
raise PackageInfoError(path, e)
@classmethod
def from_bdist(cls, path: Path) -> PackageInfo:
"""
Gather package information from a bdist (wheel etc.).
:param path: Path to bdist.
"""
if path.suffix == ".whl":
return cls.from_wheel(path=path)
try:
bdist = pkginfo.BDist(str(path))
return cls._from_distribution(bdist)
except ValueError as e:
raise PackageInfoError(path, e)
@classmethod
def from_path(cls, path: Path) -> PackageInfo:
"""
Gather package information from a given path (bdist, sdist, directory).
:param path: Path to inspect.
"""
try:
return cls.from_bdist(path=path)
except PackageInfoError:
return cls.from_sdist(path=path)
@functools.cache
def get_pep517_metadata(path: Path) -> PackageInfo:
"""
Helper method to use PEP-517 library to build and read package metadata.
:param path: Path to package source to build and read metadata for.
"""
info = None
with tempfile.TemporaryDirectory() as dist:
try:
dest = Path(dist)
with isolated_builder(path, "wheel") as builder:
builder.metadata_path(dest)
info = PackageInfo.from_metadata_directory(dest)
except IsolatedBuildBackendError as e:
raise PackageInfoError(path, str(e)) from None
if info:
return info
# if we reach here, everything has failed and all hope is lost
raise PackageInfoError(path, "Exhausted all core metadata sources.")
| PackageInfo |
python | pdm-project__pdm | src/pdm/_types.py | {
"start": 316,
"end": 3569
} | class ____:
"""Private dataclass to be subclassed"""
config_prefix: str
name: str
url: str | None = None
username: str | None = None
password: str | None = dc.field(default=None, repr=False)
verify_ssl: bool | None = None
type: str | None = None
ca_certs: str | None = None
client_cert: str | None = None
client_key: str | None = None
include_packages: list[str] = dc.field(default_factory=list)
exclude_packages: list[str] = dc.field(default_factory=list)
def __post_init__(self) -> None:
self.include_packages = [_normalize_pattern(p) for p in self.include_packages]
self.exclude_packages = [_normalize_pattern(p) for p in self.exclude_packages]
def populate_keyring_auth(self) -> None:
if self.username is None or self.password is None:
from pdm.models.auth import keyring
service = f"pdm-{self.config_prefix}-{self.name}"
auth = keyring.get_auth_info(service, self.username)
if auth is not None:
self.username, self.password = auth
def passive_update(self, other: RepositoryConfig | None = None, **kwargs: Any) -> None:
"""An update method that prefers the existing value over the new one."""
if other is not None:
for k in other.__dataclass_fields__:
v = getattr(other, k)
if getattr(self, k) is None and v is not None:
setattr(self, k, v)
for k, v in kwargs.items():
if getattr(self, k) is None and v is not None:
setattr(self, k, v)
def __rich__(self) -> str:
config_prefix = f"{self.config_prefix}.{self.name}." if self.name else f"{self.config_prefix}."
lines: list[str] = []
self.populate_keyring_auth()
if self.url:
lines.append(f"[primary]{config_prefix}url[/] = {self.url}")
if self.username:
lines.append(f"[primary]{config_prefix}username[/] = {self.username}")
if self.password:
lines.append(f"[primary]{config_prefix}password[/] = [i]<hidden>[/]")
if self.verify_ssl is not None:
lines.append(f"[primary]{config_prefix}verify_ssl[/] = {self.verify_ssl}")
if self.type:
lines.append(f"[primary]{config_prefix}type[/] = {self.type}")
if self.ca_certs:
lines.append(f"[primary]{config_prefix}ca_certs[/] = {self.ca_certs}")
return "\n".join(lines)
@property
def url_with_credentials(self) -> HiddenText:
from urllib.parse import urlsplit, urlunsplit
from pdm.utils import expand_env_vars_in_auth, hide_url
assert self.url is not None
self.populate_keyring_auth()
if not self.username or not self.password:
return hide_url(expand_env_vars_in_auth(self.url))
parsed = urlsplit(self.url)
*_, netloc = parsed.netloc.rpartition("@")
netloc = f"{self.username}:{self.password}@{netloc}"
url = urlunsplit((parsed.scheme, netloc, parsed.path, parsed.query, parsed.fragment))
return hide_url(url)
RequirementDict = Union[str, dict[str, Union[str, bool]]]
CandidateInfo = tuple[list[str], str, str]
| RepositoryConfig |
python | wandb__wandb | wandb/sdk/data_types/_dtypes.py | {
"start": 10146,
"end": 10457
} | class ____(Type):
"""A disallowed type.
Assignments to a InvalidType result in a Never Type. InvalidType is basically the
invalid case.
"""
name = "invalid"
types: t.ClassVar[t.List[type]] = []
def assign_type(self, wb_type: "Type") -> "InvalidType":
return self
| InvalidType |
python | great-expectations__great_expectations | great_expectations/execution_engine/sparkdf_batch_data.py | {
"start": 90,
"end": 353
} | class ____(BatchData):
def __init__(self, execution_engine, dataframe) -> None:
super().__init__(execution_engine=execution_engine)
self._dataframe = dataframe
@property
def dataframe(self):
return self._dataframe
| SparkDFBatchData |
python | wntrblm__nox | nox/logger.py | {
"start": 1700,
"end": 2511
} | class ____(ColoredFormatter):
def __init__(
self,
*,
datefmt: Any = None,
style: Any = None,
log_colors: Any = None,
reset: bool = True,
secondary_log_colors: Any = None,
add_timestamp: bool = False,
) -> None:
super().__init__(
fmt=_get_format(colorlog=True, add_timestamp=add_timestamp),
datefmt=datefmt,
style=style,
log_colors=log_colors,
reset=reset,
secondary_log_colors=secondary_log_colors,
)
self._simple_fmt = logging.Formatter("%(message)s")
def format(self, record: Any) -> str:
if record.levelname == "OUTPUT":
return self._simple_fmt.format(record)
return super().format(record)
| NoxColoredFormatter |
python | facebook__pyre-check | scripts/explore_pysa_models.py | {
"start": 903,
"end": 1272
} | class ____(NamedTuple):
models: Dict[str, FilePosition] = {}
issues: Dict[str, List[FilePosition]] = {}
call_graphs: Dict[str, FilePosition] = {}
def update(self, index: "AnalysisOutputIndex") -> None:
self.models.update(index.models)
self.issues.update(index.issues)
self.call_graphs.update(index.call_graphs)
| AnalysisOutputIndex |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/glue.py | {
"start": 23999,
"end": 30534
} | class ____(AwsBaseOperator[GlueDataQualityHook]):
"""
Starts a recommendation run that is used to generate rules, Glue Data Quality analyzes the data and comes up with recommendations for a potential ruleset.
Recommendation runs are automatically deleted after 90 days.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GlueDataQualityRuleRecommendationRunOperator`
:param datasource: The data source (Glue table) associated with this run. (templated)
:param role: IAM role supplied for job execution. (templated)
:param number_of_workers: The number of G.1X workers to be used in the run. (default: 5)
:param timeout: The timeout for a run in minutes. This is the maximum time that a run can consume resources
before it is terminated and enters TIMEOUT status. (default: 2,880)
:param show_results: Displays the recommended ruleset (a set of rules), when recommendation run completes. (default: True)
:param recommendation_run_kwargs: Extra arguments for recommendation run. (templated)
:param wait_for_completion: Whether to wait for job to stop. (default: True)
:param waiter_delay: Time in seconds to wait between status checks. (default: 60)
:param waiter_max_attempts: Maximum number of attempts to check for job completion. (default: 20)
:param deferrable: If True, the operator will wait asynchronously for the job to stop.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = GlueDataQualityHook
template_fields: Sequence[str] = aws_template_fields(
"datasource",
"role",
"recommendation_run_kwargs",
)
template_fields_renderers = {"datasource": "json", "recommendation_run_kwargs": "json"}
ui_color = "#ededed"
def __init__(
self,
*,
datasource: dict,
role: str,
number_of_workers: int = 5,
timeout: int = 2880,
show_results: bool = True,
recommendation_run_kwargs: dict[str, Any] | None = None,
wait_for_completion: bool = True,
waiter_delay: int = 60,
waiter_max_attempts: int = 20,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.datasource = datasource
self.role = role
self.number_of_workers = number_of_workers
self.timeout = timeout
self.show_results = show_results
self.recommendation_run_kwargs = recommendation_run_kwargs or {}
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context) -> str:
glue_table = self.datasource.get("GlueTable", {})
if not glue_table.get("DatabaseName") or not glue_table.get("TableName"):
raise AttributeError("DataSource glue table must have DatabaseName and TableName")
self.log.info("Submitting AWS Glue data quality recommendation run with %s", self.datasource)
try:
response = self.hook.conn.start_data_quality_rule_recommendation_run(
DataSource=self.datasource,
Role=self.role,
NumberOfWorkers=self.number_of_workers,
Timeout=self.timeout,
**self.recommendation_run_kwargs,
)
except ClientError as error:
raise AirflowException(
f"AWS Glue data quality recommendation run failed: {error.response['Error']['Message']}"
)
recommendation_run_id = response["RunId"]
message_description = (
f"AWS Glue data quality recommendation run RunId: {recommendation_run_id} to complete."
)
if self.deferrable:
self.log.info("Deferring %s", message_description)
self.defer(
trigger=GlueDataQualityRuleRecommendationRunCompleteTrigger(
recommendation_run_id=recommendation_run_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
elif self.wait_for_completion:
self.log.info("Waiting for %s", message_description)
self.hook.get_waiter("data_quality_rule_recommendation_run_complete").wait(
RunId=recommendation_run_id,
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
self.log.info(
"AWS Glue data quality recommendation run completed RunId: %s", recommendation_run_id
)
if self.show_results:
self.hook.log_recommendation_results(run_id=recommendation_run_id)
else:
self.log.info(message_description)
return recommendation_run_id
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Error: AWS Glue data quality rule recommendation run: {validated_event}")
if self.show_results:
self.hook.log_recommendation_results(run_id=validated_event["recommendation_run_id"])
return validated_event["recommendation_run_id"]
| GlueDataQualityRuleRecommendationRunOperator |
python | huggingface__transformers | src/transformers/models/blip/modeling_blip_text.py | {
"start": 33016,
"end": 39385
} | class ____(BlipTextPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"cls.predictions.decoder.bias": "cls.predictions.bias",
"cls.predictions.decoder.weight": "bert.embeddings.word_embeddings.weight",
}
def __init__(self, config):
super().__init__(config)
self.bert = BlipTextModel(config, add_pooling_layer=False)
self.cls = BlipTextOnlyMLMHead(config)
self.label_smoothing = config.label_smoothing
def get_input_embeddings(self):
return self.bert.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
self.bert.set_input_embeddings(new_embeddings)
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
return_logits: Optional[bool] = False,
is_decoder: Optional[bool] = True,
reduction: Optional[str] = "mean",
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor`, *optional*): Sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is
configured as a decoder.
encoder_attention_mask (`torch.FloatTensor`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (`torch.LongTensor`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`
past_key_values (`Cache`, *optional*):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
is_decoder=is_decoder,
cache_position=cache_position,
)
hidden_states = outputs[0]
# Only compute necessary logits
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
prediction_scores = self.cls(hidden_states[:, slice_indices, :])
if return_logits:
return prediction_scores[:, :-1, :].contiguous()
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous().to(shifted_prediction_scores.device)
loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=self.label_smoothing)
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if reduction == "none":
lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
# Overwrite -- hardcoded key return (`is_decoder=True`)
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
**model_kwargs,
)
model_inputs["is_decoder"] = True
return model_inputs
__all__ = ["BlipTextModel", "BlipTextLMHeadModel", "BlipTextPreTrainedModel"]
| BlipTextLMHeadModel |
python | huggingface__transformers | tests/models/markuplm/test_feature_extraction_markuplm.py | {
"start": 1792,
"end": 3616
} | class ____(FeatureExtractionSavingTestMixin, unittest.TestCase):
feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None
def setUp(self):
self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
@property
def feat_extract_dict(self):
return self.feature_extract_tester.prepare_feat_extract_dict()
def test_call(self):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class()
# Test not batched input
html_string = get_html_strings()[0]
encoding = feature_extractor(html_string)
# fmt: off
expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes, expected_nodes)
self.assertEqual(encoding.xpaths, expected_xpaths)
# Test batched
html_strings = get_html_strings()
encoding = feature_extractor(html_strings)
# fmt: off
expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes), 2)
self.assertEqual(len(encoding.xpaths), 2)
self.assertEqual(encoding.nodes, expected_nodes)
self.assertEqual(encoding.xpaths, expected_xpaths)
| MarkupLMFeatureExtractionTest |
python | jazzband__django-pipeline | pipeline/compressors/jsmin.py | {
"start": 50,
"end": 306
} | class ____(CompressorBase):
"""
JS compressor based on the Python library jsmin
(http://pypi.python.org/pypi/jsmin/).
"""
def compress_js(self, js):
from jsmin import jsmin # noqa: PLC0415
return jsmin(js)
| JSMinCompressor |
python | pytorch__pytorch | torch/profiler/_pattern_matcher.py | {
"start": 4473,
"end": 4843
} | class ____(Pattern):
def __init__(
self, prof: profile, name: str, should_benchmark: bool = False
) -> None:
super().__init__(prof, should_benchmark)
self.description = f"Matched Name Event: {name}"
self.name = name
def match(self, event: _ProfilerEvent):
return re.search(self.name, event.name) is not None
| NamePattern |
python | bokeh__bokeh | tests/unit/bokeh/models/test_plots.py | {
"start": 11817,
"end": 12017
} | class ____(BaseTwinAxis):
"""Test whether extra x and y ranges can be categorical"""
@staticmethod
def get_range_instance():
return FactorRange('foo', 'bar')
| TestCategoricalTwinAxis |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 869877,
"end": 870613
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for PullRequest."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("PullRequestEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("PullRequest"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| PullRequestConnection |
python | django__django | tests/update_only_fields/models.py | {
"start": 1016,
"end": 1084
} | class ____(Employee):
class Meta:
proxy = True
| ProxyEmployee |
python | django__django | tests/i18n/patterns/tests.py | {
"start": 17808,
"end": 18344
} | class ____(URLTestCaseBase):
"""
#21579 - LocaleMiddleware should respect the script prefix.
"""
def test_language_prefix_with_script_prefix(self):
prefix = "/script_prefix"
with override_script_prefix(prefix):
response = self.client.get(
"/prefixed/", headers={"accept-language": "en"}, SCRIPT_NAME=prefix
)
self.assertRedirects(
response, "%s/en/prefixed/" % prefix, target_status_code=404
)
| URLRedirectWithScriptAliasTests |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF052_0.py | {
"start": 697,
"end": 1229
} | class ____:
_valid_private_cls_attr = 1
print(_valid_private_cls_attr)
def __init__(self):
self._valid_private_ins_attr = 2
print(self._valid_private_ins_attr)
def _valid_method(self):
return self._valid_private_ins_attr
def method(arg):
_valid_unused_var = arg
return
def fun(x):
_ = 1
__ = 2
___ = 3
if x == 1:
return _
if x == 2:
return __
if x == 3:
return ___
return x
###########
# Incorrect
###########
| ClassOk |
python | oauthlib__oauthlib | tests/test_common.py | {
"start": 3070,
"end": 4311
} | class ____(TestCase):
def test_generate_timestamp(self):
timestamp = generate_timestamp()
self.assertIsInstance(timestamp, str)
self.assertTrue(int(timestamp))
self.assertGreater(int(timestamp), 1331672335)
def test_generate_nonce(self):
"""Ping me (ib-lundgren) when you discover how to test randomness."""
nonce = generate_nonce()
for i in range(50):
self.assertNotEqual(nonce, generate_nonce())
def test_generate_token(self):
token = generate_token()
self.assertEqual(len(token), 30)
token = generate_token(length=44)
self.assertEqual(len(token), 44)
token = generate_token(length=6, chars="python")
self.assertEqual(len(token), 6)
for c in token:
self.assertIn(c, "python")
def test_generate_client_id(self):
client_id = generate_client_id()
self.assertEqual(len(client_id), 30)
client_id = generate_client_id(length=44)
self.assertEqual(len(client_id), 44)
client_id = generate_client_id(length=6, chars="python")
self.assertEqual(len(client_id), 6)
for c in client_id:
self.assertIn(c, "python")
| GeneratorTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/generic2.py | {
"start": 166,
"end": 440
} | class ____(Generic[_T1]):
pass
# This should generate an error.
def func1(a: _T1) -> Generic[_T1]: ...
# This should generate an error.
def func2(p1: Generic[_T1]) -> _T1: ...
TA1 = Generic
# This should generate an error.
def func3(a: _T1) -> TA1[_T1]: ...
| ClassA |
python | tensorflow__tensorflow | tensorflow/python/distribute/collective_all_reduce_strategy_test.py | {
"start": 30590,
"end": 31860
} | class ____(test.TestCase):
def testIsInstance(self):
# It's not uncommon for people to special case MultiWorkerMirroredStrategy,
# so we need to make sure isinstance check works for combinations between
# the experimental and non-experimental endpoints.
strategy = CollectiveAllReduceStrategy()
experimental_strategy = _CollectiveAllReduceStrategyExperimental()
self.assertIsInstance(strategy, CollectiveAllReduceStrategy)
self.assertIsInstance(strategy, _CollectiveAllReduceStrategyExperimental)
self.assertIsInstance(experimental_strategy, CollectiveAllReduceStrategy)
self.assertIsInstance(experimental_strategy,
_CollectiveAllReduceStrategyExperimental)
def testName(self):
self.assertEqual(CollectiveAllReduceStrategy.__name__,
'CollectiveAllReduceStrategy')
self.assertEqual(_CollectiveAllReduceStrategyExperimental.__name__,
'CollectiveAllReduceStrategy')
def _replica_id_f32():
return math_ops.cast(
distribute_lib.get_replica_context()
.replica_id_in_sync_group, dtypes.float32)
if __name__ == '__main__':
# TODO(b/172304955): enable logical devices.
test_util.main(config_logical_devices=False)
| ExperimentalCompatibilityTest |
python | ray-project__ray | rllib/core/models/torch/primitives.py | {
"start": 22684,
"end": 23303
} | class ____(nn.Module):
def __init__(self, num_features, **kwargs):
super().__init__()
self.layer_norm = nn.LayerNorm(num_features, **kwargs)
def forward(self, x):
# x shape: (B, dim, dim, channels).
batch_size, channels, h, w = x.size()
# Reshape to (batch_size * height * width, channels) for LayerNorm
x = x.permute(0, 2, 3, 1).reshape(-1, channels)
# Apply LayerNorm
x = self.layer_norm(x)
# Reshape back to (batch_size, dim, dim, channels)
x = x.reshape(batch_size, h, w, channels).permute(0, 3, 1, 2)
return x
| LayerNorm1D |
python | allegroai__clearml | clearml/utilities/plotlympl/mplexporter/renderers/vega_renderer.py | {
"start": 163,
"end": 3992
} | class ____(Renderer):
def open_figure(self, fig: Any, props: Dict[str, Union[int, float]]) -> None:
self.props = props
self.figwidth = int(props["figwidth"] * props["dpi"])
self.figheight = int(props["figheight"] * props["dpi"])
self.data = []
self.scales = []
self.axes = []
self.marks = []
def open_axes(self, ax: Any, props: Dict[str, Any]) -> None:
if len(self.axes) > 0:
warnings.warn("multiple axes not yet supported")
self.axes = [
dict(type="x", scale="x", ticks=10),
dict(type="y", scale="y", ticks=10),
]
self.scales = [
dict(
name="x",
domain=props["xlim"],
type="linear",
range="width",
),
dict(
name="y",
domain=props["ylim"],
type="linear",
range="height",
),
]
def draw_line(
self,
data: List[Tuple[float, float]],
coordinates: str,
style: Dict[str, Union[str, float]],
label: str,
mplobj: Optional[Any] = None,
) -> None:
if coordinates != "data":
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({"name": dataname, "values": [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append(
{
"type": "line",
"from": {"data": dataname},
"properties": {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"stroke": {"value": style["color"]},
"strokeOpacity": {"value": style["alpha"]},
"strokeWidth": {"value": style["linewidth"]},
}
},
}
)
def draw_markers(
self,
data: List[Tuple[float, float]],
coordinates: str,
style: Dict[str, Any],
label: str,
mplobj: Optional[Any] = None,
) -> None:
if coordinates != "data":
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({"name": dataname, "values": [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append(
{
"type": "symbol",
"from": {"data": dataname},
"properties": {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"fill": {"value": style["facecolor"]},
"fillOpacity": {"value": style["alpha"]},
"stroke": {"value": style["edgecolor"]},
"strokeOpacity": {"value": style["alpha"]},
"strokeWidth": {"value": style["edgewidth"]},
}
},
}
)
def draw_text(
self,
text: str,
position: Any,
coordinates: str,
style: Any,
text_type: Optional[str] = None,
mplobj: Optional[Any] = None,
) -> None:
if text_type == "xlabel":
self.axes[0]["title"] = text
elif text_type == "ylabel":
self.axes[1]["title"] = text
| VegaRenderer |
python | pytorch__pytorch | torch/distributed/checkpoint/_extension.py | {
"start": 660,
"end": 1665
} | class ____(abc.ABC):
"""
Extensions provide modular additions to functionality within distributed checkpointing,
which affect the layout or format of the written artifacts. Extensions may be
built into pytorch, or provided externally.
When writing, the caller provides a list of extension instances of the appropriate
type. Each extension can output a descriptor which is used to reconstitute the
extension at read-time.
"""
@staticmethod
@abc.abstractmethod
def registry_name() -> str:
"""
See ExtensionRegistry.from_descriptor_list
"""
@staticmethod
@abc.abstractmethod
def from_descriptor(version: str) -> "Extension":
"""
See ExtensionRegistry.from_descriptor_list
"""
@abc.abstractmethod
def get_descriptor(self) -> str:
"""
Return descriptor name to be included in metadata. The form should be
"extension_name[@local-domain][/version]".
"""
| Extension |
python | ray-project__ray | rllib/models/torch/misc.py | {
"start": 11397,
"end": 11632
} | class ____(nn.Module):
"""Standard module that reshapes/views a tensor"""
def __init__(self, shape: List):
super().__init__()
self.shape = shape
def forward(self, x):
return x.view(*self.shape)
| Reshape |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/associationproxy.py | {
"start": 49665,
"end": 56383
} | class ____(_AssociationSingleItem[_T], MutableSequence[_T]):
"""Generic, converting, list-to-list proxy."""
col: MutableSequence[_T]
def _set(self, object_: Any, value: _T) -> None:
self.setter(object_, value)
@overload
def __getitem__(self, index: int) -> _T: ...
@overload
def __getitem__(self, index: slice) -> MutableSequence[_T]: ...
def __getitem__(
self, index: Union[int, slice]
) -> Union[_T, MutableSequence[_T]]:
if not isinstance(index, slice):
return self._get(self.col[index])
else:
return [self._get(member) for member in self.col[index]]
@overload
def __setitem__(self, index: int, value: _T) -> None: ...
@overload
def __setitem__(self, index: slice, value: Iterable[_T]) -> None: ...
def __setitem__(
self, index: Union[int, slice], value: Union[_T, Iterable[_T]]
) -> None:
if not isinstance(index, slice):
self._set(self.col[index], cast("_T", value))
else:
if index.stop is None:
stop = len(self)
elif index.stop < 0:
stop = len(self) + index.stop
else:
stop = index.stop
step = index.step or 1
start = index.start or 0
rng = list(range(index.start or 0, stop, step))
sized_value = list(value)
if step == 1:
for i in rng:
del self[start]
i = start
for item in sized_value:
self.insert(i, item)
i += 1
else:
if len(sized_value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s"
% (len(sized_value), len(rng))
)
for i, item in zip(rng, value):
self._set(self.col[i], item)
@overload
def __delitem__(self, index: int) -> None: ...
@overload
def __delitem__(self, index: slice) -> None: ...
def __delitem__(self, index: Union[slice, int]) -> None:
del self.col[index]
def __contains__(self, value: object) -> bool:
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __iter__(self) -> Iterator[_T]:
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or
just use the underlying collection directly from its property
on the parent.
"""
for member in self.col:
yield self._get(member)
return
def append(self, value: _T) -> None:
col = self.col
item = self._create(value)
col.append(item)
def count(self, value: Any) -> int:
count = 0
for v in self:
if v == value:
count += 1
return count
def extend(self, values: Iterable[_T]) -> None:
for v in values:
self.append(v)
def insert(self, index: int, value: _T) -> None:
self.col[index:index] = [self._create(value)]
def pop(self, index: int = -1) -> _T:
return self.getter(self.col.pop(index))
def remove(self, value: _T) -> None:
for i, val in enumerate(self):
if val == value:
del self.col[i]
return
raise ValueError("value not in list")
def reverse(self) -> NoReturn:
"""Not supported, use reversed(mylist)"""
raise NotImplementedError()
def sort(self) -> NoReturn:
"""Not supported, use sorted(mylist)"""
raise NotImplementedError()
def clear(self) -> None:
del self.col[0 : len(self.col)]
def __eq__(self, other: object) -> bool:
return list(self) == other
def __ne__(self, other: object) -> bool:
return list(self) != other
def __lt__(self, other: List[_T]) -> bool:
return list(self) < other
def __le__(self, other: List[_T]) -> bool:
return list(self) <= other
def __gt__(self, other: List[_T]) -> bool:
return list(self) > other
def __ge__(self, other: List[_T]) -> bool:
return list(self) >= other
def __add__(self, other: List[_T]) -> List[_T]:
try:
other = list(other)
except TypeError:
return NotImplemented
return list(self) + other
def __radd__(self, other: List[_T]) -> List[_T]:
try:
other = list(other)
except TypeError:
return NotImplemented
return other + list(self)
def __mul__(self, n: SupportsIndex) -> List[_T]:
if not isinstance(n, int):
return NotImplemented
return list(self) * n
def __rmul__(self, n: SupportsIndex) -> List[_T]:
if not isinstance(n, int):
return NotImplemented
return n * list(self)
def __iadd__(self, iterable: Iterable[_T]) -> Self:
self.extend(iterable)
return self
def __imul__(self, n: SupportsIndex) -> Self:
# unlike a regular list *=, proxied __imul__ will generate unique
# backing objects for each copy. *= on proxied lists is a bit of
# a stretch anyhow, and this interpretation of the __imul__ contract
# is more plausibly useful than copying the backing objects.
if not isinstance(n, int):
raise NotImplementedError()
if n == 0:
self.clear()
elif n > 1:
self.extend(list(self) * (n - 1))
return self
if typing.TYPE_CHECKING:
# TODO: no idea how to do this without separate "stub"
def index(
self, value: Any, start: int = ..., stop: int = ...
) -> int: ...
else:
def index(self, value: Any, *arg) -> int:
ls = list(self)
return ls.index(value, *arg)
def copy(self) -> List[_T]:
return list(self)
def __repr__(self) -> str:
return repr(list(self))
def __hash__(self) -> NoReturn:
raise TypeError("%s objects are unhashable" % type(self).__name__)
if not typing.TYPE_CHECKING:
for func_name, func in list(locals().items()):
if (
callable(func)
and func.__name__ == func_name
and not func.__doc__
and hasattr(list, func_name)
):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
| _AssociationList |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/pipes/client.py | {
"start": 6357,
"end": 11022
} | class ____(ABC):
@abstractmethod
@contextmanager
def read_messages(self, handler: "PipesMessageHandler") -> Iterator[PipesParams]:
"""A `@contextmanager` that reads messages reported by an external process.
This method should start a thread to continuously read messages from some location
accessible to the external process. It should yield parameters that the external process
can use to direct its message output.
Args:
handler (PipesMessageHandler): The message handler to use to process messages read from
the external process.
Yields:
PipesParams: A dict of parameters that can be used by the external process to determine
where to write messages.
"""
def on_opened(self, opened_payload: PipesOpenedData) -> None:
"""Hook called when the external process has successfully launched and returned an opened
payload. By default, this is a no-op. Specific message readers can override this to consume information
that can only be obtained from the external process.
"""
def on_launched(self, launched_payload: PipesLaunchedData) -> None:
"""Hook that is called if `PipesSession.report_launched()` is called. By default, this
is a no-op. Specific message readers can override this to consume information that is only
available after the external process has been launched.
This hook is not necessarily called in every pipes session. It is useful primarily when we wish to
condition the behavior of the message reader on some parameter that is only available after
external process launch (such as a run id in the external system).
The code calling `open_pipes_session()` is responsible for calling `PipesSession.report_launched()`
if using a message reader that accesses `launched_payload`.
"""
@abstractmethod
def no_messages_debug_text(self) -> str:
"""A message to be displayed when no messages are received from the external process to aid with
debugging.
Example: "Attempted to read messages using a magic portal. Expected PipesMagicPortalMessageWriter
to be explicitly passed to open_dagster_pipes in the external process."
"""
def materialize_result_from_pipes_results(
all_results: Sequence[PipesExecutionResult],
) -> MaterializeResult:
mat_results: list[MaterializeResult] = [
mat_result for mat_result in all_results if isinstance(mat_result, MaterializeResult)
]
check_results: list[AssetCheckResult] = [
check_result for check_result in all_results if isinstance(check_result, AssetCheckResult)
]
if len(mat_results) == 0:
raise DagsterPipesError("No materialization results received from external process.")
if len(mat_results) > 1:
raise DagsterPipesError(
"Multiple materialize results returned with asset keys"
f" {sorted([check.not_none(mr.asset_key).to_user_string() for mr in mat_results])}."
" If you are materializing multiple assets in a pipes invocation, use"
" get_results() instead.",
)
mat_result = next(iter(mat_results))
for check_result in check_results:
if check_result.asset_key:
check.invariant(
mat_result.asset_key == check_result.asset_key,
"Check result specified an asset key that is not part of the returned"
" materialization. If this was deliberate, use get_results() instead.",
)
if check_results:
return mat_result._replace(
check_results=[*(mat_result.check_results or []), *check_results]
)
else:
return mat_result
def _check_result_from_pipes_results(
all_results: Sequence[PipesExecutionResult],
) -> AssetCheckResult:
mat_results: list[MaterializeResult] = [
mat_result for mat_result in all_results if isinstance(mat_result, MaterializeResult)
]
check_results: list[AssetCheckResult] = [
check_result for check_result in all_results if isinstance(check_result, AssetCheckResult)
]
# return the single asset check result if thats what we got
if len(mat_results) == 0 and len(check_results) == 1:
return next(iter(check_results))
# otherwise error
raise DagsterPipesError(
f"Did not find singular AssetCheckResult, got {len(mat_results)} MaterializeResults and"
f" {len(check_results)} AssetCheckResults. Correct the reported results or use"
" get_results() instead.",
)
| PipesMessageReader |
python | keon__algorithms | tests/test_dp.py | {
"start": 945,
"end": 1273
} | class ____(unittest.TestCase):
def test_climb_stairs(self):
self.assertEqual(climb_stairs(2), 2)
self.assertEqual(climb_stairs(10), 89)
def test_climb_stairs_optimized(self):
self.assertEqual(climb_stairs_optimized(2), 2)
self.assertEqual(climb_stairs_optimized(10), 89)
| TestClimbingStairs |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_operator.py | {
"start": 1268,
"end": 1641
} | class ____(object):
def __init__(self, lst):
self.lst = lst
def __len__(self):
return len(self.lst)
def __getitem__(self, i):
return self.lst[i]
def __add__(self, other):
return self.lst + other.lst
def __mul__(self, other):
return self.lst * other
def __rmul__(self, other):
return other * self.lst
| Seq2 |
python | huggingface__transformers | src/transformers/models/persimmon/modeling_persimmon.py | {
"start": 33651,
"end": 33930
} | class ____(GenericForTokenClassification, PersimmonPreTrainedModel): ...
__all__ = [
"PersimmonForCausalLM",
"PersimmonModel",
"PersimmonPreTrainedModel",
"PersimmonForSequenceClassification",
"PersimmonForTokenClassification",
]
| PersimmonForTokenClassification |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 26148,
"end": 26600
} | class ____(ProjectAdminMixin, PrivateViewMixin):
form_class = TranslationForm
def get_success_url(self):
return reverse(
"projects_translations",
args=[self.get_project().slug],
)
def get_form(self, data=None, files=None, **kwargs):
kwargs["parent"] = self.get_project()
kwargs["user"] = self.request.user
return self.form_class(data, files, **kwargs)
| ProjectTranslationsMixin |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/streams.py | {
"start": 1764,
"end": 2223
} | class ____(Filters, GeneratorMixin):
"""
https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-filters/#api-rest-api-3-filter-post
"""
def generate(self):
for index in range(1, 20):
payload = json.dumps(
{"jql": "type = Bug and resolution is empty", "name": f"Test filter {index}", "description": "Lists all open bugs"}
)
self.generate_record(payload)
| FiltersGenerator |
python | kamyu104__LeetCode-Solutions | Python/find-if-path-exists-in-graph.py | {
"start": 2239,
"end": 3058
} | class ____(object):
def validPath(self, n, edges, start, end):
"""
:type n: int
:type edges: List[List[int]]
:type start: int
:type end: int
:rtype: bool
"""
def dfs(adj, start, target):
stk = [start]
lookup = set(stk)
while stk:
pos = stk.pop()
if pos == target:
return True
for nei in reversed(adj[pos]):
if nei in lookup:
continue
lookup.add(nei)
stk.append(nei)
return False
adj = collections.defaultdict(list)
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
return dfs(adj, start, end)
| Solution3 |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 1446,
"end": 1553
} | class ____(ApeException):
"""
Raised when a problem occurs when using accounts.
"""
| AccountsError |
python | plotly__plotly.py | plotly/graph_objs/_scatterternary.py | {
"start": 215,
"end": 71953
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "scatterternary"
_valid_props = {
"a",
"asrc",
"b",
"bsrc",
"c",
"cliponaxis",
"connectgaps",
"csrc",
"customdata",
"customdatasrc",
"fill",
"fillcolor",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hoveron",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"line",
"marker",
"meta",
"metasrc",
"mode",
"name",
"opacity",
"selected",
"selectedpoints",
"showlegend",
"stream",
"subplot",
"sum",
"text",
"textfont",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatefallback",
"texttemplatesrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
}
@property
def a(self):
"""
Sets the quantity of component `a` in each data point. If `a`,
`b`, and `c` are all provided, they need not be normalized,
only the relative values matter. If only two arrays are
provided they must be normalized to match `ternary<i>.sum`.
The 'a' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["a"]
@a.setter
def a(self, val):
self["a"] = val
@property
def asrc(self):
"""
Sets the source reference on Chart Studio Cloud for `a`.
The 'asrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["asrc"]
@asrc.setter
def asrc(self, val):
self["asrc"] = val
@property
def b(self):
"""
Sets the quantity of component `a` in each data point. If `a`,
`b`, and `c` are all provided, they need not be normalized,
only the relative values matter. If only two arrays are
provided they must be normalized to match `ternary<i>.sum`.
The 'b' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["b"]
@b.setter
def b(self, val):
self["b"] = val
@property
def bsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `b`.
The 'bsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bsrc"]
@bsrc.setter
def bsrc(self, val):
self["bsrc"] = val
@property
def c(self):
"""
Sets the quantity of component `a` in each data point. If `a`,
`b`, and `c` are all provided, they need not be normalized,
only the relative values matter. If only two arrays are
provided they must be normalized to match `ternary<i>.sum`.
The 'c' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["c"]
@c.setter
def c(self, val):
self["c"] = val
@property
def cliponaxis(self):
"""
Determines whether or not markers and text nodes are clipped
about the subplot axes. To show markers and text nodes above
axis lines and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
The 'cliponaxis' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cliponaxis"]
@cliponaxis.setter
def cliponaxis(self, val):
self["cliponaxis"] = val
@property
def connectgaps(self):
"""
Determines whether or not gaps (i.e. {nan} or missing values)
in the provided data arrays are connected.
The 'connectgaps' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["connectgaps"]
@connectgaps.setter
def connectgaps(self, val):
self["connectgaps"] = val
@property
def csrc(self):
"""
Sets the source reference on Chart Studio Cloud for `c`.
The 'csrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["csrc"]
@csrc.setter
def csrc(self, val):
self["csrc"] = val
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def fill(self):
"""
Sets the area to fill with a solid color. Use with `fillcolor`
if not "none". scatterternary has a subset of the options
available to scatter. "toself" connects the endpoints of the
trace (or each segment of the trace if it has gaps) into a
closed shape. "tonext" fills the space between two traces if
one completely encloses the other (eg consecutive contour
lines), and behaves like "toself" if there is no trace before
it. "tonext" should not be used if one trace does not enclose
the other.
The 'fill' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'toself', 'tonext']
Returns
-------
Any
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
@property
def fillcolor(self):
"""
Sets the fill color. Defaults to a half-transparent variant of
the line color, marker color, or marker line color, whichever
is available.
The 'fillcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["fillcolor"]
@fillcolor.setter
def fillcolor(self, val):
self["fillcolor"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['a', 'b', 'c', 'text', 'name'] joined with '+' characters
(e.g. 'a+b')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.scatterternary.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hoveron(self):
"""
Do the hover effects highlight individual points (markers or
line points) or do they highlight filled regions? If the fill
is "toself" or "tonext" and there are no markers or text, then
the default is "fills", otherwise it is "points".
The 'hoveron' property is a flaglist and may be specified
as a string containing:
- Any combination of ['points', 'fills'] joined with '+' characters
(e.g. 'points+fills')
Returns
-------
Any
"""
return self["hoveron"]
@hoveron.setter
def hoveron(self, val):
self["hoveron"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example `<extra>%{fullData.name}</extra>`.
To hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Sets hover text elements associated with each (a,b,c) point. If
a single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order
to the the data points in (a,b,c). To be seen, trace
`hoverinfo` must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.scatterternary.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.scatterternary.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.scatterternary.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def mode(self):
"""
Determines the drawing mode for this scatter trace. If the
provided `mode` includes "text" then the `text` elements appear
at the coordinates. Otherwise, the `text` elements appear on
hover. If there are less than 20 points and the trace is not
stacked then the default is "lines+markers". Otherwise,
"lines".
The 'mode' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lines', 'markers', 'text'] joined with '+' characters
(e.g. 'lines+markers')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["mode"]
@mode.setter
def mode(self, val):
self["mode"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Returns
-------
plotly.graph_objs.scatterternary.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.scatterternary.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def subplot(self):
"""
Sets a reference between this trace's data coordinates and a
ternary subplot. If "ternary" (the default value), the data
refer to `layout.ternary`. If "ternary2", the data refer to
`layout.ternary2`, and so on.
The 'subplot' property is an identifier of a particular
subplot, of type 'ternary', that may be specified as the string 'ternary'
optionally followed by an integer >= 1
(e.g. 'ternary', 'ternary1', 'ternary2', 'ternary3', etc.)
Returns
-------
str
"""
return self["subplot"]
@subplot.setter
def subplot(self, val):
self["subplot"] = val
@property
def sum(self):
"""
The number each triplet should sum to, if only two of `a`, `b`,
and `c` are provided. This overrides `ternary<i>.sum` to
normalize this specific trace, but does not affect the values
displayed on the axes. 0 (or missing) means to use
ternary<i>.sum
The 'sum' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sum"]
@sum.setter
def sum(self, val):
self["sum"] = val
@property
def text(self):
"""
Sets text elements associated with each (a,b,c) point. If a
single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order
to the the data points in (a,b,c). If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set, these
elements will be seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textfont(self):
"""
Sets the text font.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.scatterternary.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def textposition(self):
"""
Sets the positions of the `text` elements with respects to the
(x,y) coordinates.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
@property
def textpositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`textposition`.
The 'textpositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textpositionsrc"]
@textpositionsrc.setter
def textpositionsrc(self, val):
self["textpositionsrc"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def texttemplate(self):
"""
Template string used for rendering the information text that
appears on points. Note that this will override `textinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`) are
available. Finally, the template string has access to variables
`a`, `b`, `c` and `text`.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
@property
def texttemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'texttemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["texttemplatefallback"]
@texttemplatefallback.setter
def texttemplatefallback(self, val):
self["texttemplatefallback"] = val
@property
def texttemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
The 'texttemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Returns
-------
plotly.graph_objs.scatterternary.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
    @property
    def type(self):
        # Read-only trace type; read from the private property store
        # rather than the public mapping interface.
        return self._props["type"]
    @property
    def _prop_descriptions(self):
        # Plain-text descriptions of every trace property; this string is
        # runtime content interpolated into generated docstrings, so it must
        # not be edited by hand (it is produced by the codegen pipeline).
        return """\
        a
            Sets the quantity of component `a` in each data point.
            If `a`, `b`, and `c` are all provided, they need not be
            normalized, only the relative values matter. If only
            two arrays are provided they must be normalized to
            match `ternary<i>.sum`.
        asrc
            Sets the source reference on Chart Studio Cloud for
            `a`.
        b
            Sets the quantity of component `a` in each data point.
            If `a`, `b`, and `c` are all provided, they need not be
            normalized, only the relative values matter. If only
            two arrays are provided they must be normalized to
            match `ternary<i>.sum`.
        bsrc
            Sets the source reference on Chart Studio Cloud for
            `b`.
        c
            Sets the quantity of component `a` in each data point.
            If `a`, `b`, and `c` are all provided, they need not be
            normalized, only the relative values matter. If only
            two arrays are provided they must be normalized to
            match `ternary<i>.sum`.
        cliponaxis
            Determines whether or not markers and text nodes are
            clipped about the subplot axes. To show markers and
            text nodes above axis lines and tick labels, make sure
            to set `xaxis.layer` and `yaxis.layer` to *below
            traces*.
        connectgaps
            Determines whether or not gaps (i.e. {nan} or missing
            values) in the provided data arrays are connected.
        csrc
            Sets the source reference on Chart Studio Cloud for
            `c`.
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
        fill
            Sets the area to fill with a solid color. Use with
            `fillcolor` if not "none". scatterternary has a subset
            of the options available to scatter. "toself" connects
            the endpoints of the trace (or each segment of the
            trace if it has gaps) into a closed shape. "tonext"
            fills the space between two traces if one completely
            encloses the other (eg consecutive contour lines), and
            behaves like "toself" if there is no trace before it.
            "tonext" should not be used if one trace does not
            enclose the other.
        fillcolor
            Sets the fill color. Defaults to a half-transparent
            variant of the line color, marker color, or marker line
            color, whichever is available.
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
        hoverlabel
            :class:`plotly.graph_objects.scatterternary.Hoverlabel`
            instance or dict with compatible properties
        hoveron
            Do the hover effects highlight individual points
            (markers or line points) or do they highlight filled
            regions? If the fill is "toself" or "tonext" and there
            are no markers or text, then the default is "fills",
            otherwise it is "points".
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}" as well as %{xother}, {%_xother},
            {%_xother_}, {%xother_}. When showing info for several
            points, "xother" will be added to those with different
            x positions from the first point. An underscore before
            or after "(x|y)other" will add a space on that side,
            only when this field is shown. Numbers are formatted
            using d3-format's syntax %{variable:d3-format}, for
            example "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Variables that can't be found
            will be replaced with the specifier. For example, a
            template of "data: %{x}, %{y}" will result in a value
            of "data: 1, %{y}" if x is 1 and y is missing.
            Variables with an undefined value will be replaced with
            the fallback value. The variables available in
            `hovertemplate` are the ones emitted as event data
            described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, all attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            `<extra>%{fullData.name}</extra>`. To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
        hovertemplatefallback
            Fallback string that's displayed when a variable
            referenced in a template is missing. If the boolean
            value 'false' is passed in, the specifier with the
            missing variable will be displayed.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `hovertemplate`.
        hovertext
            Sets hover text elements associated with each (a,b,c)
            point. If a single string, the same string appears over
            all the data points. If an array of strings, the items
            are mapped in order to the the data points in (a,b,c).
            To be seen, trace `hoverinfo` must contain a "text"
            flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
        legend
            Sets the reference to a legend to show this trace in.
            References to these legends are "legend", "legend2",
            "legend3", etc. Settings for these legends are set in
            the layout, under `layout.legend`, `layout.legend2`,
            etc.
        legendgroup
            Sets the legend group for this trace. Traces and shapes
            part of the same legend group hide/show at the same
            time when toggling legend items.
        legendgrouptitle
            :class:`plotly.graph_objects.scatterternary.Legendgroup
            title` instance or dict with compatible properties
        legendrank
            Sets the legend rank for this trace. Items and groups
            with smaller ranks are presented on top/left side while
            with "reversed" `legend.traceorder` they are on
            bottom/right side. The default legendrank is 1000, so
            that you can use ranks less than 1000 to place certain
            items before all unranked items, and ranks greater than
            1000 to go after all unranked items. When having
            unranked or equal rank items shapes would be displayed
            after traces i.e. according to their order in data and
            layout.
        legendwidth
            Sets the width (in px or fraction) of the legend for
            this trace.
        line
            :class:`plotly.graph_objects.scatterternary.Line`
            instance or dict with compatible properties
        marker
            :class:`plotly.graph_objects.scatterternary.Marker`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        mode
            Determines the drawing mode for this scatter trace. If
            the provided `mode` includes "text" then the `text`
            elements appear at the coordinates. Otherwise, the
            `text` elements appear on hover. If there are less than
            20 points and the trace is not stacked then the default
            is "lines+markers". Otherwise, "lines".
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        selected
            :class:`plotly.graph_objects.scatterternary.Selected`
            instance or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array values means no selection all where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        stream
            :class:`plotly.graph_objects.scatterternary.Stream`
            instance or dict with compatible properties
        subplot
            Sets a reference between this trace's data coordinates
            and a ternary subplot. If "ternary" (the default
            value), the data refer to `layout.ternary`. If
            "ternary2", the data refer to `layout.ternary2`, and so
            on.
        sum
            The number each triplet should sum to, if only two of
            `a`, `b`, and `c` are provided. This overrides
            `ternary<i>.sum` to normalize this specific trace, but
            does not affect the values displayed on the axes. 0 (or
            missing) means to use ternary<i>.sum
        text
            Sets text elements associated with each (a,b,c) point.
            If a single string, the same string appears over all
            the data points. If an array of strings, the items are
            mapped in order to the the data points in (a,b,c). If
            trace `hoverinfo` contains a "text" flag and
            "hovertext" is not set, these elements will be seen in
            the hover labels.
        textfont
            Sets the text font.
        textposition
            Sets the positions of the `text` elements with respects
            to the (x,y) coordinates.
        textpositionsrc
            Sets the source reference on Chart Studio Cloud for
            `textposition`.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
        texttemplate
            Template string used for rendering the information text
            that appears on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}".
            https://github.com/d3/d3-format/tree/v1.4.5#d3-format
            for details on the formatting syntax. Dates are
            formatted using d3-time-format's syntax
            %{variable|d3-time-format}, for example "Day:
            %{2019-01-01|%A}". https://github.com/d3/d3-time-
            format/tree/v2.2.3#locale_format for details on the
            date formatting syntax. Variables that can't be found
            will be replaced with the specifier. For example, a
            template of "data: %{x}, %{y}" will result in a value
            of "data: 1, %{y}" if x is 1 and y is missing.
            Variables with an undefined value will be replaced with
            the fallback value. All attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Finally, the template string has access
            to variables `a`, `b`, `c` and `text`.
        texttemplatefallback
            Fallback string that's displayed when a variable
            referenced in a template is missing. If the boolean
            value 'false' is passed in, the specifier with the
            missing variable will be displayed.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            `texttemplate`.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.scatterternary.Unselected`
            instance or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        """
def __init__(
self,
arg=None,
a=None,
asrc=None,
b=None,
bsrc=None,
c=None,
cliponaxis=None,
connectgaps=None,
csrc=None,
customdata=None,
customdatasrc=None,
fill=None,
fillcolor=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hoveron=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
line=None,
marker=None,
meta=None,
metasrc=None,
mode=None,
name=None,
opacity=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
subplot=None,
sum=None,
text=None,
textfont=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatefallback=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
**kwargs,
):
"""
Construct a new Scatterternary object
Provides similar functionality to the "scatter" type but on a
ternary phase diagram. The data is provided by at least two
arrays out of `a`, `b`, `c` triplets.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.Scatterternary`
a
Sets the quantity of component `a` in each data point.
If `a`, `b`, and `c` are all provided, they need not be
normalized, only the relative values matter. If only
two arrays are provided they must be normalized to
match `ternary<i>.sum`.
asrc
Sets the source reference on Chart Studio Cloud for
`a`.
b
Sets the quantity of component `a` in each data point.
If `a`, `b`, and `c` are all provided, they need not be
normalized, only the relative values matter. If only
two arrays are provided they must be normalized to
match `ternary<i>.sum`.
bsrc
Sets the source reference on Chart Studio Cloud for
`b`.
c
Sets the quantity of component `a` in each data point.
If `a`, `b`, and `c` are all provided, they need not be
normalized, only the relative values matter. If only
two arrays are provided they must be normalized to
match `ternary<i>.sum`.
cliponaxis
Determines whether or not markers and text nodes are
clipped about the subplot axes. To show markers and
text nodes above axis lines and tick labels, make sure
to set `xaxis.layer` and `yaxis.layer` to *below
traces*.
connectgaps
Determines whether or not gaps (i.e. {nan} or missing
values) in the provided data arrays are connected.
csrc
Sets the source reference on Chart Studio Cloud for
`c`.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
fill
Sets the area to fill with a solid color. Use with
`fillcolor` if not "none". scatterternary has a subset
of the options available to scatter. "toself" connects
the endpoints of the trace (or each segment of the
trace if it has gaps) into a closed shape. "tonext"
fills the space between two traces if one completely
encloses the other (eg consecutive contour lines), and
behaves like "toself" if there is no trace before it.
"tonext" should not be used if one trace does not
enclose the other.
fillcolor
Sets the fill color. Defaults to a half-transparent
variant of the line color, marker color, or marker line
color, whichever is available.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.scatterternary.Hoverlabel`
instance or dict with compatible properties
hoveron
Do the hover effects highlight individual points
(markers or line points) or do they highlight filled
regions? If the fill is "toself" or "tonext" and there
are no markers or text, then the default is "fills",
otherwise it is "points".
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (a,b,c)
point. If a single string, the same string appears over
all the data points. If an array of strings, the items
are mapped in order to the the data points in (a,b,c).
To be seen, trace `hoverinfo` must contain a "text"
flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.scatterternary.Legendgroup
title` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.scatterternary.Line`
instance or dict with compatible properties
marker
:class:`plotly.graph_objects.scatterternary.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
mode
Determines the drawing mode for this scatter trace. If
the provided `mode` includes "text" then the `text`
elements appear at the coordinates. Otherwise, the
`text` elements appear on hover. If there are less than
20 points and the trace is not stacked then the default
is "lines+markers". Otherwise, "lines".
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
selected
:class:`plotly.graph_objects.scatterternary.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.scatterternary.Stream`
instance or dict with compatible properties
subplot
Sets a reference between this trace's data coordinates
and a ternary subplot. If "ternary" (the default
value), the data refer to `layout.ternary`. If
"ternary2", the data refer to `layout.ternary2`, and so
on.
sum
The number each triplet should sum to, if only two of
`a`, `b`, and `c` are provided. This overrides
`ternary<i>.sum` to normalize this specific trace, but
does not affect the values displayed on the axes. 0 (or
missing) means to use ternary<i>.sum
text
Sets text elements associated with each (a,b,c) point.
If a single string, the same string appears over all
the data points. If an array of strings, the items are
mapped in order to the the data points in (a,b,c). If
trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textfont
Sets the text font.
textposition
Sets the positions of the `text` elements with respects
to the (x,y) coordinates.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
`textposition`.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `a`, `b`, `c` and `text`.
texttemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.scatterternary.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Scatterternary
"""
super().__init__("scatterternary")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Scatterternary
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Scatterternary`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("a", arg, a)
self._set_property("asrc", arg, asrc)
self._set_property("b", arg, b)
self._set_property("bsrc", arg, bsrc)
self._set_property("c", arg, c)
self._set_property("cliponaxis", arg, cliponaxis)
self._set_property("connectgaps", arg, connectgaps)
self._set_property("csrc", arg, csrc)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("fill", arg, fill)
self._set_property("fillcolor", arg, fillcolor)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hoveron", arg, hoveron)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("line", arg, line)
self._set_property("marker", arg, marker)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("mode", arg, mode)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("selected", arg, selected)
self._set_property("selectedpoints", arg, selectedpoints)
self._set_property("showlegend", arg, showlegend)
self._set_property("stream", arg, stream)
self._set_property("subplot", arg, subplot)
self._set_property("sum", arg, sum)
self._set_property("text", arg, text)
self._set_property("textfont", arg, textfont)
self._set_property("textposition", arg, textposition)
self._set_property("textpositionsrc", arg, textpositionsrc)
self._set_property("textsrc", arg, textsrc)
self._set_property("texttemplate", arg, texttemplate)
self._set_property("texttemplatefallback", arg, texttemplatefallback)
self._set_property("texttemplatesrc", arg, texttemplatesrc)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("unselected", arg, unselected)
self._set_property("visible", arg, visible)
self._props["type"] = "scatterternary"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Scatterternary |
python | getsentry__sentry | tests/sentry/integrations/msteams/notifications/test_issue_alert.py | {
"start": 720,
"end": 4783
} | class ____(MSTeamsActivityNotificationTest):
def test_issue_alert_user(self, mock_send_card: MagicMock) -> None:
"""Test that issue alerts are sent to a MS Teams user."""
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "Member",
"targetIdentifier": str(self.user.id),
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
notification_uuid = str(uuid.uuid4())
notification = AlertRuleNotification(
Notification(event=event, rule=rule),
ActionTargetType.MEMBER,
self.user.id,
notification_uuid=notification_uuid,
)
with self.tasks():
notification.send()
mock_send_card.assert_called_once()
args, kwargs = mock_send_card.call_args
assert args[0] == "some_conversation_id"
body = args[1]["body"]
assert 4 == len(body)
assert (
f"Alert triggered [{rule.label}](http://testserver/organizations/baz/alerts/rules/bar/{rule.id}/details/)"
== body[0]["text"]
)
assert (
f"[{event.title}](http://testserver/organizations/{self.organization.slug}/issues/{event.group_id}/?referrer=issue\\_alert-msteams&notification\\_uuid={notification_uuid})"
== body[1]["text"]
)
assert (
f"{self.project.slug} | [Notification Settings](http://testserver/settings/account/notifications/alerts/?referrer=issue\\_alert-msteams-user&notification\\_uuid={notification_uuid}&organizationId={self.organization.id})"
== body[3]["columns"][1]["items"][0]["text"]
)
def test_issue_alert_owners(self, mock_send_card: MagicMock) -> None:
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
action_data = {
"id": "sentry.mail.actions.NotifyEmailAction",
"targetType": "IssueOwners",
"targetIdentifier": "",
}
rule = Rule.objects.create(
project=self.project,
label="ja rule",
data={
"match": "all",
"actions": [action_data],
},
)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
notification_uuid = str(uuid.uuid4())
notification = AlertRuleNotification(
Notification(event=event, rule=rule),
ActionTargetType.ISSUE_OWNERS,
self.user.id,
notification_uuid=notification_uuid,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
with self.tasks():
notification.send()
mock_send_card.assert_called_once()
args, kwargs = mock_send_card.call_args
assert args[0] == "some_conversation_id"
body = args[1]["body"]
assert 4 == len(body)
assert (
f"Alert triggered [{rule.label}](http://testserver/organizations/baz/alerts/rules/bar/{rule.id}/details/)"
== body[0]["text"]
)
assert (
f"[{event.title}](http://testserver/organizations/{self.organization.slug}/issues/{event.group_id}/?referrer=issue\\_alert-msteams&notification\\_uuid={notification_uuid})"
== body[1]["text"]
)
assert (
f"{self.project.slug} | [Notification Settings](http://testserver/settings/account/notifications/alerts/?referrer=issue\\_alert-msteams-user&notification\\_uuid={notification_uuid}&organizationId={self.organization.id})"
== body[3]["columns"][1]["items"][0]["text"]
)
| MSTeamsIssueAlertNotificationTest |
python | scipy__scipy | scipy/fftpack/tests/test_basic.py | {
"start": 3754,
"end": 4124
} | class ____(_TestFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
reason = ("single-precision FFT implementation is partially disabled, "
"until accuracy issues with large prime powers are resolved")
@pytest.mark.xfail(run=False, reason=reason)
def test_notice(self):
pass
| TestSingleFFT |
python | gevent__gevent | src/gevent/_config.py | {
"start": 17590,
"end": 17817
} | class ____(object):
document = False
@property
def kwarg_name(self):
return self.name[5:]
validate = staticmethod(validate_anything)
_convert = staticmethod(convert_str_value_as_is)
| AresSettingMixin |
python | zarr-developers__zarr-python | tests/test_dtype/test_npy/test_int.py | {
"start": 3215,
"end": 4165
} | class ____(BaseTestZDType):
test_cls = Int64
scalar_type = np.int64
valid_dtype = (np.dtype(">i8"), np.dtype("<i8"))
invalid_dtype = (
np.dtype(np.int8),
np.dtype(np.uint16),
np.dtype(np.float64),
)
valid_json_v2 = (
{"name": ">i8", "object_codec_id": None},
{"name": "<i8", "object_codec_id": None},
)
valid_json_v3 = ("int64",)
invalid_json_v2 = (
"|i8",
"int64",
"|f8",
)
invalid_json_v3 = (
"|i8",
"|f8",
{"name": "int64", "configuration": {"endianness": "little"}},
)
scalar_v2_params = ((Int64(), 1), (Int64(), -1), (Int64(), 1.0))
scalar_v3_params = ((Int64(), 1), (Int64(), -1))
cast_value_params = (
(Int64(), 1, np.int64(1)),
(Int64(), -1, np.int64(-1)),
)
invalid_scalar_params = ((Int64(), {"set!"}), (Int64(), ("tuple",)))
item_size_params = (Int64(),)
| TestInt64 |
python | PrefectHQ__prefect | tests/experimental/test_sla.py | {
"start": 8694,
"end": 28890
} | class ____:
@pytest.fixture
def deployment_id(self):
return UUID("89f0ac57-514a-4eb1-a068-dbbf44d2e199")
class TestClientMethodCall:
async def test_create_slas(self, prefect_client, monkeypatch, deployment_id):
monkeypatch.setattr(prefect_client, "server_type", ServerType.CLOUD)
monkeypatch.setattr(
prefect_client,
"apply_slas_for_deployment",
mock.AsyncMock(name="mock_apply_slas_for_deployment"),
)
monkeypatch.setattr(
prefect_client,
"create_deployment",
mock.AsyncMock(
name="mock_create_deployment", return_value=deployment_id
),
)
sla = TimeToCompletionSla(
name="test-sla",
duration=timedelta(minutes=10).total_seconds(),
)
await _create_slas(prefect_client, deployment_id, [sla])
assert prefect_client.apply_slas_for_deployment.called is True
assert (
prefect_client.apply_slas_for_deployment.await_args_list[0].args[0]
== deployment_id
)
assert (
prefect_client.apply_slas_for_deployment.await_args_list[0]
.args[1][0]
.name
== sla.name
)
class TestSlaSyncing:
async def test_initialize_slas(self, deployment_id):
sla_spec = {
"name": "test-sla",
"duration": 1800,
"severity": "high",
}
slas = _initialize_deployment_slas(deployment_id, [sla_spec])
assert slas == [
TimeToCompletionSla(
name="test-sla",
duration=1800,
severity="high",
).set_deployment_id(deployment_id)
]
async def test_initialize_multiple_slas(self):
sla_spec_1 = {
"name": "test-sla-1",
"duration": 1800,
"severity": "high",
}
sla_spec_2 = {
"name": "test-sla-2",
"duration": 3600,
"severity": "critical",
}
deployment_id = uuid4()
slas = _initialize_deployment_slas(deployment_id, [sla_spec_1, sla_spec_2])
assert slas == [
TimeToCompletionSla(
name="test-sla-1",
duration=1800,
severity="high",
).set_deployment_id(deployment_id),
TimeToCompletionSla(
name="test-sla-2",
duration=3600,
severity="critical",
).set_deployment_id(deployment_id),
]
async def test_create_slas(self):
client = mock.AsyncMock()
client.server_type = ServerType.CLOUD
sla_spec = {
"name": "test-sla",
"duration": 1800,
"severity": "high",
}
deployment_id = uuid4()
slas = _initialize_deployment_slas(deployment_id, [sla_spec])
await _create_slas(client, deployment_id, slas)
assert slas[0]._deployment_id == deployment_id
assert slas[0].owner_resource == f"prefect.deployment.{deployment_id}"
client.apply_slas_for_deployment.assert_called_once_with(
deployment_id, slas
)
async def test_sla_creation_orchestrated(
self,
project_dir,
prefect_client,
work_pool,
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"name": "test-name-1",
"work_pool": {
"name": work_pool.name,
},
"sla": [
{
"name": "test-sla",
"duration": 1800,
"severity": "high",
}
],
}
]
expected_slas = _initialize_deployment_slas(
uuid4(), contents["deployments"][0]["sla"]
)
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
) as create_slas:
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy ./flows/hello.py:my_flow -n test-name-1",
expected_code=0,
)
assert create_slas.call_count == 1
client, deployment_id, slas = create_slas.call_args[0]
assert isinstance(client, PrefectClient)
for sla in expected_slas:
sla.set_deployment_id(deployment_id)
assert slas == expected_slas
class TestSlaPassedViaCLI:
@pytest.mark.usefixtures("project_dir")
async def test_json_string_sla(self, docker_work_pool):
client = mock.AsyncMock()
client.server_type = ServerType.CLOUD
sla_spec = {
"name": "test-sla",
"duration": 1800,
"severity": "high",
}
expected_slas = [
TimeToCompletionSla(
name="test-sla",
duration=1800,
severity="high",
)
]
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
) as create_slas:
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name-1 --sla"
f" '{json.dumps(sla_spec)}' -p {docker_work_pool.name}"
),
expected_code=0,
)
assert create_slas.call_count == 1
client, called_deployment_id, slas = create_slas.call_args[0]
assert isinstance(client, PrefectClient)
for sla in expected_slas:
sla.set_deployment_id(called_deployment_id)
assert slas == expected_slas
@pytest.mark.usefixtures("project_dir")
async def test_passing_an_empty_list_calls_create_sla_method(
self, docker_work_pool
):
client = mock.AsyncMock()
client.server_type = ServerType.CLOUD
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
) as create_slas:
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name-1 --sla '[]' -p"
f" {docker_work_pool.name}"
),
expected_code=0,
)
assert create_slas.call_count == 1
_, _, slas = create_slas.call_args[0]
assert slas == []
@pytest.mark.usefixtures("project_dir")
async def test_json_file_sla(self, docker_work_pool):
client = mock.AsyncMock()
client.server_type = ServerType.CLOUD
sla_spec = {
"name": "test-sla",
"duration": 1800,
"severity": "high",
}
with open("sla.json", "w") as f:
json.dump({"sla": [sla_spec]}, f)
expected_slas = [
TimeToCompletionSla(
name="test-sla",
duration=1800,
severity="high",
)
]
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
) as create_slas:
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name-1"
f" --sla sla.json -p {docker_work_pool.name}"
),
expected_code=0,
)
assert create_slas.call_count == 1
client, called_deployment_id, slas = create_slas.call_args[0]
assert isinstance(client, PrefectClient)
for sla in expected_slas:
sla.set_deployment_id(called_deployment_id)
assert slas == expected_slas
@pytest.mark.usefixtures("project_dir")
async def test_yaml_file_sla(self, docker_work_pool):
client = mock.AsyncMock()
client.server_type = ServerType.CLOUD
sla_spec = {
"name": "test-sla",
"duration": 1800,
"severity": "high",
}
with open("sla.yaml", "w") as f:
yaml.safe_dump({"sla": [sla_spec]}, f)
expected_slas = [
TimeToCompletionSla(
name="test-sla",
duration=1800,
severity="high",
)
]
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
) as create_slas:
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name-1"
f" --sla sla.yaml -p {docker_work_pool.name}"
),
expected_code=0,
)
assert create_slas.call_count == 1
client, called_deployment_id, slas = create_slas.call_args[0]
assert isinstance(client, PrefectClient)
for sla in expected_slas:
sla.set_deployment_id(called_deployment_id)
assert slas == expected_slas
@pytest.mark.usefixtures("project_dir")
async def test_passing_empty_list_to_yaml_file_calls_create_sla_method(
self, docker_work_pool
):
with open("sla.yaml", "w") as f:
yaml.safe_dump({"sla": []}, f)
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
) as create_slas:
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name-1"
f" --sla sla.yaml -p {docker_work_pool.name}"
),
expected_code=0,
)
assert create_slas.call_count == 1
_, _, slas = create_slas.call_args[0]
assert slas == []
@pytest.mark.usefixtures("project_dir")
async def test_nested_yaml_file_sla(self, docker_work_pool, tmpdir):
client = mock.AsyncMock()
client.server_type = ServerType.CLOUD
sla_spec = {
"name": "test-sla",
"duration": 1800,
"severity": "high",
}
slas_file = tmpdir.mkdir("my_stuff") / "sla.yaml"
with open(slas_file, "w") as f:
yaml.safe_dump({"sla": [sla_spec]}, f)
expected_slas = [
TimeToCompletionSla(
name="test-sla",
duration=1800,
severity="high",
)
]
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
) as create_slas:
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name-1"
f" --sla my_stuff/sla.yaml -p {docker_work_pool.name}"
),
expected_code=0,
)
assert create_slas.call_count == 1
client, called_deployment_id, slas = create_slas.call_args[0]
assert isinstance(client, PrefectClient)
for sla in expected_slas:
sla.set_deployment_id(called_deployment_id)
assert slas == expected_slas
@pytest.mark.usefixtures("project_dir")
async def test_multiple_sla_flags(self, docker_work_pool):
client = mock.AsyncMock()
client.server_type = ServerType.CLOUD
sla_spec_1 = {
"name": "test-sla-1",
"duration": 1800,
"severity": "high",
}
sla_spec_2 = {
"name": "test-sla-2",
"duration": 3600,
"severity": "critical",
}
with open("sla.yaml", "w") as f:
yaml.safe_dump({"sla": [sla_spec_2]}, f)
expected_slas = [
TimeToCompletionSla(
name="test-sla-1",
duration=1800,
severity="high",
),
TimeToCompletionSla(
name="test-sla-2",
duration=3600,
severity="critical",
),
]
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
) as create_slas:
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name-1 --sla"
f" '{json.dumps(sla_spec_1)}' --sla sla.yaml -p"
f" {docker_work_pool.name}"
),
expected_code=0,
)
assert create_slas.call_count == 1
client, called_deployment_id, slas = create_slas.call_args[0]
assert isinstance(client, PrefectClient)
for sla in expected_slas:
sla.set_deployment_id(called_deployment_id)
assert slas == expected_slas
@pytest.mark.usefixtures("project_dir")
async def test_override_on_sla_conflict(self, docker_work_pool):
client = mock.AsyncMock()
client.server_type = ServerType.CLOUD
cli_sla_spec = {
"name": "cli-sla",
"duration": 1800,
"severity": "high",
}
file_sla_spec = {
"name": "file-sla",
"duration": 1800,
"severity": "high",
}
expected_slas = [
TimeToCompletionSla(
name="cli-sla",
duration=1800,
severity="high",
)
]
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"name": "test-name-1",
"work_pool": {
"name": docker_work_pool.name,
},
"slas": [
file_sla_spec,
],
}
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(contents, f)
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
) as create_slas:
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name-1"
f" --sla '{json.dumps(cli_sla_spec)}'"
),
expected_code=0,
)
client, called_deployment_id, slas = create_slas.call_args[0]
assert isinstance(client, PrefectClient)
assert len(slas) == 1
for sla in expected_slas:
sla.set_deployment_id(called_deployment_id)
assert slas == expected_slas
@pytest.mark.usefixtures("project_dir")
async def test_invalid_sla_parsing(self, docker_work_pool):
client = mock.AsyncMock()
client.server_type = ServerType.CLOUD
invalid_json_str_sla = "{foo: bar, baz: bat}"
invalid_yaml_sla = "invalid.yaml"
with open(invalid_yaml_sla, "w") as f:
f.write("pretty please, let me know if the flow runs for too long")
for invalid_sla in [invalid_json_str_sla, invalid_yaml_sla]:
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name-1"
f" -p {docker_work_pool.name} --sla '{invalid_sla}'"
),
expected_code=1,
expected_output_contains=["Failed to parse SLA"],
)
@pytest.mark.usefixtures("interactive_console")
async def test_slas_saved_to_prefect_yaml(
self,
docker_work_pool,
project_dir,
):
client = mock.AsyncMock()
client.server_type = ServerType.CLOUD
cli_sla_spec = {
"name": "test-sla",
"duration": 1800,
"severity": "high",
}
# ensure file is removed for save to occur
prefect_file = project_dir / "prefect.yaml"
prefect_file.unlink()
with mock.patch(
"prefect.cli.deploy._core._create_slas",
mock.AsyncMock(),
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name-1 -p"
f" {docker_work_pool.name} --sla"
f" '{json.dumps(cli_sla_spec)}'"
f" --prefect-file {prefect_file}"
),
user_input=(
# Decline schedule
"n"
+ readchar.key.ENTER
# Decline remote storage
+ "n"
+ readchar.key.ENTER
# Decline docker build
+ "n"
+ readchar.key.ENTER
# Accept save configuration
+ "y"
+ readchar.key.ENTER
),
expected_code=0,
)
# Read the updated prefect.yaml
with prefect_file.open(mode="r") as f:
contents = yaml.safe_load(f)
assert "deployments" in contents
assert "sla" in contents["deployments"][-1]
assert contents["deployments"][-1]["sla"] == [cli_sla_spec]
| TestDeploymentCLI |
python | getsentry__sentry | src/sentry/search/events/builder/spans_metrics.py | {
"start": 581,
"end": 2793
} | class ____(MetricsQueryBuilder):
requires_organization_condition = True
spans_metrics_builder = True
has_transaction = False
config_class = SpansMetricsDatasetConfig
size_fields = SIZE_FIELDS
column_remapping = {
# We want to remap `message` to `span.description` for the free
# text search use case so that it searches the `span.description`
# when the user performs a free text search
"message": "span.description",
# This is to assist in the eap migration,
# span.description in span metrics is sentry.normalized_description in eap
"sentry.normalized_description": "span.description",
}
@property
def use_default_tags(self) -> bool:
return False
def get_field_type(self, field: str) -> str | None:
if field in self.meta_resolver_map:
return self.meta_resolver_map[field]
if field in ["span.duration", "span.self_time"]:
return "duration"
if unit := self.size_fields.get(field):
return unit
mri = constants.SPAN_METRICS_MAP.get(field)
if mri is not None:
parsed_mri = parse_mri(mri)
if parsed_mri is not None and parsed_mri.unit in constants.RESULT_TYPES:
return parsed_mri.unit
return None
def resolve_select(
self, selected_columns: list[str] | None, equations: list[str] | None
) -> list[SelectType]:
if selected_columns and "transaction" in selected_columns:
self.has_transaction = True
return super().resolve_select(selected_columns, equations)
def resolve_metric_index(self, value: str) -> int | None:
"""Layer on top of the metric indexer so we'll only hit it at most once per value"""
# This check is a bit brittle, and depends on resolve_conditions happening before resolve_select
if value == "transaction":
self.has_transaction = True
if not self.has_transaction and value == constants.SPAN_METRICS_MAP["span.self_time"]:
return super().resolve_metric_index(constants.SELF_TIME_LIGHT)
return super().resolve_metric_index(value)
| SpansMetricsQueryBuilder |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 118736,
"end": 120421
} | class ____:
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
with np.errstate(invalid='ignore'):
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_sign_complex(self):
a = np.array([
np.inf, -np.inf, complex(0, np.inf), complex(0, -np.inf),
complex(np.inf, np.inf), complex(np.inf, -np.inf), # nan
np.nan, complex(0, np.nan), complex(np.nan, np.nan), # nan
0.0, # 0.
3.0, -3.0, -2j, 3.0 + 4.0j, -8.0 + 6.0j
])
out = np.zeros(a.shape, a.dtype)
tgt = np.array([
1., -1., 1j, -1j,
] + [complex(np.nan, np.nan)] * 5 + [
0.0,
1.0, -1.0, -1j, 0.6 + 0.8j, -0.8 + 0.6j])
with np.errstate(invalid='ignore'):
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_(res is out)
assert_equal(res, tgt)
def test_sign_dtype_object(self):
# In reference to github issue #6229
foo = np.array([-.1, 0, .1])
a = np.sign(foo.astype(object))
b = np.sign(foo)
assert_array_equal(a, b)
def test_sign_dtype_nan_object(self):
# In reference to github issue #6229
def test_nan():
foo = np.array([np.nan])
# FIXME: a not used
a = np.sign(foo.astype(object))
assert_raises(TypeError, test_nan)
| TestSign |
python | google__pytype | pytype/convert.py | {
"start": 1143,
"end": 42775
} | class ____(utils.ContextWeakrefMixin):
"""Functions for creating the classes in abstract.py."""
unsolvable: abstract.Unsolvable
# Define this error inside Converter so that it is exposed to abstract.py
class TypeParameterError(Exception):
def __init__(self, type_param_name):
super().__init__(type_param_name)
self.type_param_name = type_param_name
def __init__(self, ctx):
super().__init__(ctx)
ctx.convert = self # to make constant_to_value calls below work
self._convert_cache: dict[Any, Any] = {}
# Initialize primitive_classes to empty to allow constant_to_value to run.
self.primitive_classes = ()
# object_type is needed to initialize the primitive class values.
self.object_type = self.constant_to_value(object)
self.unsolvable = abstract.Unsolvable(self.ctx)
self.type_type = self.constant_to_value(type)
self.ctx.converter_minimally_initialized = True
self.empty = abstract.Empty(self.ctx)
self.never = typing_overlay.Never(self.ctx)
# Now fill primitive_classes with the real values using constant_to_value.
primitive_classes = [
int,
float,
str,
bytes,
object,
NoneType,
complex,
bool,
slice,
types.CodeType,
EllipsisType,
super,
]
self.primitive_classes = {
v: self.constant_to_value(v) for v in primitive_classes
}
self.primitive_classes_by_name = {
".".join(self._type_to_name(x)): x for x in self.primitive_classes
}
self.none = self.build_concrete_value(None, NoneType)
self.true = self.build_concrete_value(True, bool)
self.false = self.build_concrete_value(False, bool)
self.ellipsis = self.build_concrete_value(Ellipsis, EllipsisType)
self.primitive_instances = {}
for name, cls in self.primitive_classes.items():
if name == NoneType:
# This is possible because all None instances are the same.
# Without it pytype could not reason that "x is None" is always true, if
# x is indeed None.
instance = self.none
elif name == EllipsisType:
instance = self.ellipsis
else:
instance = abstract.Instance(cls, self.ctx)
self.primitive_instances[name] = instance
self._convert_cache[(abstract.Instance, cls.pytd_cls)] = instance
self.none_type = self.primitive_classes[NoneType]
self.super_type = self.primitive_classes[super]
self.str_type = self.primitive_classes[str]
self.int_type = self.primitive_classes[int]
self.bool_type = self.primitive_classes[bool]
self.bytes_type = self.primitive_classes[bytes]
self.list_type = self.constant_to_value(list)
self.set_type = self.constant_to_value(set)
self.frozenset_type = self.constant_to_value(frozenset)
self.dict_type = self.constant_to_value(dict)
self.module_type = self.constant_to_value(types.ModuleType)
self.function_type = self.constant_to_value(types.FunctionType)
self.tuple_type = self.constant_to_value(tuple)
self.generator_type = self.constant_to_value(types.GeneratorType)
self.iterator_type = self.constant_to_value(IteratorType)
self.coroutine_type = self.constant_to_value(CoroutineType)
self.awaitable_type = self.constant_to_value(AwaitableType)
self.async_generator_type = self.constant_to_value(AsyncGeneratorType)
self.bool_values = {
True: self.true,
False: self.false,
None: self.primitive_instances[bool],
}
def constant_name(self, constant_type):
if constant_type is None:
return "constant"
elif isinstance(constant_type, tuple):
return f"({', '.join(self.constant_name(c) for c in constant_type)})"
else:
return constant_type.__name__
def _type_to_name(self, t):
"""Convert a type to its name."""
assert t.__class__ is type
if t is types.FunctionType:
return "typing", "Callable"
elif t is IteratorType:
return "builtins", "object"
elif t is CoroutineType:
return "typing", "Coroutine"
elif t is AwaitableType:
return "typing", "Awaitable"
elif t is AsyncGeneratorType:
return "typing", "AsyncGenerator"
elif t is types.GeneratorType:
return "typing", "Generator"
else:
return "builtins", t.__name__
def value_to_constant(self, val, constant_type):
if val.is_concrete and isinstance(val.pyval, constant_type or object):
return val.pyval
name = self.constant_name(constant_type)
raise abstract_utils.ConversionError(f"{val} is not of type {name}")
def lookup_value(self, module, name, subst=None):
pytd_cls = self.ctx.loader.lookup_pytd(module, name)
subst = subst or datatypes.AliasingDict()
return self.constant_to_value(pytd_cls, subst)
def tuple_to_value(self, content):
"""Create a VM tuple from the given sequence."""
content = tuple(content) # content might be a generator
value = abstract.Tuple(content, self.ctx)
return value
def build_none(self, node):
return self.none.to_variable(node)
def build_bool(self, node, value=None):
if value in self.bool_values:
return self.bool_values[value].to_variable(node)
else:
raise ValueError(f"Invalid bool value: {value!r}")
def build_concrete_value(self, value, typ):
typ = self.primitive_classes[typ]
return abstract.ConcreteValue(value, typ, self.ctx) # pytype: disable=wrong-arg-types
def build_int(self, node):
return self.primitive_instances[int].to_variable(node)
def build_string(self, node, s):
del node
return self.constant_to_var(s)
def build_nonatomic_string(self, node):
return self.primitive_instances[str].to_variable(node)
def build_content(self, elements, discard_concrete_values=True):
if len(elements) == 1 and (
not discard_concrete_values
or not any(v.is_concrete for v in elements[0].data)
):
return next(iter(elements))
var = self.ctx.program.NewVariable()
for v in elements:
for b in v.bindings:
if discard_concrete_values and b.data.is_concrete:
var.PasteBindingWithNewData(
b, self.get_maybe_abstract_instance(b.data)
)
else:
var.PasteBinding(b)
return var
def build_slice(self, node, start, stop, step=None):
typs = (int, type(None))
get_const = lambda x: abstract_utils.get_atomic_python_constant(x, typs)
try:
start = start and get_const(start)
stop = stop and get_const(stop)
step = step and get_const(step)
except abstract_utils.ConversionError:
return self.primitive_instances[slice].to_variable(node)
val = slice(start, stop, step)
return self.build_concrete_value(val, slice).to_variable(node)
def build_list(self, node, content):
"""Create a VM list from the given sequence."""
content = [var.AssignToNewVariable(node) for var in content]
return abstract.List(content, self.ctx).to_variable(node)
def build_collection_of_type(self, node, typ, var):
  """Create an instance of Typ[T], with T derived from `var`."""
  instance = abstract.Instance(typ, self.ctx)
  instance.merge_instance_type_parameter(node, abstract_utils.T, var)
  return instance
def build_list_of_type(self, node, var):
  """Create a VM list whose element type is derived from `var`."""
  # Delegates to the generic collection builder with the list class.
  return self.build_collection_of_type(node, self.list_type, var)
def build_set(self, node, content):
  """Create a VM set from the given sequence."""
  element_vars = list(content)  # content might be a generator
  instance = abstract.Instance(self.set_type, self.ctx)
  merged = self.build_content(element_vars)
  instance.merge_instance_type_parameter(node, abstract_utils.T, merged)
  return instance.to_variable(node)
def build_map(self, node):
  """Create an empty VM dict."""
  empty_dict = abstract.Dict(self.ctx)
  return empty_dict.to_variable(node)
def build_tuple(self, node, content):
  """Create a VM tuple variable from the given sequence."""
  tuple_value = self.tuple_to_value(content)
  return tuple_value.to_variable(node)
def make_typed_dict_builder(self):
  """Return a fresh TypedDictBuilder bound to this context."""
  return typed_dict.TypedDictBuilder(self.ctx)
def make_typed_dict(self, name, pytd_cls):
  """Make a typed dict class from a pytd class.

  Args:
    name: The class name.
    pytd_cls: The pytd class to convert.

  Returns:
    The typed dict class built by the builder.
  """
  # Reuse make_typed_dict_builder() so builder construction lives in one
  # place instead of being duplicated here.
  return self.make_typed_dict_builder().make_class_from_pyi(name, pytd_cls)
def make_namedtuple_builder(self):
  """Return a fresh NamedTupleClassBuilder bound to this context."""
  return named_tuple.NamedTupleClassBuilder(self.ctx)
def make_namedtuple(self, name, pytd_cls):
  """Make a namedtuple class from a pytd class.

  Args:
    name: The class name.
    pytd_cls: The pytd class to convert.

  Returns:
    The namedtuple class built by the builder.
  """
  # Reuse make_namedtuple_builder() so builder construction lives in one
  # place instead of being duplicated here.
  return self.make_namedtuple_builder().make_class_from_pyi(name, pytd_cls)
def apply_dataclass_transform(self, cls_var, node):
  """Mark the class in `cls_var` as dataclass-transformed and process it.

  Args:
    cls_var: A variable holding exactly one class value.
    node: The current CFG node.

  Returns:
    The result of calling the dataclass overlay on the class.
  """
  cls = abstract_utils.get_atomic_value(cls_var)
  # We need to propagate the metadata key since anything in the entire tree of
  # subclasses is a dataclass, even without a decorator.
  cls.metadata["__dataclass_transform__"] = True
  args = function.Args(posargs=(cls_var,))
  # Delegate field processing to the dataclass overlay's decorator.
  return dataclass_overlay.Dataclass.make(self.ctx).call(node, None, args)
def get_maybe_abstract_instance(self, data):
  """Return `data`, abstracted if it is a concrete primitive.

  Concrete primitive values (stored as ConcreteValue) are swapped for the
  shared abstract instance of the same type; everything else is returned
  unchanged. This is used by constant_to_var to discard concrete values
  that have been kept around for InterpreterFunction.

  This method intentionally does not descend into containers, as doing so
  causes new timeouts; use abstract_utils.abstractify_variable for that.

  Args:
    data: The data.

  Returns:
    An instance of the same type as the data, abstract if possible.
  """
  if not data.is_concrete:
    return data
  concrete_type = type(data.pyval)
  return self.primitive_instances.get(concrete_type, data)
def _create_new_unknown_value(self, action) -> abstract.Unknown:
  """Return an Unknown, deduped per (current opcode, action) when possible."""
  if action and self.ctx.vm.frame:
    # We allow only one Unknown at each point in the program, regardless of
    # what the call stack is.
    cache_key = ("unknown", self.ctx.vm.frame.current_opcode, action)
    cached = self._convert_cache.get(cache_key)
    if cached is None:
      cached = self._convert_cache[cache_key] = abstract.Unknown(self.ctx)
    return cached
  return abstract.Unknown(self.ctx)
def create_new_unknown(self, node, source=None, action=None, force=False):
  """Create a new variable containing unknown.

  Args:
    node: The CFG node at which the binding is made.
    source: Optional binding to record as the origin of the unknown.
    action: Optional key used to dedupe unknowns created at the same opcode.
    force: If True, create a real Unknown even when unknowns are disabled.

  Returns:
    A cfg.Variable with a single binding.
  """
  if not force and not self.ctx.generate_unknowns:
    # unsolvable instances are cheaper than unknown, so use those for --quick.
    return self.unsolvable.to_variable(node)
  unknown = self._create_new_unknown_value(action)
  v = self.ctx.program.NewVariable()
  val = v.AddBinding(
      unknown, source_set=[source] if source else [], where=node
  )
  # Record the binding as the unknown's owner before tracing it.
  unknown.owner = val
  self.ctx.vm.trace_unknown(unknown.class_name, val)
  return v
def get_element_type(self, arg_type):
  """Extract the element type of a vararg or kwarg.

  Args:
    arg_type: The class of the *args (tuple) or **kwargs (dict) parameter.

  Returns:
    The dict value type for kwargs, the tuple element type for varargs, or
    None when the container is unparameterized.
  """
  if not isinstance(arg_type, abstract.ParameterizedClass):
    # A bare dict/tuple has no element type to extract.
    assert isinstance(arg_type, abstract.Class) and arg_type.full_name in (
        "builtins.dict",
        "builtins.tuple",
    )
    return None
  elif arg_type.base_cls is self.dict_type:
    return arg_type.get_formal_type_parameter(abstract_utils.V)
  else:
    assert arg_type.base_cls is self.tuple_type
    return arg_type.get_formal_type_parameter(abstract_utils.T)
def _copy_type_parameters(
    self,
    old_container: abstract.Class,
    new_container_module: str,
    new_container_name: str,
) -> abstract.BaseValue:
  """Return the named class, carrying over old_container's type parameters."""
  target = self.lookup_value(new_container_module, new_container_name)
  if not isinstance(old_container, abstract.ParameterizedClass):
    # Nothing to copy; the plain class is enough.
    return target
  return abstract.ParameterizedClass(
      target, old_container.formal_type_parameters, self.ctx
  )
def widen_type(self, container):
  """Widen a tuple to an iterable, or a dict to a mapping."""
  if container.full_name == "builtins.tuple":
    wide_name = "Iterable"
  else:
    assert container.full_name == "builtins.dict", container.full_name
    wide_name = "Mapping"
  return self._copy_type_parameters(container, "typing", wide_name)
def merge_values(self, values):
  """Merge a collection of values into a single one."""
  count = len(values)
  if count == 0:
    return self.empty
  if count == 1:
    return next(iter(values))
  return abstract.Union(values, self.ctx)
def merge_classes(self, instances):
  """Merge the classes of the given instances.

  Args:
    instances: An iterable of instances.

  Returns:
    An abstract.BaseValue created by merging the instances' classes.
  """
  classes = set()
  for instance in instances:
    if instance.cls != self.empty:
      classes.add(instance.cls)
  # Sort so the same instances always generate the same merged class type.
  ordered = sorted(classes, key=lambda cls: cls.full_name)
  return self.merge_values(ordered)
def convert_pytd_function(self, pytd_func, factory=abstract.PyTDFunction):
  """Convert a pytd.Function into an abstract function via `factory`."""
  signatures = []
  for sig in pytd_func.signatures:
    signatures.append(abstract.PyTDSignature(pytd_func.name, sig, self.ctx))
  return factory(
      pytd_func.name, signatures, pytd_func.kind, pytd_func.decorators,
      self.ctx
  )
def constant_to_var(
    self,
    pyval,
    subst=None,
    node=None,
    source_sets=None,
    discard_concrete_values=False,
):
  """Convert a constant to a Variable.

  This converts a constant to a cfg.Variable. Unlike constant_to_value, it
  can handle things that need to be represented as a Variable with multiple
  possible values (i.e., a union type), like pytd.Function.

  Args:
    pyval: The Python constant to convert. Can be a PyTD definition or a
      builtin constant.
    subst: The current type parameters.
    node: The current CFG node. (For instances)
    source_sets: An iterator over instances of SourceSet (or just tuples).
    discard_concrete_values: Whether concrete values should be discarded from
      type parameters.

  Returns:
    A cfg.Variable.

  Raises:
    TypeParameterError: if conversion is attempted on a type parameter without
      a substitution.
    ValueError: if pytype is not of a known type.
  """
  source_sets = source_sets or [[]]
  node = node or self.ctx.root_node
  # These args never change in recursive calls
  kwargs = {
      "subst": subst,
      "node": node,
      "source_sets": source_sets,
      "discard_concrete_values": discard_concrete_values,
  }

  def constant_to_value(new_pyval):
    # Call constant_to_value with the given subst and node
    return self.constant_to_value(new_pyval, subst, node)

  if isinstance(pyval, pytd.NothingType):
    # Nothing has no values: convert to an empty variable.
    return self.ctx.program.NewVariable([], [], self.ctx.root_node)
  elif isinstance(pyval, pytd.Alias):
    # Convert the alias's target instead.
    return self.constant_to_var(pyval.type, **kwargs)
  elif isinstance(pyval, abstract_utils.AsInstance):
    cls = pyval.cls
    if isinstance(pyval, abstract_utils.AsReturnValue) and isinstance(
        cls, pytd.NothingType
    ):
      # A return value of Nothing means the callee never returns.
      return self.never.to_variable(node)
    else:
      return self.pytd_cls_to_instance_var(cls, **kwargs)
  elif isinstance(pyval, pytd.Constant):
    return self.pytd_cls_to_instance_var(pyval.type, **kwargs)
  result = constant_to_value(pyval)
  if result is not None:
    return result.to_variable(node)
  # There might still be bugs on the abstract interpreter when it returns,
  # e.g. a list of values instead of a list of types:
  assert pyval.__class__ != cfg.Variable, pyval
  if pyval.__class__ == tuple:
    # This case needs to go at the end because many things are actually also
    # tuples.
    content = (self.constant_to_var(v, **kwargs) for v in pyval)
    return self.build_tuple(self.ctx.root_node, content)
  raise ValueError(f"Cannot convert {pyval.__class__} to an abstract value")
def pytd_cls_to_instance_var(
    self,
    cls,
    subst=None,
    node=None,
    source_sets=None,
    discard_concrete_values=False,
):
  """Convert a pytd class to a Variable holding instances of it.

  Unlike constant_to_value, this can handle things that need to be
  represented as a Variable with multiple possible values (i.e., a union
  type), like pytd.Function.

  Args:
    cls: The pytd class to instantiate.
    subst: The current type parameters.
    node: The current CFG node. (For instances)
    source_sets: An iterator over instances of SourceSet (or just tuples).
    discard_concrete_values: Whether concrete values should be discarded from
      type parameters.

  Returns:
    A cfg.Variable.

  Raises:
    TypeParameterError: if conversion is attempted on a type parameter without
      a substitution.
    ValueError: if pytype is not of a known type.
  """
  source_sets = source_sets or [[]]
  node = node or self.ctx.root_node
  # These args never change in recursive calls
  kwargs = {
      "subst": subst,
      "node": node,
      "source_sets": source_sets,
      "discard_concrete_values": discard_concrete_values,
  }

  def constant_to_instance_value(new_type):
    # Call constant_to_value with the given subst and node
    return self.constant_to_value(
        abstract_utils.AsInstance(new_type), subst, node
    )

  if isinstance(cls, pytd.AnythingType):
    return self.unsolvable.to_variable(node)
  elif isinstance(cls, pytd.GenericType) and cls.name == "typing.ClassVar":
    # ClassVar[T] is transparent here: instantiate T itself.
    (param,) = cls.parameters
    return self.pytd_cls_to_instance_var(param, **kwargs)
  var = self.ctx.program.NewVariable()
  for t in pytd_utils.UnpackUnion(cls):
    if isinstance(t, pytd.TypeParameter):
      if not subst or t.full_name not in subst:
        raise self.TypeParameterError(t.full_name)
      else:
        # Substitute in every value bound to the type parameter, preserving
        # the substitution binding as a source.
        for v in subst[t.full_name].bindings:
          for source_set in source_sets:
            if discard_concrete_values:
              value = self.get_maybe_abstract_instance(v.data)
            else:
              value = v.data
            var.AddBinding(value, source_set + [v], node)
    elif isinstance(t, pytd.NothingType):
      pass
    else:
      if isinstance(t, pytd.Annotated):
        typ = constant_to_instance_value(t.base_type)
        value = self._apply_metadata_annotations(typ, t.annotations)
      else:
        value = constant_to_instance_value(t)
      for source_set in source_sets:
        var.AddBinding(value, source_set, node)
  return var
def constant_to_value(self, pyval, subst=None, node=None):
  """Like constant_to_var, but convert to an abstract.BaseValue.

  This also memoizes the results. We don't memoize on name, as builtin types
  like str or list might be reinitialized under different names (e.g. "param
  1"), but we want the canonical name and type. We *do* memoize on the type
  as well, to make sure that e.g. "1.0" and "1" get converted to different
  constants. Memoization is an optimization, but an important one - mapping
  constants like "None" to the same AbstractValue greatly simplifies the
  cfg structures we're building.

  Args:
    pyval: The constant to convert.
    subst: The current type parameters.
    node: The current CFG node. (For instances)

  Returns:
    The converted constant. (Instance of BaseValue)
  """
  node = node or self.ctx.root_node
  if pyval.__class__ is tuple:
    # Tuples of different element types must not share a cache entry.
    type_key = tuple(type(v) for v in pyval)
  else:
    type_key = type(pyval)
  key = ("constant", pyval, type_key)
  if key in self._convert_cache:
    if self._convert_cache[key] is None:
      # We hit a conversion already in progress; break the cycle.
      self._convert_cache[key] = self.unsolvable
      # This error is triggered by, e.g., classes inheriting from each other.
      if not self.ctx.recursion_allowed:
        name = getattr(pyval, "name", None) or pyval.__class__.__name__
        self.ctx.errorlog.recursion_error(self.ctx.vm.frames, name)
    return self._convert_cache[key]
  self._convert_cache[key] = None  # for recursion detection
  # Tracks whether the conversion depended on the current node. Previously a
  # single-element-list cell; `nonlocal` is the idiomatic form.
  need_node = False

  def get_node():
    nonlocal need_node
    need_node = True
    return node

  recursive = isinstance(pyval, pytd.LateType) and pyval.recursive
  if recursive:
    context = self.ctx.allow_recursive_convert()
  else:
    context = contextlib.nullcontext()
  with context:
    try:
      value = self._constant_to_value(pyval, subst, get_node)
    except NotImplementedError:
      # We delete the cache entry so that future calls still raise
      # NotImplementedError, rather than reporting a recursion error.
      del self._convert_cache[key]
      raise
  if not need_node or node is self.ctx.root_node:
    # Values that contain a non-root node cannot be cached. Otherwise,
    # we'd introduce bugs such as the following:
    #   if <condition>:
    #     d = {"a": 1}  # "a" is cached here
    #   else:
    #     # the cached value of "a", which contains a node that is only
    #     # visible inside the "if", is used, which will eventually lead
    #     # pytype to think that the V->complex binding isn't visible.
    #     d = {"a": 1j}
    if recursive:
      annot = abstract.LateAnnotation(
          pyval.name, self.ctx.vm.frames, self.ctx
      )
      annot.set_type(value)
      value = annot
    self._convert_cache[key] = value
  return value
def _load_late_type(self, late_type):
  """Resolve a late (forward-referenced) type, loading its module if needed."""
  return self.ctx.loader.load_late_type(late_type)
def _create_module(self, ast):
  """Build an abstract.Module from a pytd AST.

  Raises:
    abstract_utils.ModuleLoadError: if `ast` is missing.
  """
  if not ast:
    raise abstract_utils.ModuleLoadError()
  prefix = f"{ast.name}."
  members = {
      # Member names are stored relative to the module.
      val.name.removeprefix(prefix): val
      for val in (
          ast.constants
          + ast.type_params
          + ast.classes
          + ast.functions
          + ast.aliases
      )
  }
  return abstract.Module(self.ctx, ast.name, members, ast)
def _get_literal_value(self, pyval, subst):
  """Extract and convert the value of a pytd.Literal.

  Args:
    pyval: The literal's payload: an enum member Constant, a reference to
      builtins True/False, a quoted string literal, or a plain constant.
    subst: The current type parameters.

  Returns:
    The converted value.
  """
  if isinstance(pyval, pytd.Constant):
    # Literal enums are stored as Constants with the name set to the member
    # name and the type set to a ClassType pointing to the enum cls.
    # However, the type may be a LateType due to pickling.
    if isinstance(pyval.type, pytd.LateType):
      typ = self._load_late_type(pyval.type)
    else:
      typ = pyval.type.cls
    cls = self.constant_to_value(typ)
    _, name = pyval.name.rsplit(".", 1)
    # Bad values should have been caught by visitors.VerifyEnumValues.
    assert cls.is_enum, f"Non-enum type used in Literal: {cls.official_name}"
    assert name in cls, (
        "Literal enum refers to non-existent member "
        f"'{pyval.name}' of {cls.official_name}"
    )
    # The cls has already been converted, so don't try to convert the member.
    return abstract_utils.get_atomic_value(cls.members[name])
  if pyval == self.ctx.loader.lookup_pytd("builtins", "True"):
    value = True
  elif pyval == self.ctx.loader.lookup_pytd("builtins", "False"):
    value = False
  elif isinstance(pyval, str):
    # String literals arrive quoted; evaluate to obtain the raw value.
    value = evaluator.eval_string_literal(pyval)
  else:
    value = pyval
  return self.constant_to_value(value, subst)
def _special_constant_to_value(self, name):
  """Special-case construction of some pytd values.

  Maps a handful of well-known class names to this converter's canonical
  values; returns None for any other name.
  """
  if name == "builtins.super":
    return self.super_type
  elif name == "builtins.object":
    return self.object_type
  elif name == "types.ModuleType":
    return self.module_type
  elif name == "_importlib_modulespec.ModuleType":
    # Python 3's typeshed uses a stub file indirection to define ModuleType
    # even though it is exported via types.pyi.
    return self.module_type
  elif name == "types.FunctionType":
    return self.function_type
  elif name in ("types.NoneType", "_typeshed.NoneType"):
    # Since types.NoneType is new in 3.10, _typeshed defines its own
    # equivalent for 3.9 and below:
    # https://github.com/python/typeshed/blob/3ab3711f427231fe31e856e238bcbc58172ef983/stdlib/_typeshed/__init__.pyi#L240-L247
    return self.none_type
  elif name == "types.CodeType":
    return self.primitive_classes[types.CodeType]
  else:
    return None
def _apply_metadata_annotations(self, typ, annotations):
  """Apply pytype metadata from an Annotated[...] type to `typ`.

  Only 'pytype_metadata' annotations with an attr.ib/attr.s tag are
  interpreted; anything else leaves `typ` unchanged.
  """
  if annotations[0] == "'pytype_metadata'":
    try:
      md = metadata.from_string(annotations[1])
      if md["tag"] == "attr.ib":
        ret = attr_overlay.AttribInstance.from_metadata(
            self.ctx, self.ctx.root_node, typ, md
        )
        return ret
      elif md["tag"] == "attr.s":
        ret = attr_overlay.Attrs.from_metadata(self.ctx, md)
        return ret
    except (IndexError, ValueError, TypeError, KeyError):
      details = "Wrong format for pytype_metadata."
      self.ctx.errorlog.invalid_annotation(
          self.ctx.vm.frames, annotations[1], details
      )
    # Unrecognized tags and malformed metadata fall through to the bare type.
    return typ
  else:
    return typ
def _maybe_load_from_overlay(self, module, member_name):
  """Load `member_name` from the overlay for `module`, if one applies.

  Returns None when no overlay applies or the overlay does not own the
  member.
  """
  # The typing module cannot be loaded until setup is complete and the first
  # VM frame is pushed.
  # NOTE: operator precedence makes this
  # (module == "typing" and not frame) or (module not in overlays).
  if (
      module == "typing"
      and not self.ctx.vm.frame
      or module not in overlay_dict.overlays
  ):
    return None
  overlay = self.ctx.vm.import_module(module, module, 0, bypass_strict=True)
  if overlay.get_module(member_name) is not overlay:
    # The member belongs to a different module, not the overlay itself.
    return None
  return overlay.maybe_load_member(member_name)
def _frozenset_literal_to_value(self, pyval: frozenset[Any]):
  """Convert a literal frozenset to an abstract frozenset instance."""
  result = abstract.Instance(self.frozenset_type, self.ctx)
  root = self.ctx.root_node
  for member in pyval:
    member_var = self.constant_to_var(member, {})
    result.merge_instance_type_parameter(root, abstract_utils.T, member_var)
  return result
def _tuple_literal_to_value(self, pyval: tuple[Any, ...]):
  """Convert a literal tuple to an abstract tuple value."""
  item_vars = []
  for item in pyval:
    item_vars.append(self.constant_to_var(item, {}))
  return self.tuple_to_value(item_vars)
def _pytd_class_to_value(self, pyval: pytd.Class, node):
  """Convert a pytd.Class to an abstract class value.

  Special-cased names and overlay-provided members take precedence over a
  plain PyTDClass conversion.
  """
  if val := self._special_constant_to_value(pyval.name):
    return val
  module, dot, base_name = pyval.name.rpartition(".")
  # Overlays take precedence over the plain pyi definition.
  if overlay_member := self._maybe_load_from_overlay(module, base_name):
    return overlay_member
  try:
    cls = abstract.PyTDClass.make(base_name, pyval, self.ctx)
  except mro.MROError as e:
    # An inconsistent MRO is reported and the class degrades to unsolvable.
    self.ctx.errorlog.mro_error(self.ctx.vm.frames, base_name, e.mro_seqs)
    cls = self.unsolvable
  else:
    if dot:
      cls.module = module
    cls.call_metaclass_init(node)
  return cls
def _pytd_class_to_instance_value(self, cls: pytd.Class, subst):
  """Convert a (non-generic) pytd.Class to an abstract instance value.

  Instances are cached per class, so repeated conversions of the same class
  return the same instance object.
  """
  # We should not call this method for generic classes
  assert not cls.template
  # This key is also used in __init__
  key = (abstract.Instance, cls)
  if key not in self._convert_cache:
    if cls.name in ["builtins.type", "builtins.property"]:
      # An instance of "type" or of an anonymous property can be anything.
      instance = self._create_new_unknown_value("type")
    else:
      mycls = self.constant_to_value(cls, subst)
      if isinstance(mycls, typed_dict.TypedDictClass):
        instance = mycls.instantiate_value(self.ctx.root_node, None)
      elif (
          isinstance(mycls, abstract.PyTDClass)
          and mycls.pytd_cls.name in self.primitive_classes_by_name
      ):
        # Primitive types reuse the shared canonical instance.
        instance = self.primitive_instances[
            self.primitive_classes_by_name[mycls.pytd_cls.name]
        ]
      else:
        instance = abstract.Instance(mycls, self.ctx)
      log.info("New pytd instance for %s: %r", cls.name, instance)
    self._convert_cache[key] = instance
  return self._convert_cache[key]
def _pytd_generic_type_to_value(
    self, pyval: pytd.GenericType, subst, get_node
):
  """Convert a pytd.GenericType to a parameterized abstract class.

  Args:
    pyval: The generic type to convert.
    subst: The current type parameters.
    get_node: A getter function for the current node.

  Returns:
    An abstract.BaseValue, typically a ParameterizedClass (or a tuple/
    callable specialization thereof).
  """
  if isinstance(pyval.base_type, pytd.LateType):
    actual = self._load_late_type(pyval.base_type)
    if not isinstance(actual, pytd.ClassType):
      return self.unsolvable
    base = actual.cls
  else:
    assert isinstance(pyval.base_type, pytd.ClassType), pyval
    base = pyval.base_type.cls
  assert isinstance(base, pytd.Class), base
  base_cls = self.constant_to_value(base, subst)
  if not isinstance(base_cls, abstract.Class):
    # base_cls can be, e.g., an unsolvable due to an mro error.
    return self.unsolvable
  if isinstance(pyval, pytd.TupleType):
    # Heterogeneous tuple: one slot per position, plus the join of all
    # positions stored under T.
    abstract_class = abstract.TupleClass
    template = list(range(len(pyval.parameters))) + [abstract_utils.T]
    combined_parameter = pytd_utils.JoinTypes(pyval.parameters)
    parameters = pyval.parameters + (combined_parameter,)
  elif isinstance(pyval, pytd.CallableType):
    # Callable: one slot per argument, plus the joined args and the return.
    abstract_class = abstract.CallableClass
    template = list(range(len(pyval.args))) + [
        abstract_utils.ARGS,
        abstract_utils.RET,
    ]
    parameters = pyval.args + (pytd_utils.JoinTypes(pyval.args), pyval.ret)
  else:
    if (
        self.ctx.options.use_fiddle_overlay
        and fiddle_overlay.is_fiddle_buildable_pytd(pyval)
    ):
      # fiddle.Config[Foo] should call the constructor from the overlay, not
      # create a generic PyTDClass.
      node = get_node()
      (param,) = pyval.parameters
      underlying = self.constant_to_value(param, subst, node)
      subclass_name = fiddle_overlay.get_fiddle_buildable_subclass(pyval)
      try:
        return fiddle_overlay.BuildableType.make(
            subclass_name, underlying, self.ctx
        )
      except KeyError:
        # We are in the middle of constructing the fiddle ast so
        # fiddle.Config does not exist yet. Continue constructing a generic
        # class.
        pass
    abstract_class = abstract.ParameterizedClass
    if pyval.name == "typing.Generic":
      pyval_template = pyval.parameters
    else:
      pyval_template = base.template
    template = tuple(t.name for t in pyval_template)
    parameters = pyval.parameters
  assert pyval.name in ("typing.Generic", "typing.Protocol") or len(
      parameters
  ) <= len(template)
  # Delay type parameter loading to handle recursive types.
  # See the ParameterizedClass.formal_type_parameters() property.
  type_parameters = abstract_utils.LazyFormalTypeParameters(
      template, parameters, subst
  )
  return abstract_class(base_cls, type_parameters, self.ctx)
def _pytd_generic_type_to_instance_value(
    self, cls: pytd.GenericType, subst, get_node
):
  """Convert a pytd.GenericType to an abstract instance value.

  Args:
    cls: The generic type to instantiate.
    subst: The current type parameters.
    get_node: A getter function for the current node.

  Returns:
    An abstract.BaseValue instance of the given generic type.

  Raises:
    TypeParameterError: if Type[T] is instantiated with no substitution
      available for T.
  """
  if isinstance(cls.base_type, pytd.LateType):
    actual = self._load_late_type(cls.base_type)
    if not isinstance(actual, pytd.ClassType):
      return self.unsolvable
    base_cls = actual.cls
  else:
    base_type = cls.base_type
    assert isinstance(base_type, pytd.ClassType)
    base_cls = base_type.cls
  assert isinstance(base_cls, pytd.Class), base_cls
  if base_cls.name == "builtins.type":
    # An instance of Type[C] is the class C itself.
    (c,) = cls.parameters
    if isinstance(c, pytd.TypeParameter):
      if not subst or c.full_name not in subst:
        raise self.TypeParameterError(c.full_name)
      # deformalize gets rid of any unexpected TypeVars, which can appear
      # if something is annotated as Type[T].
      return self.ctx.annotation_utils.deformalize(
          self.merge_classes(subst[c.full_name].data)
      )
    else:
      return self.constant_to_value(c, subst)
  elif isinstance(cls, pytd.TupleType):
    node = get_node()
    content = tuple(
        self.pytd_cls_to_instance_var(p, subst, node) for p in cls.parameters
    )
    return self.tuple_to_value(content)
  elif isinstance(cls, pytd.CallableType):
    clsval = self.constant_to_value(cls, subst)
    return abstract.Instance(clsval, self.ctx)
  elif (
      self.ctx.options.use_fiddle_overlay
      and fiddle_overlay.is_fiddle_buildable_pytd(base_cls)
  ):
    # fiddle.Config[Foo] should call the constructor from the overlay, not
    # create a generic PyTDClass.
    node = get_node()
    underlying = self.constant_to_value(cls.parameters[0], subst, node)
    subclass_name = fiddle_overlay.get_fiddle_buildable_subclass(base_cls)
    _, ret = fiddle_overlay.make_instance(
        subclass_name, underlying, node, self.ctx
    )
    return ret
  else:
    clsval = self.constant_to_value(base_cls, subst)
    instance = abstract.Instance(clsval, self.ctx)
    num_params = len(cls.parameters)
    assert num_params <= len(base_cls.template)
    for i, formal in enumerate(base_cls.template):
      if i < num_params:
        node = get_node()
        p = self.pytd_cls_to_instance_var(cls.parameters[i], subst, node)
      else:
        # An omitted type parameter implies `Any`.
        node = self.ctx.root_node
        p = self.unsolvable.to_variable(node)
      instance.merge_instance_type_parameter(node, formal.name, p)
    return instance
def _constant_to_value(self, pyval, subst, get_node):
  """Create a BaseValue that represents a python constant.

  This supports both constant from code constant pools and PyTD constants such
  as classes. This also supports builtin python objects such as int and float.

  Args:
    pyval: The python or PyTD value to convert.
    subst: The current type parameters.
    get_node: A getter function for the current node.

  Returns:
    A Value that represents the constant, or None if we couldn't convert.

  Raises:
    NotImplementedError: If we don't know how to convert a value.
    TypeParameterError: If we can't find a substitution for a type parameter.
  """
  if isinstance(pyval, str):
    return self.build_concrete_value(pyval, str)
  elif isinstance(pyval, bytes):
    return self.build_concrete_value(pyval, bytes)
  elif isinstance(pyval, bool):
    # Check bool before int: bool is a subclass of int.
    return self.true if pyval else self.false
  elif isinstance(pyval, int) and -1 <= pyval <= _MAX_IMPORT_DEPTH:
    # For small integers, preserve the actual value (for things like the
    # level in IMPORT_NAME).
    return self.build_concrete_value(pyval, int)
  elif pyval.__class__ in self.primitive_classes:
    return self.primitive_instances[pyval.__class__]
  elif pyval.__class__ is frozenset:
    return self._frozenset_literal_to_value(pyval)
  elif isinstance(pyval, (pycnite.types.CodeTypeBase, blocks.OrderedCode)):
    # TODO(mdemello): We should never be dealing with a raw pycnite CodeType
    # at this point.
    return abstract.ConcreteValue(
        pyval, self.primitive_classes[types.CodeType], self.ctx
    )
  elif pyval is super:
    return special_builtins.Super.make(self.ctx)
  elif pyval is object:
    return special_builtins.Object.make(self.ctx)
  elif pyval.__class__ is type:
    try:
      return self.lookup_value(*self._type_to_name(pyval), subst)
    except (KeyError, AttributeError):
      log.debug("Failed to find pytd", exc_info=True)
      raise
  elif isinstance(pyval, abstract_utils.AsInstance):
    # Instantiate the wrapped class rather than converting it as a class.
    cls = pyval.cls
    if isinstance(cls, pytd.LateType):
      actual = self._load_late_type(cls)
      if not isinstance(actual, pytd.ClassType):
        return self.unsolvable
      cls = actual.cls
    if isinstance(cls, pytd.ClassType):
      cls = cls.cls
    if isinstance(cls, pytd.GenericType) and cls.name == "typing.ClassVar":
      (param,) = cls.parameters
      return self.constant_to_value(abstract_utils.AsInstance(param), subst)
    elif isinstance(cls, pytd.GenericType) or (
        isinstance(cls, pytd.Class) and cls.template
    ):
      # If we're converting a generic Class, need to create a new instance of
      # it. See test_classes.testGenericReinstantiated.
      if isinstance(cls, pytd.Class):
        params = tuple(t.type_param.upper_value for t in cls.template)
        cls = pytd.GenericType(
            base_type=pytd.ClassType(cls.name, cls), parameters=params
        )
      return self._pytd_generic_type_to_instance_value(cls, subst, get_node)
    elif isinstance(cls, pytd.Class):
      return self._pytd_class_to_instance_value(cls, subst)
    elif isinstance(cls, pytd.Literal):
      return self._get_literal_value(cls.value, subst)
    else:
      return self.constant_to_value(cls, subst)
  elif isinstance(pyval, pytd.Node):
    return self._pytd_constant_to_value(pyval, subst, get_node)
  elif pyval.__class__ is tuple:  # only match raw tuple, not namedtuple/Node
    return self._tuple_literal_to_value(pyval)
  else:
    raise NotImplementedError(
        f"Can't convert constant {type(pyval)} {pyval!r}"
    )
def _pytd_constant_to_value(self, pyval: pytd.Node, subst, get_node):
  """Convert a pytd type to an abstract value.

  Args:
    pyval: The PyTD value to convert.
    subst: The current type parameters.
    get_node: A getter function for the current node.

  Returns:
    A Value that represents the constant, or None if we couldn't convert.

  Raises:
    NotImplementedError: If we don't know how to convert a value.
    TypeParameterError: If we can't find a substitution for a type parameter.
  """
  if isinstance(pyval, pytd.LateType):
    # Resolve the forward reference, then convert the resolved value.
    actual = self._load_late_type(pyval)
    return self._constant_to_value(actual, subst, get_node)
  elif isinstance(pyval, pytd.TypeDeclUnit):
    return self._create_module(pyval)
  elif isinstance(pyval, pytd.Module):
    mod = self.ctx.loader.import_name(pyval.module_name)
    return self._create_module(mod)
  elif isinstance(pyval, pytd.Class):
    return self._pytd_class_to_value(pyval, get_node())
  elif isinstance(pyval, pytd.Function):
    f = self.convert_pytd_function(pyval)
    f.is_abstract = pyval.is_abstract
    return f
  elif isinstance(pyval, pytd.ClassType):
    if pyval.cls:
      cls = pyval.cls
    else:
      # lookup_pytd raises a KeyError if the name is not found.
      cls = self.ctx.loader.lookup_pytd(*pyval.name.split(".", 1))
    assert isinstance(cls, pytd.Class)
    return self.constant_to_value(cls, subst)
  elif isinstance(pyval, pytd.NothingType):
    return self.empty
  elif isinstance(pyval, pytd.AnythingType):
    return self.unsolvable
  elif isinstance(pyval, pytd.Constant) and isinstance(
      pyval.type, pytd.AnythingType
  ):
    # We allow "X = ... # type: Any" to declare X as a type.
    return self.unsolvable
  elif (
      isinstance(pyval, pytd.Constant)
      and isinstance(pyval.type, pytd.GenericType)
      and pyval.type.name == "builtins.type"
  ):
    # `X: Type[other_mod.X]` is equivalent to `X = other_mod.X`.
    (param,) = pyval.type.parameters
    return self.constant_to_value(param, subst)
  elif isinstance(pyval, pytd.UnionType):
    options = [self.constant_to_value(t, subst) for t in pyval.type_list]
    if len(options) > 1:
      return abstract.Union(options, self.ctx)
    else:
      # A single-option union collapses to that option.
      return options[0]
  elif isinstance(pyval, (pytd.TypeParameter, pytd.ParamSpec)):
    constraints = tuple(
        self.constant_to_value(c, {}) for c in pyval.constraints
    )
    bound = pyval.bound and self.constant_to_value(pyval.bound, {})
    if isinstance(pyval, pytd.ParamSpec):
      cls = abstract.ParamSpec
    else:
      cls = abstract.TypeParameter
    return cls(
        pyval.name,
        self.ctx,
        constraints=constraints,
        bound=bound,
        scope=pyval.scope,
    )
  elif isinstance(pyval, (pytd.ParamSpecArgs, pytd.ParamSpecKwargs)):
    # TODO(b/217789659): Support these.
    return self.unsolvable
  elif isinstance(pyval, pytd.Concatenate):
    params = [self.constant_to_value(p, subst) for p in pyval.parameters]
    return abstract.Concatenate(params, self.ctx)
  elif (
      isinstance(pyval, pytd.GenericType) and pyval.name == "typing.ClassVar"
  ):
    # ClassVar[T] is transparent: convert T.
    (param,) = pyval.parameters
    return self.constant_to_value(param, subst)
  elif isinstance(pyval, pytd.GenericType):
    return self._pytd_generic_type_to_value(pyval, subst, get_node)
  elif isinstance(pyval, pytd.Literal):
    value = self._get_literal_value(pyval.value, subst)
    return abstract.LiteralClass(value, self.ctx)
  elif isinstance(pyval, pytd.Annotated):
    typ = self.constant_to_value(pyval.base_type, subst)
    return self._apply_metadata_annotations(typ, pyval.annotations)
  else:
    raise NotImplementedError(
        f"Can't convert pytd constant {type(pyval)} {pyval!r}"
    )
| Converter |
python | google__pytype | pytype/tests/test_pickle2.py | {
"start": 164,
"end": 2096
} | class ____(test_base.BaseTest):
"""Tests for loading and saving pickled files."""
def test_container(self):
pickled = self.Infer(
"""
import collections, json
def f() -> collections.OrderedDict[int, int]:
return collections.OrderedDict({1: 1})
def g() -> json.JSONDecoder:
return json.JSONDecoder()
""",
pickle=True,
module_name="foo",
)
with test_utils.Tempdir() as d:
u = d.create_file("u.pickled", pickled)
ty = self.Infer(
"""
import u
r = u.f()
""",
pythonpath=[""],
imports_map={"u": u},
)
self.assertTypesMatchPytd(
ty,
"""
from typing import OrderedDict
import u
r = ... # type: OrderedDict[int, int]
""",
)
def test_nested_class_name_clash(self):
ty = self.Infer(
"""
class Foo:
pass
class Bar:
class Foo(Foo):
pass
""",
module_name="foo",
pickle=True,
)
ast = pickle_utils.DecodeAst(ty).ast
(base,) = ast.Lookup("foo.Bar").Lookup("foo.Bar.Foo").bases
self.assertEqual(base.name, "foo.Foo")
def test_late_type_indirection(self):
with self.DepTree([
(
"foo.py",
"""
class Foo:
pass
""",
{"pickle": True},
),
(
"bar.py",
"""
import foo
Bar = foo.Foo
""",
{"pickle": True},
),
(
"baz.pyi",
"""
import bar
class Baz:
x: bar.Bar
""",
{"pickle": True},
),
]):
self.Check("""
import baz
assert_type(baz.Baz.x, 'foo.Foo')
""")
self.Check("""
import baz, foo
assert_type(baz.Baz.x, foo.Foo)
""")
if __name__ == "__main__":
test_base.main()
| PickleTest |
python | falconry__falcon | falcon/bench/queues/claims.py | {
"start": 586,
"end": 684
} | class ____:
def on_post(self, req, resp, tenant_id, queue_name):
pass
| CollectionResource |
python | spyder-ide__spyder | spyder/plugins/mainmenu/api.py | {
"start": 3104,
"end": 3262
} | class ____:
StartDebug = 'start_debug_section'
ControlDebug = 'control_debug_section'
EditBreakpoints = 'edit_breakpoints_section'
| DebugMenuSections |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/fail_test_audit/package.py | {
"start": 225,
"end": 936
} | class ____(MakefilePackage):
"""Simple package attempting to re-use stand-alone test method as a build check."""
homepage = "http://github.com/dummy/fail-test-audit"
url = "https://github.com/dummy/fail-test-audit/archive/v1.0.tar.gz"
version("2.0", sha256="c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1")
version("1.0", sha256="abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234")
# Stand-alone test methods cannot be included in build_time_test_callbacks
build_time_test_callbacks = ["test_build_callbacks"]
def test_build_callbacks(self):
"""test build time test callbacks failure"""
print("test_build_callbacks")
| FailTestAudit |
python | sympy__sympy | sympy/physics/control/routh_table.py | {
"start": 135,
"end": 9775
} | class ____(MutableDenseMatrix):
r"""
A class for creating a Routh-Hurwitz table from a given polynomial.
It handles special cases with methods discussed in [1]_.
Note: When at least a row of the table is zero,
the property ``zero_row_case`` is set to True.
Explanation
============
In mathematics, the Routh-Hurwitz table is used to determine the number of
roots of a polynomial that have positive or negative real parts.
It's crucial in the control system theory because it can be used to
retrieve necessary and sufficient conditions for the stability of a linear
time-invariant control system.
Once the table is constructed, the stability of the system can be assessed
by counting the number of sign changes in the first column.
Each sign change corresponds to a root with a positive real part, whereas
each preservation of sign corresponds to a root with a negative real part.
There are two special cases to consider during the construction of the
table:
1. First Column Zero Case:
If a zero appears in the first column of a row (while the row is not
entirely zero), the Extended Routh's Table is constructed [2]_ and every
information of these rows is stored in ``zero_col_infos``.
2. Full Row Zero Case:
If an entire row becomes zero, we can substitute the row with the
coefficients of the derivative of an auxiliary polynomial.
The auxiliary polynomial is constructed using the row immediately
above the zero row [3]_.
For instance, consider the following example:
.. math::
\begin{matrix}3\\2\\1\end{matrix}\begin{bmatrix}b_3&b_1\\
b_2&b_0\\ 0&0\end{bmatrix}
The auxiliary polynomial will be: :math:`a(s) = b_2 s^2 + b_0`
The characteristic is that, if :math:`p(s)` is the polynomial we are
analyzing, :math:`p(s)=a(s)\cdot other(s)` and
the roots of :math:`a(s)` are symmetric about the origin in the
s-plane, so when we
fall in this case, we should note that there could be poles with only
imaginary part or poles with negative and positive real parts.
The table is constructed for a polynomial of the form
:math:`p(s) = b_n s^n + b_{n-1} s^{n-1} + \ldots + b_1 s + b_0`
and the table has :math:`n+1` rows and the following structure:
.. math::
\begin{bmatrix}b_n&b_{n-2}&b_{n-4}&\cdots\\
b_{n-1}&b_{n-3}&b_{n-5}&\cdots\\ c_{1}&c_2&c_3&\cdots\\
d_{1}&d_2&d_3&\cdots\\ \vdots&\vdots&\vdots&\ddots\end{bmatrix}
In this table, the elements in the subsequent rows are computed using the
formulas:
:math:`c_i = \frac{b_{n-1}\cdot b_{n-2i}-b_n\cdot b_{n-(2i+1)}}{b_{n-1}}`
:math:`d_i = \frac{c_1 \cdot b_{n-(2i+1)}-b_{n-1}\cdot c_{i+1}}{c_1}`
Parameters
==========
polynomial : :py:class:`~.Expr`, :py:class:`~.Number`
The polynomial whose Routh-Hurwitz table is to be created.
var : :py:class:`~.Symbol`
The symbol representing the variable in the polynomial.
infinitesimal_element : None, :py:class:`~.Symbol`, optional
The symbol representing the infinitesimal element for the first column
zero case.
If not provided, a default symbol ``epsilon`` will be used.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.control import RouthHurwitz
>>> b1, b2, b3, b4 = symbols('b_1 b_2 b_3 b_4')
>>> s = symbols("s")
Here is a generic example of how to use the ``RouthHurwitz`` class:
>>> p = b1*s**3 + b2*s**2 + b3*s + b4
>>> RouthHurwitz(p, s)
Matrix([
[ b_1, b_3],
[ b_2, b_4],
[(-b_1*b_4 + b_2*b_3)/b_2, 0],
[ b_4, 0]])
>>> RouthHurwitz(p, s)[:, 0]
Matrix([
[ b_1],
[ b_2],
[(-b_1*b_4 + b_2*b_3)/b_2],
[ b_4]])
Here you can see how the table appears in the first column zero case:
>>> p1 = s**4 + s**3 + 3*s**2 + 3*s + 3
>>> RouthHurwitz(p1, s)
Matrix([
[ 1, 3, 3],
[ 1, 3, 0],
[-3, 3, 0],
[ 4, 0, 0],
[ 3, 0, 0]])
>>> RouthHurwitz(p1, s).zero_col_infos
[(2, 1)]
>>> RouthHurwitz(p1, s).zero_row_case
False
Here you can see how the table appears in the full row zero case
(poles with only imaginary part):
>>> p2 = s**6 + 2*s**5 + 8*s**4 + 12*s**3 + 20*s**2 + 16*s + 16
>>> RouthHurwitz(p2, s)
Matrix([
[ 1, 8, 20, 16],
[ 2, 12, 16, 0],
[ 2, 12, 16, 0],
[ 8, 24, 0, 0],
[ 6, 16, 0, 0],
[8/3, 0, 0, 0],
[ 16, 0, 0, 0]])
>>> RouthHurwitz(p2, s).zero_row_case
True
>>> RouthHurwitz(p2, s).auxiliary_polynomials
[Poly(2*s**4 + 12*s**2 + 16, s, domain='ZZ')]
References
==========
.. [1] https://en.wikipedia.org/wiki/Routh-Hurwitz_stability_criterion
.. [2] https://citeseerx.ist.psu.edu/document?repid=rep1&type=pdf&doi=b1ed2c8cbd00da0a4aac7b7e9684255a833af6b4
.. [3] https://www.circuitbread.com/tutorials/routh-hurwitz-criterion-part-2-3-3
"""
def __new__(cls, polynomial, var):
if not isinstance(var, Symbol):
raise ValueError("var must be a Symbol")
n = Poly(polynomial, var).degree()
return super().__new__(cls, n + 1, n//2 + 1, [0]*(n + 1)*(n//2 + 1))
def __init__(self, polynomial, var):
self._var = var
self._polynomial = Poly(polynomial, var)
self._poly_degree = self._polynomial.degree()
self._coeffs = self._polynomial.all_coeffs()
self._zero_row_case = False
self._zero_col_infos = []
self._aux_poly_degrees = []
if self._poly_degree < 1:
self[0, 0] = self._coeffs[0]
return
self._build_table()
def _build_table(self):
"""Build the Routh-Hurwitz table."""
self._initialize()
self._calculate()
def _initialize(self):
""""Initialize the table with the coefficients of the polynomial."""
row, col = 0, 0
for coeff in self._coeffs:
self[row, col] = coeff
row = (row+1) % 2
col = col + 1 - row
if self[1, 0] != 0:
return
self._handle_special_cases(1)
def _calculate(self):
"""Calculate the table using the first 2 rows."""
for i in range(2, self.rows):
self._calculate_row(i)
self._handle_special_cases(i)
def _calculate_row(self, i):
active_row_length = self.cols - i//2
for j in range(active_row_length):
num = (self[i-1, 0] * self[i-2, j+1]
- self[i-2, 0] * self[i-1, j+1])
den = self[i-1, 0]
self[i, j] = num / den
def _handle_special_cases(self, i):
active_row_length = self.cols - i//2
"""Handle the first column zero case and the full row zero case."""
if all(self[i, j] == 0 for j in range(active_row_length)):
self._zero_row_case = True
aux_poly_degree = self._poly_degree - i + 1
self._aux_poly_degrees.append(aux_poly_degree)
# calculate the row using the auxiliary polynomial coefficients
# degrees
for j in range(self.cols):
aux_coeff_deg = aux_poly_degree - 2*j
if aux_coeff_deg < 0:
continue
self[i, j] = self[i - 1, j] * aux_coeff_deg
return
if self[i, 0] == 0:
n_zeros = self._count_consecutive_zeros(i)
self._zero_col_infos.append((i, n_zeros))
for k, expr in enumerate(self[i, n_zeros:active_row_length]):
self[i, k] = self[i, k] + (-1)**n_zeros * expr
def _count_consecutive_zeros(self, i):
"""
Count the number of consecutive zeros in the i-th row of the table.
"""
count = 0
for expr in self[i, :]:
if expr != 0:
break
count += 1
return count
@property
def zero_col_infos(self):
"""
Return a list of tuple.
- The first element of the tuple represents the index of a row in which
the First Column Zero Case occurs.
- The second element of the tuple represents the index of the first
column different from 0 before the Extended Routh's Table construction.
"""
return self._zero_col_infos
@property
def zero_row_case(self):
"""
Return True if during the building of the table the Full Row Zero Case
(see the explanation section) has been encountered, else False.
"""
return self._zero_row_case
@property
def auxiliary_polynomials(self):
"""
If ``zero_row_case`` is True, returns a list of auxiliary polynomials
associated with the Full Row Zero Case.
Otherwise, return None.
It is used to handle the Full Row Zero Case during the
construction of the Routh-Hurwitz table.
"""
if self.zero_row_case is False:
return None
polys = []
for aux_poly_degree in self._aux_poly_degrees:
aux_poly = 0
aux_poly_row = self._poly_degree - aux_poly_degree
for j, exp in enumerate(range(aux_poly_degree, -1, -2)):
aux_poly += self[aux_poly_row, j] * self._var**exp
polys.append(Poly(aux_poly, self._var))
return polys
| RouthHurwitz |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 32593,
"end": 32638
} | class ____(Stmt):
"""Break a loop."""
| Break |
python | kubernetes-client__python | kubernetes/client/api/resource_api.py | {
"start": 543,
"end": 5189
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/resource.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| ResourceApi |
python | rapidsai__cudf | python/cudf/cudf/core/dataframe.py | {
"start": 17354,
"end": 29217
} | class ____(_DataFrameIlocIndexer):
pass
@_performance_tracking
def _listlike_to_column_accessor(
data: Sequence,
columns: None | pd.Index,
index: None | Index,
nan_as_null: bool,
) -> tuple[dict[Any, ColumnBase], Index, pd.Index]:
"""
Convert a list-like to a dict for ColumnAccessor for DataFrame.__init__
Returns
-------
tuple[dict[Any, ColumnBase], Index, pd.Index]
- Mapping of column label: Column
- Resulting index (Index) from the data
- Resulting columns (pd.Index - store as host data) from the data
"""
if len(data) == 0:
if index is None:
index = cudf.RangeIndex(0)
if columns is not None:
col_data = {
col_label: column_empty(len(index), dtype=CUDF_STRING_DTYPE)
for col_label in columns
}
else:
col_data = {}
columns = pd.RangeIndex(0)
return (col_data, index, columns)
# We assume that all elements in data are the same type as the first element
first_element = data[0]
if is_scalar(first_element):
if columns is not None:
if len(columns) != 1:
raise ValueError("Passed column must be of length 1")
else:
columns = pd.RangeIndex(1)
if index is not None:
if len(index) != len(data):
raise ValueError(
"Passed index must be the same length as data."
)
else:
index = cudf.RangeIndex(len(data))
return (
{columns[0]: as_column(data, nan_as_null=nan_as_null)},
index,
columns,
)
elif isinstance(first_element, Series):
data_length = len(data)
if index is None:
index = _index_from_listlike_of_series(data)
else:
index_length = len(index)
if data_length != index_length:
# If the passed `index` length doesn't match
# length of Series objects in `data`, we must
# check if `data` can be duplicated/expanded
# to match the length of index. For that we
# check if the length of index is a factor
# of length of data.
#
# 1. If yes, we extend data
# until length of data is equal to length of index.
# 2. If no, we throw an error stating the
# shape of resulting `data` and `index`
# Simple example
# >>> import pandas as pd
# >>> s = pd.Series([1, 2, 3])
# >>> pd.DataFrame([s], index=['a', 'b'])
# 0 1 2
# a 1 2 3
# b 1 2 3
# >>> pd.DataFrame([s], index=['a', 'b', 'c'])
# 0 1 2
# a 1 2 3
# b 1 2 3
# c 1 2 3
if index_length % data_length == 0:
data = list(
itertools.chain.from_iterable(
itertools.repeat(data, index_length // data_length)
)
)
data_length = len(data)
else:
raise ValueError(
f"Length of values ({data_length}) does "
f"not match length of index ({index_length})"
)
if data_length > 1:
common_dtype = find_common_type([ser.dtype for ser in data])
data = [ser.astype(common_dtype) for ser in data]
if all(len(first_element) == len(ser) for ser in data):
if data_length == 1:
temp_index = first_element.index
else:
temp_index = Index._concat(
[ser.index for ser in data]
).drop_duplicates()
temp_data: dict[Hashable, ColumnBase] = {}
for i, ser in enumerate(data):
if not ser.index.is_unique:
raise ValueError(
"Reindexing only valid with uniquely valued Index "
"objects"
)
elif not ser.index.equals(temp_index):
ser = ser.reindex(temp_index)
temp_data[i] = ser._column
temp_frame = DataFrame._from_data(
ColumnAccessor(
temp_data,
verify=False,
rangeindex=True,
),
index=temp_index,
)
transpose = temp_frame.T
else:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="The behavior of array concatenation",
category=FutureWarning,
)
transpose = cudf.concat(data, axis=1).T
if columns is None:
columns = pd.RangeIndex(transpose._num_columns)
col_data = transpose._data
else:
col_data = {}
for col_label in columns:
try:
col_data[col_label] = transpose._data[col_label]
except KeyError:
col_data[col_label] = column_empty(
len(index), dtype=np.dtype(np.float64)
)
return (col_data, index, columns)
elif isinstance(first_element, dict):
from_pandas = DataFrame(
pd.DataFrame(data),
index=index,
columns=columns,
nan_as_null=nan_as_null,
)
return (
from_pandas._data,
from_pandas.index,
from_pandas._data.to_pandas_index,
)
elif not can_convert_to_column(first_element):
raise TypeError(f"Cannot convert {type(first_element)} to a column")
else:
if index is None:
index = cudf.RangeIndex(len(data))
data = list(itertools.zip_longest(*data))
if columns is None:
if isinstance(first_element, tuple) and hasattr(
first_element, "_fields"
):
# pandas behavior is to use the fields from the first
# namedtuple as the column names
columns = pd.Index(first_element._fields)
else:
columns = pd.RangeIndex(len(data))
col_data = {
col_label: as_column(col_values, nan_as_null=nan_as_null)
for col_label, col_values in zip(columns, data, strict=True)
}
return (
col_data,
index,
columns,
)
@_performance_tracking
def _array_to_column_accessor(
data: np.ndarray | cupy.ndarray,
columns: None | pd.Index,
nan_as_null: bool,
) -> ColumnAccessor:
"""Convert a 1D or 2D numpy or cupy array to a ColumnAccessor for DataFrame.__init__"""
if data.ndim not in {1, 2}:
raise ValueError(
f"records dimension expected 1 or 2 but found: {data.ndim}"
)
if data.ndim == 1:
data = data.reshape(len(data), 1)
if columns is not None:
if len(columns) != data.shape[1]:
raise ValueError(
f"columns length expected {data.shape[1]} but "
f"found {len(columns)}"
)
columns_labels = columns
else:
columns_labels = pd.RangeIndex(data.shape[1])
return ColumnAccessor(
{
column_label: as_column(data[:, i], nan_as_null=nan_as_null)
for column_label, i in zip(
columns_labels, range(data.shape[1]), strict=True
)
},
verify=False,
rangeindex=isinstance(columns_labels, pd.RangeIndex),
multiindex=isinstance(columns_labels, pd.MultiIndex),
label_dtype=columns_labels.dtype,
level_names=tuple(columns_labels.names),
)
@_performance_tracking
def _mapping_to_column_accessor(
data: Mapping,
index: None | Index,
dtype: None | Dtype,
nan_as_null: bool,
) -> tuple[dict[Any, ColumnBase], Index, pd.Index]:
"""
Convert a mapping (dict-like) to a dict for ColumnAccessor for DataFrame.__init__
Returns
-------
tuple[dict[Any, ColumnBase], Index, pd.Index]
- Mapping of column label: Column
- Resulting index from the data
- Resulting columns from the data
"""
if len(data) == 0:
return (
{},
cudf.RangeIndex(0) if index is None else index,
pd.RangeIndex(0),
)
data = dict(data)
# 1) Align indexes of all data.values() that are Series/dicts
values_as_series = {
key: Series(val, nan_as_null=nan_as_null, dtype=dtype)
for key, val in data.items()
if isinstance(val, (pd.Series, Series, dict))
}
if values_as_series:
aligned_input_series = cudf.core.series._align_indices(
list(values_as_series.values())
)
data = data.copy()
for key, aligned_series in zip(
values_as_series.keys(), aligned_input_series, strict=True
):
if index is not None:
aligned_series = aligned_series.reindex(index=index)
data[key] = aligned_series
index_from_data = aligned_series.index
else:
index_from_data = None
value_lengths = set()
result_index = None
if index_from_data is not None:
value_lengths.add(len(index_from_data))
result_index = index_from_data
elif index is not None:
result_index = index
# 2) Convert all array-like data.values() to columns
scalar_keys = []
tuple_key_count = 0
tuple_key_lengths = set()
col_data = {}
for key, value in data.items():
if is_scalar(value):
scalar_keys.append(key)
col_data[key] = value
else:
if isinstance(key, tuple):
tuple_key_count += 1
tuple_key_lengths.add(len(key))
column = as_column(value, nan_as_null=nan_as_null, dtype=dtype)
value_lengths.add(len(column))
col_data[key] = column
if tuple_key_count not in {0, len(data)}:
raise ValueError("All dict keys must be tuples if a tuple key exists.")
if len(scalar_keys) != len(data) and len(value_lengths) > 1:
raise ValueError(
"Found varying value lengths when all values "
f"must have the same length: {value_lengths}"
)
elif len(scalar_keys) == len(data):
# All data.values() are scalars
if index is None:
raise ValueError(
"If using all scalar values, you must pass an index"
)
scalar_length = len(index)
else:
scalar_length = value_lengths.pop()
# 3) Convert all remaining scalar data.values() to columns
for key in scalar_keys:
scalar = col_data[key]
if scalar is None or scalar is cudf.NA:
scalar = pa.scalar(None, type=pa.string())
col_data[key] = as_column(
scalar, nan_as_null=nan_as_null, length=scalar_length, dtype=dtype
)
if tuple_key_count and len(tuple_key_lengths) > 1:
# All tuple keys must be the same length
final_length = max(tuple_key_lengths)
col_data = {
old_key
if len(old_key) == final_length
else old_key + ("",) * (final_length - len(old_key)): column
for old_key, column in col_data.items()
}
if result_index is None:
result_index = cudf.RangeIndex(scalar_length)
return col_data, result_index, pd.Index(col_data)
| _DataFrameiAtIndexer |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 43617,
"end": 47001
} | class ____(TestCase):
@xfail # (reason="bools not interned")
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
# assert_equal(d[::-2].sum(), d[::-2].size)
@xpassIfTorchDynamo_np # (reason="frombuffer")
def test_sum_2(self):
d = np.frombuffer(b"\xff\xff" * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2**i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o + 1 :]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o + 1 :]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
def _test_cast_from_flexible(self, dtype):
# empty string -> false
for n in range(3):
v = np.array(b"", (dtype, n))
assert_equal(bool(v), False)
assert_equal(bool(v[()]), False)
assert_equal(v.astype(bool), False)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.False_)
# anything else -> true
for n in range(1, 4):
for val in [b"a", b"0", b" "]:
v = np.array(val, (dtype, n))
assert_equal(bool(v), True)
assert_equal(bool(v[()]), True)
assert_equal(v.astype(bool), True)
assert_(isinstance(v.astype(bool), np.ndarray))
assert_(v[()].astype(bool) is np.True_)
@skip(reason="np.void")
def test_cast_from_void(self):
self._test_cast_from_flexible(np.void)
@xfail # (reason="See gh-9847")
def test_cast_from_unicode(self):
self._test_cast_from_flexible(np.str_)
@xfail # (reason="See gh-9847")
def test_cast_from_bytes(self):
self._test_cast_from_flexible(np.bytes_)
@instantiate_parametrized_tests
| TestBool |
python | dask__distributed | distributed/semaphore.py | {
"start": 9102,
"end": 20900
} | class ____(SyncMethodMixin):
"""Semaphore
This `semaphore <https://en.wikipedia.org/wiki/Semaphore_(programming)>`_
will track leases on the scheduler which can be acquired and
released by an instance of this class. If the maximum amount of leases are
already acquired, it is not possible to acquire more and the caller waits
until another lease has been released.
The lifetime or leases are controlled using a timeout. This timeout is
refreshed in regular intervals by the ``Client`` of this instance and
provides protection from deadlocks or resource starvation in case of worker
failure.
The timeout can be controlled using the configuration option
``distributed.scheduler.locks.lease-timeout`` and the interval in which the
scheduler verifies the timeout is set using the option
``distributed.scheduler.locks.lease-validation-interval``.
A noticeable difference to the Semaphore of the python standard library is
that this implementation does not allow to release more often than it was
acquired. If this happens, a warning is emitted but the internal state is
not modified.
.. warning::
This implementation is susceptible to lease overbooking in case of
lease timeouts. It is advised to monitor log information and adjust
above configuration options to suitable values for the user application.
Parameters
----------
max_leases: int (optional)
The maximum amount of leases that may be granted at the same time. This
effectively sets an upper limit to the amount of parallel access to a specific resource.
Defaults to 1.
name: string (optional)
Name of the semaphore to acquire. Choosing the same name allows two
disconnected processes to coordinate. If not given, a random
name will be generated.
register: bool
If True, register the semaphore with the scheduler. This needs to be
done before any leases can be acquired. If not done during
initialization, this can also be done by calling the register method of
this class.
When registering, this needs to be awaited.
scheduler_rpc: ConnectionPool
The ConnectionPool to connect to the scheduler. If None is provided, it
uses the worker or client pool. This parameter is mostly used for
testing.
loop: IOLoop
The event loop this instance is using. If None is provided, reuse the
loop of the active worker or client.
Examples
--------
>>> from distributed import Semaphore
... sem = Semaphore(max_leases=2, name='my_database')
...
... def access_resource(s, sem):
... # This automatically acquires a lease from the semaphore (if available) which will be
... # released when leaving the context manager.
... with sem:
... pass
...
... futures = client.map(access_resource, range(10), sem=sem)
... client.gather(futures)
... # Once done, close the semaphore to clean up the state on scheduler side.
... sem.close()
Notes
-----
If a client attempts to release the semaphore but doesn't have a lease acquired, this will raise an exception.
dask executes functions by default assuming they are pure, when using semaphore acquire/releases inside
such a function, it must be noted that there *are* in fact side-effects, thus, the function can no longer be
considered pure. If this is not taken into account, this may lead to unexpected behavior.
"""
def __init__(
self,
max_leases=1,
name=None,
scheduler_rpc=None,
loop=None,
):
self._scheduler = scheduler_rpc
self._loop = loop
self.name = name or "semaphore-" + uuid.uuid4().hex
self.max_leases = max_leases
self.id = uuid.uuid4().hex
self._leases = deque()
self.refresh_leases = True
self._registered = False
# this should give ample time to refresh without introducing another
# config parameter since this *must* be smaller than the timeout anyhow
lease_timeout = dask.config.get("distributed.scheduler.locks.lease-timeout")
if lease_timeout == "inf":
return
## Below is all code for the lease timeout validation
lease_timeout = parse_timedelta(
dask.config.get("distributed.scheduler.locks.lease-timeout"),
default="s",
)
refresh_leases_interval = lease_timeout / 5
pc = PeriodicCallback(
self._refresh_leases, callback_time=refresh_leases_interval * 1000
)
self.refresh_callback = pc
# Need to start the callback using IOLoop.add_callback to ensure that the
# PC uses the correct event loop.
if self.loop is not None:
self.loop.add_callback(pc.start)
@property
def scheduler(self):
self._bind_late()
return self._scheduler
@property
def loop(self):
self._bind_late()
return self._loop
def _bind_late(self):
if self._scheduler is None or self._loop is None:
try:
try:
worker = get_worker()
self._scheduler = self._scheduler or worker.scheduler
self._loop = self._loop or worker.loop
except ValueError:
client = get_client()
self._scheduler = self._scheduler or client.scheduler
self._loop = self._loop or client.loop
except ValueError:
pass
def _verify_running(self):
if not self.scheduler or not self.loop:
raise RuntimeError(
f"{type(self)} object not properly initialized. This can happen if the object is being deserialized outside of the context of a Client or Worker."
)
async def _register(self):
if self._registered:
return
lease_timeout = dask.config.get("distributed.scheduler.locks.lease-timeout")
if lease_timeout == "inf":
lease_timeout = None
else:
lease_timeout = parse_timedelta(lease_timeout, "s")
await retry_operation(
self.scheduler.semaphore_register,
name=self.name,
max_leases=self.max_leases,
lease_timeout=lease_timeout,
operation=f"semaphore register id={self.id} name={self.name}",
)
self._registered = True
def register(self, **kwargs):
return self.sync(self._register)
def __await__(self):
async def create_semaphore():
await self._register()
return self
return create_semaphore().__await__()
async def _refresh_leases(self):
if self.refresh_leases and self._leases:
logger.debug(
"%s refreshing leases for %s with IDs %s",
self.id,
self.name,
self._leases,
)
await retry_operation(
self.scheduler.semaphore_refresh_leases,
lease_ids=list(self._leases),
name=self.name,
operation="semaphore refresh leases: id=%s, lease_ids=%s, name=%s"
% (self.id, list(self._leases), self.name),
)
async def _acquire(self, timeout=None):
await self
lease_id = uuid.uuid4().hex
logger.debug(
"%s requests lease for %s with ID %s", self.id, self.name, lease_id
)
# Using a unique lease id generated here allows us to retry since the
# server handle is idempotent
result = await retry_operation(
self.scheduler.semaphore_acquire,
name=self.name,
timeout=timeout,
lease_id=lease_id,
operation="semaphore acquire: id=%s, lease_id=%s, name=%s"
% (self.id, lease_id, self.name),
)
if result:
self._leases.append(lease_id)
return result
def acquire(self, timeout=None):
"""
Acquire a semaphore.
If the internal counter is greater than zero, decrement it by one and return True immediately.
If it is zero, wait until a release() is called and return True.
Parameters
----------
timeout : number or string or timedelta, optional
Seconds to wait on acquiring the semaphore. This does not
include local coroutine time, network transfer time, etc..
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
"""
self._verify_running()
timeout = parse_timedelta(timeout)
return self.sync(self._acquire, timeout=timeout)
async def _release(self, lease_id):
try:
await retry_operation(
self.scheduler.semaphore_release,
name=self.name,
lease_id=lease_id,
operation="semaphore release: id=%s, lease_id=%s, name=%s"
% (self.id, lease_id, self.name),
)
return True
except Exception: # Release fails for whatever reason
logger.error(
"Release failed for id=%s, lease_id=%s, name=%s. Cluster network might be unstable?"
% (self.id, lease_id, self.name),
exc_info=True,
)
return False
def release(self):
"""
Release the semaphore.
Returns
-------
bool
This value indicates whether a lease was released immediately or not. Note that a user should *not* retry
this operation. Under certain circumstances (e.g. scheduler overload) the lease may not be released
immediately, but it will always be automatically released after a specific interval configured using
"distributed.scheduler.locks.lease-validation-interval" and "distributed.scheduler.locks.lease-timeout".
"""
self._verify_running()
if not self._leases:
raise RuntimeError("Released too often")
# popleft to release the oldest lease first
lease_id = self._leases.popleft()
logger.debug("%s releases %s for %s", self.id, lease_id, self.name)
return self.sync(self._release, lease_id=lease_id)
def get_value(self):
"""
Return the number of currently registered leases.
"""
self._verify_running()
return self.sync(self.scheduler.semaphore_value, name=self.name)
def __enter__(self):
self.register()
self._verify_running()
self.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.release()
async def __aenter__(self):
await self
self._verify_running()
await self.acquire()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.release()
def __getstate__(self):
# Do not serialize the address since workers may have different
# addresses for the scheduler (e.g. if a proxy is between them)
return (self.name, self.max_leases)
def __setstate__(self, state):
name, max_leases = state
self.__init__(
name=name,
max_leases=max_leases,
)
def close(self):
self._verify_running()
self.refresh_callback.stop()
return self.sync(self.scheduler.semaphore_close, name=self.name)
def __del__(self):
if hasattr(self, "refresh_callback"):
self.refresh_callback.stop()
| Semaphore |
python | kamyu104__LeetCode-Solutions | Python/maximum-unique-subarray-sum-after-deletion.py | {
"start": 42,
"end": 260
} | class ____(object):
def maxSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
mx = max(nums)
return mx if mx < 0 else sum(x for x in set(nums) if x >= 0)
| Solution |
python | django__django | tests/model_enums/tests.py | {
"start": 9127,
"end": 9312
} | class ____(ipaddress.IPv4Address, models.Choices):
LOCALHOST = "127.0.0.1", "Localhost"
GATEWAY = "192.168.0.1", "Gateway"
BROADCAST = "192.168.0.255", "Broadcast"
| IPv4Address |
python | Lightning-AI__lightning | src/lightning/pytorch/demos/transformer.py | {
"start": 2901,
"end": 4175
} | class ____(nn.Module):
def __init__(self, dim: int, dropout: float = 0.1, max_len: int = 5000) -> None:
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.dim = dim
self.max_len = max_len
self.pe: Optional[Tensor] = None
def forward(self, x: Tensor) -> Tensor:
if self.pe is None:
# 1) can't use buffer, see https://github.com/pytorch/pytorch/issues/68407
# 2) can't use parameter because pe gets sliced and DDP requires all params to participate in forward
# TODO: Could make this a `nn.Parameter` with `requires_grad=False`
self.pe = self._init_pos_encoding(device=x.device)
x = x + self.pe[:, x.size(1)]
return self.dropout(x)
def _init_pos_encoding(self, device: torch.device) -> Tensor:
pe = torch.zeros(self.max_len, self.dim, device=device)
position = torch.arange(0, self.max_len, dtype=torch.float, device=device).unsqueeze(1)
div_term = torch.exp(torch.arange(0, self.dim, 2, device=device).float() * (-math.log(10000.0) / self.dim))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
return pe
| PositionalEncoding |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 160667,
"end": 167444
} | class ____:
"""This is the main external interface class for solving.
It manages solver configuration and preferences in one place. It sets up the solve
and passes the setup method to the driver, as well.
"""
def __init__(self):
# Compute possible compilers first, so we see them as externals
_ = spack.compilers.config.all_compilers(init_config=True)
self._conc_cache = ConcretizationCache()
self.driver = PyclingoDriver(conc_cache=self._conc_cache)
self.selector = ReusableSpecsSelector(configuration=spack.config.CONFIG)
@staticmethod
def _check_input_and_extract_concrete_specs(
specs: Sequence[spack.spec.Spec],
) -> List[spack.spec.Spec]:
reusable: List[spack.spec.Spec] = []
analyzer = create_graph_analyzer()
for root in specs:
for s in root.traverse():
if s.concrete:
reusable.append(s)
else:
if spack.repo.PATH.is_virtual(s.name):
continue
# Error if direct dependencies cannot be satisfied
deps = {
edge.spec.name
for edge in s.edges_to_dependencies()
if edge.direct and edge.when == EMPTY_SPEC
}
if deps:
graph = analyzer.possible_dependencies(
s, allowed_deps=dt.ALL, transitive=False
)
deps.difference_update(graph.real_pkgs, graph.virtuals)
if deps:
start_str = f"'{root}'" if s == root else f"'{s}' in '{root}'"
raise UnsatisfiableSpecError(
f"{start_str} cannot depend on {', '.join(deps)}"
)
try:
spack.repo.PATH.get_pkg_class(s.fullname)
except spack.repo.UnknownPackageError:
raise UnsatisfiableSpecError(
f"cannot concretize '{root}', since '{s.name}' does not exist"
)
spack.spec.Spec.ensure_valid_variants(s)
return reusable
def solve_with_stats(
self,
specs: Sequence[spack.spec.Spec],
out: Optional[io.IOBase] = None,
timers: bool = False,
stats: bool = False,
tests: spack.concretize.TestsType = False,
setup_only: bool = False,
allow_deprecated: bool = False,
) -> Tuple[Result, Optional[spack.util.timer.Timer], Optional[Dict]]:
"""
Concretize a set of specs and track the timing and statistics for the solve
Arguments:
specs: List of ``Spec`` objects to solve for.
out: Optionally write the generate ASP program to a file-like object.
timers: Print out coarse timers for different solve phases.
stats: Print out detailed stats from clingo.
tests: If True, concretize test dependencies for all packages.
If a tuple of package names, concretize test dependencies for named
packages (defaults to False: do not concretize test dependencies).
setup_only: if True, stop after setup and don't solve (default False).
allow_deprecated: allow deprecated version in the solve
"""
specs = [s.lookup_hash() for s in specs]
reusable_specs = self._check_input_and_extract_concrete_specs(specs)
reusable_specs.extend(self.selector.reusable_specs(specs))
setup = SpackSolverSetup(tests=tests)
output = OutputConfiguration(timers=timers, stats=stats, out=out, setup_only=setup_only)
result = self.driver.solve(
setup, specs, reuse=reusable_specs, output=output, allow_deprecated=allow_deprecated
)
self._conc_cache.cleanup()
return result
def solve(self, specs: Sequence[spack.spec.Spec], **kwargs) -> Result:
"""
Convenience function for concretizing a set of specs and ignoring timing
and statistics. Uses the same kwargs as solve_with_stats.
"""
# Check upfront that the variants are admissible
result, _, _ = self.solve_with_stats(specs, **kwargs)
return result
def solve_in_rounds(
self,
specs: Sequence[spack.spec.Spec],
out: Optional[io.IOBase] = None,
timers: bool = False,
stats: bool = False,
tests: spack.concretize.TestsType = False,
allow_deprecated: bool = False,
) -> Generator[Result, None, None]:
"""Solve for a stable model of specs in multiple rounds.
This relaxes the assumption of solve that everything must be consistent and
solvable in a single round. Each round tries to maximize the reuse of specs
from previous rounds.
The function is a generator that yields the result of each round.
Arguments:
specs (list): list of Specs to solve.
out: Optionally write the generate ASP program to a file-like object.
timers (bool): print timing if set to True
stats (bool): print internal statistics if set to True
tests (bool): add test dependencies to the solve
allow_deprecated (bool): allow deprecated version in the solve
"""
specs = [s.lookup_hash() for s in specs]
reusable_specs = self._check_input_and_extract_concrete_specs(specs)
reusable_specs.extend(self.selector.reusable_specs(specs))
setup = SpackSolverSetup(tests=tests)
# Tell clingo that we don't have to solve all the inputs at once
setup.concretize_everything = False
input_specs = specs
output = OutputConfiguration(timers=timers, stats=stats, out=out, setup_only=False)
while True:
result, _, _ = self.driver.solve(
setup,
input_specs,
reuse=reusable_specs,
output=output,
allow_deprecated=allow_deprecated,
)
yield result
# If we don't have unsolved specs we are done
if not result.unsolved_specs:
break
if not result.specs:
# This is also a problem: no specs were solved for, which means we would be in a
# loop if we tried again
raise OutputDoesNotSatisfyInputError(result.unsolved_specs)
input_specs = list(x for (x, y) in result.unsolved_specs)
for spec in result.specs:
reusable_specs.extend(spec.traverse())
self._conc_cache.cleanup()
| Solver |
python | tensorflow__tensorflow | tensorflow/python/ops/lookup_ops.py | {
"start": 20710,
"end": 21366
} | class ____:
"""The key and value content to get from each line.
This class defines the key and value used for `tf.lookup.TextFileInitializer`.
The key and value content to get from each line is specified either
by the following, or a value `>=0`.
* `TextFileIndex.LINE_NUMBER` means use the line number starting from zero,
expects data type int64.
* `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data
type string.
A value `>=0` means use the index (starting at zero) of the split line based
on `delimiter`.
"""
WHOLE_LINE = -2
LINE_NUMBER = -1
@tf_export("lookup.TextFileInitializer")
| TextFileIndex |
python | huggingface__transformers | src/transformers/models/grounding_dino/processing_grounding_dino.py | {
"start": 3906,
"end": 11503
} | class ____(ProcessorMixin):
r"""
Constructs a Grounding DINO processor which wraps a Deformable DETR image processor and a BERT tokenizer into a
single processor.
[`GroundingDinoProcessor`] offers all the functionalities of [`GroundingDinoImageProcessor`] and
[`AutoTokenizer`]. See the docstring of [`~GroundingDinoProcessor.__call__`] and [`~GroundingDinoProcessor.decode`]
for more information.
Args:
image_processor (`GroundingDinoImageProcessor`):
An instance of [`GroundingDinoImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of ['PreTrainedTokenizer`]. The tokenizer is a required input.
"""
valid_processor_kwargs = GroundingDinoProcessorKwargs
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
**kwargs: Unpack[GroundingDinoProcessorKwargs],
) -> BatchEncoding:
"""
This method uses [`GroundingDinoImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Args:
images (`ImageInput`, `list[ImageInput]`, *optional*):
The image or batch of images to be processed. The image might be either PIL image, numpy array or a torch tensor.
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`, *optional*):
Candidate labels to be detected on the image. The text might be one of the following:
- A list of candidate labels (strings) to be detected on the image (e.g. ["a cat", "a dog"]).
- A batch of candidate labels to be detected on the batch of images (e.g. [["a cat", "a dog"], ["a car", "a person"]]).
- A merged candidate labels string to be detected on the image, separated by "." (e.g. "a cat. a dog.").
- A batch of merged candidate labels text to be detected on the batch of images (e.g. ["a cat. a dog.", "a car. a person."]).
"""
if text is not None:
text = self._preprocess_input_text(text)
return super().__call__(images=images, text=text, **kwargs)
def _preprocess_input_text(self, text):
"""
Preprocess input text to ensure that labels are in the correct format for the model.
If the text is a list of candidate labels, merge the candidate labels into a single string,
for example, ["a cat", "a dog"] -> "a cat. a dog.". In case candidate labels are already in a form of
"a cat. a dog.", the text is returned as is.
"""
if _is_list_of_candidate_labels(text):
text = _merge_candidate_labels_text(text)
# for batched input
elif isinstance(text, (list, tuple)) and all(_is_list_of_candidate_labels(t) for t in text):
text = [_merge_candidate_labels_text(sample) for sample in text]
return text
def post_process_grounded_object_detection(
self,
outputs: "GroundingDinoObjectDetectionOutput",
input_ids: Optional[TensorType] = None,
threshold: float = 0.25,
text_threshold: float = 0.25,
target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
text_labels: Optional[list[list[str]]] = None,
):
"""
Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format and get the associated text label.
Args:
outputs ([`GroundingDinoObjectDetectionOutput`]):
Raw outputs of the model.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The token ids of the input text. If not provided will be taken from the model output.
threshold (`float`, *optional*, defaults to 0.25):
Threshold to keep object detection predictions based on confidence score.
text_threshold (`float`, *optional*, defaults to 0.25):
Score threshold to keep text detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
text_labels (`list[list[str]]`, *optional*):
List of candidate labels to be detected on each image. At the moment it's *NOT used*, but required
to be in signature for the zero-shot object detection pipeline. Text labels are instead extracted
from the `input_ids` tensor provided in `outputs`.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the
- **scores**: tensor of confidence scores for detected objects
- **boxes**: tensor of bounding boxes in [x0, y0, x1, y1] format
- **labels**: list of text labels for each detected object (will be replaced with integer ids in v4.51.0)
- **text_labels**: list of text labels for detected objects
"""
batch_logits, batch_boxes = outputs.logits, outputs.pred_boxes
input_ids = input_ids if input_ids is not None else outputs.input_ids
if target_sizes is not None and len(target_sizes) != len(batch_logits):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
batch_probs = torch.sigmoid(batch_logits) # (batch_size, num_queries, 256)
batch_scores = torch.max(batch_probs, dim=-1)[0] # (batch_size, num_queries)
# Convert to [x0, y0, x1, y1] format
batch_boxes = center_to_corners_format(batch_boxes)
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(batch_boxes.device)
batch_boxes = batch_boxes * scale_fct[:, None, :]
results = []
for idx, (scores, boxes, probs) in enumerate(zip(batch_scores, batch_boxes, batch_probs)):
keep = scores > threshold
scores = scores[keep]
boxes = boxes[keep]
# extract text labels
prob = probs[keep]
label_ids = get_phrases_from_posmap(prob > text_threshold, input_ids[idx])
objects_text_labels = self.batch_decode(label_ids)
result = DictWithDeprecationWarning(
{
"scores": scores,
"boxes": boxes,
"text_labels": objects_text_labels,
# TODO: @pavel, set labels to None since v4.51.0 or find a way to extract ids
"labels": objects_text_labels,
}
)
results.append(result)
return results
__all__ = ["GroundingDinoProcessor"]
| GroundingDinoProcessor |
python | tornadoweb__tornado | tornado/template.py | {
"start": 20949,
"end": 21047
} | class ____(_Node):
def __init__(self, name: str) -> None:
self.name = name
| _ExtendsBlock |
python | django__django | tests/file_storage/tests.py | {
"start": 23910,
"end": 24378
} | class ____(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save("custom_storage", ContentFile("custom contents"))
self.assertEqual(first, "custom_storage")
second = self.storage.save("custom_storage", ContentFile("more contents"))
self.assertEqual(second, "custom_storage.2")
self.storage.delete(first)
self.storage.delete(second)
| CustomStorageTests |
python | apache__avro | lang/py/avro/tether/tether_task.py | {
"start": 3979,
"end": 5000
} | class ____:
"""
This is a small requestor subclass I created for the HTTP protocol.
Since the HTTP protocol isn't persistent, we need to instantiate
a new transciever and new requestor for each request.
But I wanted to use of the requestor to be identical to that for
SocketTransciever so that we can seamlessly switch between the two.
"""
def __init__(self, server, port, protocol):
"""
Instantiate the class.
Parameters
----------------------------------------------------------------------
server - The server hostname
port - Which port to use
protocol - The protocol for the communication
"""
self.server = server
self.port = port
self.protocol = protocol
def request(self, *args, **param):
transciever = avro.ipc.HTTPTransceiver(self.server, self.port)
requestor = avro.ipc.Requestor(self.protocol, transciever)
return requestor.request(*args, **param)
| HTTPRequestor |
python | Textualize__textual | docs/examples/widgets/tabbed_content_label_color.py | {
"start": 103,
"end": 574
} | class ____(App):
CSS = """
TabbedContent #--content-tab-green {
color: green;
}
TabbedContent #--content-tab-red {
color: red;
}
"""
def compose(self) -> ComposeResult:
with TabbedContent():
with TabPane("Red", id="red"):
yield Label("Red!")
with TabPane("Green", id="green"):
yield Label("Green!")
if __name__ == "__main__":
ColorTabsApp().run()
| ColorTabsApp |
python | kamyu104__LeetCode-Solutions | Python/shortest-unsorted-continuous-subarray.py | {
"start": 29,
"end": 569
} | class ____(object):
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
left, right = -1, -2
min_from_right, max_from_left = nums[-1], nums[0]
for i in xrange(1, n):
max_from_left = max(max_from_left, nums[i])
min_from_right = min(min_from_right, nums[n-1-i])
if nums[i] < max_from_left: right = i
if nums[n-1-i] > min_from_right: left = n-1-i
# Time: O(nlogn)
# Space: O(n)
| Solution |
python | bokeh__bokeh | src/bokeh/core/property/container.py | {
"start": 5671,
"end": 5869
} | class ____(Seq[T]):
""" Accept NumPy array values.
"""
@classmethod
def _is_seq(cls, value: Any) -> bool:
import numpy as np
return isinstance(value, np.ndarray)
| Array |
python | TheAlgorithms__Python | machine_learning/sequential_minimum_optimization.py | {
"start": 13253,
"end": 20163
} | class ____:
def __init__(self, kernel, degree=1.0, coef0=0.0, gamma=1.0):
self.degree = np.float64(degree)
self.coef0 = np.float64(coef0)
self.gamma = np.float64(gamma)
self._kernel_name = kernel
self._kernel = self._get_kernel(kernel_name=kernel)
self._check()
def _polynomial(self, v1, v2):
return (self.gamma * np.inner(v1, v2) + self.coef0) ** self.degree
def _linear(self, v1, v2):
return np.inner(v1, v2) + self.coef0
def _rbf(self, v1, v2):
return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2))
def _check(self):
if self._kernel == self._rbf and self.gamma < 0:
raise ValueError("gamma value must be non-negative")
def _get_kernel(self, kernel_name):
maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf}
return maps[kernel_name]
def __call__(self, v1, v2):
return self._kernel(v1, v2)
def __repr__(self):
return self._kernel_name
def count_time(func):
def call_func(*args, **kwargs):
import time
start_time = time.time()
func(*args, **kwargs)
end_time = time.time()
print(f"SMO algorithm cost {end_time - start_time} seconds")
return call_func
@count_time
def test_cancer_data():
print("Hello!\nStart test SVM using the SMO algorithm!")
# 0: download dataset and load into pandas' dataframe
if not os.path.exists(r"cancer_data.csv"):
request = urllib.request.Request( # noqa: S310
CANCER_DATASET_URL,
headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"},
)
response = urllib.request.urlopen(request) # noqa: S310
content = response.read().decode("utf-8")
with open(r"cancer_data.csv", "w") as f:
f.write(content)
data = pd.read_csv(
"cancer_data.csv",
header=None,
dtype={0: str}, # Assuming the first column contains string data
)
# 1: pre-processing data
del data[data.columns.tolist()[0]]
data = data.dropna(axis=0)
data = data.replace({"M": np.float64(1), "B": np.float64(-1)})
samples = np.array(data)[:, :]
# 2: dividing data into train_data data and test_data data
train_data, test_data = samples[:328, :], samples[328:, :]
test_tags, test_samples = test_data[:, 0], test_data[:, 1:]
# 3: choose kernel function, and set initial alphas to zero (optional)
my_kernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
al = np.zeros(train_data.shape[0])
# 4: calculating best alphas using SMO algorithm and predict test_data samples
mysvm = SmoSVM(
train=train_data,
kernel_func=my_kernel,
alpha_list=al,
cost=0.4,
b=0.0,
tolerance=0.001,
)
mysvm.fit()
predict = mysvm.predict(test_samples)
# 5: check accuracy
score = 0
test_num = test_tags.shape[0]
for i in range(test_tags.shape[0]):
if test_tags[i] == predict[i]:
score += 1
print(f"\nAll: {test_num}\nCorrect: {score}\nIncorrect: {test_num - score}")
print(f"Rough Accuracy: {score / test_tags.shape[0]}")
def test_demonstration():
# change stdout
print("\nStarting plot, please wait!")
sys.stdout = open(os.devnull, "w")
ax1 = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (0, 1))
ax3 = plt.subplot2grid((2, 2), (1, 0))
ax4 = plt.subplot2grid((2, 2), (1, 1))
ax1.set_title("Linear SVM, cost = 0.1")
test_linear_kernel(ax1, cost=0.1)
ax2.set_title("Linear SVM, cost = 500")
test_linear_kernel(ax2, cost=500)
ax3.set_title("RBF kernel SVM, cost = 0.1")
test_rbf_kernel(ax3, cost=0.1)
ax4.set_title("RBF kernel SVM, cost = 500")
test_rbf_kernel(ax4, cost=500)
sys.stdout = sys.__stdout__
print("Plot done!")
def test_linear_kernel(ax, cost):
train_x, train_y = make_blobs(
n_samples=500, centers=2, n_features=2, random_state=1
)
train_y[train_y == 0] = -1
scaler = StandardScaler()
train_x_scaled = scaler.fit_transform(train_x, train_y)
train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
my_kernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5)
mysvm = SmoSVM(
train=train_data,
kernel_func=my_kernel,
cost=cost,
tolerance=0.001,
auto_norm=False,
)
mysvm.fit()
plot_partition_boundary(mysvm, train_data, ax=ax)
def test_rbf_kernel(ax, cost):
train_x, train_y = make_circles(
n_samples=500, noise=0.1, factor=0.1, random_state=1
)
train_y[train_y == 0] = -1
scaler = StandardScaler()
train_x_scaled = scaler.fit_transform(train_x, train_y)
train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
my_kernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
mysvm = SmoSVM(
train=train_data,
kernel_func=my_kernel,
cost=cost,
tolerance=0.001,
auto_norm=False,
)
mysvm.fit()
plot_partition_boundary(mysvm, train_data, ax=ax)
def plot_partition_boundary(
model, train_data, ax, resolution=100, colors=("b", "k", "r")
):
"""
We cannot get the optimal w of our kernel SVM model, which is different from a
linear SVM. For this reason, we generate randomly distributed points with high
density, and predicted values of these points are calculated using our trained
model. Then we could use this predicted values to draw contour map, and this contour
map represents the SVM's partition boundary.
"""
train_data_x = train_data[:, 1]
train_data_y = train_data[:, 2]
train_data_tags = train_data[:, 0]
xrange = np.linspace(train_data_x.min(), train_data_x.max(), resolution)
yrange = np.linspace(train_data_y.min(), train_data_y.max(), resolution)
test_samples = np.array([(x, y) for x in xrange for y in yrange]).reshape(
resolution * resolution, 2
)
test_tags = model.predict(test_samples, classify=False)
grid = test_tags.reshape((len(xrange), len(yrange)))
# Plot contour map which represents the partition boundary
ax.contour(
xrange,
yrange,
np.asmatrix(grid).T,
levels=(-1, 0, 1),
linestyles=("--", "-", "--"),
linewidths=(1, 1, 1),
colors=colors,
)
# Plot all train samples
ax.scatter(
train_data_x,
train_data_y,
c=train_data_tags,
cmap=plt.cm.Dark2,
lw=0,
alpha=0.5,
)
# Plot support vectors
support = model.support
ax.scatter(
train_data_x[support],
train_data_y[support],
c=train_data_tags[support],
cmap=plt.cm.Dark2,
)
if __name__ == "__main__":
test_cancer_data()
test_demonstration()
plt.show()
| Kernel |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 39646,
"end": 39839
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("CREATED_AT", "MONTHLY_PRICE_IN_CENTS")
| SponsorsTierOrderField |
python | getsentry__sentry | tests/sentry/preprod/test_tasks.py | {
"start": 15759,
"end": 18996
} | class ____(BaseAssembleTest):
def setUp(self) -> None:
super().setUp()
self.preprod_artifact = PreprodArtifact.objects.create(
project=self.project, state=PreprodArtifact.ArtifactState.UPLOADED
)
def _run_task_and_verify_status(
self, content, checksum=None, chunks=None, artifact_id=None, org_id=None, project_id=None
):
checksum = checksum or sha1(content).hexdigest()
blob = FileBlob.from_file_with_organization(ContentFile(content), self.organization)
chunks = chunks or [blob.checksum]
assemble_preprod_artifact_installable_app(
org_id=org_id or self.organization.id,
project_id=project_id or self.project.id,
checksum=checksum,
chunks=chunks,
artifact_id=artifact_id or self.preprod_artifact.id,
)
status, details = get_assemble_status(
AssembleTask.PREPROD_ARTIFACT_INSTALLABLE_APP, project_id or self.project.id, checksum
)
delete_assemble_status(
AssembleTask.PREPROD_ARTIFACT_INSTALLABLE_APP, project_id or self.project.id, checksum
)
return status, details
def test_assemble_preprod_artifact_installable_app_success(self) -> None:
status, details = self._run_task_and_verify_status(b"test installable app content")
assert status == ChunkFileState.OK
assert details is None
# Verify installable app file was created
installable_files = File.objects.filter(type="preprod.file")
assert len(installable_files) == 1
assert installable_files[0].name.startswith("preprod-file-")
# Verify PreprodArtifact was updated with installable app file ID
self.preprod_artifact.refresh_from_db()
assert self.preprod_artifact.installable_app_file_id == installable_files[0].id
def test_assemble_preprod_artifact_installable_app_error_cases(self) -> None:
# Test nonexistent artifact
status, details = self._run_task_and_verify_status(
b"nonexistent artifact", artifact_id=99999
)
assert status == ChunkFileState.ERROR
# Test checksum mismatch
status, details = self._run_task_and_verify_status(b"checksum mismatch", checksum="b" * 40)
assert status == ChunkFileState.ERROR
assert "checksum mismatch" in details
# Test missing chunks
status, details = self._run_task_and_verify_status(
b"missing chunks", chunks=["nonexistent" + "1" * 32]
)
assert status == ChunkFileState.ERROR
assert "Not all chunks available" in details
# Test nonexistent org
status, details = self._run_task_and_verify_status(b"nonexistent org", org_id=99999)
assert status == ChunkFileState.ERROR
# Test nonexistent project
status, details = self._run_task_and_verify_status(b"nonexistent project", project_id=99999)
assert status == ChunkFileState.ERROR
# Verify PreprodArtifact was not updated for error cases
self.preprod_artifact.refresh_from_db()
assert self.preprod_artifact.installable_app_file_id is None
| AssemblePreprodArtifactInstallableAppTest |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/i18n/main.py | {
"start": 800,
"end": 1476
} | class ____(BaseHandler):
"""A simple handler with internationalized strings.
This handler demonstrates how to internationalize strings in
Python, Jinja2 template and Javascript.
"""
def get(self):
"""A get handler for this sample.
It just shows internationalized strings in Python, Jinja2
template and Javascript.
"""
context = dict(message=gettext("Hello World from Python code!"))
template = self.jinja2_env.get_template("index.jinja2")
self.response.out.write(template.render(context))
application = webapp2.WSGIApplication(
[
("/", MainHandler),
],
debug=True,
)
| MainHandler |
python | pytorch__pytorch | test/inductor/test_extension_backend.py | {
"start": 1491,
"end": 3392
} | class ____(TestCase):
module = None
# Use a lock file so that only one test can build this extension at a time
lock_file = "extension_device.lock"
lock = FileLock(lock_file)
@classmethod
def setUpClass(cls):
super().setUpClass()
try:
cls.lock.acquire(timeout=600)
except Timeout:
# This shouldn't happen, still attempt to build the extension anyway
pass
# Build Extension
torch.testing._internal.common_utils.remove_cpp_extensions_build_root()
source_file_path = os.path.dirname(os.path.abspath(__file__))
source_file = os.path.join(
source_file_path, "extension_backends/cpp/extension_device.cpp"
)
cls.module = torch.utils.cpp_extension.load(
name="extension_device",
sources=[
str(source_file),
],
extra_cflags=["-g"],
verbose=True,
)
@classmethod
def tearDownClass(cls):
cls._stack.close()
super().tearDownClass()
torch.testing._internal.common_utils.remove_cpp_extensions_build_root()
cls.lock.release()
if os.path.exists(cls.lock_file):
os.remove(cls.lock_file)
def setUp(self):
torch._dynamo.reset()
super().setUp()
# cpp extensions use relative paths. Those paths are relative to
# this file, so we'll change the working directory temporarily
self.old_working_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
assert self.module is not None
def tearDown(self):
super().tearDown()
torch._dynamo.reset()
# return the working directory (see setUp)
os.chdir(self.old_working_dir)
@unittest.skipIf(IS_FBCODE, "cpp_extension doesn't work in fbcode right now")
| BaseExtensionBackendTests |
python | giampaolo__psutil | tests/test_linux.py | {
"start": 35326,
"end": 37210
} | class ____(PsutilTestCase):
def test_ips(self):
for name, addrs in psutil.net_if_addrs().items():
for addr in addrs:
if addr.family == psutil.AF_LINK:
assert addr.address == get_mac_address(name)
elif addr.family == socket.AF_INET:
assert addr.address == get_ipv4_address(name)
assert addr.netmask == get_ipv4_netmask(name)
if addr.broadcast is not None:
assert addr.broadcast == get_ipv4_broadcast(name)
else:
assert get_ipv4_broadcast(name) == '0.0.0.0'
elif addr.family == socket.AF_INET6:
# IPv6 addresses can have a percent symbol at the end.
# E.g. these 2 are equivalent:
# "fe80::1ff:fe23:4567:890a"
# "fe80::1ff:fe23:4567:890a%eth0"
# That is the "zone id" portion, which usually is the name
# of the network interface.
address = addr.address.split('%')[0]
assert address in get_ipv6_addresses(name)
# XXX - not reliable when having virtual NICs installed by Docker.
# @pytest.mark.skipif(not shutil.which("ip"),
# reason="'ip' utility not available")
# def test_net_if_names(self):
# out = sh("ip addr").strip()
# nics = [x for x in psutil.net_if_addrs().keys() if ':' not in x]
# found = 0
# for line in out.split('\n'):
# line = line.strip()
# if re.search(r"^\d+:", line):
# found += 1
# name = line.split(':')[1].strip()
# assert name in nics
# assert len(nics) == found
@pytest.mark.skipif(not LINUX, reason="LINUX only")
| TestSystemNetIfAddrs |
python | pallets__werkzeug | examples/couchy/application.py | {
"start": 429,
"end": 1493
} | class ____:
def __init__(self, db_uri):
local.application = self
server = Server(db_uri)
try:
db = server.create("urls")
except Exception:
db = server["urls"]
self.dispatch = SharedDataMiddleware(self.dispatch, {"/static": STATIC_PATH})
URL.db = db
def dispatch(self, environ, start_response):
local.application = self
request = Request(environ)
local.url_adapter = adapter = url_map.bind_to_environ(environ)
try:
endpoint, values = adapter.match()
handler = getattr(views, endpoint)
response = handler(request, **values)
except NotFound:
response = views.not_found(request)
response.status_code = 404
except HTTPException as e:
response = e
return ClosingIterator(
response(environ, start_response), [local_manager.cleanup]
)
def __call__(self, environ, start_response):
return self.dispatch(environ, start_response)
| Couchy |
python | TheAlgorithms__Python | data_structures/binary_tree/binary_search_tree_recursive.py | {
"start": 7400,
"end": 16490
} | class ____(unittest.TestCase):
@staticmethod
def _get_binary_search_tree() -> BinarySearchTree:
r"""
8
/ \
3 10
/ \ \
1 6 14
/ \ /
4 7 13
\
5
"""
t = BinarySearchTree()
t.put(8)
t.put(3)
t.put(6)
t.put(1)
t.put(10)
t.put(14)
t.put(13)
t.put(4)
t.put(7)
t.put(5)
return t
def test_put(self) -> None:
t = BinarySearchTree()
assert t.is_empty()
t.put(8)
r"""
8
"""
assert t.root is not None
assert t.root.parent is None
assert t.root.label == 8
t.put(10)
r"""
8
\
10
"""
assert t.root.right is not None
assert t.root.right.parent == t.root
assert t.root.right.label == 10
t.put(3)
r"""
8
/ \
3 10
"""
assert t.root.left is not None
assert t.root.left.parent == t.root
assert t.root.left.label == 3
t.put(6)
r"""
8
/ \
3 10
\
6
"""
assert t.root.left.right is not None
assert t.root.left.right.parent == t.root.left
assert t.root.left.right.label == 6
t.put(1)
r"""
8
/ \
3 10
/ \
1 6
"""
assert t.root.left.left is not None
assert t.root.left.left.parent == t.root.left
assert t.root.left.left.label == 1
with pytest.raises(ValueError):
t.put(1)
def test_search(self) -> None:
t = self._get_binary_search_tree()
node = t.search(6)
assert node.label == 6
node = t.search(13)
assert node.label == 13
with pytest.raises(ValueError):
t.search(2)
def test_remove(self) -> None:
t = self._get_binary_search_tree()
t.remove(13)
r"""
8
/ \
3 10
/ \ \
1 6 14
/ \
4 7
\
5
"""
assert t.root is not None
assert t.root.right is not None
assert t.root.right.right is not None
assert t.root.right.right.right is None
assert t.root.right.right.left is None
t.remove(7)
r"""
8
/ \
3 10
/ \ \
1 6 14
/
4
\
5
"""
assert t.root.left is not None
assert t.root.left.right is not None
assert t.root.left.right.left is not None
assert t.root.left.right.right is None
assert t.root.left.right.left.label == 4
t.remove(6)
r"""
8
/ \
3 10
/ \ \
1 4 14
\
5
"""
assert t.root.left.left is not None
assert t.root.left.right.right is not None
assert t.root.left.left.label == 1
assert t.root.left.right.label == 4
assert t.root.left.right.right.label == 5
assert t.root.left.right.left is None
assert t.root.left.left.parent == t.root.left
assert t.root.left.right.parent == t.root.left
t.remove(3)
r"""
8
/ \
4 10
/ \ \
1 5 14
"""
assert t.root is not None
assert t.root.left.label == 4
assert t.root.left.right.label == 5
assert t.root.left.left.label == 1
assert t.root.left.parent == t.root
assert t.root.left.left.parent == t.root.left
assert t.root.left.right.parent == t.root.left
t.remove(4)
r"""
8
/ \
5 10
/ \
1 14
"""
assert t.root.left is not None
assert t.root.left.left is not None
assert t.root.left.label == 5
assert t.root.left.right is None
assert t.root.left.left.label == 1
assert t.root.left.parent == t.root
assert t.root.left.left.parent == t.root.left
def test_remove_2(self) -> None:
t = self._get_binary_search_tree()
t.remove(3)
r"""
8
/ \
4 10
/ \ \
1 6 14
/ \ /
5 7 13
"""
assert t.root is not None
assert t.root.left is not None
assert t.root.left.left is not None
assert t.root.left.right is not None
assert t.root.left.right.left is not None
assert t.root.left.right.right is not None
assert t.root.left.label == 4
assert t.root.left.right.label == 6
assert t.root.left.left.label == 1
assert t.root.left.right.right.label == 7
assert t.root.left.right.left.label == 5
assert t.root.left.parent == t.root
assert t.root.left.right.parent == t.root.left
assert t.root.left.left.parent == t.root.left
assert t.root.left.right.left.parent == t.root.left.right
def test_empty(self) -> None:
t = self._get_binary_search_tree()
t.empty()
assert t.root is None
def test_is_empty(self) -> None:
t = self._get_binary_search_tree()
assert not t.is_empty()
t.empty()
assert t.is_empty()
def test_exists(self) -> None:
t = self._get_binary_search_tree()
assert t.exists(6)
assert not t.exists(-1)
def test_get_max_label(self) -> None:
t = self._get_binary_search_tree()
assert t.get_max_label() == 14
t.empty()
with pytest.raises(ValueError):
t.get_max_label()
def test_get_min_label(self) -> None:
t = self._get_binary_search_tree()
assert t.get_min_label() == 1
t.empty()
with pytest.raises(ValueError):
t.get_min_label()
def test_inorder_traversal(self) -> None:
t = self._get_binary_search_tree()
inorder_traversal_nodes = [i.label for i in t.inorder_traversal()]
assert inorder_traversal_nodes == [1, 3, 4, 5, 6, 7, 8, 10, 13, 14]
def test_preorder_traversal(self) -> None:
t = self._get_binary_search_tree()
preorder_traversal_nodes = [i.label for i in t.preorder_traversal()]
assert preorder_traversal_nodes == [8, 3, 1, 6, 4, 5, 7, 10, 14, 13]
def binary_search_tree_example() -> None:
r"""
Example
8
/ \
3 10
/ \ \
1 6 14
/ \ /
4 7 13
\
5
Example After Deletion
4
/ \
1 7
\
5
"""
t = BinarySearchTree()
t.put(8)
t.put(3)
t.put(6)
t.put(1)
t.put(10)
t.put(14)
t.put(13)
t.put(4)
t.put(7)
t.put(5)
print(
"""
8
/ \\
3 10
/ \\ \\
1 6 14
/ \\ /
4 7 13
\\
5
"""
)
print("Label 6 exists:", t.exists(6))
print("Label 13 exists:", t.exists(13))
print("Label -1 exists:", t.exists(-1))
print("Label 12 exists:", t.exists(12))
# Prints all the elements of the list in inorder traversal
inorder_traversal_nodes = [i.label for i in t.inorder_traversal()]
print("Inorder traversal:", inorder_traversal_nodes)
# Prints all the elements of the list in preorder traversal
preorder_traversal_nodes = [i.label for i in t.preorder_traversal()]
print("Preorder traversal:", preorder_traversal_nodes)
print("Max. label:", t.get_max_label())
print("Min. label:", t.get_min_label())
# Delete elements
print("\nDeleting elements 13, 10, 8, 3, 6, 14")
print(
"""
4
/ \\
1 7
\\
5
"""
)
t.remove(13)
t.remove(10)
t.remove(8)
t.remove(3)
t.remove(6)
t.remove(14)
# Prints all the elements of the list in inorder traversal after delete
inorder_traversal_nodes = [i.label for i in t.inorder_traversal()]
print("Inorder traversal after delete:", inorder_traversal_nodes)
# Prints all the elements of the list in preorder traversal after delete
preorder_traversal_nodes = [i.label for i in t.preorder_traversal()]
print("Preorder traversal after delete:", preorder_traversal_nodes)
print("Max. label:", t.get_max_label())
print("Min. label:", t.get_min_label())
if __name__ == "__main__":
binary_search_tree_example()
| BinarySearchTreeTest |
python | jazzband__django-pipeline | tests/tests/test_storage.py | {
"start": 1065,
"end": 4914
} | class ____(TestCase):
def tearDown(self):
staticfiles_storage._setup()
@pipeline_settings(JS_COMPRESSOR=None, CSS_COMPRESSOR=None)
def test_post_process_dry_run(self):
default_collector.collect()
processed_files = PipelineStorage().post_process({}, True)
self.assertEqual(list(processed_files), [])
@pipeline_settings(
JS_COMPRESSOR=None,
CSS_COMPRESSOR=None,
COMPILERS=["tests.tests.test_storage.DummyCSSCompiler"],
)
def test_post_process(self):
default_collector.collect()
storage = PipelineStorage()
processed_files = storage.post_process({})
self.assertTrue(("screen.css", "screen.css", True) in processed_files)
self.assertTrue(("scripts.js", "scripts.js", True) in processed_files)
@override_settings(
STATICFILES_STORAGE="tests.tests.test_storage.PipelineNoPathStorage",
)
@pipeline_settings(
JS_COMPRESSOR=None,
CSS_COMPRESSOR=None,
COMPILERS=["tests.tests.test_storage.DummyCSSCompiler"],
)
def test_post_process_no_path(self):
"""
Test post_process with a storage that doesn't implement the path method.
"""
staticfiles_storage._setup()
try:
call_command("collectstatic", verbosity=0, interactive=False)
except NotImplementedError:
self.fail("Received an error running collectstatic")
@modify_settings(STATICFILES_FINDERS={"append": "pipeline.finders.PipelineFinder"})
def test_nonexistent_file_pipeline_finder(self):
path = finders.find("nothing.css")
self.assertIsNone(path)
@modify_settings(STATICFILES_FINDERS={"append": "pipeline.finders.PipelineFinder"})
def test_nonexistent_file_pipeline_finder_find_all(self):
if django.__version__ < "5.2":
self.skipTest("Only applicable to Django 5.2 and up")
path = finders.find("nothing.css", find_all=True)
self.assertIsNotNone(path)
self.assertEqual([], path)
@modify_settings(STATICFILES_FINDERS={"append": "pipeline.finders.PipelineFinder"})
def test_nonexistent_file_pipeline_finder_all(self):
if django.__version__ < "6.0":
self.skipTest("Only applicable to versions of Django before 6.0")
path = finders.find("nothing.css", all=True)
self.assertIsNotNone(path)
self.assertEqual([], path)
@modify_settings(
STATICFILES_FINDERS={"append": "pipeline.finders.CachedFileFinder"}
)
def test_nonexistent_file_cached_finder(self):
path = finders.find("nothing.css")
self.assertIsNone(path)
@modify_settings(STATICFILES_FINDERS={"append": "pipeline.finders.PipelineFinder"})
def test_nonexistent_double_extension_file_pipeline_finder(self):
path = finders.find("app.css.map")
self.assertIsNone(path)
@modify_settings(
STATICFILES_FINDERS={"append": "pipeline.finders.CachedFileFinder"}
)
def test_nonexistent_double_extension_file_cached_finder(self):
path = finders.find("app.css.map")
self.assertIsNone(path)
@modify_settings(STATICFILES_FINDERS={"append": "pipeline.finders.ManifestFinder"})
def test_manifest_finder_finds_stylesheet(self):
path = finders.find("screen.css")
self.assertIsNotNone(path)
path = finders.find("screen.scss")
self.assertIsNone(path)
@modify_settings(STATICFILES_FINDERS={"append": "pipeline.finders.ManifestFinder"})
def test_manifest_finder_finds_all_stylesheet(self):
paths = finders.find("screen.css", all=True)
self.assertIsNotNone(paths)
self.assertEqual(1, len(paths))
paths = finders.find("screen.scss", all=True)
self.assertIsNotNone(paths)
self.assertEqual([], paths)
| StorageTest |
python | huggingface__transformers | src/transformers/models/time_series_transformer/modeling_time_series_transformer.py | {
"start": 60178,
"end": 84621
} | class ____(TimeSeriesTransformerPreTrainedModel):
def __init__(self, config: TimeSeriesTransformerConfig):
super().__init__(config)
self.model = TimeSeriesTransformerModel(config)
if config.distribution_output == "student_t":
self.distribution_output = StudentTOutput(dim=config.input_size)
elif config.distribution_output == "normal":
self.distribution_output = NormalOutput(dim=config.input_size)
elif config.distribution_output == "negative_binomial":
self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
else:
raise ValueError(f"Unknown distribution output {config.distribution_output}")
self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model)
self.target_shape = self.distribution_output.event_shape
if config.loss == "nll":
self.loss = nll
else:
raise ValueError(f"Unknown loss function {config.loss}")
# Initialize weights of distribution_output and apply final processing
self.post_init()
def output_params(self, dec_output):
return self.parameter_projection(dec_output)
@torch.jit.ignore
def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
sliced_params = params
if trailing_n is not None:
sliced_params = [p[:, -trailing_n:] for p in params]
return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)
@auto_docstring
def forward(
self,
past_values: torch.Tensor,
past_time_features: torch.Tensor,
past_observed_mask: torch.Tensor,
static_categorical_features: Optional[torch.Tensor] = None,
static_real_features: Optional[torch.Tensor] = None,
future_values: Optional[torch.Tensor] = None,
future_time_features: Optional[torch.Tensor] = None,
future_observed_mask: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
use_cache: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[Seq2SeqTSModelOutput, tuple]:
r"""
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size of
this tensor must be larger than the `context_length` of the model, since the model will use the larger size
to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
`lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step. Holiday features are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must but known at prediction time.
The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`.
The sequence length here is equal to `prediction_length`.
See the demo notebook and code snippets for details.
Optionally, during training any missing values need to be replaced with zeros and indicated via the
`future_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to `future_values`.
These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
Fourier features). These could also be so-called "age" features, which basically help the model know "at
which point in life" a time-series is. Age features have small values for distant past time steps and
increase monotonically the more we approach the current time step. Holiday features are also a good example
of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must but known at prediction time.
The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.
future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
This mask is used to filter out missing values for the final loss calculation.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import TimeSeriesTransformerForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = TimeSeriesTransformerForPrediction.from_pretrained(
... "huggingface/time-series-transformer-tourism-monthly"
... )
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values
>>> # as well as possible additional features
>>> # the model autoregressively generates future values
>>> outputs = model.generate(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_time_features=batch["future_time_features"],
... )
>>> mean_prediction = outputs.sequences.mean(dim=1)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if future_values is not None:
use_cache = False
outputs = self.model(
past_values=past_values,
past_time_features=past_time_features,
past_observed_mask=past_observed_mask,
static_categorical_features=static_categorical_features,
static_real_features=static_real_features,
future_values=future_values,
future_time_features=future_time_features,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
use_cache=use_cache,
return_dict=return_dict,
cache_position=cache_position,
)
prediction_loss = None
params = None
if future_values is not None:
params = self.output_params(outputs[0]) # outputs.last_hidden_state
# loc is 3rd last and scale is 2nd last output
distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])
loss = self.loss(distribution, future_values)
if future_observed_mask is None:
future_observed_mask = torch.ones_like(future_values)
if len(self.target_shape) == 0:
loss_weights = future_observed_mask
else:
loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)
prediction_loss = weighted_average(loss, weights=loss_weights)
if not return_dict:
outputs = ((params,) + outputs[1:]) if params is not None else outputs[1:]
return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs
return Seq2SeqTSPredictionOutput(
loss=prediction_loss,
params=params,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
loc=outputs.loc,
scale=outputs.scale,
static_features=outputs.static_features,
)
@torch.no_grad()
def generate(
self,
past_values: torch.Tensor,
past_time_features: torch.Tensor,
future_time_features: torch.Tensor,
past_observed_mask: Optional[torch.Tensor] = None,
static_categorical_features: Optional[torch.Tensor] = None,
static_real_features: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> SampleTSPredictionOutput:
r"""
Greedily generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size
of this tensor must be larger than the `context_length` of the model, since the model will use the
larger size to construct lag features, i.e. additional values from the past which are added in order to
serve as "extra context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if
no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
of the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features,
such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
of variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things
like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
These could also be so-called "age" features, which basically help the model know "at which point in
life" a time-series is. Age features have small values for distant past time steps and increase
monotonically the more we approach the current time step. Holiday features are also a good example of
time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must but known at prediction time.
The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to sampled
predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
(for instance as Fourier features). These could also be so-called "age" features, which basically help
the model know "at which point in life" a time-series is. Age features have small values for distant
past time steps and increase monotonically the more we approach the current time step. Holiday features
are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must but known at prediction time.
The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to
the values of the time series.
Static categorical features are features which have the same value for all time steps (static over
time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers.
Return:
[`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
multivariate predictions.
"""
outputs = self(
static_categorical_features=static_categorical_features,
static_real_features=static_real_features,
past_time_features=past_time_features,
past_values=past_values,
past_observed_mask=past_observed_mask,
future_time_features=future_time_features,
future_values=None,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
use_cache=True,
)
decoder = self.model.get_decoder()
enc_last_hidden = outputs.encoder_last_hidden_state
loc = outputs.loc
scale = outputs.scale
static_feat = outputs.static_features
num_parallel_samples = self.config.num_parallel_samples
repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_past_values = (
past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc
) / repeated_scale
expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1)
features = torch.cat((expanded_static_feat, future_time_features), dim=-1)
repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)
future_samples = []
# greedy decoding
for k in range(self.config.prediction_length):
lagged_sequence = self.model.get_lagged_subsequences(
sequence=repeated_past_values,
subsequences_length=1 + k,
shift=1,
)
lags_shape = lagged_sequence.shape
reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, : k + 1]), dim=-1)
dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden)
dec_last_hidden = dec_output.last_hidden_state
params = self.parameter_projection(dec_last_hidden[:, -1:])
distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)
next_sample = distr.sample()
repeated_past_values = torch.cat(
(repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1
)
future_samples.append(next_sample)
concat_future_samples = torch.cat(future_samples, dim=1)
return SampleTSPredictionOutput(
sequences=concat_future_samples.reshape(
(-1, num_parallel_samples, self.config.prediction_length) + self.target_shape,
)
)
__all__ = ["TimeSeriesTransformerForPrediction", "TimeSeriesTransformerModel", "TimeSeriesTransformerPreTrainedModel"]
| TimeSeriesTransformerForPrediction |
python | Unity-Technologies__ml-agents | ml-agents-envs/tests/simple_test_envs.py | {
"start": 10473,
"end": 19063
} | class ____(BaseEnv):
"""
The MultiAgentEnvironment maintains a list of SimpleEnvironment, one for each agent.
When sending DecisionSteps and TerminalSteps to the trainers, it first batches the
decision steps from the individual environments. When setting actions, it indexes the
batched ActionTuple to obtain the ActionTuple for individual agents
"""
def __init__(
self,
brain_names,
step_size=STEP_SIZE,
num_visual=0,
num_vector=1,
num_var_len=0,
vis_obs_size=VIS_OBS_SIZE,
vec_obs_size=OBS_SIZE,
var_len_obs_size=VAR_LEN_SIZE,
action_sizes=(1, 0),
num_agents=2,
goal_indices=None,
):
super().__init__()
self.envs = {}
self.dones = {}
self.just_died = set()
self.names = brain_names
self.final_rewards: Dict[str, List[float]] = {}
for name in brain_names:
self.final_rewards[name] = []
for i in range(num_agents):
name_and_num = name + str(i)
self.envs[name_and_num] = SimpleEnvironment(
[name],
step_size,
num_visual,
num_vector,
num_var_len,
vis_obs_size,
vec_obs_size,
var_len_obs_size,
action_sizes,
goal_indices,
)
self.dones[name_and_num] = False
self.envs[name_and_num].reset()
# All envs have the same behavior spec, so just get the last one.
self.behavior_spec = self.envs[name_and_num].behavior_spec
self.action_spec = self.envs[name_and_num].action_spec
self.num_agents = num_agents
self._side_channel_manager = SideChannelManager([])
@property
def all_done(self):
return all(self.dones.values())
@property
def behavior_specs(self):
behavior_dict = {}
for n in self.names:
behavior_dict[n] = self.behavior_spec
return BehaviorMapping(behavior_dict)
def set_action_for_agent(self, behavior_name, agent_id, action):
pass
def set_actions(self, behavior_name, action):
# The ActionTuple contains the actions for all n_agents. This
# slices the ActionTuple into an action tuple for each environment
# and sets it. The index j is used to ignore agents that have already
# reached done.
j = 0
for i in range(self.num_agents):
_act = ActionTuple()
name_and_num = behavior_name + str(i)
env = self.envs[name_and_num]
if not self.dones[name_and_num]:
if self.action_spec.continuous_size > 0:
_act.add_continuous(action.continuous[j : j + 1])
if self.action_spec.discrete_size > 0:
_disc_list = [action.discrete[j, :]]
_act.add_discrete(np.array(_disc_list))
j += 1
env.action[behavior_name] = _act
def get_steps(self, behavior_name):
# This gets the individual DecisionSteps and TerminalSteps
# from the envs and merges them into a batch to be sent
# to the AgentProcessor.
dec_vec_obs = []
dec_reward = []
dec_group_reward = []
dec_agent_id = []
dec_group_id = []
ter_vec_obs = []
ter_reward = []
ter_group_reward = []
ter_agent_id = []
ter_group_id = []
interrupted = []
action_mask = None
terminal_step = TerminalSteps.empty(self.behavior_spec)
decision_step = None
for i in range(self.num_agents):
name_and_num = behavior_name + str(i)
env = self.envs[name_and_num]
_dec, _term = env.step_result[behavior_name]
if not self.dones[name_and_num]:
dec_agent_id.append(i)
dec_group_id.append(1)
if len(dec_vec_obs) > 0:
for j, obs in enumerate(_dec.obs):
dec_vec_obs[j] = np.concatenate((dec_vec_obs[j], obs), axis=0)
else:
for obs in _dec.obs:
dec_vec_obs.append(obs)
dec_reward.append(_dec.reward[0])
dec_group_reward.append(_dec.group_reward[0])
if _dec.action_mask is not None:
if action_mask is None:
action_mask = []
if len(action_mask) > 0:
action_mask[0] = np.concatenate(
(action_mask[0], _dec.action_mask[0]), axis=0
)
else:
action_mask.append(_dec.action_mask[0])
if len(_term.reward) > 0 and name_and_num in self.just_died:
ter_agent_id.append(i)
ter_group_id.append(1)
if len(ter_vec_obs) > 0:
for j, obs in enumerate(_term.obs):
ter_vec_obs[j] = np.concatenate((ter_vec_obs[j], obs), axis=0)
else:
for obs in _term.obs:
ter_vec_obs.append(obs)
ter_reward.append(_term.reward[0])
ter_group_reward.append(_term.group_reward[0])
interrupted.append(False)
self.just_died.remove(name_and_num)
decision_step = DecisionSteps(
dec_vec_obs,
dec_reward,
dec_agent_id,
action_mask,
dec_group_id,
dec_group_reward,
)
terminal_step = TerminalSteps(
ter_vec_obs,
ter_reward,
interrupted,
ter_agent_id,
ter_group_id,
ter_group_reward,
)
if self.all_done:
decision_step = DecisionSteps([], [], [], [], [], [])
return (decision_step, terminal_step)
def step(self) -> None:
# Steps all environments and calls reset if all agents are done.
for name in self.names:
for i in range(self.num_agents):
name_and_num = name + str(i)
# Does not step the env if done
if not self.dones[name_and_num]:
env = self.envs[name_and_num]
# Reproducing part of env step to intercept Dones
assert all(action is not None for action in env.action.values())
done = env._take_action(name)
reward = env._compute_reward(name, done)
self.dones[name_and_num] = done
if done:
self.just_died.add(name_and_num)
if self.all_done:
env.step_result[name] = env._make_batched_step(
name, done, 0.0, reward
)
self.final_rewards[name].append(reward)
# self.reset()
elif done:
# This agent has finished but others are still running.
# This gives a reward of the time penalty if this agent
# is successful and the negative env reward if it fails.
ceil_reward = min(-TIME_PENALTY, reward)
env.step_result[name] = env._make_batched_step(
name, done, ceil_reward, 0.0
)
self.final_rewards[name].append(reward)
else:
env.step_result[name] = env._make_batched_step(
name, done, reward, 0.0
)
def reset(self) -> None: # type: ignore
for name in self.names:
for i in range(self.num_agents):
name_and_num = name + str(i)
self.dones[name_and_num] = False
self.dones = {}
self.just_died = set()
self.final_rewards = {}
for name in self.names:
self.final_rewards[name] = []
for i in range(self.num_agents):
name_and_num = name + str(i)
self.dones[name_and_num] = False
self.envs[name_and_num].reset()
@property
def reset_parameters(self) -> Dict[str, str]:
return {}
def close(self):
pass
| MultiAgentEnvironment |
python | getsentry__sentry-python | tests/test_client.py | {
"start": 40221,
"end": 47330
} | class ____:
"""
Tests for client reports related to spans.
"""
__test__ = False
@staticmethod
def span_dropper(spans_to_drop):
"""
Returns a function that can be used to drop spans from an event.
"""
def drop_spans(event, _):
event["spans"] = event["spans"][spans_to_drop:]
return event
return drop_spans
@staticmethod
def mock_transaction_event(span_count):
"""
Returns a mock transaction event with the given number of spans.
"""
return defaultdict(
mock.MagicMock,
type="transaction",
spans=[mock.MagicMock() for _ in range(span_count)],
)
def __init__(self, span_count):
"""Configures a test case with the number of spans dropped and whether the transaction was dropped."""
self.span_count = span_count
self.expected_record_lost_event_calls = Counter()
self.before_send = lambda event, _: event
self.event_processor = lambda event, _: event
def _update_resulting_calls(self, reason, drops_transactions=0, drops_spans=0):
"""
Updates the expected calls with the given resulting calls.
"""
if drops_transactions > 0:
self.expected_record_lost_event_calls[
(reason, "transaction", None, drops_transactions)
] += 1
if drops_spans > 0:
self.expected_record_lost_event_calls[
(reason, "span", None, drops_spans)
] += 1
def with_before_send(
self,
before_send,
*,
drops_transactions=0,
drops_spans=0,
):
self.before_send = before_send
self._update_resulting_calls(
"before_send",
drops_transactions,
drops_spans,
)
return self
def with_event_processor(
self,
event_processor,
*,
drops_transactions=0,
drops_spans=0,
):
self.event_processor = event_processor
self._update_resulting_calls(
"event_processor",
drops_transactions,
drops_spans,
)
return self
def run(self, sentry_init, capture_record_lost_event_calls):
"""Runs the test case with the configured parameters."""
sentry_init(before_send_transaction=self.before_send)
record_lost_event_calls = capture_record_lost_event_calls()
with sentry_sdk.isolation_scope() as scope:
scope.add_event_processor(self.event_processor)
event = self.mock_transaction_event(self.span_count)
sentry_sdk.get_client().capture_event(event, scope=scope)
# We use counters to ensure that the calls are made the expected number of times, disregarding order.
assert Counter(record_lost_event_calls) == self.expected_record_lost_event_calls
@pytest.mark.parametrize(
"test_config",
(
TestSpanClientReports(span_count=10), # No spans dropped
TestSpanClientReports(span_count=0).with_before_send(
lambda e, _: None,
drops_transactions=1,
drops_spans=1,
),
TestSpanClientReports(span_count=10).with_before_send(
lambda e, _: None,
drops_transactions=1,
drops_spans=11,
),
TestSpanClientReports(span_count=10).with_before_send(
TestSpanClientReports.span_dropper(3),
drops_spans=3,
),
TestSpanClientReports(span_count=10).with_before_send(
TestSpanClientReports.span_dropper(10),
drops_spans=10,
),
TestSpanClientReports(span_count=10).with_event_processor(
lambda e, _: None,
drops_transactions=1,
drops_spans=11,
),
TestSpanClientReports(span_count=10).with_event_processor(
TestSpanClientReports.span_dropper(3),
drops_spans=3,
),
TestSpanClientReports(span_count=10).with_event_processor(
TestSpanClientReports.span_dropper(10),
drops_spans=10,
),
TestSpanClientReports(span_count=10)
.with_event_processor(
TestSpanClientReports.span_dropper(3),
drops_spans=3,
)
.with_before_send(
TestSpanClientReports.span_dropper(5),
drops_spans=5,
),
TestSpanClientReports(10)
.with_event_processor(
TestSpanClientReports.span_dropper(3),
drops_spans=3,
)
.with_before_send(
lambda e, _: None,
drops_transactions=1,
drops_spans=8, # 3 of the 11 (incl. transaction) spans already dropped
),
),
)
def test_dropped_transaction(sentry_init, capture_record_lost_event_calls, test_config):
test_config.run(sentry_init, capture_record_lost_event_calls)
@pytest.mark.parametrize("enable_tracing", [True, False])
def test_enable_tracing_deprecated(sentry_init, enable_tracing):
with pytest.warns(DeprecationWarning):
sentry_init(enable_tracing=enable_tracing)
def make_options_transport_cls():
"""Make an options transport class that captures the options passed to it."""
# We need a unique class for each test so that the options are not
# shared between tests.
class OptionsTransport(Transport):
"""Transport that captures the options passed to it."""
def __init__(self, options):
super().__init__(options)
type(self).options = options
def capture_envelope(self, _):
pass
return OptionsTransport
@contextlib.contextmanager
def clear_env_var(name):
"""Helper to clear the a given environment variable,
and restore it to its original value on exit."""
old_value = os.environ.pop(name, None)
try:
yield
finally:
if old_value is not None:
os.environ[name] = old_value
elif name in os.environ:
del os.environ[name]
@pytest.mark.parametrize(
("env_value", "arg_value", "expected_value"),
[
(None, None, False), # default
("0", None, False), # env var false
("1", None, True), # env var true
(None, False, False), # arg false
(None, True, True), # arg true
# Argument overrides environment variable
("0", True, True), # env false, arg true
("1", False, False), # env true, arg false
],
)
def test_keep_alive(env_value, arg_value, expected_value):
transport_cls = make_options_transport_cls()
keep_alive_kwarg = {} if arg_value is None else {"keep_alive": arg_value}
with clear_env_var("SENTRY_KEEP_ALIVE"):
if env_value is not None:
os.environ["SENTRY_KEEP_ALIVE"] = env_value
sentry_sdk.init(
dsn="http://foo@sentry.io/123",
transport=transport_cls,
**keep_alive_kwarg,
)
assert transport_cls.options["keep_alive"] is expected_value
| TestSpanClientReports |
python | coleifer__peewee | examples/blog/app.py | {
"start": 4857,
"end": 9501
} | class ____(FTSModel):
content = TextField()
class Meta:
database = database
def login_required(fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
if session.get('logged_in'):
return fn(*args, **kwargs)
return redirect(url_for('login', next=request.path))
return inner
@app.route('/login/', methods=['GET', 'POST'])
def login():
next_url = request.args.get('next') or request.form.get('next')
if request.method == 'POST' and request.form.get('password'):
password = request.form.get('password')
# TODO: If using a one-way hash, you would also hash the user-submitted
# password and do the comparison on the hashed versions.
if password == app.config['ADMIN_PASSWORD']:
session['logged_in'] = True
session.permanent = True # Use cookie to store session.
flash('You are now logged in.', 'success')
return redirect(next_url or url_for('index'))
else:
flash('Incorrect password.', 'danger')
return render_template('login.html', next_url=next_url)
@app.route('/logout/', methods=['GET', 'POST'])
def logout():
if request.method == 'POST':
session.clear()
return redirect(url_for('login'))
return render_template('logout.html')
@app.route('/')
def index():
search_query = request.args.get('q')
if search_query:
query = Entry.search(search_query)
else:
query = Entry.public().order_by(Entry.timestamp.desc())
# The `object_list` helper will take a base query and then handle
# paginating the results if there are more than 20. For more info see
# the docs:
# http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#object_list
return object_list(
'index.html',
query,
search=search_query,
check_bounds=False)
def _create_or_edit(entry, template):
if request.method == 'POST':
entry.title = request.form.get('title') or ''
entry.content = request.form.get('content') or ''
entry.published = request.form.get('published') or False
if not (entry.title and entry.content):
flash('Title and Content are required.', 'danger')
else:
# Wrap the call to save in a transaction so we can roll it back
# cleanly in the event of an integrity error.
try:
with database.atomic():
entry.save()
except IntegrityError:
flash('Error: this title is already in use.', 'danger')
else:
flash('Entry saved successfully.', 'success')
if entry.published:
return redirect(url_for('detail', slug=entry.slug))
else:
return redirect(url_for('edit', slug=entry.slug))
return render_template(template, entry=entry)
@app.route('/create/', methods=['GET', 'POST'])
@login_required
def create():
return _create_or_edit(Entry(title='', content=''), 'create.html')
@app.route('/drafts/')
@login_required
def drafts():
query = Entry.drafts().order_by(Entry.timestamp.desc())
return object_list('index.html', query, check_bounds=False)
@app.route('/<slug>/')
def detail(slug):
if session.get('logged_in'):
query = Entry.select()
else:
query = Entry.public()
entry = get_object_or_404(query, Entry.slug == slug)
return render_template('detail.html', entry=entry)
@app.route('/<slug>/edit/', methods=['GET', 'POST'])
@login_required
def edit(slug):
entry = get_object_or_404(Entry, Entry.slug == slug)
return _create_or_edit(entry, 'edit.html')
@app.template_filter('clean_querystring')
def clean_querystring(request_args, *keys_to_remove, **new_values):
# We'll use this template filter in the pagination include. This filter
# will take the current URL and allow us to preserve the arguments in the
# querystring while replacing any that we need to overwrite. For instance
# if your URL is /?q=search+query&page=2 and we want to preserve the search
# term but make a link to page 3, this filter will allow us to do that.
querystring = dict((key, value) for key, value in request_args.items())
for key in keys_to_remove:
querystring.pop(key, None)
querystring.update(new_values)
return urllib.urlencode(querystring)
@app.errorhandler(404)
def not_found(exc):
return Response('<h3>Not found</h3>'), 404
def main():
database.create_tables([Entry, FTSEntry], safe=True)
app.run(debug=True)
if __name__ == '__main__':
main()
| FTSEntry |
python | joerick__pyinstrument | pyinstrument/session.py | {
"start": 508,
"end": 7705
} | class ____:
def __init__(
self,
frame_records: list[FrameRecordType],
start_time: float,
duration: float,
min_interval: float,
max_interval: float,
sample_count: int,
start_call_stack: list[str],
target_description: str,
cpu_time: float,
sys_path: list[str],
sys_prefixes: list[str],
):
"""Session()
Represents a profile session, contains the data collected during a profile session.
:meta private:
"""
self.frame_records = frame_records
self.start_time = start_time
self.duration = duration
self.min_interval = min_interval
self.max_interval = max_interval
self.sample_count = sample_count
self.start_call_stack = start_call_stack
self.target_description = target_description
self.cpu_time = cpu_time
self.sys_path = sys_path
self.sys_prefixes = sys_prefixes
self._short_file_path_cache = {}
@staticmethod
def load(filename: PathOrStr) -> Session:
"""
Load a previously saved session from disk.
:param filename: The path to load from.
:rtype: Session
"""
with open(filename) as f:
return Session.from_json(json.load(f))
def save(self, filename: PathOrStr) -> None:
"""
Saves a Session object to disk, in a JSON format.
:param filename: The path to save to. Using the ``.pyisession`` extension is recommended.
"""
with open(filename, "w") as f:
json.dump(self.to_json(), f)
def to_json(self, include_frame_records: bool = True):
result: dict[str, Any] = {
"start_time": self.start_time,
"duration": self.duration,
"min_interval": self.min_interval,
"max_interval": self.max_interval,
"sample_count": self.sample_count,
"start_call_stack": self.start_call_stack,
"target_description": self.target_description,
"cpu_time": self.cpu_time,
"sys_path": self.sys_path,
"sys_prefixes": self.sys_prefixes,
}
if include_frame_records:
result["frame_records"] = self.frame_records
return result
@staticmethod
def from_json(json_dict: dict[str, Any]):
return Session(
frame_records=json_dict["frame_records"],
start_time=json_dict["start_time"],
min_interval=json_dict.get("min_interval", 0.001),
max_interval=json_dict.get("max_interval", 0.001),
duration=json_dict["duration"],
sample_count=json_dict["sample_count"],
start_call_stack=json_dict["start_call_stack"],
target_description=json_dict["target_description"],
cpu_time=json_dict["cpu_time"] or 0,
sys_path=json_dict.get("sys_path", sys.path),
sys_prefixes=json_dict.get("sys_prefixes", Session.current_sys_prefixes()),
)
@staticmethod
def combine(session1: Session, session2: Session) -> Session:
"""
Combines two :class:`Session` objects.
Sessions that are joined in this way probably shouldn't be interpreted
as timelines, because the samples are simply concatenated. But
aggregate views (the default) of this data will work.
:rtype: Session
"""
if session1.start_time > session2.start_time:
# swap them around so that session1 is the first one
session1, session2 = session2, session1
return Session(
frame_records=session1.frame_records + session2.frame_records,
start_time=session1.start_time,
min_interval=min(session1.min_interval, session2.min_interval),
max_interval=max(session1.max_interval, session2.max_interval),
duration=session1.duration + session2.duration,
sample_count=session1.sample_count + session2.sample_count,
start_call_stack=session1.start_call_stack,
target_description=session1.target_description,
cpu_time=session1.cpu_time + session2.cpu_time,
sys_path=(
session1.sys_path + [p for p in session2.sys_path if p not in session1.sys_path]
),
sys_prefixes=list(set([*session1.sys_prefixes, *session2.sys_prefixes])),
)
@staticmethod
def current_sys_prefixes() -> list[str]:
return [sys.prefix, sys.base_prefix, sys.exec_prefix, sys.base_exec_prefix]
def root_frame(self, trim_stem: bool = True) -> Frame | None:
"""
Parses the internal frame records and returns a tree of :class:`Frame`
objects. This object can be rendered using a :class:`Renderer`
object.
:rtype: A :class:`Frame` object, or None if the session is empty.
"""
root_frame = build_frame_tree(self.frame_records, context=self)
if root_frame is None:
return None
if trim_stem:
root_frame = self._trim_stem(root_frame)
return root_frame
def _trim_stem(self, frame: Frame):
# trim the start of the tree before any branches.
# we also don't want to trim beyond the call to profiler.start()
start_stack = deque(frame_info_get_identifier(info) for info in self.start_call_stack)
if start_stack.popleft() != frame.identifier:
# the frame doesn't match where the profiler was started. Don't trim.
return frame
while frame.total_self_time == 0 and len(frame.children) == 1:
# check child matches the start_call_stack, otherwise stop descending
if len(start_stack) == 0 or frame.children[0].identifier != start_stack.popleft():
break
frame = frame.children[0]
frame.remove_from_parent()
return frame
_short_file_path_cache: dict[str, str]
def shorten_path(self, path: str) -> str:
"""
Shorten a path to a more readable form, relative to sys_path. Used by
Frame.short_file_path.
"""
if path in self._short_file_path_cache:
return self._short_file_path_cache[path]
result = path
# if os.sep doesn't appear, probably not a file path at all, more
# likely <built-in> or similar
if len(path.split(os.sep)) > 1:
for sys_path_entry in self.sys_path:
# On Windows, if path and sys_path_entry are on
# different drives, relpath will result in exception,
# because it cannot compute a relpath in this case.
# The root cause is that on Windows, there is no root
# dir like '/' on Linux.
try:
candidate = os.path.relpath(path, sys_path_entry)
except ValueError:
continue
if not result or (len(candidate.split(os.sep)) < len(result.split(os.sep))):
result = candidate
self._short_file_path_cache[path] = result
return result
| Session |
python | numpy__numpy | numpy/_core/tests/test_cpu_features.py | {
"start": 13110,
"end": 13502
} | class ____(AbstractTest):
features = ["VSX", "VSX2", "VSX3", "VSX4"]
features_map = {"VSX2": "ARCH_2_07", "VSX3": "ARCH_3_00", "VSX4": "ARCH_3_1"}
def load_flags(self):
self.load_flags_auxv()
is_zarch = re.match(r"^(s390x)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_zarch,
reason="Only for Linux and IBM Z")
| Test_POWER_Features |
python | ray-project__ray | python/ray/tests/horovod/horovod_example.py | {
"start": 488,
"end": 7150
} | class ____(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
def train_fn(
data_dir=None,
seed=42,
use_cuda=False,
batch_size=64,
use_adasum=False,
lr=0.01,
momentum=0.5,
num_epochs=10,
log_interval=10,
):
# Horovod: initialize library.
hvd.init()
torch.manual_seed(seed)
if use_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
data_dir = data_dir or "./data"
with FileLock(os.path.expanduser("~/.horovod_lock")):
train_dataset = datasets.MNIST(
data_dir,
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
# Horovod: use DistributedSampler to partition the training data.
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank()
)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler, **kwargs
)
model = Net()
# By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not use_adasum else 1
if use_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(model.parameters(), lr=lr * lr_scaler, momentum=momentum)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
op=hvd.Adasum if use_adasum else hvd.Average,
)
for epoch in range(1, num_epochs + 1):
model.train()
# Horovod: set epoch to sampler for shuffling.
train_sampler.set_epoch(epoch)
for batch_idx, (data, target) in enumerate(train_loader):
if use_cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
# Horovod: use train_sampler to determine the number of
# examples in this worker's partition.
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_sampler),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
def main(
num_workers, use_gpu, timeout_s=30, placement_group_timeout_s=100, kwargs=None
):
kwargs = kwargs or {}
if use_gpu:
kwargs["use_cuda"] = True
settings = RayExecutor.create_settings(
timeout_s=timeout_s, placement_group_timeout_s=placement_group_timeout_s
)
executor = RayExecutor(settings, use_gpu=use_gpu, num_workers=num_workers)
executor.start()
executor.run(train_fn, kwargs=kwargs)
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser(
description="PyTorch MNIST Example",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--num-epochs",
type=int,
default=5,
metavar="N",
help="number of epochs to train (default: 10)",
)
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)",
)
parser.add_argument(
"--use-cuda", action="store_true", default=False, help="enables CUDA training"
)
parser.add_argument(
"--seed", type=int, default=42, metavar="S", help="random seed (default: 42)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction",
)
parser.add_argument(
"--num-workers",
type=int,
default=4,
help="Number of Ray workers to use for training.",
)
parser.add_argument(
"--data-dir",
help="location of the training dataset in the local filesystem ("
"will be downloaded if needed)",
)
parser.add_argument(
"--address",
required=False,
type=str,
default=None,
help="Address of Ray cluster.",
)
args = parser.parse_args()
import ray
if args.address:
ray.init(args.address)
else:
ray.init()
kwargs = {
"data_dir": args.data_dir,
"seed": args.seed,
"use_cuda": args.use_cuda if args.use_cuda else False,
"batch_size": args.batch_size,
"use_adasum": args.use_adasum if args.use_adasum else False,
"lr": args.lr,
"momentum": args.momentum,
"num_epochs": args.num_epochs,
"log_interval": args.log_interval,
}
main(
num_workers=args.num_workers,
use_gpu=args.use_cuda if args.use_cuda else False,
kwargs=kwargs,
)
| Net |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 979,
"end": 1024
} | class ____(Class4[T_contra]): ...
| Class4_Child1 |
python | PyCQA__pylint | tests/functional/b/bugfix_local_scope_metaclass_1177.py | {
"start": 136,
"end": 165
} | class ____(type):
pass
| Meta |
python | pytorch__pytorch | test/cpp_extensions/torch_stable_test_extension/setup.py | {
"start": 271,
"end": 1826
} | class ____(distutils.command.clean.clean):
def run(self):
# Run default behavior first
distutils.command.clean.clean.run(self)
# Remove extension
for path in (ROOT_DIR / "torch_stable_test").glob("**/*.so"):
path.unlink()
# Remove build and dist and egg-info directories
dirs = [
ROOT_DIR / "build",
ROOT_DIR / "dist",
ROOT_DIR / "torch_stable_test.egg-info",
]
for path in dirs:
if path.exists():
shutil.rmtree(str(path), ignore_errors=True)
def get_extension():
extra_compile_args = {
"cxx": ["-fdiagnostics-color=always", "-DTORCH_STABLE_ONLY"],
}
sources = list(CSRC_DIR.glob("**/*.cpp"))
return [
CppExtension(
"torch_stable_test._C",
sources=sorted(str(s) for s in sources),
py_limited_api=True,
extra_compile_args=extra_compile_args,
extra_link_args=[],
)
]
setup(
name="torch_stable_test",
version="0.0",
author="PyTorch Core Team",
description="Test extension to verify TORCH_STABLE_ONLY flag",
packages=find_packages(exclude=("test",)),
package_data={"torch_stable_test": ["*.dll", "*.dylib", "*.so"]},
install_requires=[
"torch",
],
ext_modules=get_extension(),
cmdclass={
"build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
"clean": clean,
},
options={"bdist_wheel": {"py_limited_api": "cp39"}},
)
| clean |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels34.py | {
"start": 315,
"end": 1983
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels34.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [48497792, 48499712]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
[10, 20, 30, 40, 50],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": 1,
"custom": [
{
"value": "=Sheet1!$D$1",
"font": {
"bold": 1,
"italic": 1,
"color": "red",
"baseline": -1,
},
}
],
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | django/db/models/fields/related_descriptors.py | {
"start": 15160,
"end": 17501
} | class ____(ForwardManyToOneDescriptor):
"""
Accessor to the related object on the forward side of a one-to-one
relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``Restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance.
"""
def get_object(self, instance):
if self.field.remote_field.parent_link:
deferred = instance.get_deferred_fields()
# Because it's a parent link, all the data is available in the
# instance, so populate the parent model with this data.
rel_model = self.field.remote_field.model
fields = [field.attname for field in rel_model._meta.concrete_fields]
# If any of the related model's fields are deferred, fallback to
# fetching all fields from the related model. This avoids a query
# on the related model for every deferred field.
if not any(field in fields for field in deferred):
kwargs = {field: getattr(instance, field) for field in fields}
obj = rel_model(**kwargs)
obj._state.adding = instance._state.adding
obj._state.db = instance._state.db
obj._state.fetch_mode = instance._state.fetch_mode
return obj
return super().get_object(instance)
def __set__(self, instance, value):
super().__set__(instance, value)
# If the primary key is a link to a parent model and a parent instance
# is being set, update the value of the inherited pk(s).
if self.field.primary_key and self.field.remote_field.parent_link:
opts = instance._meta
# Inherited primary key fields from this object's base classes.
inherited_pk_fields = [
field
for field in opts.concrete_fields
if field.primary_key and field.remote_field
]
for field in inherited_pk_fields:
rel_model_pk_name = field.remote_field.model._meta.pk.attname
raw_value = (
getattr(value, rel_model_pk_name) if value is not None else None
)
setattr(instance, rel_model_pk_name, raw_value)
| ForwardOneToOneDescriptor |
python | numba__numba | numba/core/event.py | {
"start": 5998,
"end": 7104
} | class ____(Listener):
"""A listener that measures the total time spent between *START* and
*END* events during the time this listener is active.
"""
def __init__(self):
self._depth = 0
def on_start(self, event):
if self._depth == 0:
self._ts = timer()
self._depth += 1
def on_end(self, event):
self._depth -= 1
if self._depth == 0:
last = getattr(self, "_duration", 0)
self._duration = (timer() - self._ts) + last
@property
def done(self):
"""Returns a ``bool`` indicating whether a measurement has been made.
When this returns ``False``, the matching event has never fired.
If and only if this returns ``True``, ``.duration`` can be read without
error.
"""
return hasattr(self, "_duration")
@property
def duration(self):
"""Returns the measured duration.
This may raise ``AttributeError``. Users can use ``.done`` to check
that a measurement has been made.
"""
return self._duration
| TimingListener |
python | huggingface__transformers | src/transformers/models/owlvit/modeling_owlvit.py | {
"start": 2178,
"end": 6806
class ____(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
        The image embeddings obtained by applying the projection layer to the pooled output of
        [`OwlViTVisionModel`].
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`OwlViTTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`OwlViTVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    # Annotated Optional for consistency with the tensor fields above: both
    # sub-model outputs default to None until populated.
    text_model_output: Optional[BaseModelOutputWithPooling] = None
    vision_model_output: Optional[BaseModelOutputWithPooling] = None

    def to_tuple(self) -> tuple[Any, ...]:
        # Flatten to a tuple, recursively converting the two nested model
        # outputs (which are themselves ModelOutputs) to tuples as well.
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )
# Copied from transformers.loss.loss_for_object_detection._upcast
def _upcast(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.is_floating_point():
return t if t.dtype in (torch.float32, torch.float64) else t.float()
else:
return t if t.dtype in (torch.int32, torch.int64) else t.int()
# Copied from transformers.loss.loss_for_object_detection.box_area
def box_area(boxes: Tensor) -> Tensor:
    """
    Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.

    Args:
        boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
            Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
            < x2` and `0 <= y1 < y2`.

    Returns:
        `torch.FloatTensor`: a tensor containing the area for each box.
    """
    # Widen the dtype first so width * height cannot overflow.
    wide = _upcast(boxes)
    widths = wide[:, 2] - wide[:, 0]
    heights = wide[:, 3] - wide[:, 1]
    return widths * heights
# Copied from transformers.loss.loss_for_object_detection.box_iou
def box_iou(boxes1, boxes2):
    """Return the pairwise IoU matrix and pairwise union areas for two sets of corner-format boxes."""
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    # Pairwise intersection rectangle via broadcasting: [N,M,2]
    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])

    # Clamp so non-overlapping pairs contribute zero intersection.
    extents = (bottom_right - top_left).clamp(min=0)  # [N,M,2]
    intersection = extents[..., 0] * extents[..., 1]  # [N,M]

    union = area1[:, None] + area2 - intersection
    return intersection / union, union
# Copied from transformers.loss.loss_for_object_detection.generalized_box_iou
def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.

    Returns:
        `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
    """
    # Degenerate boxes (x1 < x0 or y1 < y0) would produce inf / nan results,
    # so validate both inputs up front.
    if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
        raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}")
    if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
        raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}")

    iou, union = box_iou(boxes1, boxes2)

    # Smallest axis-aligned box enclosing each pair, broadcast to [N,M,2].
    enclosing_tl = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    enclosing_br = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    enclosing_wh = (enclosing_br - enclosing_tl).clamp(min=0)
    enclosing_area = enclosing_wh[..., 0] * enclosing_wh[..., 1]

    # GIoU = IoU - (enclosing area not covered by the union) / enclosing area
    return iou - (enclosing_area - union) / enclosing_area
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`OwlViTForObjectDetection`].
"""
)
| OwlViTOutput |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.