| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
python | celery__celery | t/unit/security/case.py | {
"start": 16,
"end": 109
} | class ____:
def setup_method(self):
pytest.importorskip('cryptography')
| SecurityCase |
python | tornadoweb__tornado | tornado/test/httpclient_test.py | {
"start": 3814,
"end": 4453
} | class ____(RequestHandler):
def get(self) -> None:
# set Content-Encoding manually to avoid automatic gzip encoding
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Encoding", "gzip")
# Triggering the potential bug seems to depend on input length.
# This length is taken from the bad-response example reported in
# https://github.com/tornadoweb/tornado/pull/2875 (uncompressed).
text = "".join(f"Hello World {i}\n" for i in range(9000))[:149051]
body = gzip.compress(text.encode(), compresslevel=6) + b"\00"
self.write(body)
| InvalidGzipHandler |
python | eth-brownie__brownie | brownie/test/managers/runner.py | {
"start": 19163,
"end": 21034
} | class ____(PytestBrownieRunner):
"""
Brownie plugin xdist worker hooks.
Hooks in this class are loaded on worker processes when using xdist.
"""
def __init__(self, config, project):
self.workerid = int("".join(i for i in config.workerinput["workerid"] if i.isdigit()))
# network ID is passed to the worker via `pytest_configure_node` in the master
network_id = config.workerinput["network"] or CONFIG.settings["networks"]["default"]
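        # bump the RPC port by the worker ID so each xdist worker launches its own local node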
CONFIG.networks[network_id]["cmd_settings"]["port"] += self.workerid
super().__init__(config, project)
def pytest_collection_modifyitems(self, items):
"""
Called after collection has been performed, may filter or re-order the
items in-place.
If any tests do not use the `module_isolation` fixture, all tests are
discarded. This in turn causes `PytestBrownieMaster.pytest_sessionfinish`
to raise an exception notifying the user that xdist may only be used
when tests are properly isolated.
Arguments
---------
items : List[_pytest.nodes.Item]
List of item objects representing the collected tests
"""
if next((i for i in items if "module_isolation" not in i.fixturenames), False):
items.clear()
return True
super().pytest_collection_modifyitems(items)
def pytest_sessionfinish(self):
"""
Called after whole test run finished, right before returning the exit
status to the system.
Stores test results in `build/tests-{workerid}.json`. Each of these files
is then aggregated in `PytestBrownieMaster.pytest_sessionfinish`.
"""
self.tests = keyfilter(self.results.__contains__, self.tests)
self._sessionfinish(f"tests-{self.workerid}.json")
| PytestBrownieXdistRunner |
python | pytorch__pytorch | torch/utils/checkpoint.py | {
"start": 42565,
"end": 45819
} | class ____(RuntimeError):
pass
def _get_debug_context_and_cb() -> Tuple[Callable[[], Any], Callable[[CheckpointError], None]]:
# This function returns the context_fn and error_cb to be used by the
# checkpointing mechanism. error_cb is invoked when an error is detected
# during unpack.
    # record_context_cpp is not supported on non-Linux, non-x86_64 platforms
cpp_tb = platform.machine() == 'x86_64' and platform.system() == 'Linux'
class CaptureLogs:
def __init__(self) -> None:
self.logs = None
self.tbs = None
def get_context_manager(self):
@contextlib.contextmanager
def logging_mode():
with LoggingTensorMode(), \
capture_logs(True, python_tb=True, script_tb=True, cpp_tb=cpp_tb) as logs_and_tb:
# pyrefly: ignore [bad-assignment]
self.logs, self.tbs = logs_and_tb
yield logs_and_tb
return logging_mode()
capture_logs_fwd = CaptureLogs()
capture_logs_recompute = CaptureLogs()
def unpack_error_cb(e: CheckpointError) -> NoReturn:
def get_str_tb(label, capture_logs):
out = ""
total_len = len(capture_logs.logs)
for i, (log, tb) in enumerate(zip(capture_logs.logs, capture_logs.tbs, strict=False)):
out += f"{log} ({i + 1} of {total_len} in {label})\n\n"
found_torch_dispatch = False
for line in tb:
# Start printing stack trace only after __torch_dispatch__ is found
is_torch_dispatch = line['name'] == '__torch_dispatch__'
if not found_torch_dispatch and not is_torch_dispatch:
continue
elif is_torch_dispatch:
found_torch_dispatch = True
continue
out += f"{line['filename']}:{line['line']}:{line['name']}\n"
out += "\n\n"
return out
if capture_logs_fwd.logs is None:
raise AssertionError("capture_logs_fwd.logs is None")
if capture_logs_recompute.logs is None:
raise AssertionError("capture_logs_recompute.logs is None")
raise CheckpointError(
_checkpoint_error_template.format(
forward_traces=get_str_tb("original", capture_logs_fwd),
recompute_traces=get_str_tb("recompute", capture_logs_recompute),
forward_ops="\n".join(capture_logs_fwd.logs),
recompute_ops="\n".join(capture_logs_recompute.logs)
)
) from e
def context_fn():
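        # hand back separate log-capturing contexts for the forward pass and the recompute pass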
return capture_logs_fwd.get_context_manager(), capture_logs_recompute.get_context_manager()
return context_fn, unpack_error_cb
def _default_meta_extractor(x: torch.Tensor) -> Dict[str, Any]:
# These properties are fast to check, easy to understand
return {
"shape": x.shape,
"dtype": x.dtype,
"device": x.device
}
_allowed_determinism_checks_to_fns: Dict[str, Callable[[torch.Tensor], Any]] = {
_DEFAULT_DETERMINISM_MODE: _default_meta_extractor,
"none": lambda _: None,
}
# See Rule 5
| CheckpointError |
python | getsentry__sentry | tests/snuba/sessions/test_sessions.py | {
"start": 34732,
"end": 41070
} | class ____(TestCase, BaseMetricsTestCase):
"""
TestClass that tests that `get_current_and_previous_crash_free_rates` returns the correct
`currentCrashFreeRate` and `previousCrashFreeRate` for each project
TestData:
Project 1:
In the last 24h -> 2 Exited Sessions / 2 Total Sessions -> 100% Crash free rate
In the previous 24h (>24h & <48h) -> 2 Exited + 1 Crashed Sessions / 3 Sessions -> 66.7%
Project 2:
In the last 24h -> 1 Exited + 1 Crashed / 2 Total Sessions -> 50% Crash free rate
In the previous 24h (>24h & <48h) -> 0 Sessions -> None
Project 3:
In the last 24h -> 0 Sessions -> None
In the previous 24h (>24h & <48h) -> 4 Exited + 1 Crashed / 5 Total Sessions -> 80%
"""
backend = MetricsReleaseHealthBackend()
def setUp(self) -> None:
super().setUp()
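        # align session start to a whole minute; the second timestamp below
        # (30 hours earlier) falls in the previous 24h-48h window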
self.session_started = time.time() // 60 * 60
self.session_started_gt_24_lt_48 = self.session_started - 30 * 60 * 60
self.project2 = self.create_project(
name="Bar2",
slug="bar2",
teams=[self.team],
fire_project_created=True,
organization=self.organization,
)
self.project3 = self.create_project(
name="Bar3",
slug="bar3",
teams=[self.team],
fire_project_created=True,
organization=self.organization,
)
# Project 1
for _ in range(0, 2):
self.store_session(
self.build_session(
**{
"project_id": self.project.id,
"org_id": self.project.organization_id,
"status": "exited",
}
)
)
for idx in range(0, 3):
status = "exited"
if idx == 2:
status = "crashed"
self.store_session(
self.build_session(
**{
"project_id": self.project.id,
"org_id": self.project.organization_id,
"status": status,
"started": self.session_started_gt_24_lt_48,
}
)
)
# Project 2
for i in range(0, 2):
status = "exited"
if i == 1:
status = "crashed"
self.store_session(
self.build_session(
**{
"project_id": self.project2.id,
"org_id": self.project2.organization_id,
"status": status,
}
)
)
# Project 3
for i in range(0, 5):
status = "exited"
if i == 4:
status = "crashed"
self.store_session(
self.build_session(
**{
"project_id": self.project3.id,
"org_id": self.project3.organization_id,
"status": status,
"started": self.session_started_gt_24_lt_48,
}
)
)
def test_get_current_and_previous_crash_free_rates(self) -> None:
now = timezone.now().replace(minute=15, second=23)
last_24h_start = now - 24 * timedelta(hours=1)
last_48h_start = now - 2 * 24 * timedelta(hours=1)
data = self.backend.get_current_and_previous_crash_free_rates(
org_id=self.organization.id,
project_ids=[self.project.id, self.project2.id, self.project3.id],
current_start=last_24h_start,
current_end=now,
previous_start=last_48h_start,
previous_end=last_24h_start,
rollup=3600,
)
assert data == {
self.project.id: {
"currentCrashFreeRate": 100,
"previousCrashFreeRate": 66.66666666666667,
},
self.project2.id: {"currentCrashFreeRate": 50.0, "previousCrashFreeRate": None},
self.project3.id: {"currentCrashFreeRate": None, "previousCrashFreeRate": 80.0},
}
def test_get_current_and_previous_crash_free_rates_with_zero_sessions(self) -> None:
now = timezone.now().replace(minute=15, second=23)
last_48h_start = now - 2 * 24 * timedelta(hours=1)
last_72h_start = now - 3 * 24 * timedelta(hours=1)
last_96h_start = now - 4 * 24 * timedelta(hours=1)
data = self.backend.get_current_and_previous_crash_free_rates(
org_id=self.organization.id,
project_ids=[self.project.id],
current_start=last_72h_start,
current_end=last_48h_start,
previous_start=last_96h_start,
previous_end=last_72h_start,
rollup=3600,
)
assert data == {
self.project.id: {
"currentCrashFreeRate": None,
"previousCrashFreeRate": None,
},
}
def test_extract_crash_free_rate_from_result_groups(self) -> None:
result_groups = [
{"by": {"project_id": 1}, "totals": {"rate": 0.66}},
{"by": {"project_id": 2}, "totals": {"rate": 0.8}},
]
crash_free_rates = self.backend._extract_crash_free_rates_from_result_groups(result_groups)
assert crash_free_rates[1] == 0.66 * 100
assert crash_free_rates[2] == 0.8 * 100
def test_extract_crash_free_rate_from_result_groups_with_none(self) -> None:
result_groups = [
{"by": {"project_id": 1}, "totals": {"rate": 0.66}},
{"by": {"project_id": 2}, "totals": {"rate": None}},
]
crash_free_rates = self.backend._extract_crash_free_rates_from_result_groups(result_groups)
assert crash_free_rates[1] == 0.66 * 100
assert crash_free_rates[2] is None
def test_extract_crash_free_rates_from_result_groups_only_none(self) -> None:
result_groups = [
{"by": {"project_id": 2}, "totals": {"rate": None}},
]
crash_free_rates = self.backend._extract_crash_free_rates_from_result_groups(result_groups)
assert crash_free_rates[2] is None
| GetCrashFreeRateTestCase |
python | ipython__ipython | IPython/core/formatters.py | {
"start": 27458,
"end": 27967
} | class ____(BaseFormatter):
"""A PNG formatter.
To define the callables that compute the PNG representation of your
objects, define a :meth:`_repr_png_` method or use the :meth:`for_type`
or :meth:`for_type_by_name` methods to register functions that handle
this.
The return value of this formatter should be raw PNG data, *not*
base64 encoded.
"""
format_type = Unicode('image/png')
print_method = ObjectName('_repr_png_')
_return_type = (bytes, str)
| PNGFormatter |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchClass1.py | {
"start": 12155,
"end": 12226
} | class ____(Generic[T]):
__match_args__ = ("x",)
x: list[T]
| ClassE |
python | anthropics__anthropic-sdk-python | src/anthropic/types/raw_message_start_event.py | {
"start": 225,
"end": 321
} | class ____(BaseModel):
message: Message
type: Literal["message_start"]
| RawMessageStartEvent |
python | allegroai__clearml | clearml/backend_api/services/v2_20/projects.py | {
"start": 93401,
"end": 95418
} | class ____(Response):
"""
Response of projects.get_hyperparam_values endpoint.
:param total: Total number of distinct parameter values
:type total: int
:param values: The list of the unique values for the parameter
:type values: Sequence[str]
"""
_service = "projects"
_action = "get_hyperparam_values"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"total": {
"description": "Total number of distinct parameter values",
"type": ["integer", "null"],
},
"values": {
"description": "The list of the unique values for the parameter",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(self, total: Optional[int] = None, values: Optional[List[str]] = None, **kwargs: Any) -> None:
super(GetHyperparamValuesResponse, self).__init__(**kwargs)
self.total = total
self.values = values
@schema_property("total")
def total(self) -> Optional[int]:
return self._property_total
@total.setter
def total(self, value: Optional[int]) -> None:
if value is None:
self._property_total = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "total", six.integer_types)
self._property_total = value
@schema_property("values")
def values(self) -> Optional[List[str]]:
return self._property_values
@values.setter
def values(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_values = None
return
self.assert_isinstance(value, "values", (list, tuple))
self.assert_isinstance(value, "values", six.string_types, is_array=True)
self._property_values = value
| GetHyperparamValuesResponse |
python | networkx__networkx | networkx/algorithms/tests/test_euler.py | {
"start": 5889,
"end": 6429
} | class ____:
def testfind_path_start(self):
find_path_start = nx.algorithms.euler._find_path_start
# Test digraphs return correct starting node.
G = nx.path_graph(6, create_using=nx.DiGraph)
assert find_path_start(G) == 0
edges = [(0, 1), (1, 2), (2, 0), (4, 0)]
assert find_path_start(nx.DiGraph(edges)) == 4
        # Test that a graph with no Eulerian path returns None.
edges = [(0, 1), (1, 2), (2, 3), (2, 4)]
assert find_path_start(nx.DiGraph(edges)) is None
| TestFindPathStart |
python | readthedocs__readthedocs.org | readthedocs/core/admin.py | {
"start": 2323,
"end": 4170
} | class ____(ExtraSimpleHistoryAdmin, UserAdminImpersonateMixin, UserAdmin):
"""Admin configuration for User."""
list_display = (
"username",
"email",
"first_name",
"last_name",
"is_staff",
"is_banned",
)
list_filter = (UserProjectFilter,) + UserAdmin.list_filter
actions = ["ban_user", "sync_remote_repositories_action"]
inlines = [UserProjectInline]
# Open a new tab when impersonating a user.
open_new_window = True
@admin.display(
description="Banned",
boolean=True,
)
def is_banned(self, obj):
return hasattr(obj, "profile") and obj.profile.banned
@admin.action(description="Ban user")
def ban_user(self, request, queryset):
users = []
for profile in UserProfile.objects.filter(user__in=queryset):
profile.banned = True
profile.save()
users.append(profile.user.username)
self.message_user(request, "Banned users: %s" % ", ".join(users))
@admin.action(description="Sync remote repositories")
def sync_remote_repositories_action(self, request, queryset):
formatted_task_urls = []
for user_id, username in queryset.values_list("id", "username"):
result = sync_remote_repositories.delay(user_id=user_id)
job_status_url = reverse("api_job_status", kwargs={"task_id": result.task_id})
formatted_task_urls.append(
format_html("<a href='{}'>{} task</a>", job_status_url, username)
)
self.message_user(
request,
mark_safe(
"Following sync remote repository tasks were triggered: {}".format(
", ".join(formatted_task_urls)
)
),
)
@admin.register(UserProfile)
| UserAdminExtra |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 3711,
"end": 3923
} | class ____(MPTTModel):
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
class MPTTMeta:
left_attr = "testing"
| NewStyleMPTTMeta |
python | pyqtgraph__pyqtgraph | pyqtgraph/widgets/ValueLabel.py | {
"start": 116,
"end": 3501
} | class ____(QtWidgets.QLabel):
"""
QLabel specifically for displaying numerical values.
Extends QLabel adding some extra functionality:
- displaying units with si prefix
    - built-in sliding-window averaging
"""
def __init__(self, parent=None, suffix='', siPrefix=False, averageTime=0, formatStr=None, suffixPower=1):
"""
============== ==================================================================================
**Arguments:**
suffix (str or None) The suffix to place after the value
siPrefix (bool) Whether to add an SI prefix to the units and display a scaled value
averageTime (float) The length of time in seconds to average values. If this value
is 0, then no averaging is performed. As this value increases
the display value will appear to change more slowly and smoothly.
formatStr (str) Optionally, provide a format string to use when displaying text. The text
will be generated by calling formatStr.format(value=, avgValue=, suffix=)
(see Python documentation on str.format)
This option is not compatible with siPrefix
suffixPower (int/float) The power to which the suffix is raised. For example, if suffix='m²',
the suffixPower should be 2. This ensures correct scaling when using SI prefixes.
Supports positive, negative and non-integral powers. Ignored if siPrefix is False.
Note: The power only affects the scaling, not the suffix itself. For example, with
suffix='m' and suffixPower=2, the displayed suffix will still be 'm'.
============== ==================================================================================
"""
QtWidgets.QLabel.__init__(self, parent)
self.values = []
self.averageTime = averageTime ## no averaging by default
self.suffix = suffix
self.siPrefix = siPrefix
if formatStr is None:
formatStr = '{avgValue:0.2g} {suffix}'
self.formatStr = formatStr
self.suffixPower = suffixPower
def setValue(self, value):
now = perf_counter()
self.values.append((now, value))
cutoff = now - self.averageTime
while len(self.values) > 0 and self.values[0][0] < cutoff:
self.values.pop(0)
self.update()
def setFormatStr(self, text):
self.formatStr = text
self.update()
def setAverageTime(self, t):
self.averageTime = t
def averageValue(self):
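        # plain arithmetic mean over the samples still inside the averaging
        # window (setValue drops entries older than averageTime)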
return sum(v[1] for v in self.values) / float(len(self.values))
def paintEvent(self, ev):
self.setText(self.generateText())
return super().paintEvent(ev)
def generateText(self):
if len(self.values) == 0:
return ''
avg = self.averageValue()
val = self.values[-1][1]
if self.siPrefix:
return fn.siFormat(avg, suffix=self.suffix, power=self.suffixPower)
else:
return self.formatStr.format(value=val, avgValue=avg, suffix=self.suffix)
| ValueLabel |
python | doocs__leetcode | solution/1000-1099/1041.Robot Bounded In Circle/Solution.py | {
"start": 0,
"end": 373
} | class ____:
def isRobotBounded(self, instructions: str) -> bool:
k = 0
dist = [0] * 4
for c in instructions:
if c == 'L':
k = (k + 1) % 4
elif c == 'R':
k = (k + 3) % 4
else:
dist[k] += 1
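        # After one pass the robot is bounded iff it is back at the origin
        # (displacements on opposite axes cancel) or it no longer faces
        # north (k != 0), since repeating the pass then closes a loop
        # within at most four iterations.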
return (dist[0] == dist[2] and dist[1] == dist[3]) or k != 0
| Solution |
python | tensorflow__tensorflow | tensorflow/python/distribute/collective_all_reduce_strategy.py | {
"start": 9807,
"end": 11538
} | class ____(
CollectiveAllReduceStrategy,
metaclass=_CollectiveAllReduceStrategyExperimentalMeta,
):
__doc__ = CollectiveAllReduceStrategy.__doc__
@deprecation.deprecated(
None, "use distribute.MultiWorkerMirroredStrategy instead"
)
def __init__(
self,
communication=collective_util.CommunicationImplementation.AUTO,
cluster_resolver=None,
):
"""Creates the strategy.
Args:
communication: optional
`tf.distribute.experimental.CommunicationImplementation`. This is a hint
on the preferred collective communication implementation. Possible
values include `AUTO`, `RING`, and `NCCL`.
cluster_resolver: optional
`tf.distribute.cluster_resolver.ClusterResolver`. If `None`,
`tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.
"""
communication_options = collective_util.Options(
implementation=communication
)
super(_CollectiveAllReduceStrategyExperimental, self).__init__(
cluster_resolver, communication_options
)
@classmethod
def _from_local_devices(
cls,
devices,
communication=collective_util.CommunicationImplementation.AUTO,
):
"""A convenience method to create an object with a list of devices."""
obj = cls(communication)
# pylint: disable=protected-access
obj.extended._initialize_local(
tfconfig_cluster_resolver.TFConfigClusterResolver(), devices=devices
)
return obj
_CollectiveAllReduceStrategyExperimental.__name__ = (
CollectiveAllReduceStrategy.__name__
)
@tf_export(v1=["distribute.experimental.MultiWorkerMirroredStrategy"]) # pylint: disable=missing-docstring
| _CollectiveAllReduceStrategyExperimental |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-postgresml/llama_index/indices/managed/postgresml/retriever.py | {
"start": 499,
"end": 3348
} | class ____(BaseRetriever):
"""
PostgresML Retriever.
Args:
index (PostgresMLIndex): the PostgresML Index
"""
def __init__(
self,
index: PostgresMLIndex,
callback_manager: Optional[CallbackManager] = None,
pgml_query: Optional[Dict[str, Any]] = None,
limit: Optional[int] = 5,
rerank: Optional[Dict[str, Any]] = None,
**kwargs,
) -> None:
"""Initialize params."""
self._index = index
self._pgml_query = pgml_query
self._limit = limit
self._rerank = rerank
super().__init__(callback_manager)
def _retrieve(
self,
query_bundle: Optional[QueryBundle] = None,
**kwargs: Any,
) -> List[NodeWithScore]:
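        # synchronous entry point: drive the async retrieval to completion
        # and unwrap the single task result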
return run_async_tasks([self._aretrieve(query_bundle, **kwargs)])[0]
async def _aretrieve(
self,
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
async def do_vector_search():
if self._pgml_query:
return await self._index.collection.vector_search(
self._pgml_query,
self._index.pipeline,
)
else:
if not query_bundle:
raise Exception(
"Must provide either query or query_bundle to retrieve and aretrieve"
)
if self._rerank is not None:
self._rerank = self._rerank | {"query": query_bundle.query_str}
return await self._index.collection.vector_search(
{
"query": {
"fields": {
"content": {
"query": query_bundle.query_str,
"parameters": {"prompt": "query: "},
}
}
},
"rerank": self._rerank,
"limit": self._limit,
},
self._index.pipeline,
)
results = await do_vector_search()
return [
NodeWithScore(
node=TextNode(
id_=r["document"]["id"],
text=r["chunk"],
metadata=r["document"]["metadata"],
),
score=r["score"],
)
if self._rerank is None
else NodeWithScore(
node=TextNode(
id_=r["document"]["id"],
text=r["chunk"],
metadata=r["document"]["metadata"],
),
score=r["rerank_score"],
)
for r in results
]
| PostgresMLRetriever |
python | astropy__astropy | astropy/units/tests/test_quantity.py | {
"start": 32268,
"end": 64278
} | class ____:
scalarintq = u.Quantity(1, unit="m", dtype=int)
scalarfloatq = u.Quantity(1.3, unit="m")
arrq = u.Quantity([1, 2.3, 8.9], unit="m")
scalar_complex_q = u.Quantity(complex(1.0, 2.0))
scalar_big_complex_q = u.Quantity(complex(1.0, 2.0e27) * 1e25)
scalar_big_neg_complex_q = u.Quantity(complex(-1.0, -2.0e27) * 1e36)
arr_complex_q = u.Quantity(np.arange(3) * (complex(-1.0, -2.0e27) * 1e36))
big_arr_complex_q = u.Quantity(np.arange(125) * (complex(-1.0, -2.0e27) * 1e36))
def test_dimensionless_quantity_repr(self):
q2 = u.Quantity(1.0, unit="m-1")
q3 = u.Quantity(1, unit="m-1", dtype=int)
assert repr(self.scalarintq * q2) == "<Quantity 1.>"
assert repr(self.arrq * q2) == "<Quantity [1. , 2.3, 8.9]>"
assert repr(self.scalarintq * q3) == "<Quantity 1>"
def test_dimensionless_quantity_str(self):
q2 = u.Quantity(1.0, unit="m-1")
q3 = u.Quantity(1, unit="m-1", dtype=int)
assert str(self.scalarintq * q2) == "1.0"
assert str(self.scalarintq * q3) == "1"
assert str(self.arrq * q2) == "[1. 2.3 8.9]"
def test_dimensionless_quantity_format(self):
q1 = u.Quantity(3.14)
assert format(q1, ".2f") == "3.14"
assert f"{q1:cds}" == "3.14"
def test_scalar_quantity_str(self):
assert str(self.scalarintq) == "1 m"
assert str(self.scalarfloatq) == "1.3 m"
def test_scalar_quantity_repr(self):
assert repr(self.scalarintq) == "<Quantity 1 m>"
assert repr(self.scalarfloatq) == "<Quantity 1.3 m>"
def test_array_quantity_str(self):
assert str(self.arrq) == "[1. 2.3 8.9] m"
def test_array_quantity_repr(self):
assert repr(self.arrq) == "<Quantity [1. , 2.3, 8.9] m>"
def test_scalar_quantity_format(self):
assert format(self.scalarintq, "02d") == "01 m"
assert format(self.scalarfloatq, ".1f") == "1.3 m"
assert format(self.scalarfloatq, ".0f") == "1 m"
assert f"{self.scalarintq:cds}" == "1 m"
assert f"{self.scalarfloatq:cds}" == "1.3 m"
def test_uninitialized_unit_format(self):
bad_quantity = np.arange(10.0).view(u.Quantity)
assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED)
assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + ">")
def test_to_string(self):
qscalar = u.Quantity(1.5e14, "m/s")
# __str__ is the default `format`
assert str(qscalar) == qscalar.to_string()
res = "Quantity as KMS: 150000000000.0 km / s"
assert f"Quantity as KMS: {qscalar.to_string(unit=u.km / u.s)}" == res
# With precision set
res = "Quantity as KMS: 1.500e+11 km / s"
assert (
f"Quantity as KMS: {qscalar.to_string(precision=3, unit=u.km / u.s)}" == res
)
# Precision set + formatter (precision should be overwritten)
res = "2e+11 km / s"
assert (
f"{qscalar.to_string(precision=3, formatter='.0e', unit=u.km / u.s)}" == res
)
# Invalid format
with pytest.raises(ValueError):
qscalar.to_string(format="test")
res = r"$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
assert qscalar.to_string(format="latex") == res
assert qscalar.to_string(format="latex", subfmt="inline") == res
res = r"$\displaystyle 1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
assert qscalar.to_string(format="latex", subfmt="display") == res
res = r"$1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$"
assert qscalar.to_string(format="latex_inline") == res
assert qscalar.to_string(format="latex_inline", subfmt="inline") == res
res = r"$\displaystyle 1.5 \times 10^{14} \; \mathrm{m\,s^{-1}}$"
assert qscalar.to_string(format="latex_inline", subfmt="display") == res
res = "[0 1 2] (Unit not initialised)"
assert np.arange(3).view(u.Quantity).to_string() == res
@pytest.mark.parametrize(
"quant, input_unit, format_spec, expected_result",
[
pytest.param(
u.Quantity(1.5e14, "m/s"),
None,
".2e",
"1.50e+14 m / s",
id="scientific_notation",
),
pytest.param(
u.Quantity(0.123, "m/s"),
None,
"0.3f",
"0.123 m / s",
id="float_format",
),
pytest.param(
u.Quantity(0.000123, "km/s"),
"m/s",
".2e",
"1.23e-01 m / s",
id="scientific_notation_with_zero",
),
pytest.param(
u.Quantity(1.23456789e15, "m/s"),
None,
".2e",
"1.23e+15 m / s",
id="scientific_notation_large_number",
),
pytest.param(
u.Quantity(123, "m"),
None,
">10",
" 123.0 m",
id="right_aligned",
),
pytest.param(
u.Quantity(123, "m"),
"km",
"=+10",
"+ 0.123 km",
id="sign_alignment_positive",
),
pytest.param(
u.Quantity(-123, "m"),
"cm",
"=+10",
"- 12300.0 cm",
id="sign_alignment_negative",
),
pytest.param(
u.Quantity(123, "m"),
None,
"^10",
" 123.0 m",
id="center_alignment",
),
pytest.param(
u.Quantity(123, "m"),
None,
"<10",
"123.0 m",
id="left_aligned",
),
pytest.param(
u.Quantity(123, "m"),
None,
"010",
"00000123.0 m",
id="zero_padding",
),
pytest.param(
u.Quantity(1234567, "m"),
None,
",",
"1,234,567.0 m",
id="thousands_separator",
),
pytest.param(
u.Quantity(137000000, "lyr"),
None,
">+30,.2e",
" +1.37e+08 lyr",
id="large_number_complex_format",
),
pytest.param(
u.Quantity(1234567, "m"),
None,
"_",
"1_234_567.0 m",
id="custom_separator",
),
pytest.param(
u.Quantity(2.5 - 1.2j),
None,
".2f",
"2.50-1.20j",
id="complex_number_float_format",
),
pytest.param(
u.Quantity(2.5 - 1.2j),
None,
".2e",
"2.50e+00-1.20e+00j",
id="complex_number_scientific_notation",
),
pytest.param(
u.Quantity(2012, "m/s"),
None,
None,
"2012.0 m / s",
id="default_format",
),
],
)
def test_format_spec(self, quant, input_unit, format_spec, expected_result):
assert (
quant.to_string(formatter=format_spec, unit=input_unit) == expected_result
)
@pytest.mark.parametrize(
"quant, input_unit, format_spec, format, expected_result",
[
pytest.param(
u.Quantity(2.5 - 1.2j),
None,
None,
"latex",
r"$(2.5-1.2i) \; \mathrm{}$",
id="complex_number_latex_default",
),
pytest.param(
u.Quantity(1.2e3, "m"),
None,
None,
"latex",
r"$1200 \; \mathrm{m}$",
id="complex_number_latex_default",
),
pytest.param(
u.Quantity(2.5 - 1.2j),
None,
"+.2f",
"latex",
r"$(+2.50-1.20i) \; \mathrm{}$",
id="complex_number_latex_positive_format",
),
pytest.param(
u.Quantity(2.5 - 1.2j),
None,
"-.2f",
"latex",
r"$(2.50-1.20i) \; \mathrm{}$",
id="complex_number_latex_negative_format",
),
pytest.param(
u.Quantity(2.5 - 1.2j),
None,
">+20.5f",
"latex",
r"$(+2.50000-1.20000i) \; \mathrm{}$",
id="complex_number_latex_positive_alignment",
),
pytest.param(
u.Quantity(137000000, "lyr"),
None,
">+30,.2e",
"latex",
r"$+1.37 \times 10^{8} \; \mathrm{lyr}$",
id="large_number_latex_complex_format",
),
pytest.param(
u.Quantity(2.5 - 1.2j),
None,
" .2f",
"latex",
r"$( 2.50-1.20i) \; \mathrm{}$",
id="complex_number_latex_space_format",
),
pytest.param(
u.Quantity(1.23456789e15, "m/s"),
None,
".3e",
"latex",
r"$1.235 \times 10^{15} \; \mathrm{\frac{m}{s}}$",
id="scientific_notation_latex_format",
),
pytest.param(
u.Quantity(123.456, "km/s"),
None,
".2f",
"latex",
r"$123.46 \; \mathrm{\frac{km}{s}}$",
id="float_latex_format",
),
pytest.param(
u.Quantity(123.456, "m/s"),
None,
".2f",
"latex_inline",
r"$123.46 \; \mathrm{m\,s^{-1}}$",
id="inline_latex_format",
),
pytest.param(
u.Quantity(123.456, "m/s"),
None,
".3e",
"latex_inline",
r"$1.235 \times 10^{2} \; \mathrm{m\,s^{-1}}$",
id="scientific_notation_inline_latex_format",
),
pytest.param(
u.Quantity(1239999123, "m/s"),
None,
None,
"latex",
r"$1.2399991 \times 10^{9} \; \mathrm{\frac{m}{s}}$",
id="default_exponential_latex_format",
),
pytest.param(
u.Quantity(2.5 - 1.2j),
None,
None,
"latex",
r"$(2.5-1.2i) \; \mathrm{}$",
id="default_complex_latex_format",
),
],
)
def test_format_spec_latex(
self, quant, input_unit, format_spec, format, expected_result
):
assert (
quant.to_string(formatter=format_spec, format=format, unit=input_unit)
== expected_result
)
@pytest.mark.parametrize(
"quant, formatter, expected_result",
[
pytest.param(
1.2345 * u.kg,
lambda x: f"{float(x):.2f}",
r"1.23 kg",
id="explicit_formatting",
),
pytest.param(
35.0 * u.lyr,
{
"float": lambda x: f"{float(x):.1f}",
"int": lambda x: f"{float(x):.3f}",
},
r"35.0 lyr",
id="dictionary_formatters",
),
],
)
def test_formatter(self, quant, formatter, expected_result):
result = quant.to_string(formatter=formatter)
assert result == expected_result
@pytest.mark.parametrize(
"quant, formatter, format, expected_result",
[
pytest.param(
35.0 * u.lyr,
{"all": lambda x: f"{float(x):.3f}"},
"latex",
r"$35.000 \; \mathrm{lyr}$",
id="dictionary_formatters_latex",
),
pytest.param(
1.2345 * u.kg,
lambda x: f"{float(x):.2f}",
"latex",
r"$1.23 \; \mathrm{kg}$",
id="numerical_formatting_latex",
),
pytest.param(
35 * u.km / u.s,
lambda x: f"\\approx {float(x):.1f}",
"latex",
r"$\approx 35.0 \; \mathrm{\frac{km}{s}}$",
id="complex_formatting_latex",
),
pytest.param(
u.Quantity(2.5 - 1.2j),
lambda x: f"({x.real:.2f}{x.imag:+.1f}j)",
"latex",
r"$(2.50-1.2j) \; \mathrm{}$",
id="complex_custom_formatting_latex",
),
],
)
def test_formatter_latex(self, quant, formatter, format, expected_result):
result = quant.to_string(formatter=formatter, format=format)
assert result == expected_result
@pytest.mark.parametrize("format_spec", ["b", "o", "x", "c", "s"])
def test_format_spec_prohibition(self, format_spec):
qscalar = u.Quantity(123, "m")
with pytest.raises(ValueError):
qscalar.to_string(formatter=format_spec)
def test_repr_latex(self):
from astropy.units.quantity import conf
q2scalar = u.Quantity(1.5e14, "m/s")
assert self.scalarintq._repr_latex_() == r"$1 \; \mathrm{m}$"
assert self.scalarfloatq._repr_latex_() == r"$1.3 \; \mathrm{m}$"
assert (
q2scalar._repr_latex_() == r"$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$"
)
assert self.arrq._repr_latex_() == r"$[1,~2.3,~8.9] \; \mathrm{m}$"
# Complex quantities
assert self.scalar_complex_q._repr_latex_() == r"$(1+2i) \; \mathrm{}$"
assert (
self.scalar_big_complex_q._repr_latex_()
== r"$(1 \times 10^{25}+2 \times 10^{52}i) \; \mathrm{}$"
)
assert (
self.scalar_big_neg_complex_q._repr_latex_()
== r"$(-1 \times 10^{36}-2 \times 10^{63}i) \; \mathrm{}$"
)
assert self.arr_complex_q._repr_latex_() == (
r"$[(0-0i),~(-1 \times 10^{36}-2 \times 10^{63}i),"
r"~(-2 \times 10^{36}-4 \times 10^{63}i)] \; \mathrm{}$"
)
assert r"\dots" in self.big_arr_complex_q._repr_latex_()
qmed = np.arange(100) * u.m
qbig = np.arange(1000) * u.m
qvbig = np.arange(10000) * 1e9 * u.m
pops = np.get_printoptions()
oldlat = conf.latex_array_threshold
try:
# check precision behavior
q = u.Quantity(987654321.123456789, "m/s")
qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm
np.set_printoptions(precision=8)
assert (
q._repr_latex_() == r"$9.8765432 \times 10^{8} \; \mathrm{\frac{m}{s}}$"
)
assert (
qa._repr_latex_()
== r"$[7.89123,~1.2345679 \times 10^{8},~0] \; \mathrm{cm}$"
)
np.set_printoptions(precision=2)
assert q._repr_latex_() == r"$9.9 \times 10^{8} \; \mathrm{\frac{m}{s}}$"
assert qa._repr_latex_() == r"$[7.9,~1.2 \times 10^{8},~0] \; \mathrm{cm}$"
# check thresholding behavior
conf.latex_array_threshold = 100 # should be default
lsmed = qmed._repr_latex_()
assert r"\dots" not in lsmed
lsbig = qbig._repr_latex_()
assert r"\dots" in lsbig
lsvbig = qvbig._repr_latex_()
assert r"\dots" in lsvbig
conf.latex_array_threshold = 1001
lsmed = qmed._repr_latex_()
assert r"\dots" not in lsmed
lsbig = qbig._repr_latex_()
assert r"\dots" not in lsbig
lsvbig = qvbig._repr_latex_()
assert r"\dots" in lsvbig
conf.latex_array_threshold = -1 # means use the numpy threshold
np.set_printoptions(threshold=99)
lsmed = qmed._repr_latex_()
assert r"\dots" in lsmed
lsbig = qbig._repr_latex_()
assert r"\dots" in lsbig
lsvbig = qvbig._repr_latex_()
assert r"\dots" in lsvbig
assert lsvbig.endswith(",~1 \\times 10^{13}] \\; \\mathrm{m}$")
finally:
# prevent side-effects from influencing other tests
np.set_printoptions(**pops)
conf.latex_array_threshold = oldlat
qinfnan = [np.inf, -np.inf, np.nan] * u.m
assert qinfnan._repr_latex_() == r"$[\infty,~-\infty,~{\rm NaN}] \; \mathrm{m}$"
@pytest.mark.parametrize(
"q, expected",
[
pytest.param(0 * u.imperial.deg_R, r"$0\mathrm{{}^{\circ}R}$", id="deg_R"),
pytest.param(5 * u.imperial.deg_F, r"$5\mathrm{{}^{\circ}F}$", id="deg_F"),
pytest.param(10 * u.deg_C, r"$10\mathrm{{}^{\circ}C}$", id="deg_C"),
pytest.param(20 * u.deg, r"$20\mathrm{{}^{\circ}}$", id="deg"),
pytest.param(30 * u.arcmin, r"$30\mathrm{{}^{\prime}}$", id="arcmin"),
pytest.param(40 * u.arcsec, r"$40\mathrm{{}^{\prime\prime}}$", id="arcsec"),
pytest.param(50 * u.hourangle, r"$50\mathrm{{}^{h}}$", id="hourangle"),
],
)
def test_repr_latex_superscript_units(self, q, expected):
# see https://github.com/astropy/astropy/issues/14385
assert q._repr_latex_() == expected
assert q.to_string(format="latex") == expected
def test_decompose():
q1 = 5 * u.N
assert q1.decompose() == (5 * u.kg * u.m * u.s**-2)
def test_decompose_regression():
"""
Regression test for bug #1163
If decompose was called multiple times on a Quantity with an array and a
scale != 1, the result changed every time. This is because the value was
being referenced not copied, then modified, which changed the original
value.
"""
q = np.array([1, 2, 3]) * u.m / (2.0 * u.km)
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
assert np.all(q == np.array([1, 2, 3]) * u.m / (2.0 * u.km))
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
def test_arrays():
"""
Test using quantities with array values
"""
qsec = u.Quantity(np.arange(10), u.second)
assert isinstance(qsec.value, np.ndarray)
assert not qsec.isscalar
# len and indexing should work for arrays
assert len(qsec) == len(qsec.value)
qsecsub25 = qsec[2:5]
assert qsecsub25.unit == qsec.unit
assert isinstance(qsecsub25, u.Quantity)
assert len(qsecsub25) == 3
# make sure isscalar, len, and indexing behave correctly for non-arrays.
qsecnotarray = u.Quantity(10.0, u.second)
assert qsecnotarray.isscalar
with pytest.raises(TypeError):
len(qsecnotarray)
with pytest.raises(TypeError):
qsecnotarray[0]
qseclen0array = u.Quantity(np.array(10), u.second, dtype=int)
# 0d numpy array should act basically like a scalar
assert qseclen0array.isscalar
with pytest.raises(TypeError):
len(qseclen0array)
with pytest.raises(TypeError):
qseclen0array[0]
assert isinstance(qseclen0array.value, numbers.Integral)
a = np.array(
[(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)],
dtype=[("x", float), ("y", float), ("z", float)],
)
qkpc = u.Quantity(a, u.kpc)
assert not qkpc.isscalar
qkpc0 = qkpc[0]
assert qkpc0.value == a[0]
assert qkpc0.unit == qkpc.unit
assert isinstance(qkpc0, u.Quantity)
assert qkpc0.isscalar
qkpcx = qkpc["x"]
assert np.all(qkpcx.value == a["x"])
assert qkpcx.unit == qkpc.unit
assert isinstance(qkpcx, u.Quantity)
assert not qkpcx.isscalar
qkpcx1 = qkpc["x"][1]
assert qkpcx1.unit == qkpc.unit
assert isinstance(qkpcx1, u.Quantity)
assert qkpcx1.isscalar
qkpc1x = qkpc[1]["x"]
assert qkpc1x.isscalar
assert qkpc1x == qkpcx1
# can also create from lists, will auto-convert to arrays
qsec = u.Quantity(list(range(10)), u.second)
assert isinstance(qsec.value, np.ndarray)
# quantity math should work with arrays
assert_array_equal((qsec * 2).value, (np.arange(10) * 2))
assert_array_equal((qsec / 2).value, (np.arange(10) / 2))
# quantity addition/subtraction should *not* work with arrays b/c unit
# ambiguous
with pytest.raises(u.UnitsError):
assert_array_equal((qsec + 2).value, (np.arange(10) + 2))
with pytest.raises(u.UnitsError):
assert_array_equal((qsec - 2).value, (np.arange(10) + 2))
# should create by unit multiplication, too
qsec2 = np.arange(10) * u.second
qsec3 = u.second * np.arange(10)
assert np.all(qsec == qsec2)
assert np.all(qsec2 == qsec3)
# make sure numerical-converters fail when arrays are present
with pytest.raises(TypeError):
float(qsec)
with pytest.raises(TypeError):
int(qsec)
def test_array_indexing_slicing():
q = np.array([1.0, 2.0, 3.0]) * u.m
assert q[0] == 1.0 * u.m
assert np.all(q[0:2] == u.Quantity([1.0, 2.0], u.m))
def test_array_setslice():
q = np.array([1.0, 2.0, 3.0]) * u.m
q[1:2] = np.array([400.0]) * u.cm
assert np.all(q == np.array([1.0, 4.0, 3.0]) * u.m)
def test_inverse_quantity():
"""
Regression test from issue #679
"""
q = u.Quantity(4.0, u.meter / u.second)
qot = q / 2
toq = 2 / q
npqot = q / np.array(2)
assert npqot.value == 2.0
assert npqot.unit == (u.meter / u.second)
assert qot.value == 2.0
assert qot.unit == (u.meter / u.second)
assert toq.value == 0.5
assert toq.unit == (u.second / u.meter)
def test_quantity_mutability():
q = u.Quantity(9.8, u.meter / u.second / u.second)
with pytest.raises(AttributeError):
q.value = 3
with pytest.raises(AttributeError):
q.unit = u.kg
def test_quantity_initialized_with_quantity():
q1 = u.Quantity(60, u.second)
q2 = u.Quantity(q1, u.minute)
assert q2.value == 1
q3 = u.Quantity([q1, q2], u.second)
assert q3[0].value == 60
assert q3[1].value == 60
q4 = u.Quantity([q2, q1])
assert q4.unit == q2.unit
assert q4[0].value == 1
assert q4[1].value == 1
def test_quantity_string_unit():
with pytest.warns(
AstropyDeprecationWarning,
match=(
"^divisions involving a unit and a 'str' instance are deprecated since "
r"v7\.1\. Convert 's' to a unit explicitly\.$"
),
):
q1 = 1.0 * u.m / "s"
assert q1.value == 1
assert q1.unit == (u.m / u.s)
with pytest.warns(
AstropyDeprecationWarning,
match=(
"^products involving a unit and a 'str' instance are deprecated since "
r"v7\.1\. Convert 'm' to a unit explicitly\.$"
),
):
q2 = q1 * "m"
assert q2.unit == ((u.m * u.m) / u.s)
def test_quantity_invalid_unit_string():
with (
pytest.raises(ValueError),
pytest.warns(AstropyDeprecationWarning, match="^products involving .* a 'str'"),
):
"foo" * u.m
def test_implicit_conversion():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
assert_allclose(q.centimeter, 100)
assert_allclose(q.cm, 100)
assert_allclose(q.parsec, 3.240779289469756e-17)
def test_implicit_conversion_autocomplete():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
q.foo = 42
attrs = dir(q)
assert "centimeter" in attrs
assert "cm" in attrs
assert "parsec" in attrs
assert "foo" in attrs
assert "to" in attrs
assert "value" in attrs
# Something from the base class, object
assert "__setattr__" in attrs
with pytest.raises(AttributeError):
q.l
def test_quantity_iterability():
"""Regressiont est for issue #878.
Scalar quantities should not be iterable and should raise a type error on
iteration.
"""
q1 = [15.0, 17.0] * u.m
assert np.iterable(q1)
q2 = next(iter(q1))
assert q2 == 15.0 * u.m
assert not np.iterable(q2)
pytest.raises(TypeError, iter, q2)
def test_copy():
q1 = u.Quantity(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), unit=u.m)
q2 = q1.copy()
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
q3 = q1.copy(order="F")
assert q3.flags["F_CONTIGUOUS"]
assert np.all(q1.value == q3.value)
assert q1.unit == q3.unit
assert q1.dtype == q3.dtype
assert q1.value is not q3.value
q4 = q1.copy(order="C")
assert q4.flags["C_CONTIGUOUS"]
assert np.all(q1.value == q4.value)
assert q1.unit == q4.unit
assert q1.dtype == q4.dtype
assert q1.value is not q4.value
def test_deepcopy():
q1 = u.Quantity(np.array([1.0, 2.0, 3.0]), unit=u.m)
q2 = copy.deepcopy(q1)
assert isinstance(q2, u.Quantity)
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
def test_equality_numpy_scalar():
"""
A regression test to ensure that numpy scalars are correctly compared
(which originally failed due to the lack of ``__array_priority__``).
"""
assert 10 != 10.0 * u.m
assert np.int64(10) != 10 * u.m
assert 10 * u.m != np.int64(10)
def test_quantity_pickleability():
"""
Testing pickleability of quantity
"""
q1 = np.arange(10) * u.m
q2 = pickle.loads(pickle.dumps(q1))
assert np.all(q1.value == q2.value)
assert q1.unit.is_equivalent(q2.unit)
assert q1.unit == q2.unit
def test_quantity_initialisation_from_string():
q = u.Quantity("1")
assert q.unit == u.dimensionless_unscaled
assert q.value == 1.0
q = u.Quantity("1.5 m/s")
assert q.unit == u.m / u.s
assert q.value == 1.5
assert u.Unit(q) == u.Unit("1.5 m/s")
q = u.Quantity(".5 m")
assert q == u.Quantity(0.5, u.m)
q = u.Quantity("-1e1km")
assert q == u.Quantity(-10, u.km)
q = u.Quantity("-1e+1km")
assert q == u.Quantity(-10, u.km)
q = u.Quantity("+.5km")
assert q == u.Quantity(0.5, u.km)
q = u.Quantity("+5e-1km")
assert q == u.Quantity(0.5, u.km)
q = u.Quantity("5", u.m)
assert q == u.Quantity(5.0, u.m)
q = u.Quantity("5 km", u.m)
assert q.value == 5000.0
assert q.unit == u.m
q = u.Quantity("5Em")
assert q == u.Quantity(5.0, u.Em)
with pytest.raises(TypeError):
u.Quantity("")
with pytest.raises(TypeError):
u.Quantity("m")
with pytest.raises(TypeError):
u.Quantity("1.2.3 deg")
with pytest.raises(TypeError):
u.Quantity("1+deg")
with pytest.raises(TypeError):
u.Quantity("1-2deg")
with pytest.raises(TypeError):
u.Quantity("1.2e-13.3m")
with pytest.raises(TypeError):
u.Quantity(["5"])
with pytest.raises(TypeError):
u.Quantity(np.array(["5"]))
with pytest.raises(ValueError):
u.Quantity("5E")
with pytest.raises(ValueError):
u.Quantity("5 foo")
def test_unsupported():
q1 = np.arange(10) * u.m
with pytest.raises(TypeError):
np.bitwise_and(q1, q1)
def test_unit_identity():
q = 1.0 * u.hour
assert q.unit is u.hour
def test_quantity_to_view():
q1 = np.array([1000, 2000]) * u.m
q2 = q1.to(u.km)
assert q1.value[0] == 1000
assert q2.value[0] == 1
def test_quantity_tuple_power():
with pytest.raises(ValueError):
(5.0 * u.m) ** (1, 2)
def test_quantity_fraction_power():
q = (25.0 * u.m**2) ** Fraction(1, 2)
assert q.value == 5.0
assert q.unit == u.m
# Regression check to ensure we didn't create an object type by raising
# the value of the quantity to a Fraction. [#3922]
assert q.dtype.kind == "f"
def test_quantity_from_table():
"""
Checks that units from tables are respected when converted to a Quantity.
This also generically checks the use of *anything* with a `unit` attribute
passed into Quantity
"""
from astropy.table import Table
t = Table(data=[np.arange(5), np.arange(5)], names=["a", "b"])
t["a"].unit = u.kpc
qa = u.Quantity(t["a"])
assert qa.unit == u.kpc
assert_array_equal(qa.value, t["a"])
qb = u.Quantity(t["b"])
assert qb.unit == u.dimensionless_unscaled
assert_array_equal(qb.value, t["b"])
# This does *not* auto-convert, because it's not necessarily obvious that's
# desired. Instead we revert to standard `Quantity` behavior
qap = u.Quantity(t["a"], u.pc)
assert qap.unit == u.pc
assert_array_equal(qap.value, t["a"] * 1000)
qbp = u.Quantity(t["b"], u.pc)
assert qbp.unit == u.pc
assert_array_equal(qbp.value, t["b"])
# Also check with a function unit (regression test for gh-8430)
t["a"].unit = u.dex(u.cm / u.s**2)
fq = u.Dex(t["a"])
assert fq.unit == u.dex(u.cm / u.s**2)
assert_array_equal(fq.value, t["a"])
fq2 = u.Quantity(t["a"], subok=True)
assert isinstance(fq2, u.Dex)
assert fq2.unit == u.dex(u.cm / u.s**2)
assert_array_equal(fq2.value, t["a"])
with pytest.raises(u.UnitTypeError):
u.Quantity(t["a"])
def test_assign_slice_with_quantity_like():
# Regression tests for gh-5961
from astropy.table import Column, Table
# first check directly that we can use a Column to assign to a slice.
c = Column(np.arange(10.0), unit=u.mm)
q = u.Quantity(c)
q[:2] = c[:2]
# next check that we do not fail the original problem.
t = Table()
t["x"] = np.arange(10) * u.mm
t["y"] = np.ones(10) * u.mm
assert type(t["x"]) is Column
xy = np.vstack([t["x"], t["y"]]).T * u.mm
ii = [0, 2, 4]
assert xy[ii, 0].unit == t["x"][ii].unit
# should not raise anything
xy[ii, 0] = t["x"][ii]
def test_insert():
"""
Test Quantity.insert method. This does not test the full capabilities
of the underlying np.insert, but hits the key functionality for
Quantity.
"""
q = [1, 2] * u.m
# Insert a compatible float with different units
q2 = q.insert(0, 1 * u.km)
assert np.all(q2.value == [1000, 1, 2])
assert q2.unit is u.m
assert q2.dtype.kind == "f"
q2 = q.insert(1, [1, 2] * u.km)
assert np.all(q2.value == [1, 1000, 2000, 2])
assert q2.unit is u.m
# Cannot convert 1.5 * u.s to m
with pytest.raises(u.UnitsError):
q.insert(1, 1.5 * u.s)
# Tests with multi-dim quantity
q = [[1, 2], [3, 4]] * u.m
q2 = q.insert(1, [10, 20] * u.m, axis=0)
assert np.all(q2.value == [[1, 2], [10, 20], [3, 4]])
q2 = q.insert(1, [10, 20] * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2], [3, 20, 4]])
q2 = q.insert(1, 10 * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2], [3, 10, 4]])
def test_repr_array_of_quantity():
"""
Test print/repr of object arrays of Quantity objects with different
units.
Regression test for the issue first reported in
https://github.com/astropy/astropy/issues/3777
"""
a = np.array([1 * u.m, 2 * u.s], dtype=object)
assert repr(a) == "array([<Quantity 1. m>, <Quantity 2. s>], dtype=object)"
assert str(a) == "[<Quantity 1. m> <Quantity 2. s>]"
| TestQuantityDisplay |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/interfaces.py | {
"start": 3610,
"end": 4180
} | class ____(Protocol):
"""protocol representing a :pep:`249` database connection.
.. versionadded:: 2.0
.. seealso::
`Connection Objects <https://www.python.org/dev/peps/pep-0249/#connection-objects>`_
- in :pep:`249`
""" # noqa: E501
def close(self) -> None: ...
def commit(self) -> None: ...
def cursor(self, *args: Any, **kwargs: Any) -> DBAPICursor: ...
def rollback(self) -> None: ...
def __getattr__(self, key: str) -> Any: ...
def __setattr__(self, key: str, value: Any) -> None: ...
| DBAPIConnection |
python | huggingface__transformers | tests/models/xglm/test_modeling_xglm.py | {
"start": 13165,
"end": 20784
} | class ____(unittest.TestCase):
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
cleanup(torch_device, gc_collect=True)
def _test_lm_generate_xglm_helper(
self,
gradient_checkpointing=False,
verify_outputs=True,
):
model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")
if gradient_checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(torch_device)
input_ids = torch.tensor([[2, 268, 9865]], dtype=torch.long, device=torch_device) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581, 72616, 5, 984] # fmt: skip
output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
if verify_outputs:
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
@slow
def test_batch_generation(self):
model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")
model.to(torch_device)
tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"
# use different length sentences to test batching
sentences = [
"This is an extremely long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
inputs = tokenizer(sentences, return_tensors="pt", padding=True)
input_ids = inputs["input_ids"].to(torch_device)
outputs = model.generate(
input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), max_new_tokens=12
)
inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
# fmt: off
expected_output_sentences = Expectations(
{
("xpu", None): [
'This is an extremely long sentence that only exists to test the ability of the model to cope with left-padding, such as in batched generation. The output for the sequence below should be the same regardless of whether left padding is applied or not. When left padding is applied, the model will not be able',
'Hello, my dog is a little bit of a shy one, but he is very friendly'
],
("cuda", None): [
"This is an extremely long sentence that only exists to test the ability of the model to cope with left-padding, such as in batched generation. The output for the sequence below should be the same regardless of whether left padding is applied or not. When left padding is applied, the sequence will be a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
],
}
)
# fmt: on
expected_output_sentence = expected_output_sentences.get_expectation()
self.assertListEqual(expected_output_sentence, batch_out_sentence)
self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
def test_lm_generate_xglm(self):
self._test_lm_generate_xglm_helper()
@slow
def test_lm_generate_xglm_with_gradient_checkpointing(self):
self._test_lm_generate_xglm_helper(gradient_checkpointing=True)
@slow
def test_xglm_sample(self):
tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")
torch.manual_seed(0)
tokenized = tokenizer("Today is a nice day and", return_tensors="pt")
input_ids = tokenized.input_ids
output_ids = model.generate(input_ids, do_sample=True, num_beams=1)
output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
if is_torch_greater_or_equal("2.7.0"):
cuda_expectation = (
"Today is a nice day and the sun is shining. A nice day with warm rainy and windy weather today."
)
else:
cuda_expectation = "Today is a nice day and the water is still cold. We just stopped off for some fresh coffee. This place looks like a"
expected_output_strings = Expectations(
{
("xpu", None): "Today is a nice day and the sun is shining. A nice day with warm rainy and windy weather today.",
("rocm", (9, 5)): "Today is a nice day and the sun is shining. A nice day with warm rainy and windy weather today.",
("cuda", None): cuda_expectation,
}
) # fmt: skip
EXPECTED_OUTPUT_STR = expected_output_strings.get_expectation()
self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@require_torch_accelerator
@require_torch_fp16
def test_batched_nan_fp16(self):
model_name = "facebook/xglm-564M"
tokenizer = XGLMTokenizer.from_pretrained(model_name, use_fast=False, padding_side="left")
model = XGLMForCausalLM.from_pretrained(model_name, dtype=torch.float16, use_cache=True).to(torch_device)
model = model.eval()
batch = tokenizer(["Who are you?", "Joe Biden is the president of"], padding=True, return_tensors="pt")
input_ids = batch["input_ids"].to(torch_device)
attention_mask = batch["attention_mask"].to(torch_device)
with torch.no_grad():
outputs = model(input_ids, attention_mask=attention_mask)
self.assertFalse(
torch.isnan(outputs.logits[0]).any().item()
) # the first logits could contain NaNs if it fails
@slow
def test_loss_with_padding(self):
tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")
model.to(torch_device)
tokenizer.padding_side = "right"
sequence = "Sequence"
tokenized_non_padded = tokenizer(sequence, return_tensors="pt")
tokenized_non_padded.to(torch_device)
labels_non_padded = tokenized_non_padded.input_ids.clone()
loss_non_padded = model(**tokenized_non_padded, labels=labels_non_padded).loss
tokenized_padded = tokenizer(sequence, padding="max_length", max_length=16, return_tensors="pt")
tokenized_padded.to(torch_device)
labels_padded = tokenized_padded.input_ids.clone()
labels_padded[labels_padded == tokenizer.pad_token_id] = -100
loss_padded = model(**tokenized_padded, labels=labels_padded).loss
torch.testing.assert_close(loss_non_padded, loss_padded, rtol=1e-3, atol=1e-3)
| XGLMModelLanguageGenerationTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/ingestion/data_sinks.py | {
"start": 582,
"end": 4065
} | class ____(Enum):
@classmethod
def from_component(
cls, component: BasePydanticVectorStore
) -> "ConfigurableComponent":
component_class = type(component)
for component_type in cls:
if component_type.value.component_type == component_class:
return component_type
raise ValueError(
f"Component {component} is not a supported data sink component."
)
def build_configured_data_sink(
self, component: BasePydanticVectorStore
) -> "ConfiguredDataSink":
component_type = self.value.component_type
if not isinstance(component, component_type):
raise ValueError(
f"The enum value {self} is not compatible with component of "
f"type {type(component)}"
)
return ConfiguredDataSink[component_type]( # type: ignore
component=component, name=self.value.name
)
def build_configurable_data_sink_enum() -> ConfigurableComponent:
    """
    Build an enum of configurable data sinks,
    with members included only when the corresponding vector store is available.
    """
enum_members = []
try:
from llama_index.vector_stores.chroma import (
ChromaVectorStore,
) # pants: no-infer-dep
enum_members.append(
(
"CHROMA",
DataSink(
name="Chroma",
component_type=ChromaVectorStore,
),
)
)
except (ImportError, ValidationError):
pass
try:
from llama_index.vector_stores.pinecone import (
PineconeVectorStore,
) # pants: no-infer-dep
enum_members.append(
(
"PINECONE",
DataSink(
name="Pinecone",
component_type=PineconeVectorStore,
),
)
)
except (ImportError, ValidationError):
pass
try:
from llama_index.vector_stores.postgres import (
PGVectorStore,
) # pants: no-infer-dep
enum_members.append(
(
"POSTGRES",
DataSink(
name="PostgreSQL",
component_type=PGVectorStore,
),
)
)
except (ImportError, ValidationError):
pass
try:
from llama_index.vector_stores.qdrant import (
QdrantVectorStore,
) # pants: no-infer-dep
enum_members.append(
(
"QDRANT",
DataSink(
name="Qdrant",
component_type=QdrantVectorStore,
),
)
)
except (ImportError, ValidationError):
pass
try:
from llama_index.vector_stores.weaviate import (
WeaviateVectorStore,
) # pants: no-infer-dep
enum_members.append(
(
"WEAVIATE",
DataSink(
name="Weaviate",
component_type=WeaviateVectorStore,
),
)
)
except (ImportError, ValidationError):
pass
return ConfigurableComponent("ConfigurableDataSinks", enum_members) # type: ignore
ConfigurableDataSinks = build_configurable_data_sink_enum()
T = TypeVar("T", bound=BasePydanticVectorStore)
| ConfigurableComponent |
python | pytorch__pytorch | torch/_inductor/config.py | {
"start": 83706,
"end": 88045
} | class ____:
# master switch for all debugging flags below
enabled = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
# save real tensors
save_real_tensors = os.environ.get("TORCH_COMPILE_DEBUG_SAVE_REAL", "0") == "1"
# Save debug information to a temporary directory
# If not specified, a temp directory will be created by system
debug_dir: Optional[str] = None
# Save python logger call >=logging.DEBUG
debug_log = False
# Save python logger call >=logging.INFO
info_log = False
# Save input FX graph (post decomps, pre optimization)
fx_graph = True
# Save FX graph after transformations
fx_graph_transformed = True
# Save TorchInductor IR before fusion pass
ir_pre_fusion = True
# Save TorchInductor IR after fusion pass
ir_post_fusion = True
# Copy generated code to trace dir
output_code = True
# SVG figure showing post-fusion graph
graph_diagram = os.environ.get("INDUCTOR_POST_FUSION_SVG", "0") == "1"
# SVG figure showing fx with fusion
draw_orig_fx_graph = os.environ.get("INDUCTOR_ORIG_FX_SVG", "0") == "1"
# We draw our fx graphs with the "record" shape attribute by default.
# Sometimes, when the graph is very complex, we may hit dot errors like below:
# "flat edge between adjacent nodes one of which has a record shape -
# replace records with HTML-like labels"
# and thus fail to generate a graph. So, let's give the user an option
# to specify the shape attribute for the dot graph. For example, passing
# INDUCTOR_DOT_GRAPH_SHAPE_SVG = "none" would let us generate HTML-like labels
# to workaround the above failure.
dot_graph_shape = os.environ.get("INDUCTOR_DOT_GRAPH_SHAPE_SVG", None)
# If not None, this is the URL that saves the SVG files of the input/output
# graph of each pass that changed the graph
# The nodes that are being transformed in each pass will be colored in yellow
# URL only supports local directory for now
log_url_for_graph_xform = os.environ.get("INDUCTOR_LOG_URL_FOR_GRAPH_XFORM", None)
# Store cProfile (see snakeviz to view)
compile_profile = False
# Upload the .tar.gz file
# Needs to be overridden based on specific environment needs
upload_tar: Optional[Callable[[str], None]] = None
log_autotuning_results = os.environ.get("LOG_AUTOTUNE_RESULTS", "0") == "1"
# Save mapping info from inductor generated kernel to post_grad/pre_grad fx nodes
# Levels:
# 0 - disabled (default)
# 1 - normal
# 2 - basic
# Backward compatibility:
# If TORCH_COMPILE_DEBUG=1, level is set to at least 1.
# If INDUCTOR_PROVENANCE is set, use its integer value.
provenance_tracking_level: int = int(
os.environ.get(
"INDUCTOR_PROVENANCE", os.environ.get("TORCH_COMPILE_DEBUG", "0")
)
)
_save_config_ignore: list[str] = [
# workaround: "Can't pickle <function ...>"
"trace.upload_tar",
"joint_custom_pre_pass",
"joint_custom_post_pass",
"pre_grad_custom_pass",
"aot_inductor.repro_level",
"aot_inductor.dump_aoti_minifier",
"post_grad_custom_pre_pass",
"post_grad_custom_post_pass",
"_fuse_ddp_communication_passes",
"_pre_fusion_custom_pass",
]
_cache_config_ignore_prefix: list[str] = [
# trace functions are not relevant to config caching
"trace",
# uses absolute path
"cuda.cutlass_dir",
# not relevant
"worker_start_method",
"compile_threads",
# see CustomGraphPass; these are handled specially
"post_grad_custom_post_pass",
"post_grad_custom_pre_pass",
"joint_custom_pre_pass",
"joint_custom_post_pass",
"_fuse_ddp_communication_passes",
"_pre_fusion_custom_pass",
# tests assume that changes here don't invalidate cache
"always_complex_memory_overlap_TESTING_ONLY",
# cache related options are not relevant to cache results
"fx_graph_cache",
"fx_graph_remote_cache",
"autotune_local_cache",
"autotune_remote_cache",
]
# External callable for matmul tuning candidates
external_matmul: list[Callable[[torch.Tensor, torch.Tensor, torch.Tensor], None]] = []
write_are_deterministic_algorithms_enabled = (
os.getenv("TORCHINDUCTOR_WRITE_ARE_DETERMINISTIC_ALGORITHMS_ENABLED", "1") == "1"
)
| trace |
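A quick, self-contained illustration of the provenance-level fallback encoded above: INDUCTOR_PROVENANCE wins when set, otherwise TORCH_COMPILE_DEBUG=1 promotes the level to 1 (environment values here are illustrative):

import os

os.environ.pop("INDUCTOR_PROVENANCE", None)
os.environ["TORCH_COMPILE_DEBUG"] = "1"
level = int(os.environ.get("INDUCTOR_PROVENANCE",
                           os.environ.get("TORCH_COMPILE_DEBUG", "0")))
print(level)  # 1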
python | falconry__falcon | falcon/inspect.py | {
"start": 9151,
"end": 9815
} | class ____:
__visit_name__ = 'N/A'
def to_string(self, verbose: bool = False, internal: bool = False) -> str:
"""Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
"""
return StringVisitor(verbose, internal).process(self)
def __repr__(self) -> str:
return self.to_string()
| _Traversable |
python | jina-ai__jina | tests/integration/hub_usage/dummyhub_pretrained/__init__.py | {
"start": 208,
"end": 430
} | class ____(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
raise ModelCheckpointNotExist
def craft(self, *args, **kwargs) -> Dict:
pass
| DummyPretrainedExecutor |
python | huggingface__transformers | src/transformers/models/altclip/processing_altclip.py | {
"start": 775,
"end": 1556
} | class ____(ProcessorMixin):
r"""
Constructs an AltCLIP processor which wraps a CLIP image processor and an XLM-Roberta tokenizer into a single
processor.
[`AltCLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`XLMRobertaTokenizerFast`]. See
the [`~AltCLIPProcessor.__call__`] and [`~AltCLIPProcessor.decode`] for more information.
Args:
image_processor ([`CLIPImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`XLMRobertaTokenizerFast`], *optional*):
The tokenizer is a required input.
"""
def __init__(self, image_processor=None, tokenizer=None):
super().__init__(image_processor, tokenizer)
__all__ = ["AltCLIPProcessor"]
| AltCLIPProcessor |
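A hedged usage sketch for such a processor; the checkpoint name and the combined text/image call follow the usual transformers conventions and are assumptions here, not taken from the record:

from PIL import Image
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")  # assumed checkpoint
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo"], images=image,
                   return_tensors="pt", padding=True)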
python | has2k1__plotnine | plotnine/stats/stat_bin_2d.py | {
"start": 246,
"end": 5305
} | class ____(stat):
"""
2-dimensional bin counts
{usage}
Parameters
----------
{common_parameters}
bins : int, default=30
Number of bins. Overridden by binwidth.
breaks : array_like | tuple[array_like, array_like] , default=None
Bin boundaries. This supersedes the `binwidth`, `bins`,
`center` and `boundary`. It can be an array_like or
a list of two array_likes to provide distinct breaks for
the `x` and `y` axes.
binwidth : float, default=None
The width of the bins. The default is to use `bins` bins that
cover the range of the data. You should always override this
value, exploring multiple widths to find the best to illustrate
the stories in your data.
drop : bool, default=False
If `True`{.py}, removes all cells with zero counts.
See Also
--------
plotnine.geom_rect : The default `geom` for this `stat`.
"""
_aesthetics_doc = """
{aesthetics_table}
**Options for computed aesthetics**
```python
"xmin" # x lower bound for the bin
"xmax" # x upper bound for the bin
"ymin" # y lower bound for the bin
"ymax" # y upper bound for the bin
"count" # number of points in bin
"density" # density of points in bin, scaled to integrate to 1
```
"""
REQUIRED_AES = {"x", "y"}
DEFAULT_PARAMS = {
"geom": "rect",
"position": "identity",
"na_rm": False,
"bins": 30,
"breaks": None,
"binwidth": None,
"drop": True,
}
DEFAULT_AES = {"fill": after_stat("count"), "weight": None}
CREATES = {"xmin", "xmax", "ymin", "ymax", "count", "density"}
def setup_params(self, data):
params = self.params
params["bins"] = dual_param(params["bins"])
params["breaks"] = dual_param(params["breaks"])
params["binwidth"] = dual_param(params["binwidth"])
def compute_group(self, data, scales):
bins = self.params["bins"]
breaks = self.params["breaks"]
binwidth = self.params["binwidth"]
drop = self.params["drop"]
weight = data.get("weight")
if weight is None:
weight = np.ones(len(data["x"]))
# The bins will be over the dimension(full size) of the
# trained x and y scales
range_x = scales.x.dimension()
range_y = scales.y.dimension()
# Trick pd.cut into creating cuts over the range of
# the scale
x = np.append(data["x"], range_x)
y = np.append(data["y"], range_y)
# create the cutting parameters
xbreaks = fuzzybreaks(
scales.x, breaks=breaks.x, binwidth=binwidth.x, bins=bins.x
)
ybreaks = fuzzybreaks(
scales.y, breaks.y, binwidth=binwidth.y, bins=bins.y
)
xbins = pd.cut(
x,
bins=xbreaks, # pyright: ignore
labels=False,
right=True,
)
ybins = pd.cut(
y,
bins=ybreaks, # pyright: ignore
labels=False,
right=True,
)
# Remove the spurious points
xbins = xbins[:-2]
ybins = ybins[:-2]
# Because we are graphing, we want to see equal breaks;
# the original breaks have extra room to the left
ybreaks[0] -= np.diff(np.diff(ybreaks))[0]
xbreaks[0] -= np.diff(np.diff(xbreaks))[0]
bins_grid_long = pd.DataFrame(
{
"xbins": xbins,
"ybins": ybins,
"weight": weight,
}
)
table = bins_grid_long.pivot_table(
"weight", index=["xbins", "ybins"], aggfunc="sum"
)["weight"]
# create rectangles
rects = []
keys = itertools.product(
range(len(ybreaks) - 1), range(len(xbreaks) - 1)
)
for j, i in keys:
try:
cval = table[(i, j)]
except KeyError:
if drop:
continue
cval = 0
# xmin, xmax, ymin, ymax, count
row = [
xbreaks[i],
xbreaks[i + 1],
ybreaks[j],
ybreaks[j + 1],
cval,
]
rects.append(row)
new_data = pd.DataFrame(
rects, columns=["xmin", "xmax", "ymin", "ymax", "count"]
)
new_data["density"] = new_data["count"] / new_data["count"].sum()
return new_data
stat_bin2d = stat_bin_2d
def dual_param(value):
"""
Return duplicate of parameter value
Used to apply same value to x & y axes if only one
value is given.
"""
if is_scalar(value):
return types.SimpleNamespace(x=value, y=value)
if hasattr(value, "x") and hasattr(value, "y"):
return value
if len(value) == 2:
return types.SimpleNamespace(x=value[0], y=value[1])
else:
return types.SimpleNamespace(x=value, y=value)
| stat_bin_2d |
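The dual_param helper above mirrors a scalar onto both axes; this simplified, self-contained re-sketch (a naive length check stands in for plotnine's is_scalar) demonstrates the expected behavior:

import types

def dual_param_sketch(value):
    # simplified stand-in for the record's dual_param
    if not hasattr(value, "__len__"):
        return types.SimpleNamespace(x=value, y=value)
    if len(value) == 2:
        return types.SimpleNamespace(x=value[0], y=value[1])
    return types.SimpleNamespace(x=value, y=value)

assert vars(dual_param_sketch(30)) == {"x": 30, "y": 30}
assert vars(dual_param_sketch((10, 20))) == {"x": 10, "y": 20}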
python | readthedocs__readthedocs.org | readthedocs/api/v3/views.py | {
"start": 14782,
"end": 16222
} | class ____(BuildsViewSet, CreateModelMixin):
def get_serializer_class(self):
if self.action == "create":
return BuildCreateSerializer
return super().get_serializer_class()
def create(self, request, **kwargs): # pylint: disable=arguments-differ
project = self._get_parent_project()
version = self._get_parent_version()
build_retry = None
commit = None
if version.is_external:
# We use the last build for a version here as we want to update VCS
# providers and need to reference the latest commit to do so.
build_retry = version.last_build
if build_retry:
commit = build_retry.commit
_, build = trigger_build(
project=project,
version=version,
commit=commit,
)
# TODO: refactor this to be a serializer
# BuildTriggeredSerializer(build, project, version).data
data = {
"build": BuildSerializer(build).data,
"project": ProjectSerializer(project).data,
"version": VersionSerializer(version).data,
}
if build:
data.update({"triggered": True})
code = status.HTTP_202_ACCEPTED
else:
data.update({"triggered": False})
code = status.HTTP_400_BAD_REQUEST
return Response(data=data, status=code)
| BuildsCreateViewSet |
python | openai__openai-python | src/openai/types/responses/response_input_item_param.py | {
"start": 2125,
"end": 2774
} | class ____(TypedDict, total=False):
content: Required[ResponseInputMessageContentListParam]
"""
A list of one or many input items to the model, containing different content
types.
"""
role: Required[Literal["user", "system", "developer"]]
"""The role of the message input. One of `user`, `system`, or `developer`."""
status: Literal["in_progress", "completed", "incomplete"]
"""The status of item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
type: Literal["message"]
"""The type of the message input. Always set to `message`."""
| Message |
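At runtime a TypedDict is a plain dict; a minimal, hedged construction of this Message shape (the input_text content item follows the surrounding API's conventions and is an assumption here):

msg = {
    "type": "message",
    "role": "user",
    "content": [{"type": "input_text", "text": "hello"}],
}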
python | ray-project__ray | python/ray/data/tests/test_autoscaler.py | {
"start": 11853,
"end": 12075
} | class ____:
def __init__(self, barrier):
self._barrier = barrier
def __call__(self, x):
ray.get(self._barrier.wait.remote(), timeout=10)
return x
@ray.remote(max_concurrency=10)
| BarrierWaiter |
python | bokeh__bokeh | tests/unit/bokeh/core/test_has_props.py | {
"start": 15072,
"end": 15213
} | class ____(hp.HasProps, hp.Local):
f4 = Int(default=4)
f3 = Int(default=3)
f2 = Int(default=2)
f1 = Int(default=1)
| Some3HasProps |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_scatter11.py | {
"start": 315,
"end": 1590
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_scatter11.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart(
{"type": "scatter", "subtype": "straight_with_markers"}
)
chart.axis_ids = [47439232, 47670400]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"smooth": True,
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ROI.py | {
"start": 66538,
"end": 69518
} | class ____(object):
"""Implements default mouse drag behavior for ROI (not for ROI handles).
"""
def __init__(self, roi):
self.roi = roi
self.dragMode = None
self.startState = None
self.snapModifier = QtCore.Qt.KeyboardModifier.ControlModifier
self.translateModifier = QtCore.Qt.KeyboardModifier.NoModifier
self.rotateModifier = QtCore.Qt.KeyboardModifier.AltModifier
self.scaleModifier = QtCore.Qt.KeyboardModifier.ShiftModifier
self.rotateSpeed = 0.5
self.scaleSpeed = 1.01
def mouseDragEvent(self, ev):
roi = self.roi
if ev.isStart():
if ev.button() == QtCore.Qt.MouseButton.LeftButton:
roi.setSelected(True)
mods = ev.modifiers()
try:
mods &= ~self.snapModifier
except ValueError:
# workaround bug in Python 3.11.4 that affects PyQt
if mods & self.snapModifier:
mods ^= self.snapModifier
if roi.translatable and mods == self.translateModifier:
self.dragMode = 'translate'
elif roi.rotatable and mods == self.rotateModifier:
self.dragMode = 'rotate'
elif roi.resizable and mods == self.scaleModifier:
self.dragMode = 'scale'
else:
self.dragMode = None
if self.dragMode is not None:
roi._moveStarted()
self.startPos = roi.mapToParent(ev.buttonDownPos())
self.startState = roi.saveState()
self.cursorOffset = roi.pos() - self.startPos
ev.accept()
else:
ev.ignore()
else:
self.dragMode = None
ev.ignore()
if ev.isFinish() and self.dragMode is not None:
roi._moveFinished()
return
# roi.isMoving becomes False if the move was cancelled by right-click
if not roi.isMoving or self.dragMode is None:
return
snap = True if (ev.modifiers() & self.snapModifier) else None
pos = roi.mapToParent(ev.pos())
if self.dragMode == 'translate':
newPos = pos + self.cursorOffset
roi.translate(newPos - roi.pos(), snap=snap, finish=False)
elif self.dragMode == 'rotate':
diff = self.rotateSpeed * (ev.scenePos() - ev.buttonDownScenePos()).x()
angle = self.startState['angle'] - diff
roi.setAngle(angle, centerLocal=ev.buttonDownPos(), snap=snap, finish=False)
elif self.dragMode == 'scale':
diff = self.scaleSpeed ** -(ev.scenePos() - ev.buttonDownScenePos()).y()
roi.setSize(Point(self.startState['size']) * diff, centerLocal=ev.buttonDownPos(), snap=snap, finish=False)
| MouseDragHandler |
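The try/except around mods &= ~self.snapModifier in the record above works around a Python 3.11.4 tightening of flag-enum inversion; the XOR fallback clears the bit only when it is set, which is equivalent. A minimal illustration with a standard Flag enum:

import enum

class Mods(enum.Flag):
    NONE = 0
    CTRL = 1
    ALT = 2

mods = Mods.CTRL | Mods.ALT
if mods & Mods.CTRL:
    mods ^= Mods.CTRL  # clear CTRL without using ~
print(mods)            # Mods.ALT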
python | kamyu104__LeetCode-Solutions | Python/take-gifts-from-the-richest-pile.py | {
"start": 59,
"end": 450
} | class ____(object):
def pickGifts(self, gifts, k):
"""
:type gifts: List[int]
:type k: int
:rtype: int
"""
for i, x in enumerate(gifts):
gifts[i] = -x
heapq.heapify(gifts)
for _ in xrange(k):
x = heapq.heappop(gifts)
heapq.heappush(gifts, -int((-x)**0.5))
return -sum(gifts)
| Solution |
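The solution above gets a max-heap out of heapq by negating every value; a self-contained Python 3 trace (the repo file itself targets Python 2, hence xrange):

import heapq

gifts = [-g for g in [25, 64, 9, 4, 100]]
heapq.heapify(gifts)
for _ in range(4):                      # k = 4
    g = -heapq.heappop(gifts)           # largest remaining pile
    heapq.heappush(gifts, -int(g ** 0.5))
print(-sum(gifts))                      # 29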
python | ray-project__ray | release/train_tests/benchmark/recsys/torchrec_runner.py | {
"start": 481,
"end": 4674
} | class ____(TrainLoopRunner):
def _setup(self):
if self.factory.benchmark_config.mock_gpu:
raise ValueError("Mock GPU is not supported for running TorchRec.")
self.model = self.factory.get_model()
# TODO: This code depends on the model having a fused_optimizer,
# which is hidden in the `get_model` method of the factory.
dense_optimizer = KeyedOptimizerWrapper(
dict(in_backward_optimizer_filter(self.model.named_parameters())),
lambda params: torch.optim.Adagrad(params, lr=15.0, eps=1e-8),
)
self.optimizer = CombinedOptimizer(
[self.model.fused_optimizer, dense_optimizer]
)
self._data_dist_stream = torch.cuda.Stream()
self._h2d_stream = torch.cuda.Stream()
def _wrap_dataloader(self, dataloader, train: bool = True):
dataloader_iter = iter(dataloader)
device = ray.train.torch.get_device()
sdd = SparseDataDistUtil(
model=self.model,
data_dist_stream=self._data_dist_stream,
# prefetch_stream=torch.cuda.Stream(),
)
pipeline = [
PipelineStage(
name="data_copy",
runnable=lambda batch: batch.to(device, non_blocking=True),
stream=self._h2d_stream,
),
PipelineStage(
name="start_sparse_data_dist",
runnable=sdd.start_sparse_data_dist,
stream=sdd.data_dist_stream,
fill_callback=sdd.wait_sparse_data_dist,
),
# PipelineStage(
# name="prefetch",
# runnable=sdd.prefetch,
# stream=sdd.prefetch_stream,
# fill_callback=sdd.load_prefetch,
# ),
]
pipeline = StagedTrainPipeline(pipeline_stages=pipeline)
def dataloader_with_torchrec_pipeline():
while batch := pipeline.progress(dataloader_iter):
yield batch
pipeline.flush_end()
return super()._wrap_dataloader(
dataloader_with_torchrec_pipeline(), train=train
)
def _train_step(self, batch):
self.model.train()
self.optimizer.zero_grad()
loss, out = self.model(batch)
loss.backward()
self.optimizer.step()
def _validate_step(self, batch):
self.model.eval()
with torch.no_grad():
loss, out = self.model(batch)
return loss
def _get_model_and_optim_filenames(self):
rank = ray.train.get_context().get_world_rank()
return f"model_shard_{rank=}.pt", f"optimizer_shard_{rank=}.pt"
def _save_training_state(self, local_dir: str):
# NOTE: Embedding table shards are on different GPUs,
# so we need to do distributed checkpointing.
# This checkpoint format must be loaded on the same number
# of workers and GPU types, since it was sharded with a compute-specific plan.
model_filename, optimizer_filename = self._get_model_and_optim_filenames()
torch.save(self.model.state_dict(), os.path.join(local_dir, model_filename))
torch.save(
self.optimizer.state_dict(), os.path.join(local_dir, optimizer_filename)
)
def _load_training_state(self, local_dir: str):
model_filename, optimizer_filename = self._get_model_and_optim_filenames()
self.model.load_state_dict(
torch.load(
os.path.join(local_dir, model_filename),
map_location=self.model.device,
)
)
self.optimizer.load_state_dict(
torch.load(
os.path.join(local_dir, optimizer_filename),
map_location=self.model.device,
)
)
def _cleanup(self):
# NOTE: This cleanup is needed to avoid zombie Train worker processes
# that hang on gc collect on python teardown.
del self.model
del self.optimizer
del self._data_dist_stream
del self._h2d_stream
torch.cuda.synchronize()
torch.cuda.empty_cache()
gc.collect()
| TorchRecRunner |
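A small aside on the {rank=} f-string used for the shard filenames above: the equals sign embeds the variable name in the output, so each shard file is self-describing:

rank = 3
print(f"model_shard_{rank=}.pt")  # model_shard_rank=3.pt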
python | protocolbuffers__protobuf | python/python_version_test.py | {
"start": 380,
"end": 849
} | class ____(unittest.TestCase):
def testPython3(self):
"""Test that we can import nested import public messages."""
exp = os.getenv('KOKORO_PYTHON_VERSION', '')
if not exp:
print('No system python version found, skipping check', file=sys.stderr)
return
self.assertTrue(
sys.version.startswith(exp),
'Expected Python %s but found Python %s' % (exp, sys.version))
if __name__ == '__main__':
unittest.main()
| PythonVersionTest |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 121301,
"end": 121451
} | class ____:
xlTextVisualLTR = 1 # from enum XlTextVisualLayoutType
xlTextVisualRTL = 2 # from enum XlTextVisualLayoutType
| TextVisualLayoutType |
python | getsentry__sentry | src/sentry/analytics/events/eventuser_equality_check.py | {
"start": 81,
"end": 330
} | class ____(analytics.Event):
event_id: str
project_id: int
group_id: int
snuba_eventuser_equality: bool
event_eventuser_equality: bool
snuba_event_equality: bool
analytics.register(EventUserEqualityCheck)
| EventUserEqualityCheck |
python | ray-project__ray | python/ray/tests/spark/test_basic.py | {
"start": 9430,
"end": 10054
} | class ____(RayOnSparkCPUClusterTestBase):
@classmethod
def setup_class(cls):
cls.num_total_cpus = 2
cls.num_total_gpus = 0
cls.num_cpus_per_spark_task = 1
cls.num_gpus_per_spark_task = 0
cls.max_spark_tasks = 2
os.environ["SPARK_WORKER_CORES"] = "2"
cls.spark = (
SparkSession.builder.master("local-cluster[1, 2, 1024]")
.config("spark.task.cpus", "1")
.config("spark.task.maxFailures", "1")
.config("spark.executorEnv.RAY_ON_SPARK_WORKER_CPU_CORES", "2")
.getOrCreate()
)
| TestBasicSparkCluster |
python | python-pillow__Pillow | src/PIL/Image.py | {
"start": 13275,
"end": 107360
} | class ____:
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
format: str | None = None
format_description: str | None = None
_close_exclusive_fp_after_loading = True
def __init__(self) -> None:
# FIXME: take "new" parameters / other image?
self._im: core.ImagingCore | DeferredError | None = None
self._mode = ""
self._size = (0, 0)
self.palette: ImagePalette.ImagePalette | None = None
self.info: dict[str | tuple[int, int], Any] = {}
self.readonly = 0
self._exif: Exif | None = None
@property
def im(self) -> core.ImagingCore:
if isinstance(self._im, DeferredError):
raise self._im.ex
assert self._im is not None
return self._im
@im.setter
def im(self, im: core.ImagingCore) -> None:
self._im = im
@property
def width(self) -> int:
return self.size[0]
@property
def height(self) -> int:
return self.size[1]
@property
def size(self) -> tuple[int, int]:
return self._size
@property
def mode(self) -> str:
return self._mode
@property
def readonly(self) -> int:
return (self._im and self._im.readonly) or self._readonly
@readonly.setter
def readonly(self, readonly: int) -> None:
self._readonly = readonly
def _new(self, im: core.ImagingCore) -> Image:
new = Image()
new.im = im
new._mode = im.mode
new._size = im.size
if im.mode in ("P", "PA"):
if self.palette:
new.palette = self.palette.copy()
else:
from . import ImagePalette
new.palette = ImagePalette.ImagePalette()
new.info = self.info.copy()
return new
# Context manager support
def __enter__(self):
return self
def __exit__(self, *args):
from . import ImageFile
if isinstance(self, ImageFile.ImageFile):
if getattr(self, "_exclusive_fp", False):
self._close_fp()
self.fp = None
def close(self) -> None:
"""
This operation will destroy the image core and release its memory.
The image data will be unusable afterward.
This function is required to close images that have multiple frames or
have not had their file read and closed by the
:py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
more information.
"""
if getattr(self, "map", None):
if sys.platform == "win32" and hasattr(sys, "pypy_version_info"):
self.map.close()
self.map: mmap.mmap | None = None
# Instead of simply setting to None, we're setting up a
# deferred error that will better explain that the core image
# object is gone.
self._im = DeferredError(ValueError("Operation on closed image"))
def _copy(self) -> None:
self.load()
self.im = self.im.copy()
self.readonly = 0
def _ensure_mutable(self) -> None:
if self.readonly:
self._copy()
else:
self.load()
def _dump(
self, file: str | None = None, format: str | None = None, **options: Any
) -> str:
suffix = ""
if format:
suffix = f".{format}"
if not file:
f, filename = tempfile.mkstemp(suffix)
os.close(f)
else:
filename = file
if not filename.endswith(suffix):
filename = filename + suffix
self.load()
if not format or format == "PPM":
self.im.save_ppm(filename)
else:
self.save(filename, format, **options)
return filename
def __eq__(self, other: object) -> bool:
if self.__class__ is not other.__class__:
return False
assert isinstance(other, Image)
return (
self.mode == other.mode
and self.size == other.size
and self.info == other.info
and self.getpalette() == other.getpalette()
and self.tobytes() == other.tobytes()
)
def __repr__(self) -> str:
return (
f"<{self.__class__.__module__}.{self.__class__.__name__} "
f"image mode={self.mode} size={self.size[0]}x{self.size[1]} "
f"at 0x{id(self):X}>"
)
def _repr_pretty_(self, p: PrettyPrinter, cycle: bool) -> None:
"""IPython plain text display support"""
# Same as __repr__ but without unpredictable id(self),
# to keep Jupyter notebook `text/plain` output stable.
p.text(
f"<{self.__class__.__module__}.{self.__class__.__name__} "
f"image mode={self.mode} size={self.size[0]}x{self.size[1]}>"
)
def _repr_image(self, image_format: str, **kwargs: Any) -> bytes | None:
"""Helper function for iPython display hook.
:param image_format: Image format.
:returns: image as bytes, saved into the given format.
"""
b = io.BytesIO()
try:
self.save(b, image_format, **kwargs)
except Exception:
return None
return b.getvalue()
def _repr_png_(self) -> bytes | None:
"""iPython display hook support for PNG format.
:returns: PNG version of the image as bytes
"""
return self._repr_image("PNG", compress_level=1)
def _repr_jpeg_(self) -> bytes | None:
"""iPython display hook support for JPEG format.
:returns: JPEG version of the image as bytes
"""
return self._repr_image("JPEG")
@property
def __array_interface__(self) -> dict[str, str | bytes | int | tuple[int, ...]]:
# numpy array interface support
new: dict[str, str | bytes | int | tuple[int, ...]] = {"version": 3}
if self.mode == "1":
# Binary images need to be extended from bits to bytes
# See: https://github.com/python-pillow/Pillow/issues/350
new["data"] = self.tobytes("raw", "L")
else:
new["data"] = self.tobytes()
new["shape"], new["typestr"] = _conv_type_shape(self)
return new
def __arrow_c_schema__(self) -> object:
self.load()
return self.im.__arrow_c_schema__()
def __arrow_c_array__(
self, requested_schema: object | None = None
) -> tuple[object, object]:
self.load()
return (self.im.__arrow_c_schema__(), self.im.__arrow_c_array__())
def __getstate__(self) -> list[Any]:
im_data = self.tobytes() # load image first
return [self.info, self.mode, self.size, self.getpalette(), im_data]
def __setstate__(self, state: list[Any]) -> None:
Image.__init__(self)
info, mode, size, palette, data = state[:5]
self.info = info
self._mode = mode
self._size = size
self.im = core.new(mode, size)
if mode in ("L", "LA", "P", "PA") and palette:
self.putpalette(palette)
self.frombytes(data)
def tobytes(self, encoder_name: str = "raw", *args: Any) -> bytes:
"""
Return image as a bytes object.
.. warning::
This method returns raw image data derived from Pillow's internal
storage. For compressed image data (e.g. PNG, JPEG) use
:meth:`~.save`, with a BytesIO parameter for in-memory data.
:param encoder_name: What encoder to use.
The default is to use the standard "raw" encoder.
To see how this packs pixel data into the returned
bytes, see :file:`libImaging/Pack.c`.
A list of C encoders can be seen under codecs
section of the function array in
:file:`_imaging.c`. Python encoders are registered
within the relevant plugins.
:param args: Extra arguments to the encoder.
:returns: A :py:class:`bytes` object.
"""
encoder_args: Any = args
if len(encoder_args) == 1 and isinstance(encoder_args[0], tuple):
# may pass tuple instead of argument list
encoder_args = encoder_args[0]
if encoder_name == "raw" and encoder_args == ():
encoder_args = self.mode
self.load()
if self.width == 0 or self.height == 0:
return b""
# unpack data
e = _getencoder(self.mode, encoder_name, encoder_args)
e.setimage(self.im)
from . import ImageFile
bufsize = max(ImageFile.MAXBLOCK, self.size[0] * 4) # see RawEncode.c
output = []
while True:
bytes_consumed, errcode, data = e.encode(bufsize)
output.append(data)
if errcode:
break
if errcode < 0:
msg = f"encoder error {errcode} in tobytes"
raise RuntimeError(msg)
return b"".join(output)
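# Hedged usage note (assumes an existing Image `im`): per the warning in the
# docstring above, compressed bytes come from save() into a BytesIO, e.g.
#   import io
#   buf = io.BytesIO()
#   im.save(buf, format="PNG")
#   png_bytes = buf.getvalue()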
def tobitmap(self, name: str = "image") -> bytes:
"""
Returns the image converted to an X11 bitmap.
.. note:: This method only works for mode "1" images.
:param name: The name prefix to use for the bitmap variables.
:returns: A string containing an X11 bitmap.
:raises ValueError: If the mode is not "1"
"""
self.load()
if self.mode != "1":
msg = "not a bitmap"
raise ValueError(msg)
data = self.tobytes("xbm")
return b"".join(
[
f"#define {name}_width {self.size[0]}\n".encode("ascii"),
f"#define {name}_height {self.size[1]}\n".encode("ascii"),
f"static char {name}_bits[] = {{\n".encode("ascii"),
data,
b"};",
]
)
def frombytes(
self,
data: bytes | bytearray | SupportsArrayInterface,
decoder_name: str = "raw",
*args: Any,
) -> None:
"""
Loads this image with pixel data from a bytes object.
This method is similar to the :py:func:`~PIL.Image.frombytes` function,
but loads data into this image instead of creating a new image object.
"""
if self.width == 0 or self.height == 0:
return
decoder_args: Any = args
if len(decoder_args) == 1 and isinstance(decoder_args[0], tuple):
# may pass tuple instead of argument list
decoder_args = decoder_args[0]
# default format
if decoder_name == "raw" and decoder_args == ():
decoder_args = self.mode
# unpack data
d = _getdecoder(self.mode, decoder_name, decoder_args)
d.setimage(self.im)
s = d.decode(data)
if s[0] >= 0:
msg = "not enough image data"
raise ValueError(msg)
if s[1] != 0:
msg = "cannot decode image data"
raise ValueError(msg)
def load(self) -> core.PixelAccess | None:
"""
Allocates storage for the image and loads the pixel data. In
normal cases, you don't need to call this method, since the
Image class automatically loads an opened image when it is
accessed for the first time.
If the file associated with the image was opened by Pillow, then this
method will close it. The exception to this is if the image has
multiple frames, in which case the file will be left open for seek
operations. See :ref:`file-handling` for more information.
:returns: An image access object.
:rtype: :py:class:`.PixelAccess`
"""
if self._im is not None and self.palette and self.palette.dirty:
# realize palette
mode, arr = self.palette.getdata()
self.im.putpalette(self.palette.mode, mode, arr)
self.palette.dirty = 0
self.palette.rawmode = None
if "transparency" in self.info and mode in ("LA", "PA"):
if isinstance(self.info["transparency"], int):
self.im.putpalettealpha(self.info["transparency"], 0)
else:
self.im.putpalettealphas(self.info["transparency"])
self.palette.mode = "RGBA"
else:
self.palette.palette = self.im.getpalette(
self.palette.mode, self.palette.mode
)
if self._im is not None:
return self.im.pixel_access(self.readonly)
return None
def verify(self) -> None:
"""
Verifies the contents of a file. For data read from a file, this
method attempts to determine if the file is broken, without
actually decoding the image data. If this method finds any
problems, it raises suitable exceptions. If you need to load
the image after using this method, you must reopen the image
file.
"""
pass
def convert(
self,
mode: str | None = None,
matrix: tuple[float, ...] | None = None,
dither: Dither | None = None,
palette: Palette = Palette.WEB,
colors: int = 256,
) -> Image:
"""
Returns a converted copy of this image. For the "P" mode, this
method translates pixels through the palette. If mode is
omitted, a mode is chosen so that all information in the image
and the palette can be represented without a palette.
This supports all possible conversions between "L", "RGB" and "CMYK". The
``matrix`` argument only supports "L" and "RGB".
When translating a color image to grayscale (mode "L"),
the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
The default method of converting a grayscale ("L") or "RGB"
image into a bilevel (mode "1") image uses Floyd-Steinberg
dither to approximate the original image luminosity levels. If
dither is ``None``, all values larger than 127 are set to 255 (white),
all other values to 0 (black). To use other thresholds, use the
:py:meth:`~PIL.Image.Image.point` method.
When converting from "RGBA" to "P" without a ``matrix`` argument,
this passes the operation to :py:meth:`~PIL.Image.Image.quantize`,
and ``dither`` and ``palette`` are ignored.
When converting from "PA", if an "RGBA" palette is present, the alpha
channel from the image will be used instead of the values from the palette.
:param mode: The requested mode. See: :ref:`concept-modes`.
:param matrix: An optional conversion matrix. If given, this
should be 4- or 12-tuple containing floating point values.
:param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
(default). Note that this is not used when ``matrix`` is supplied.
:param palette: Palette to use when converting from mode "RGB"
to "P". Available palettes are :data:`Palette.WEB` or
:data:`Palette.ADAPTIVE`.
:param colors: Number of colors to use for the :data:`Palette.ADAPTIVE`
palette. Defaults to 256.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
has_transparency = "transparency" in self.info
if not mode and self.mode == "P":
# determine default mode
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
if mode == "RGB" and has_transparency:
mode = "RGBA"
if not mode or (mode == self.mode and not matrix):
return self.copy()
if matrix:
# matrix conversion
if mode not in ("L", "RGB"):
msg = "illegal conversion"
raise ValueError(msg)
im = self.im.convert_matrix(mode, matrix)
new_im = self._new(im)
if has_transparency and self.im.bands == 3:
transparency = new_im.info["transparency"]
def convert_transparency(
m: tuple[float, ...], v: tuple[int, int, int]
) -> int:
value = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3] * 0.5
return max(0, min(255, int(value)))
if mode == "L":
transparency = convert_transparency(matrix, transparency)
elif len(mode) == 3:
transparency = tuple(
convert_transparency(matrix[i * 4 : i * 4 + 4], transparency)
for i in range(len(transparency))
)
new_im.info["transparency"] = transparency
return new_im
if self.mode == "RGBA":
if mode == "P":
return self.quantize(colors)
elif mode == "PA":
r, g, b, a = self.split()
rgb = merge("RGB", (r, g, b))
p = rgb.quantize(colors)
return merge("PA", (p, a))
trns = None
delete_trns = False
# transparency handling
if has_transparency:
if (self.mode in ("1", "L", "I", "I;16") and mode in ("LA", "RGBA")) or (
self.mode == "RGB" and mode in ("La", "LA", "RGBa", "RGBA")
):
# Use transparent conversion to promote from transparent
# color to an alpha channel.
new_im = self._new(
self.im.convert_transparent(mode, self.info["transparency"])
)
del new_im.info["transparency"]
return new_im
elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"):
t = self.info["transparency"]
if isinstance(t, bytes):
# Dragons. This can't be represented by a single color
warnings.warn(
"Palette images with Transparency expressed in bytes should be "
"converted to RGBA images"
)
delete_trns = True
else:
# get the new transparency color.
# use existing conversions
trns_im = new(self.mode, (1, 1))
if self.mode == "P":
assert self.palette is not None
trns_im.putpalette(self.palette, self.palette.mode)
if isinstance(t, tuple):
err = "Couldn't allocate a palette color for transparency"
assert trns_im.palette is not None
try:
t = trns_im.palette.getcolor(t, self)
except ValueError as e:
if str(e) == "cannot allocate more than 256 colors":
# If all 256 colors are in use,
# then there is no need for transparency
t = None
else:
raise ValueError(err) from e
if t is None:
trns = None
else:
trns_im.putpixel((0, 0), t)
if mode in ("L", "RGB"):
trns_im = trns_im.convert(mode)
else:
# can't just retrieve the palette number, got to do it
# after quantization.
trns_im = trns_im.convert("RGB")
trns = trns_im.getpixel((0, 0))
elif self.mode == "P" and mode in ("LA", "PA", "RGBA"):
t = self.info["transparency"]
delete_trns = True
if isinstance(t, bytes):
self.im.putpalettealphas(t)
elif isinstance(t, int):
self.im.putpalettealpha(t, 0)
else:
msg = "Transparency for P mode should be bytes or int"
raise ValueError(msg)
if mode == "P" and palette == Palette.ADAPTIVE:
im = self.im.quantize(colors)
new_im = self._new(im)
from . import ImagePalette
new_im.palette = ImagePalette.ImagePalette(
"RGB", new_im.im.getpalette("RGB")
)
if delete_trns:
# This could possibly happen if we requantize to fewer colors.
# The transparency would be totally off in that case.
del new_im.info["transparency"]
if trns is not None:
try:
new_im.info["transparency"] = new_im.palette.getcolor(
cast(tuple[int, ...], trns), # trns was converted to RGB
new_im,
)
except Exception:
# if we can't make a transparent color, don't leave the old
# transparency hanging around to mess us up.
del new_im.info["transparency"]
warnings.warn("Couldn't allocate palette entry for transparency")
return new_im
if "LAB" in (self.mode, mode):
im = self
if mode == "LAB":
if im.mode not in ("RGB", "RGBA", "RGBX"):
im = im.convert("RGBA")
other_mode = im.mode
else:
other_mode = mode
if other_mode in ("RGB", "RGBA", "RGBX"):
from . import ImageCms
srgb = ImageCms.createProfile("sRGB")
lab = ImageCms.createProfile("LAB")
profiles = [lab, srgb] if im.mode == "LAB" else [srgb, lab]
transform = ImageCms.buildTransform(
profiles[0], profiles[1], im.mode, mode
)
return transform.apply(im)
# colorspace conversion
if dither is None:
dither = Dither.FLOYDSTEINBERG
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
modebase = getmodebase(self.mode)
if modebase == self.mode:
raise
im = self.im.convert(modebase)
im = im.convert(mode, dither)
except KeyError as e:
msg = "illegal conversion"
raise ValueError(msg) from e
new_im = self._new(im)
if mode in ("P", "PA") and palette != Palette.ADAPTIVE:
from . import ImagePalette
new_im.palette = ImagePalette.ImagePalette("RGB", im.getpalette("RGB"))
if delete_trns:
# crash fail if we leave a bytes transparency in an rgb/l mode.
del new_im.info["transparency"]
if trns is not None:
if new_im.mode == "P" and new_im.palette:
try:
new_im.info["transparency"] = new_im.palette.getcolor(
cast(tuple[int, ...], trns), new_im # trns was converted to RGB
)
except ValueError as e:
del new_im.info["transparency"]
if str(e) != "cannot allocate more than 256 colors":
# If all 256 colors are in use,
# then there is no need for transparency
warnings.warn(
"Couldn't allocate palette entry for transparency"
)
else:
new_im.info["transparency"] = trns
return new_im
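# Worked example of the ITU-R 601-2 luma transform documented above, for one
# illustrative RGB pixel (255, 128, 0):
#   L = 255*299/1000 + 128*587/1000 + 0*114/1000 = 151.381 -> 151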
def quantize(
self,
colors: int = 256,
method: int | None = None,
kmeans: int = 0,
palette: Image | None = None,
dither: Dither = Dither.FLOYDSTEINBERG,
) -> Image:
"""
Convert the image to 'P' mode with the specified number
of colors.
:param colors: The desired number of colors, <= 256
:param method: :data:`Quantize.MEDIANCUT` (median cut),
:data:`Quantize.MAXCOVERAGE` (maximum coverage),
:data:`Quantize.FASTOCTREE` (fast octree),
:data:`Quantize.LIBIMAGEQUANT` (libimagequant; check support
using :py:func:`PIL.features.check_feature` with
``feature="libimagequant"``).
By default, :data:`Quantize.MEDIANCUT` will be used.
The exception to this is RGBA images. :data:`Quantize.MEDIANCUT`
and :data:`Quantize.MAXCOVERAGE` do not support RGBA images, so
:data:`Quantize.FASTOCTREE` is used by default instead.
:param kmeans: Integer greater than or equal to zero.
:param palette: Quantize to the palette of a given
:py:class:`PIL.Image.Image`.
:param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
(default).
:returns: A new image
"""
self.load()
if method is None:
# defaults:
method = Quantize.MEDIANCUT
if self.mode == "RGBA":
method = Quantize.FASTOCTREE
if self.mode == "RGBA" and method not in (
Quantize.FASTOCTREE,
Quantize.LIBIMAGEQUANT,
):
# Caller specified an invalid mode.
msg = (
"Fast Octree (method == 2) and libimagequant (method == 3) "
"are the only valid methods for quantizing RGBA images"
)
raise ValueError(msg)
if palette:
# use palette from reference image
palette.load()
if palette.mode != "P":
msg = "bad mode for palette image"
raise ValueError(msg)
if self.mode not in {"RGB", "L"}:
msg = "only RGB or L mode images can be quantized to a palette"
raise ValueError(msg)
im = self.im.convert("P", dither, palette.im)
new_im = self._new(im)
assert palette.palette is not None
new_im.palette = palette.palette.copy()
return new_im
if kmeans < 0:
msg = "kmeans must not be negative"
raise ValueError(msg)
im = self._new(self.im.quantize(colors, method, kmeans))
from . import ImagePalette
mode = im.im.getpalettemode()
palette_data = im.im.getpalette(mode, mode)[: colors * len(mode)]
im.palette = ImagePalette.ImagePalette(mode, palette_data)
return im
def copy(self) -> Image:
"""
Copies this image. Use this method if you wish to paste things
into an image, but still retain the original.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
return self._new(self.im.copy())
__copy__ = copy
def crop(self, box: tuple[float, float, float, float] | None = None) -> Image:
"""
Returns a rectangular region from this image. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate. See :ref:`coordinate-system`.
Note: Prior to Pillow 3.4.0, this was a lazy operation.
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if box is None:
return self.copy()
if box[2] < box[0]:
msg = "Coordinate 'right' is less than 'left'"
raise ValueError(msg)
elif box[3] < box[1]:
msg = "Coordinate 'lower' is less than 'upper'"
raise ValueError(msg)
self.load()
return self._new(self._crop(self.im, box))
def _crop(
self, im: core.ImagingCore, box: tuple[float, float, float, float]
) -> core.ImagingCore:
"""
Returns a rectangular region from the core image object im.
This is equivalent to calling im.crop((x0, y0, x1, y1)), but
includes additional sanity checks.
:param im: a core image object
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:returns: A core image object.
"""
x0, y0, x1, y1 = map(int, map(round, box))
absolute_values = (abs(x1 - x0), abs(y1 - y0))
_decompression_bomb_check(absolute_values)
return im.crop((x0, y0, x1, y1))
def draft(
self, mode: str | None, size: tuple[int, int] | None
) -> tuple[str, tuple[int, int, float, float]] | None:
"""
Configures the image file loader so it returns a version of the
image that as closely as possible matches the given mode and
size. For example, you can use this method to convert a color
JPEG to grayscale while loading it.
If any changes are made, returns a tuple with the chosen ``mode`` and
``box`` with coordinates of the original image within the altered one.
Note that this method modifies the :py:class:`~PIL.Image.Image` object
in place. If the image has already been loaded, this method has no
effect.
Note: This method is not implemented for most images. It is
currently implemented only for JPEG and MPO images.
:param mode: The requested mode.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
"""
pass
def filter(self, filter: ImageFilter.Filter | type[ImageFilter.Filter]) -> Image:
"""
Filters this image using the given filter. For a list of
available filters, see the :py:mod:`~PIL.ImageFilter` module.
:param filter: Filter kernel.
:returns: An :py:class:`~PIL.Image.Image` object."""
from . import ImageFilter
self.load()
if callable(filter):
filter = filter()
if not hasattr(filter, "filter"):
msg = "filter argument should be ImageFilter.Filter instance or class"
raise TypeError(msg)
multiband = isinstance(filter, ImageFilter.MultibandFilter)
if self.im.bands == 1 or multiband:
return self._new(filter.filter(self.im))
ims = [
self._new(filter.filter(self.im.getband(c))) for c in range(self.im.bands)
]
return merge(self.mode, ims)
def getbands(self) -> tuple[str, ...]:
"""
Returns a tuple containing the name of each band in this image.
For example, ``getbands`` on an RGB image returns ("R", "G", "B").
:returns: A tuple containing band names.
:rtype: tuple
"""
return ImageMode.getmode(self.mode).bands
def getbbox(self, *, alpha_only: bool = True) -> tuple[int, int, int, int] | None:
"""
Calculates the bounding box of the non-zero regions in the
image.
:param alpha_only: Optional flag, defaulting to ``True``.
If ``True`` and the image has an alpha channel, trim transparent pixels.
Otherwise, trim pixels when all channels are zero.
Keyword-only argument.
:returns: The bounding box is returned as a 4-tuple defining the
left, upper, right, and lower pixel coordinate. See
:ref:`coordinate-system`. If the image is completely empty, this
method returns None.
"""
self.load()
return self.im.getbbox(alpha_only)
def getcolors(
self, maxcolors: int = 256
) -> list[tuple[int, tuple[int, ...]]] | list[tuple[int, float]] | None:
"""
Returns a list of colors used in this image.
The colors will be in the image's mode. For example, an RGB image will
return a tuple of (red, green, blue) color values, and a P image will
return the index of the color in the palette.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
self.load()
if self.mode in ("1", "L", "P"):
h = self.im.histogram()
out: list[tuple[int, float]] = [(h[i], i) for i in range(256) if h[i]]
if len(out) > maxcolors:
return None
return out
return self.im.getcolors(maxcolors)
def getdata(self, band: int | None = None) -> core.ImagingCore:
"""
Returns the contents of this image as a sequence object
containing pixel values. The sequence object is flattened, so
that values for line one follow directly after the values of
line zero, and so on.
Note that the sequence object returned by this method is an
internal PIL data type, which only supports certain sequence
operations. To convert it to an ordinary sequence (e.g. for
printing), use ``list(im.getdata())``.
:param band: What band to return. The default is to return
all bands. To return a single band, pass in the index
value (e.g. 0 to get the "R" band from an "RGB" image).
:returns: A sequence-like object.
"""
self.load()
if band is not None:
return self.im.getband(band)
return self.im # could be abused
def getextrema(self) -> tuple[float, float] | tuple[tuple[int, int], ...]:
"""
Gets the minimum and maximum pixel values for each band in
the image.
:returns: For a single-band image, a 2-tuple containing the
minimum and maximum pixel value. For a multi-band image,
a tuple containing one 2-tuple for each band.
"""
self.load()
if self.im.bands > 1:
return tuple(self.im.getband(i).getextrema() for i in range(self.im.bands))
return self.im.getextrema()
def getxmp(self) -> dict[str, Any]:
"""
Returns a dictionary containing the XMP tags.
Requires defusedxml to be installed.
:returns: XMP tags in a dictionary.
"""
def get_name(tag: str) -> str:
return re.sub("^{[^}]+}", "", tag)
def get_value(element: Element) -> str | dict[str, Any] | None:
value: dict[str, Any] = {get_name(k): v for k, v in element.attrib.items()}
children = list(element)
if children:
for child in children:
name = get_name(child.tag)
child_value = get_value(child)
if name in value:
if not isinstance(value[name], list):
value[name] = [value[name]]
value[name].append(child_value)
else:
value[name] = child_value
elif value:
if element.text:
value["text"] = element.text
else:
return element.text
return value
if ElementTree is None:
warnings.warn("XMP data cannot be read without defusedxml dependency")
return {}
if "xmp" not in self.info:
return {}
root = ElementTree.fromstring(self.info["xmp"].rstrip(b"\x00 "))
return {get_name(root.tag): get_value(root)}
def getexif(self) -> Exif:
"""
Gets EXIF data from the image.
:returns: an :py:class:`~PIL.Image.Exif` object.
"""
if self._exif is None:
self._exif = Exif()
elif self._exif._loaded:
return self._exif
self._exif._loaded = True
exif_info = self.info.get("exif")
if exif_info is None:
if "Raw profile type exif" in self.info:
exif_info = bytes.fromhex(
"".join(self.info["Raw profile type exif"].split("\n")[3:])
)
elif hasattr(self, "tag_v2"):
self._exif.bigtiff = self.tag_v2._bigtiff
self._exif.endian = self.tag_v2._endian
self._exif.load_from_fp(self.fp, self.tag_v2._offset)
if exif_info is not None:
self._exif.load(exif_info)
# XMP tags
if ExifTags.Base.Orientation not in self._exif:
xmp_tags = self.info.get("XML:com.adobe.xmp")
pattern: str | bytes = r'tiff:Orientation(="|>)([0-9])'
if not xmp_tags and (xmp_tags := self.info.get("xmp")):
pattern = rb'tiff:Orientation(="|>)([0-9])'
if xmp_tags:
match = re.search(pattern, xmp_tags)
if match:
self._exif[ExifTags.Base.Orientation] = int(match[2])
return self._exif
def _reload_exif(self) -> None:
if self._exif is None or not self._exif._loaded:
return
self._exif._loaded = False
self.getexif()
def get_child_images(self) -> list[ImageFile.ImageFile]:
from . import ImageFile
deprecate("Image.Image.get_child_images", 13)
return ImageFile.ImageFile.get_child_images(self) # type: ignore[arg-type]
def getim(self) -> CapsuleType:
"""
Returns a capsule that points to the internal image memory.
:returns: A capsule object.
"""
self.load()
return self.im.ptr
def getpalette(self, rawmode: str | None = "RGB") -> list[int] | None:
"""
Returns the image palette as a list.
:param rawmode: The mode in which to return the palette. ``None`` will
return the palette in its current mode.
.. versionadded:: 9.1.0
:returns: A list of color values [r, g, b, ...], or None if the
image has no palette.
"""
self.load()
try:
mode = self.im.getpalettemode()
except ValueError:
return None # no palette
if rawmode is None:
rawmode = mode
return list(self.im.getpalette(mode, rawmode))
@property
def has_transparency_data(self) -> bool:
"""
Determine if an image has transparency data, whether in the form of an
alpha channel, a palette with an alpha channel, or a "transparency" key
in the info dictionary.
Note the image might still appear solid, if all of the values shown
within are opaque.
:returns: A boolean.
"""
if (
self.mode in ("LA", "La", "PA", "RGBA", "RGBa")
or "transparency" in self.info
):
return True
if self.mode == "P":
assert self.palette is not None
return self.palette.mode.endswith("A")
return False
def apply_transparency(self) -> None:
"""
If a P mode image has a "transparency" key in the info dictionary,
remove the key and instead apply the transparency to the palette.
Otherwise, the image is unchanged.
"""
if self.mode != "P" or "transparency" not in self.info:
return
from . import ImagePalette
palette = self.getpalette("RGBA")
assert palette is not None
transparency = self.info["transparency"]
if isinstance(transparency, bytes):
for i, alpha in enumerate(transparency):
palette[i * 4 + 3] = alpha
else:
palette[transparency * 4 + 3] = 0
self.palette = ImagePalette.ImagePalette("RGBA", bytes(palette))
self.palette.dirty = 1
del self.info["transparency"]
def getpixel(
self, xy: tuple[int, int] | list[int]
) -> float | tuple[int, ...] | None:
"""
Returns the pixel value at a given position.
:param xy: The coordinate, given as (x, y). See
:ref:`coordinate-system`.
:returns: The pixel value. If the image is a multi-layer image,
this method returns a tuple.
"""
self.load()
return self.im.getpixel(tuple(xy))
def getprojection(self) -> tuple[list[int], list[int]]:
"""
Get projection to x and y axes
:returns: Two sequences, indicating where there are non-zero
pixels along the X-axis and the Y-axis, respectively.
"""
self.load()
x, y = self.im.getprojection()
return list(x), list(y)
def histogram(
self, mask: Image | None = None, extrema: tuple[float, float] | None = None
) -> list[int]:
"""
Returns a histogram for the image. The histogram is returned as a
list of pixel counts, one for each pixel value in the source
image. Counts are grouped into 256 bins for each band, even if
the image has more than 8 bits per band. If the image has more
than one band, the histograms for all bands are concatenated (for
example, the histogram for an "RGB" image contains 768 values).
A bilevel image (mode "1") is treated as a grayscale ("L") image
by this method.
If a mask is provided, the method returns a histogram for those
parts of the image where the mask image is non-zero. The mask
image must have the same size as the image, and be either a
bi-level image (mode "1") or a grayscale image ("L").
:param mask: An optional mask.
:param extrema: An optional tuple of manually-specified extrema.
:returns: A list containing pixel counts.
"""
self.load()
if mask:
mask.load()
return self.im.histogram((0, 0), mask.im)
if self.mode in ("I", "F"):
return self.im.histogram(
extrema if extrema is not None else self.getextrema()
)
return self.im.histogram()
def entropy(
self, mask: Image | None = None, extrema: tuple[float, float] | None = None
) -> float:
"""
Calculates and returns the entropy for the image.
A bilevel image (mode "1") is treated as a grayscale ("L")
image by this method.
If a mask is provided, the method employs the histogram for
those parts of the image where the mask image is non-zero.
The mask image must have the same size as the image, and be
either a bi-level image (mode "1") or a grayscale image ("L").
:param mask: An optional mask.
:param extrema: An optional tuple of manually-specified extrema.
:returns: A float value representing the image entropy
"""
self.load()
if mask:
mask.load()
return self.im.entropy((0, 0), mask.im)
if self.mode in ("I", "F"):
return self.im.entropy(
extrema if extrema is not None else self.getextrema()
)
return self.im.entropy()
def paste(
self,
im: Image | str | float | tuple[float, ...],
box: Image | tuple[int, int, int, int] | tuple[int, int] | None = None,
mask: Image | None = None,
) -> None:
"""
Pastes another image into this image. The box argument is either
a 2-tuple giving the upper left corner, a 4-tuple defining the
left, upper, right, and lower pixel coordinate, or None (same as
(0, 0)). See :ref:`coordinate-system`. If a 4-tuple is given, the size
of the pasted image must match the size of the region.
If the modes don't match, the pasted image is converted to the mode of
this image (see the :py:meth:`~PIL.Image.Image.convert` method for
details).
Instead of an image, the source can be an integer or tuple
containing pixel values. The method then fills the region
with the given color. When creating RGB images, you can
also use color strings as supported by the ImageColor module. See
:ref:`colors` for more information.
If a mask is given, this method updates only the regions
indicated by the mask. You can use either "1", "L", "LA", "RGBA"
or "RGBa" images (if present, the alpha band is used as mask).
Where the mask is 255, the given image is copied as is. Where
the mask is 0, the current value is preserved. Intermediate
values will mix the two images together, including their alpha
channels if they have them.
See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
combine images with respect to their alpha channels.
:param im: Source image or pixel value (integer, float or tuple).
:param box: An optional 4-tuple giving the region to paste into.
If a 2-tuple is used instead, it's treated as the upper left
corner. If omitted or None, the source is pasted into the
upper left corner.
If an image is given as the second argument and there is no
third, the box defaults to (0, 0), and the second argument
is interpreted as a mask image.
:param mask: An optional mask image.
"""
if isinstance(box, Image):
if mask is not None:
msg = "If using second argument as mask, third argument must be None"
raise ValueError(msg)
# abbreviated paste(im, mask) syntax
mask = box
box = None
if box is None:
box = (0, 0)
if len(box) == 2:
# upper left corner given; get size from image or mask
if isinstance(im, Image):
size = im.size
elif isinstance(mask, Image):
size = mask.size
else:
# FIXME: use self.size here?
msg = "cannot determine region size; use 4-item box"
raise ValueError(msg)
box += (box[0] + size[0], box[1] + size[1])
source: core.ImagingCore | str | float | tuple[float, ...]
if isinstance(im, str):
from . import ImageColor
source = ImageColor.getcolor(im, self.mode)
elif isinstance(im, Image):
im.load()
if self.mode != im.mode:
if self.mode != "RGB" or im.mode not in ("LA", "RGBA", "RGBa"):
# should use an adapter for this!
im = im.convert(self.mode)
source = im.im
else:
source = im
self._ensure_mutable()
if mask:
mask.load()
self.im.paste(source, box, mask.im)
else:
self.im.paste(source, box)
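# Hedged usage sketch (assumes RGBA images `base` and `overlay`): passing the
# overlay as its own mask uses its alpha band for blending, per the docstring:
#   base.paste(overlay, (10, 10), overlay)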
def alpha_composite(
self, im: Image, dest: Sequence[int] = (0, 0), source: Sequence[int] = (0, 0)
) -> None:
"""'In-place' analog of Image.alpha_composite. Composites an image
onto this image.
:param im: image to composite over this one
:param dest: An optional 2-tuple (left, top) specifying the upper
left corner in this (destination) image.
:param source: An optional 2-tuple (left, top) for the upper left
corner in the overlay source image, or a 4-tuple (left, top, right,
bottom) for the bounds of the source rectangle.
Performance Note: Not currently implemented in-place in the core layer.
"""
if not isinstance(source, (list, tuple)):
msg = "Source must be a list or tuple"
raise ValueError(msg)
if not isinstance(dest, (list, tuple)):
msg = "Destination must be a list or tuple"
raise ValueError(msg)
if len(source) == 4:
overlay_crop_box = tuple(source)
elif len(source) == 2:
overlay_crop_box = tuple(source) + im.size
else:
msg = "Source must be a sequence of length 2 or 4"
raise ValueError(msg)
if not len(dest) == 2:
msg = "Destination must be a sequence of length 2"
raise ValueError(msg)
if min(source) < 0:
msg = "Source must be non-negative"
raise ValueError(msg)
# over image, crop if it's not the whole image.
if overlay_crop_box == (0, 0) + im.size:
overlay = im
else:
overlay = im.crop(overlay_crop_box)
# target for the paste
box = tuple(dest) + (dest[0] + overlay.width, dest[1] + overlay.height)
# destination image. don't copy if we're using the whole image.
if box == (0, 0) + self.size:
background = self
else:
background = self.crop(box)
result = alpha_composite(background, overlay)
self.paste(result, box)
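# Illustrative sketch (assumed names `canvas` and `badge`, both RGBA):
# composite a small image onto a larger one in place. Unlike paste() with
# a mask, this performs true "over" compositing of both alpha channels:
#
#     canvas.alpha_composite(badge, dest=(5, 5))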
def point(
self,
lut: (
Sequence[float]
| NumpyArray
| Callable[[int], float]
| Callable[[ImagePointTransform], ImagePointTransform | float]
| ImagePointHandler
),
mode: str | None = None,
) -> Image:
"""
Maps this image through a lookup table or function.
:param lut: A lookup table, containing 256 (or 65536 if
self.mode=="I" and mode == "L") values per band in the
image. A function can be used instead; it should take a
single argument. The function is called once for each
possible pixel value, and the resulting table is applied to
all bands of the image.
It may also be an :py:class:`~PIL.Image.ImagePointHandler`
object::
class Example(Image.ImagePointHandler):
def point(self, im: Image) -> Image:
# Return result
:param mode: Output mode (default is same as input). This can only be used if
the source image has mode "L" or "P", and the output has mode "1" or the
source image mode is "I" and the output mode is "L".
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if isinstance(lut, ImagePointHandler):
return lut.point(self)
if callable(lut):
# if it isn't a list, it should be a function
if self.mode in ("I", "I;16", "F"):
# check if the function can be used with point_transform
# UNDONE wiredfool -- I think this prevents us from ever doing
# a gamma function point transform on > 8bit images.
scale, offset = _getscaleoffset(lut) # type: ignore[arg-type]
return self._new(self.im.point_transform(scale, offset))
# for other modes, convert the function to a table
flatLut = [lut(i) for i in range(256)] * self.im.bands # type: ignore[arg-type]
else:
flatLut = lut
if self.mode == "F":
# FIXME: _imaging returns a confusing error message for this case
msg = "point operation not supported for this mode"
raise ValueError(msg)
if mode != "F":
flatLut = [round(i) for i in flatLut]
return self._new(self.im.point(flatLut, mode))
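# Illustrative sketch (assumed "L"-mode image `im`): invert pixel values
# with a callable lookup. The callable is evaluated once per possible
# value (0..255) to build the table, so it must be a pure function of the
# pixel value:
#
#     inverted = im.point(lambda v: 255 - v)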
def putalpha(self, alpha: Image | int) -> None:
"""
Adds or replaces the alpha layer in this image. If the image
does not have an alpha layer, it's converted to "LA" or "RGBA".
The new layer must be either "L" or "1".
:param alpha: The new alpha layer. This can either be an "L" or "1"
image having the same size as this image, or an integer.
"""
self._ensure_mutable()
if self.mode not in ("LA", "PA", "RGBA"):
# attempt to promote self to a matching alpha mode
try:
mode = getmodebase(self.mode) + "A"
try:
self.im.setmode(mode)
except (AttributeError, ValueError) as e:
# do things the hard way
im = self.im.convert(mode)
if im.mode not in ("LA", "PA", "RGBA"):
msg = "alpha channel could not be added"
raise ValueError(msg) from e # sanity check
self.im = im
self._mode = self.im.mode
except KeyError as e:
msg = "illegal image mode"
raise ValueError(msg) from e
if self.mode in ("LA", "PA"):
band = 1
else:
band = 3
if isinstance(alpha, Image):
# alpha layer
if alpha.mode not in ("1", "L"):
msg = "illegal image mode"
raise ValueError(msg)
alpha.load()
if alpha.mode == "1":
alpha = alpha.convert("L")
else:
# constant alpha
try:
self.im.fillband(band, alpha)
except (AttributeError, ValueError):
# do things the hard way
alpha = new("L", self.size, alpha)
else:
return
self.im.putband(alpha.im, band)
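# Illustrative sketch (assumed "RGB" image `im`): attach a uniform
# half-transparent alpha band, promoting the image to "RGBA" in place:
#
#     im.putalpha(128)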
def putdata(
self,
data: Sequence[float] | Sequence[Sequence[int]] | core.ImagingCore | NumpyArray,
scale: float = 1.0,
offset: float = 0.0,
) -> None:
"""
Copies pixel data from a flattened sequence object into the image. The
values should start at the upper left corner (0, 0), continue to the
end of the line, followed directly by the first value of the second
line, and so on. Data will be read until either the image or the
sequence ends. The scale and offset values are used to adjust the
sequence values: **pixel = value*scale + offset**.
:param data: A flattened sequence object. See :ref:`colors` for more
information about values.
:param scale: An optional scale value. The default is 1.0.
:param offset: An optional offset value. The default is 0.0.
"""
self._ensure_mutable()
self.im.putdata(data, scale, offset)
def putpalette(
self,
data: ImagePalette.ImagePalette | bytes | Sequence[int],
rawmode: str = "RGB",
) -> None:
"""
Attaches a palette to this image. The image must be a "P", "PA", "L"
or "LA" image.
The palette sequence must contain at most 256 colors, made up of one
integer value for each channel in the raw mode.
For example, if the raw mode is "RGB", then it can contain at most 768
values, made up of red, green and blue values for the corresponding pixel
index in the 256 colors.
If the raw mode is "RGBA", then it can contain at most 1024 values,
containing red, green, blue and alpha values.
Alternatively, an 8-bit string may be used instead of an integer sequence.
:param data: A palette sequence (either a list or a string).
:param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a mode
that can be transformed to "RGB" or "RGBA" (e.g. "R", "BGR;15", "RGBA;L").
"""
from . import ImagePalette
if self.mode not in ("L", "LA", "P", "PA"):
msg = "illegal image mode"
raise ValueError(msg)
if isinstance(data, ImagePalette.ImagePalette):
if data.rawmode is not None:
palette = ImagePalette.raw(data.rawmode, data.palette)
else:
palette = ImagePalette.ImagePalette(palette=data.palette)
palette.dirty = 1
else:
if not isinstance(data, bytes):
data = bytes(data)
palette = ImagePalette.raw(rawmode, data)
self._mode = "PA" if "A" in self.mode else "P"
self.palette = palette
self.palette.mode = "RGBA" if "A" in rawmode else "RGB"
self.load() # install new palette
def putpixel(
self, xy: tuple[int, int], value: float | tuple[int, ...] | list[int]
) -> None:
"""
Modifies the pixel at the given position. The color is given as
a single numerical value for single-band images, and a tuple for
multi-band images. In addition to this, RGB and RGBA tuples are
accepted for P and PA images. See :ref:`colors` for more information.
Note that this method is relatively slow. For more extensive changes,
use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
module instead.
See:
* :py:meth:`~PIL.Image.Image.paste`
* :py:meth:`~PIL.Image.Image.putdata`
* :py:mod:`~PIL.ImageDraw`
:param xy: The pixel coordinate, given as (x, y). See
:ref:`coordinate-system`.
:param value: The pixel value.
"""
self._ensure_mutable()
if (
self.mode in ("P", "PA")
and isinstance(value, (list, tuple))
and len(value) in [3, 4]
):
# RGB or RGBA value for a P or PA image
if self.mode == "PA":
alpha = value[3] if len(value) == 4 else 255
value = value[:3]
assert self.palette is not None
palette_index = self.palette.getcolor(tuple(value), self)
value = (palette_index, alpha) if self.mode == "PA" else palette_index
return self.im.putpixel(xy, value)
def remap_palette(
self, dest_map: list[int], source_palette: bytes | bytearray | None = None
) -> Image:
"""
Rewrites the image to reorder the palette.
:param dest_map: A list of indexes into the original palette.
e.g. ``[1,0]`` would swap a two item palette, and ``list(range(256))``
is the identity transform.
:param source_palette: Bytes or None.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
from . import ImagePalette
if self.mode not in ("L", "P"):
msg = "illegal image mode"
raise ValueError(msg)
bands = 3
palette_mode = "RGB"
if source_palette is None:
if self.mode == "P":
self.load()
palette_mode = self.im.getpalettemode()
if palette_mode == "RGBA":
bands = 4
source_palette = self.im.getpalette(palette_mode, palette_mode)
else: # L-mode
source_palette = bytearray(i // 3 for i in range(768))
elif len(source_palette) > 768:
bands = 4
palette_mode = "RGBA"
palette_bytes = b""
new_positions = [0] * 256
# pick only the used colors from the palette
for i, oldPosition in enumerate(dest_map):
palette_bytes += source_palette[
oldPosition * bands : oldPosition * bands + bands
]
new_positions[oldPosition] = i
# replace the palette color id of all pixels with the new id
# Palette images are [0..255], mapped through a 1 or 3
# byte/color map. We need to remap the whole image
# from palette 1 to palette 2. New_positions is
# an array of indexes into palette 1. Palette 2 is
# palette 1 with any holes removed.
# We're going to leverage the convert mechanism to use the
# C code to remap the image from palette 1 to palette 2,
# by forcing the source image into 'L' mode and adding a
# mapping 'L' mode palette, then converting back to 'L'
# sans palette thus converting the image bytes, then
# assigning the optimized RGB palette.
# perf reference, 9500x4000 gif, w/~135 colors
# 14 sec prepatch, 1 sec postpatch with optimization forced.
mapping_palette = bytearray(new_positions)
m_im = self.copy()
m_im._mode = "P"
m_im.palette = ImagePalette.ImagePalette(
palette_mode, palette=mapping_palette * bands
)
# possibly set palette dirty, then
# m_im.putpalette(mapping_palette, 'L') # converts to 'P'
# or just force it.
# UNDONE -- this is part of the general issue with palettes
m_im.im.putpalette(palette_mode, palette_mode + ";L", m_im.palette.tobytes())
m_im = m_im.convert("L")
m_im.putpalette(palette_bytes, palette_mode)
m_im.palette = ImagePalette.ImagePalette(palette_mode, palette=palette_bytes)
if "transparency" in self.info:
try:
m_im.info["transparency"] = dest_map.index(self.info["transparency"])
except ValueError:
if "transparency" in m_im.info:
del m_im.info["transparency"]
return m_im
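# Illustrative sketch (assumed "P"-mode image `im` with a full 256-entry
# palette): swap the first two palette entries and keep the rest:
#
#     im = im.remap_palette([1, 0] + list(range(2, 256)))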
def _get_safe_box(
self,
size: tuple[int, int],
resample: Resampling,
box: tuple[float, float, float, float],
) -> tuple[int, int, int, int]:
"""Expands the box so it includes adjacent pixels
that may be used by resampling with the given resampling filter.
"""
filter_support = _filters_support[resample] - 0.5
scale_x = (box[2] - box[0]) / size[0]
scale_y = (box[3] - box[1]) / size[1]
support_x = filter_support * scale_x
support_y = filter_support * scale_y
return (
max(0, int(box[0] - support_x)),
max(0, int(box[1] - support_y)),
min(self.size[0], math.ceil(box[2] + support_x)),
min(self.size[1], math.ceil(box[3] + support_y)),
)
def resize(
self,
size: tuple[int, int] | list[int] | NumpyArray,
resample: int | None = None,
box: tuple[float, float, float, float] | None = None,
reducing_gap: float | None = None,
) -> Image:
"""
Returns a resized copy of this image.
:param size: The requested size in pixels, as a tuple or array:
(width, height).
:param resample: An optional resampling filter. This can be
one of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
:py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
:py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
If the image has mode "1" or "P", it is always set to
:py:data:`Resampling.NEAREST`. Otherwise, the default filter is
:py:data:`Resampling.BICUBIC`. See: :ref:`concept-filters`.
:param box: An optional 4-tuple of floats providing
the source image region to be scaled.
The values must be within (0, 0, width, height) rectangle.
If omitted or None, the entire source is used.
:param reducing_gap: Apply optimization by resizing the image
in two steps. First, reducing the image by integer times
using :py:meth:`~PIL.Image.Image.reduce`.
Second, resizing using regular resampling. The last step
changes the size by no less than a factor of ``reducing_gap``.
``reducing_gap`` may be None (no first step is performed)
or should be greater than 1.0. The bigger ``reducing_gap``,
the closer the result is to fair resampling.
The smaller ``reducing_gap``, the faster the resizing.
With ``reducing_gap`` greater or equal to 3.0, the result is
indistinguishable from fair resampling in most cases.
The default value is None (no optimization).
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if resample is None:
resample = Resampling.BICUBIC
elif resample not in (
Resampling.NEAREST,
Resampling.BILINEAR,
Resampling.BICUBIC,
Resampling.LANCZOS,
Resampling.BOX,
Resampling.HAMMING,
):
msg = f"Unknown resampling filter ({resample})."
filters = [
f"{filter[1]} ({filter[0]})"
for filter in (
(Resampling.NEAREST, "Image.Resampling.NEAREST"),
(Resampling.LANCZOS, "Image.Resampling.LANCZOS"),
(Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
(Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
(Resampling.BOX, "Image.Resampling.BOX"),
(Resampling.HAMMING, "Image.Resampling.HAMMING"),
)
]
msg += f" Use {', '.join(filters[:-1])} or {filters[-1]}"
raise ValueError(msg)
if reducing_gap is not None and reducing_gap < 1.0:
msg = "reducing_gap must be 1.0 or greater"
raise ValueError(msg)
if box is None:
box = (0, 0) + self.size
size = tuple(size)
if self.size == size and box == (0, 0) + self.size:
return self.copy()
if self.mode in ("1", "P"):
resample = Resampling.NEAREST
if self.mode in ["LA", "RGBA"] and resample != Resampling.NEAREST:
im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
im = im.resize(size, resample, box)
return im.convert(self.mode)
self.load()
if reducing_gap is not None and resample != Resampling.NEAREST:
factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1
factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1
if factor_x > 1 or factor_y > 1:
reduce_box = self._get_safe_box(size, cast(Resampling, resample), box)
factor = (factor_x, factor_y)
self = (
self.reduce(factor, box=reduce_box)
if callable(self.reduce)
else Image.reduce(self, factor, box=reduce_box)
)
box = (
(box[0] - reduce_box[0]) / factor_x,
(box[1] - reduce_box[1]) / factor_y,
(box[2] - reduce_box[0]) / factor_x,
(box[3] - reduce_box[1]) / factor_y,
)
return self._new(self.im.resize(size, resample, box))
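# Illustrative sketch (assumed image `im`): downscale with the two-step
# reduce-then-resample optimization; larger reducing_gap values trade
# speed for fidelity:
#
#     small = im.resize((200, 150), Resampling.LANCZOS, reducing_gap=3.0)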
def reduce(
self,
factor: int | tuple[int, int],
box: tuple[int, int, int, int] | None = None,
) -> Image:
"""
Returns a copy of the image reduced ``factor`` times.
If the size of the image is not divisible by ``factor``,
the resulting size will be rounded up.
:param factor: A greater than 0 integer or tuple of two integers
for width and height separately.
:param box: An optional 4-tuple of ints providing
the source image region to be reduced.
The values must be within ``(0, 0, width, height)`` rectangle.
If omitted or ``None``, the entire source is used.
"""
if not isinstance(factor, (list, tuple)):
factor = (factor, factor)
if box is None:
box = (0, 0) + self.size
if factor == (1, 1) and box == (0, 0) + self.size:
return self.copy()
if self.mode in ["LA", "RGBA"]:
im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
im = im.reduce(factor, box)
return im.convert(self.mode)
self.load()
return self._new(self.im.reduce(factor, box))
def rotate(
self,
angle: float,
resample: Resampling = Resampling.NEAREST,
expand: int | bool = False,
center: tuple[float, float] | None = None,
translate: tuple[int, int] | None = None,
fillcolor: float | tuple[float, ...] | str | None = None,
) -> Image:
"""
Returns a rotated copy of this image. This method returns a
copy of this image, rotated the given number of degrees
counterclockwise around its center.
:param angle: In degrees counterclockwise.
:param resample: An optional resampling filter. This can be
one of :py:data:`Resampling.NEAREST` (use nearest neighbour),
:py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`Resampling.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image has
mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
See :ref:`concept-filters`.
:param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the
input image. Note that the expand flag assumes rotation around
the center and no translation.
:param center: Optional center of rotation (a 2-tuple). Origin is
the upper left corner. Default is the center of the image.
:param translate: An optional post-rotate translation (a 2-tuple).
:param fillcolor: An optional color for area outside the rotated image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
angle = angle % 360.0
# Fast paths regardless of filter, as long as we're not
# translating or changing the center.
if not (center or translate):
if angle == 0:
return self.copy()
if angle == 180:
return self.transpose(Transpose.ROTATE_180)
if angle in (90, 270) and (expand or self.width == self.height):
return self.transpose(
Transpose.ROTATE_90 if angle == 90 else Transpose.ROTATE_270
)
# Calculate the affine matrix. Note that this is the reverse
# transformation (from destination image to source) because we
# want to interpolate the (discrete) destination pixel from
# the local area around the (floating) source pixel.
# The matrix we actually want (note that it operates from the right):
# (1, 0, tx) (1, 0, cx) ( cos a, sin a, 0) (1, 0, -cx)
# (0, 1, ty) * (0, 1, cy) * (-sin a, cos a, 0) * (0, 1, -cy)
# (0, 0, 1) (0, 0, 1) ( 0, 0, 1) (0, 0, 1)
# The reverse matrix is thus:
# (1, 0, cx) ( cos -a, sin -a, 0) (1, 0, -cx) (1, 0, -tx)
# (0, 1, cy) * (-sin -a, cos -a, 0) * (0, 1, -cy) * (0, 1, -ty)
# (0, 0, 1) ( 0, 0, 1) (0, 0, 1) (0, 0, 1)
# In any case, the final translation may be updated at the end to
# compensate for the expand flag.
w, h = self.size
if translate is None:
post_trans = (0, 0)
else:
post_trans = translate
if center is None:
center = (w / 2, h / 2)
angle = -math.radians(angle)
matrix = [
round(math.cos(angle), 15),
round(math.sin(angle), 15),
0.0,
round(-math.sin(angle), 15),
round(math.cos(angle), 15),
0.0,
]
def transform(x: float, y: float, matrix: list[float]) -> tuple[float, float]:
(a, b, c, d, e, f) = matrix
return a * x + b * y + c, d * x + e * y + f
matrix[2], matrix[5] = transform(
-center[0] - post_trans[0], -center[1] - post_trans[1], matrix
)
matrix[2] += center[0]
matrix[5] += center[1]
if expand:
# calculate output size
xx = []
yy = []
for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
transformed_x, transformed_y = transform(x, y, matrix)
xx.append(transformed_x)
yy.append(transformed_y)
nw = math.ceil(max(xx)) - math.floor(min(xx))
nh = math.ceil(max(yy)) - math.floor(min(yy))
# We multiply a translation matrix from the right. Because of its
# special form, this is the same as taking the image of the
# translation vector as new translation vector.
matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix)
w, h = nw, nh
return self.transform(
(w, h), Transform.AFFINE, matrix, resample, fillcolor=fillcolor
)
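# Illustrative sketch (assumed image `im`): rotate 45 degrees, growing
# the canvas so no corners are clipped and filling the exposed area:
#
#     rotated = im.rotate(45, expand=True, fillcolor="white")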
def save(
self, fp: StrOrBytesPath | IO[bytes], format: str | None = None, **params: Any
) -> None:
"""
Saves this image under the given filename. If no format is
specified, the format to use is determined from the filename
extension, if possible.
Keyword options can be used to provide additional instructions
to the writer. If a writer doesn't recognise an option, it is
silently ignored. The available options are described in the
:doc:`image format documentation
<../handbook/image-file-formats>` for each writer.
You can use a file object instead of a filename. In this case,
you must always specify the format. The file object must
implement the ``seek``, ``tell``, and ``write``
methods, and be opened in binary mode.
:param fp: A filename (string), os.PathLike object or file object.
:param format: Optional format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
:param params: Extra parameters to the image writer. These can also be
set on the image itself through ``encoderinfo``. This is useful when
saving multiple images::
# Saving XMP data to a single image
from PIL import Image
red = Image.new("RGB", (1, 1), "#f00")
red.save("out.mpo", xmp=b"test")
# Saving XMP data to the second frame of an image
from PIL import Image
black = Image.new("RGB", (1, 1))
red = Image.new("RGB", (1, 1), "#f00")
red.encoderinfo = {"xmp": b"test"}
black.save("out.mpo", save_all=True, append_images=[red])
:returns: None
:exception ValueError: If the output format could not be determined
from the file name. Use the format option to solve this.
:exception OSError: If the file could not be written. The file
may have been created, and may contain partial data.
"""
filename: str | bytes = ""
open_fp = False
if is_path(fp):
filename = os.fspath(fp)
open_fp = True
elif fp == sys.stdout:
try:
fp = sys.stdout.buffer
except AttributeError:
pass
if not filename and hasattr(fp, "name") and is_path(fp.name):
# only set the name for metadata purposes
filename = os.fspath(fp.name)
preinit()
filename_ext = os.path.splitext(filename)[1].lower()
ext = filename_ext.decode() if isinstance(filename_ext, bytes) else filename_ext
if not format:
if ext not in EXTENSION:
init()
try:
format = EXTENSION[ext]
except KeyError as e:
msg = f"unknown file extension: {ext}"
raise ValueError(msg) from e
from . import ImageFile
# may mutate self!
if isinstance(self, ImageFile.ImageFile) and os.path.abspath(
filename
) == os.path.abspath(self.filename):
self._ensure_mutable()
else:
self.load()
save_all = params.pop("save_all", None)
self._default_encoderinfo = params
encoderinfo = getattr(self, "encoderinfo", {})
self._attach_default_encoderinfo(self)
self.encoderconfig: tuple[Any, ...] = ()
if format.upper() not in SAVE:
init()
if save_all or (
save_all is None
and params.get("append_images")
and format.upper() in SAVE_ALL
):
save_handler = SAVE_ALL[format.upper()]
else:
save_handler = SAVE[format.upper()]
created = False
if open_fp:
created = not os.path.exists(filename)
if params.get("append", False):
# Open also for reading ("+"), because TIFF save_all
# writer needs to go back and edit the written data.
fp = builtins.open(filename, "r+b")
else:
fp = builtins.open(filename, "w+b")
else:
fp = cast(IO[bytes], fp)
try:
save_handler(self, fp, filename)
except Exception:
if open_fp:
fp.close()
if created:
try:
os.remove(filename)
except PermissionError:
pass
raise
finally:
self.encoderinfo = encoderinfo
if open_fp:
fp.close()
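# Illustrative sketch (assumed image `im`): saving to a file object
# requires an explicit format, since there is no extension to infer from:
#
#     import io
#     buf = io.BytesIO()
#     im.save(buf, format="PNG")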
def _attach_default_encoderinfo(self, im: Image) -> dict[str, Any]:
encoderinfo = getattr(self, "encoderinfo", {})
self.encoderinfo = {**im._default_encoderinfo, **encoderinfo}
return encoderinfo
def seek(self, frame: int) -> None:
"""
Seeks to the given frame in this sequence file. If you seek
beyond the end of the sequence, the method raises an
``EOFError`` exception. When a sequence file is opened, the
library automatically seeks to frame 0.
See :py:meth:`~PIL.Image.Image.tell`.
If defined, :attr:`~PIL.Image.Image.n_frames` refers to the
number of available frames.
:param frame: Frame number, starting at 0.
:exception EOFError: If the call attempts to seek beyond the end
of the sequence.
"""
# overridden by file handlers
if frame != 0:
msg = "no more images in file"
raise EOFError(msg)
def show(self, title: str | None = None) -> None:
"""
Displays this image. This method is mainly intended for debugging purposes.
This method calls :py:func:`PIL.ImageShow.show` internally. You can use
:py:func:`PIL.ImageShow.register` to override its default behaviour.
The image is first saved to a temporary file. By default, it will be in
PNG format.
On Unix, the image is then opened using the **xdg-open**, **display**,
**gm**, **eog** or **xv** utility, depending on which one can be found.
On macOS, the image is opened with the native Preview application.
On Windows, the image is opened with the standard PNG display utility.
:param title: Optional title to use for the image window, where possible.
"""
from . import ImageShow
ImageShow.show(self, title)
def split(self) -> tuple[Image, ...]:
"""
Split this image into individual bands. This method returns a
tuple of individual image bands from an image. For example,
splitting an "RGB" image creates three new images each
containing a copy of one of the original bands (red, green,
blue).
If you need only one band, the :py:meth:`~PIL.Image.Image.getchannel`
method can be more convenient and faster.
:returns: A tuple containing bands.
"""
self.load()
if self.im.bands == 1:
return (self.copy(),)
return tuple(map(self._new, self.im.split()))
def getchannel(self, channel: int | str) -> Image:
"""
Returns an image containing a single channel of the source image.
:param channel: What channel to return. Could be index
(0 for "R" channel of "RGB") or channel name
("A" for alpha channel of "RGBA").
:returns: An image in "L" mode.
.. versionadded:: 4.3.0
"""
self.load()
if isinstance(channel, str):
try:
channel = self.getbands().index(channel)
except ValueError as e:
msg = f'The image has no channel "{channel}"'
raise ValueError(msg) from e
return self._new(self.im.getband(channel))
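# Illustrative sketch (assumed "RGBA" image `im`): extract the alpha band
# by name instead of splitting and indexing all bands:
#
#     alpha = im.getchannel("A")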
def tell(self) -> int:
"""
Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.
If defined, :attr:`~PIL.Image.Image.n_frames` refers to the
number of available frames.
:returns: Frame number, starting with 0.
"""
return 0
def thumbnail(
self,
size: tuple[float, float],
resample: Resampling = Resampling.BICUBIC,
reducing_gap: float | None = 2.0,
) -> None:
"""
Make this image into a thumbnail. This method modifies the
image to contain a thumbnail version of itself, no larger than
the given size. This method calculates an appropriate thumbnail
size to preserve the aspect of the image, calls the
:py:meth:`~PIL.Image.Image.draft` method to configure the file reader
(where applicable), and finally resizes the image.
Note that this function modifies the :py:class:`~PIL.Image.Image`
object in place. If you need to use the full resolution image as well,
apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
image.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param resample: Optional resampling filter. This can be one
of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
:py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
:py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
If omitted, it defaults to :py:data:`Resampling.BICUBIC`.
(was :py:data:`Resampling.NEAREST` prior to version 2.5.0).
See: :ref:`concept-filters`.
:param reducing_gap: Apply optimization by resizing the image
in two steps. First, reducing the image by integer times
using :py:meth:`~PIL.Image.Image.reduce` or
:py:meth:`~PIL.Image.Image.draft` for JPEG images.
Second, resizing using regular resampling. The last step
changes the size by no less than a factor of ``reducing_gap``.
``reducing_gap`` may be None (no first step is performed)
or should be greater than 1.0. The bigger ``reducing_gap``,
the closer the result is to fair resampling.
The smaller ``reducing_gap``, the faster the resizing.
With ``reducing_gap`` greater or equal to 3.0, the result is
indistinguishable from fair resampling in most cases.
The default value is 2.0 (very close to fair resampling
while still being faster in many cases).
:returns: None
"""
provided_size = tuple(map(math.floor, size))
def preserve_aspect_ratio() -> tuple[int, int] | None:
def round_aspect(number: float, key: Callable[[int], float]) -> int:
return max(min(math.floor(number), math.ceil(number), key=key), 1)
x, y = provided_size
if x >= self.width and y >= self.height:
return None
aspect = self.width / self.height
if x / y >= aspect:
x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
else:
y = round_aspect(
x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n)
)
return x, y
preserved_size = preserve_aspect_ratio()
if preserved_size is None:
return
final_size = preserved_size
box = None
if reducing_gap is not None:
res = self.draft(
None, (int(size[0] * reducing_gap), int(size[1] * reducing_gap))
)
if res is not None:
box = res[1]
if self.size != final_size:
im = self.resize(final_size, resample, box=box, reducing_gap=reducing_gap)
self.im = im.im
self._size = final_size
self._mode = self.im.mode
self.readonly = 0
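# Illustrative sketch (assumed image `im`): shrink in place to fit within
# a 128x128 box while preserving the aspect ratio:
#
#     im.thumbnail((128, 128))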
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
def transform(
self,
size: tuple[int, int],
method: Transform | ImageTransformHandler | SupportsGetData,
data: Sequence[Any] | None = None,
resample: int = Resampling.NEAREST,
fill: int = 1,
fillcolor: float | tuple[float, ...] | str | None = None,
) -> Image:
"""
Transforms this image. This method creates a new image with the
given size, and the same mode as the original, and copies data
to the new image using the given transform.
:param size: The output size in pixels, as a 2-tuple:
(width, height).
:param method: The transformation method. This is one of
:py:data:`Transform.EXTENT` (cut out a rectangular subregion),
:py:data:`Transform.AFFINE` (affine transform),
:py:data:`Transform.PERSPECTIVE` (perspective transform),
:py:data:`Transform.QUAD` (map a quadrilateral to a rectangle), or
:py:data:`Transform.MESH` (map a number of source quadrilaterals
in one operation).
It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
object::
class Example(Image.ImageTransformHandler):
def transform(self, size, data, resample, fill=1):
# Return result
Implementations of :py:class:`~PIL.Image.ImageTransformHandler`
for some of the :py:class:`Transform` methods are provided
in :py:mod:`~PIL.ImageTransform`.
It may also be an object with a ``method.getdata`` method
that returns a tuple supplying new ``method`` and ``data`` values::
class Example:
def getdata(self):
method = Image.Transform.EXTENT
data = (0, 0, 100, 100)
return method, data
:param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of
:py:data:`Resampling.NEAREST` (use nearest neighbour),
:py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
environment), or :py:data:`Resampling.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
See: :ref:`concept-filters`.
:param fill: If ``method`` is an
:py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
the arguments passed to it. Otherwise, it is unused.
:param fillcolor: Optional fill color for the area outside the
transform in the output image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if self.mode in ("LA", "RGBA") and resample != Resampling.NEAREST:
return (
self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
.transform(size, method, data, resample, fill, fillcolor)
.convert(self.mode)
)
if isinstance(method, ImageTransformHandler):
return method.transform(size, self, resample=resample, fill=fill)
if hasattr(method, "getdata"):
# compatibility w. old-style transform objects
method, data = method.getdata()
if data is None:
msg = "missing method data"
raise ValueError(msg)
im = new(self.mode, size, fillcolor)
if self.mode == "P" and self.palette:
im.palette = self.palette.copy()
im.info = self.info.copy()
if method == Transform.MESH:
# list of quads
for box, quad in data:
im.__transformer(
box, self, Transform.QUAD, quad, resample, fillcolor is None
)
else:
im.__transformer(
(0, 0) + size, self, method, data, resample, fillcolor is None
)
return im
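# Illustrative sketch (assumed image `im`): crop the 100x100 region at
# the origin and scale it into a 50x50 output with the EXTENT transform:
#
#     out = im.transform((50, 50), Transform.EXTENT, (0, 0, 100, 100))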
def __transformer(
self,
box: tuple[int, int, int, int],
image: Image,
method: Transform,
data: Sequence[float],
resample: int = Resampling.NEAREST,
fill: bool = True,
) -> None:
w = box[2] - box[0]
h = box[3] - box[1]
if method == Transform.AFFINE:
data = data[:6]
elif method == Transform.EXTENT:
# convert extent to an affine transform
x0, y0, x1, y1 = data
xs = (x1 - x0) / w
ys = (y1 - y0) / h
method = Transform.AFFINE
data = (xs, 0, x0, 0, ys, y0)
elif method == Transform.PERSPECTIVE:
data = data[:8]
elif method == Transform.QUAD:
# quadrilateral warp. data specifies the four corners
# given as NW, SW, SE, and NE.
nw = data[:2]
sw = data[2:4]
se = data[4:6]
ne = data[6:8]
x0, y0 = nw
As = 1.0 / w
At = 1.0 / h
data = (
x0,
(ne[0] - x0) * As,
(sw[0] - x0) * At,
(se[0] - sw[0] - ne[0] + x0) * As * At,
y0,
(ne[1] - y0) * As,
(sw[1] - y0) * At,
(se[1] - sw[1] - ne[1] + y0) * As * At,
)
else:
msg = "unknown transformation method"
raise ValueError(msg)
if resample not in (
Resampling.NEAREST,
Resampling.BILINEAR,
Resampling.BICUBIC,
):
if resample in (Resampling.BOX, Resampling.HAMMING, Resampling.LANCZOS):
unusable: dict[int, str] = {
Resampling.BOX: "Image.Resampling.BOX",
Resampling.HAMMING: "Image.Resampling.HAMMING",
Resampling.LANCZOS: "Image.Resampling.LANCZOS",
}
msg = unusable[resample] + f" ({resample}) cannot be used."
else:
msg = f"Unknown resampling filter ({resample})."
filters = [
f"{filter[1]} ({filter[0]})"
for filter in (
(Resampling.NEAREST, "Image.Resampling.NEAREST"),
(Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
(Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
)
]
msg += f" Use {', '.join(filters[:-1])} or {filters[-1]}"
raise ValueError(msg)
image.load()
self.load()
if image.mode in ("1", "P"):
resample = Resampling.NEAREST
self.im.transform(box, image.im, method, data, resample, fill)
def transpose(self, method: Transpose) -> Image:
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:data:`Transpose.FLIP_LEFT_RIGHT`,
:py:data:`Transpose.FLIP_TOP_BOTTOM`, :py:data:`Transpose.ROTATE_90`,
:py:data:`Transpose.ROTATE_180`, :py:data:`Transpose.ROTATE_270`,
:py:data:`Transpose.TRANSPOSE` or :py:data:`Transpose.TRANSVERSE`.
:returns: Returns a flipped or rotated copy of this image.
"""
self.load()
return self._new(self.im.transpose(method))
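# Illustrative sketch (assumed image `im`): mirror horizontally:
#
#     mirrored = im.transpose(Transpose.FLIP_LEFT_RIGHT)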
def effect_spread(self, distance: int) -> Image:
"""
Randomly spread pixels in an image.
:param distance: Distance to spread pixels.
"""
self.load()
return self._new(self.im.effect_spread(distance))
def toqimage(self) -> ImageQt.ImageQt:
"""Returns a QImage copy of this image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
msg = "Qt bindings are not installed"
raise ImportError(msg)
return ImageQt.toqimage(self)
def toqpixmap(self) -> ImageQt.QPixmap:
"""Returns a QPixmap copy of this image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
msg = "Qt bindings are not installed"
raise ImportError(msg)
return ImageQt.toqpixmap(self)
# --------------------------------------------------------------------
# Abstract handlers.
| Image |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/non_slot_assignment.py | {
"start": 220,
"end": 462
} | class ____:
__slots__ = ("name", "surname")
def __init__(self, name, middle_name):
self.name = name
self.middle_name = middle_name # [assigning-non-slot]
self.setup()
def setup(self):
pass
| StudentB |
python | getsentry__sentry | tests/sentry/tasks/test_statistical_detectors.py | {
"start": 41906,
"end": 54559
} | class ____(ProfilesSnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.now = before_now(minutes=10)
self.hour_ago = (self.now - timedelta(hours=1)).replace(minute=0, second=0, microsecond=0)
self.projects = [
self.create_project(organization=self.organization, teams=[self.team], name="Foo"),
self.create_project(organization=self.organization, teams=[self.team], name="Bar"),
]
self.transaction_functions = []
self.continuous_functions = []
for project in self.projects:
stored = self.store_functions(
[
{
"self_times_ns": [100_000_000 for _ in range(10)],
"package": "foo",
"function": "foo",
# only in app functions should
# appear in the results
"in_app": True,
},
{
# this function has a lower count, so `foo` is prioritized
"self_times_ns": [200_000_000 for _ in range(20)],
"package": "baz",
"function": "baz",
# only in app functions should
# appear in the results
"in_app": True,
},
{
"self_times_ns": [300_000_000 for _ in range(30)],
"package": "qux",
"function": "qux",
# non in app functions should not
# appear in the results
"in_app": False,
},
],
project=project,
timestamp=self.hour_ago,
)
self.transaction_functions.append(stored)
stored = self.store_functions_chunk(
[
{
"self_times_ns": [100_000_000 for _ in range(10)],
"package": "bar",
"function": "bar",
"thread_id": "1",
# only in app functions should
# appear in the results
"in_app": True,
},
{
"self_times_ns": [200_000_000 for _ in range(20)],
"package": "baz",
"function": "baz",
"thread_id": "2",
# only in app functions should
# appear in the results
"in_app": True,
},
{
"self_times_ns": [300_000_000 for _ in range(30)],
"package": "qux",
"function": "qux",
"thread_id": "3",
# non in app functions should not
# appear in the results
"in_app": False,
},
],
project=project,
timestamp=self.hour_ago,
)
self.continuous_functions.append(stored)
@mock.patch("sentry.tasks.statistical_detectors.FUNCTIONS_PER_PROJECT", 1)
def test_functions_query(self) -> None:
results = query_functions(self.projects, self.now)
fingerprint = self.function_fingerprint({"package": "baz", "function": "baz"})
assert results == [
DetectorPayload(
project_id=project.id,
group=fingerprint,
fingerprint=f"{fingerprint:x}",
count=40,
value=pytest.approx(200_000_000), # type: ignore[arg-type]
timestamp=self.hour_ago,
)
for project in self.projects
]
@mock.patch("sentry.tasks.statistical_detectors.get_from_profiling_service")
def test_emit_function_regression_issue_transaction_function(
self, mock_get_from_profiling_service
):
mock_value = mock.MagicMock()
mock_value.status = 200
mock_value.data = b'{"occurrences":2}'
mock_get_from_profiling_service.return_value = mock_value
fingerprint = self.function_fingerprint({"package": "foo", "function": "foo"})
breakpoint = int((self.hour_ago - timedelta(hours=12)).timestamp())
regressions: list[BreakpointData] = [
{
"project": str(project.id),
"transaction": str(fingerprint),
"aggregate_range_1": 100_000_000,
"aggregate_range_2": 200_000_000,
"unweighted_t_value": 1.23,
"unweighted_p_value": 1.23,
"trend_percentage": 1.23,
"absolute_percentage_change": 1.23,
"trend_difference": 1.23,
"breakpoint": breakpoint,
}
for project in self.projects
]
emit_function_regression_issue(
{project.id: project for project in self.projects}, regressions, self.now
)
def get_example(stored):
return {
"profile_id": stored["transaction"]["contexts"]["profile"]["profile_id"],
}
mock_get_from_profiling_service.assert_has_calls(
[
mock.call(
method="POST",
path="/regressed",
json_data=[
{
"organization_id": self.organization.id,
"project_id": project.id,
"example": get_example(stored),
"fingerprint": fingerprint,
"absolute_percentage_change": 1.23,
"aggregate_range_1": 100_000_000,
"aggregate_range_2": 200_000_000,
"breakpoint": breakpoint,
"trend_difference": 1.23,
"trend_percentage": 1.23,
"unweighted_p_value": 1.23,
"unweighted_t_value": 1.23,
}
for project, stored in zip(self.projects, self.transaction_functions)
],
)
],
)
@mock.patch("sentry.tasks.statistical_detectors.get_from_profiling_service")
@mock.patch("sentry.tasks.statistical_detectors.raw_snql_query")
def test_emit_function_regression_issue_continuous_function(
self,
mock_raw_snql_query,
mock_get_from_profiling_service,
):
mock_raw_snql_query.return_value = {
"data": [
{
"project_id": project.id,
"profiler_id": stored["profiler_id"],
"chunk_id": stored["chunk_id"],
"start_timestamp": self.hour_ago.isoformat(),
"end_timestamp": (self.hour_ago + timedelta(microseconds=300_000)).isoformat(),
}
for project, stored in zip(
self.projects,
self.continuous_functions,
)
],
}
mock_value = mock.MagicMock()
mock_value.status = 200
mock_value.data = b'{"occurrences":2}'
mock_get_from_profiling_service.return_value = mock_value
fingerprint = self.function_fingerprint({"package": "bar", "function": "bar"})
breakpoint = int((self.hour_ago - timedelta(hours=12)).timestamp())
regressions: list[BreakpointData] = [
{
"project": str(project.id),
"transaction": str(fingerprint),
"aggregate_range_1": 100_000_000,
"aggregate_range_2": 200_000_000,
"unweighted_t_value": 1.23,
"unweighted_p_value": 1.23,
"trend_percentage": 1.23,
"absolute_percentage_change": 1.23,
"trend_difference": 1.23,
"breakpoint": breakpoint,
}
for project in self.projects
]
emit_function_regression_issue(
{project.id: project for project in self.projects}, regressions, self.now
)
def get_example(stored):
return {
"profiler_id": stored["profiler_id"],
"thread_id": "1",
"start": self.hour_ago.timestamp(),
"end": (self.hour_ago + timedelta(microseconds=300_000)).timestamp(),
"chunk_id": stored["chunk_id"],
}
mock_get_from_profiling_service.assert_has_calls(
[
mock.call(
method="POST",
path="/regressed",
json_data=[
{
"organization_id": self.organization.id,
"project_id": project.id,
"example": get_example(stored),
"fingerprint": fingerprint,
"absolute_percentage_change": 1.23,
"aggregate_range_1": 100_000_000,
"aggregate_range_2": 200_000_000,
"breakpoint": breakpoint,
"trend_difference": 1.23,
"trend_percentage": 1.23,
"unweighted_p_value": 1.23,
"unweighted_t_value": 1.23,
}
for project, stored in zip(self.projects, self.continuous_functions)
],
)
],
)
@mock.patch("sentry.tasks.statistical_detectors.get_from_profiling_service")
def test_emit_function_regression_issue_mixed(
self, mock_get_from_profiling_service: mock.MagicMock
) -> None:
mock_value = mock.MagicMock()
mock_value.status = 200
mock_value.data = b'{"occurrences":2}'
mock_get_from_profiling_service.return_value = mock_value
fingerprint = self.function_fingerprint({"package": "foo", "function": "foo"})
breakpoint = int((self.hour_ago - timedelta(hours=12)).timestamp())
regressions: list[BreakpointData] = [
{
"project": str(project.id),
"transaction": str(fingerprint),
"aggregate_range_1": 100_000_000,
"aggregate_range_2": 200_000_000,
"unweighted_t_value": 1.23,
"unweighted_p_value": 1.23,
"trend_percentage": 1.23,
"absolute_percentage_change": 1.23,
"trend_difference": 1.23,
"breakpoint": breakpoint,
}
for project in self.projects
]
emit_function_regression_issue(
{project.id: project for project in self.projects}, regressions, self.now
)
def get_example(stored):
# when the 2 modes are mixed, we try to prefer transaction based profiles
return {
"profile_id": stored["transaction"]["contexts"]["profile"]["profile_id"],
}
mock_get_from_profiling_service.assert_has_calls(
[
mock.call(
method="POST",
path="/regressed",
json_data=[
{
"organization_id": self.organization.id,
"project_id": project.id,
"example": get_example(stored),
"fingerprint": fingerprint,
"absolute_percentage_change": 1.23,
"aggregate_range_1": 100_000_000,
"aggregate_range_2": 200_000_000,
"breakpoint": breakpoint,
"trend_difference": 1.23,
"trend_percentage": 1.23,
"unweighted_p_value": 1.23,
"unweighted_t_value": 1.23,
}
for project, stored in zip(self.projects, self.transaction_functions)
],
)
],
)
@pytest.mark.sentry_metrics
| FunctionsTasksTest |
python | spack__spack | lib/spack/spack/fetch_strategy.py | {
"start": 22227,
"end": 23100
} | class ____(URLFetchStrategy):
def __init__(self, *, url: str, checksum: Optional[str] = None, **kwargs):
super().__init__(url=url, checksum=checksum, **kwargs)
self._urlopen = kwargs.get("_urlopen", spack.oci.opener.urlopen)
@_needs_stage
def fetch(self):
file = self.stage.save_filename
if os.path.lexists(file):
os.remove(file)
try:
response = self._urlopen(self.url)
tty.msg(f"Fetching {self.url}")
with open(file, "wb") as f:
shutil.copyfileobj(response, f)
except OSError as e:
# clean up archive on failure.
if self.archive_file:
os.remove(self.archive_file)
if os.path.lexists(file):
os.remove(file)
raise FailedDownloadError(e) from e
| OCIRegistryFetchStrategy |
python | tensorflow__tensorflow | tensorflow/python/eager/context.py | {
"start": 7537,
"end": 9908
} | class ____:
"""Options applied at call sites of eager functions.
Eager functions are functions decorated with tf.contrib.eager.defun.
"""
__slots__ = ["_config_proto_serialized", "_executor_type"]
def __init__(self, executor_type=None, config_proto=None):
"""Constructor.
Args:
executor_type: (optional) name of the executor to be used to execute the
eager function. If None or an empty string, the default Tensorflow
executor will be used.
config_proto: (optional) a `config_pb2.ConfigProto` proto or a serialized
string of that proto. The config used by Grappler when optimizing the
function graph. Each concrete function is optimized the first time it is
called. Changing config_proto after the first call has no effect. If
config_proto is None, an empty RewriterConfig will be used.
"""
self.config_proto_serialized = config_proto
self.executor_type = executor_type
@property
def executor_type(self):
return self._executor_type
@executor_type.setter
def executor_type(self, executor_type):
self._executor_type = executor_type
@property
def config_proto_serialized(self):
return self._config_proto_serialized
@config_proto_serialized.setter
def config_proto_serialized(self, config):
if isinstance(config, config_pb2.ConfigProto):
self._config_proto_serialized = config.SerializeToString(
deterministic=True
)
elif isinstance(config, str):
self._config_proto_serialized = config
elif config is None:
self._config_proto_serialized = (
config_pb2.ConfigProto().SerializeToString()
)
else:
raise ValueError(
"the rewriter config must be either a "
"config_pb2.ConfigProto, or a serialized string of that "
"proto or None. got: {}".format(type(config))
)
def as_attrs(self):
if self.config_proto_serialized is None:
config = function_utils.get_disabled_rewriter_config()
else:
config = self.config_proto_serialized
executor_type = self.executor_type or ""
return {"executor_type": executor_type, "config_proto": config}
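# Illustrative sketch (assumed usage, not TensorFlow source; the executor
# name is an assumption): pin calls to a named executor and pass an
# explicit, empty config proto:
#
#     opts = FunctionCallOptions(
#         executor_type="SINGLE_THREADED_EXECUTOR",
#         config_proto=config_pb2.ConfigProto())
#     attrs = opts.as_attrs()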
# Map from context_id (an int) to _TensorCaches.
# Dicts are thread safe in CPython.
# TODO(iga): Remove this once TensorCaches are moved to C++.
_tensor_caches_map = {}
| FunctionCallOptions |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 502950,
"end": 503267
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("ProjectNext", graphql_name="node")
| ProjectNextEdge |
python | patrick-kidger__equinox | equinox/internal/_getkey.py | {
"start": 392,
"end": 1088
} | class ____:
"""Designed for use as a fixture in tests.
!!! Example
```python
# tests/conftest.py
@pytest.fixture
def getkey():
return eqxi.GetKey()
```
Do not use this in any other context; the random seed generation gives deliberate
non-determinism.
"""
seed: int
call: int
key: PRNGKeyArray
def __init__(self, seed: int | None = EQX_GETKEY_SEED):
if seed is None:
seed = random.randint(0, 2**31 - 1)
self.seed = seed
self.call = 0
self.key = jr.PRNGKey(seed)
def __call__(self):
self.call += 1
return jr.fold_in(self.key, self.call)
| GetKey |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/hash_test4/package.py | {
"start": 217,
"end": 687
} | class ____(Package):
"""This package isn't compared with others, but it contains constructs
that package hashing logic has tripped over in the past.
"""
homepage = "http://www.hashtest4.org"
url = "http://www.hashtest1.org/downloads/hashtest4-1.1.tar.bz2"
version("1.1", md5="a" * 32)
def install(self, spec, prefix):
pass
@staticmethod
def examine_prefix(pkg):
pass
run_after("install")(examine_prefix)
| HashTest4 |
python | wandb__wandb | wandb/sdk/data_types/audio.py | {
"start": 302,
"end": 6385
} | class ____(BatchableMedia):
"""W&B class for audio clips."""
_log_type = "audio-file"
def __init__(
self,
data_or_path: Union[
str,
pathlib.Path,
list,
"np.ndarray",
],
sample_rate: Optional[int] = None,
caption: Optional[str] = None,
):
"""Accept a path to an audio file or a numpy array of audio data.
Args:
data_or_path: A path to an audio file or a NumPy array of audio data.
sample_rate: Sample rate, required when passing in raw NumPy array of audio data.
caption: Caption to display with audio.
"""
super().__init__(caption=caption)
self._duration = None
self._sample_rate = sample_rate
if isinstance(data_or_path, (str, pathlib.Path)):
data_or_path = str(data_or_path)
if self.path_is_reference(data_or_path):
self._path = data_or_path
self._sha256 = hashlib.sha256(data_or_path.encode("utf-8")).hexdigest()
self._is_tmp = False
else:
self._set_file(data_or_path, is_tmp=False)
else:
if sample_rate is None:
raise ValueError(
'Argument "sample_rate" is required when instantiating wandb.Audio with raw data.'
)
soundfile = util.get_module(
"soundfile",
required='Raw audio requires the soundfile package. To get it, run "pip install soundfile"',
)
tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + ".wav")
soundfile.write(tmp_path, data_or_path, sample_rate)
self._duration = len(data_or_path) / float(sample_rate)
self._set_file(tmp_path, is_tmp=True)
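# Illustrative sketch (assumed names, not library source): log one second
# of a 440 Hz sine tone as raw data, which requires an explicit sample
# rate:
#
#     import numpy as np
#     sr = 44100
#     tone = np.sin(2 * np.pi * 440 * np.arange(sr) / sr)
#     audio = Audio(tone, sample_rate=sr, caption="A4 tone")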
@classmethod
def get_media_subdir(cls):
"""Get media subdirectory.
<!-- lazydoc-ignore-classmethod: internal -->
"""
return os.path.join("media", "audio")
@classmethod
def from_json(cls, json_obj, source_artifact):
"""Deserialize JSON object into it's class representation.
<!-- lazydoc-ignore-classmethod: internal -->
"""
return cls(
source_artifact.get_entry(json_obj["path"]).download(),
caption=json_obj["caption"],
)
def bind_to_run(
self, run, key, step, id_=None, ignore_copy_err: Optional[bool] = None
):
"""Bind this object to a run.
<!-- lazydoc-ignore: internal -->
"""
if self.path_is_reference(self._path):
raise ValueError(
"Audio media created by a reference to external storage cannot currently be added to a run"
)
return super().bind_to_run(run, key, step, id_, ignore_copy_err)
def to_json(self, run):
"""Returns the JSON representation expected by the backend.
<!-- lazydoc-ignore: internal -->
"""
json_dict = super().to_json(run)
json_dict.update(
{
"_type": self._log_type,
}
)
return json_dict
@classmethod
def seq_to_json(cls, seq, run, key, step):
"""Convert a sequence of Audio objects to a JSON representation.
<!-- lazydoc-ignore-classmethod: internal -->
"""
audio_list = list(seq)
util.get_module(
"soundfile",
required="wandb.Audio requires the soundfile package. To get it, run: pip install soundfile",
)
base_path = os.path.join(run.dir, "media", "audio")
filesystem.mkdir_exists_ok(base_path)
meta = {
"_type": "audio",
"count": len(audio_list),
"audio": [a.to_json(run) for a in audio_list],
}
sample_rates = cls.sample_rates(audio_list)
if sample_rates:
meta["sampleRates"] = sample_rates
durations = cls.durations(audio_list)
if durations:
meta["durations"] = durations
captions = cls.captions(audio_list)
if captions:
meta["captions"] = captions
return meta
@classmethod
def durations(cls, audio_list):
"""Calculate the duration of the audio files."""
return [a._duration for a in audio_list]
@classmethod
def sample_rates(cls, audio_list):
"""Get sample rates of the audio files."""
return [a._sample_rate for a in audio_list]
@classmethod
def captions(cls, audio_list):
"""Get the captions of the audio files.
<!-- lazydoc-ignore-classmethod: internal -->
"""
captions = [a._caption for a in audio_list]
if all(c is None for c in captions):
return False
else:
return ["" if c is None else c for c in captions]
def resolve_ref(self):
"""Resolve the reference to the actual file path.
<!-- lazydoc-ignore: internal -->
"""
if self.path_is_reference(self._path):
# this object was already created using a ref:
return self._path
source_artifact = self._artifact_source.artifact
resolved_name = source_artifact._local_path_to_name(self._path)
if resolved_name is not None:
target_entry = source_artifact.manifest.get_entry_by_path(resolved_name)
if target_entry is not None:
return target_entry.ref
return None
def __eq__(self, other):
if self.path_is_reference(self._path) or self.path_is_reference(other._path):
# one or more of these objects is an unresolved reference -- we'll compare
# their reference paths instead of their SHAs:
return (
self.resolve_ref() == other.resolve_ref()
and self._caption == other._caption
)
return super().__eq__(other) and self._caption == other._caption
def __ne__(self, other):
return not self.__eq__(other)
| Audio |
python | optuna__optuna | tests/storages_tests/rdb_tests/test_models.py | {
"start": 16452,
"end": 16888
} | class ____:
@staticmethod
def test_version_info_id_constraint(session: Session) -> None:
session.add(VersionInfoModel(schema_version=1, library_version="0.0.1"))
session.commit()
# Test check constraint of version_info_id.
session.add(VersionInfoModel(version_info_id=2, schema_version=2, library_version="0.0.2"))
pytest.raises(IntegrityError, lambda: session.commit())
| TestVersionInfoModel |
python | doocs__leetcode | lcci/08.10.Color Fill/Solution.py | {
"start": 0,
"end": 606
} | class ____:
def floodFill(
self, image: List[List[int]], sr: int, sc: int, newColor: int
) -> List[List[int]]:
def dfs(i, j):
if (
not 0 <= i < m
or not 0 <= j < n
or image[i][j] != oc
or image[i][j] == newColor
):
return
image[i][j] = newColor
for a, b in pairwise(dirs):
dfs(i + a, j + b)
dirs = (-1, 0, 1, 0, -1)
m, n = len(image), len(image[0])
oc = image[sr][sc]
dfs(sr, sc)
return image
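# Illustrative sketch: starting at (1, 1), recolor the region connected
# to image[1][1] that shares its original color:
#
#     Solution().floodFill([[1, 1, 1], [1, 1, 0], [1, 0, 1]], 1, 1, 2)
#     # -> [[2, 2, 2], [2, 2, 0], [2, 0, 1]]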
| Solution |
python | django__django | django/db/models/functions/window.py | {
"start": 2622,
"end": 2727
} | class ____(Func):
function = "RANK"
output_field = IntegerField()
window_compatible = True
| Rank |
python | has2k1__plotnine | plotnine/composition/_compose.py | {
"start": 877,
"end": 14236
} | class ____:
"""
Base class for those that create plot compositions
As a user, you will never directly work with this class, except
through the operators that it makes possible.
The operators are of two kinds:
### 1. Composing Operators
They combine plots or compositions into a single composition.
Both operands are either a plot or a composition.
`|`
: Arrange operands side by side.
Powered by the subclass [](`~plotnine.composition.Beside`).
`/`
: Arrange operands vertically.
Powered by the subclass [](`~plotnine.composition.Stack`).
`-`
: Arrange operands side by side _and_ at the same nesting level.
Also powered by the subclass [](`~plotnine.composition.Beside`).
`+`
: Arrange operands in a 2D grid.
Powered by the subclass [](`~plotnine.composition.Wrap`).
### 2. Plot Modifying Operators
They modify all or some of the plots in a composition.
The left operand is a composition and the right operand is a
_plotaddable_; any object that can be added to a `ggplot` object
e.g. _geoms_, _stats_, _themes_, _facets_, ... .
`&`
: Add right hand side to all plots in the composition.
`*`
: Add right hand side to all plots in the top-most nesting
level of the composition.
`+`
: Add right hand side to the last plot in the composition.
See Also
--------
plotnine.composition.Beside : To arrange plots side by side
plotnine.composition.Stack : To arrange plots vertically
plotnine.composition.Wrap : To arrange plots in a grid
plotnine.composition.plot_spacer : To add a blank space between plots
"""
items: list[ggplot | Compose]
"""
The objects to be arranged (composed)
"""
_layout: plot_layout = field(
init=False, repr=False, default_factory=plot_layout
)
"""
Every composition gets initiated with an empty plot_layout whose
attributes are either dynamically generated before the composition
is drawn, or they are overwritten by a layout added by the user.
"""
# These are created in the _create_figure method
figure: Figure = field(init=False, repr=False)
plotspecs: list[plotspec] = field(init=False, repr=False)
gridspec: p9GridSpec = field(init=False, repr=False)
def __post_init__(self):
# The way we handle the plots has consequences that would
# prevent having a duplicate plot in the composition.
# Using copies prevents this.
self.items = [
op if isinstance(op, Compose) else deepcopy(op)
for op in self.items
]
def __repr__(self):
"""
repr
Notes
-----
Subclasses that are dataclasses should be declared with
`@dataclass(repr=False)`.
"""
# knitr relies on __repr__ to automatically print the last object
# in a cell.
if is_knitr_engine():
self.show()
return ""
return super().__repr__()
@property
def layout(self) -> plot_layout:
"""
The plot_layout of this composition
"""
return self._layout
@layout.setter
def layout(self, value: plot_layout):
"""
Add (or merge) a plot_layout to this composition
"""
self._layout = copy(self.layout)
self._layout.update(value)
@property
def nrow(self) -> int:
return cast("int", self.layout.nrow)
@property
def ncol(self) -> int:
return cast("int", self.layout.ncol)
@abc.abstractmethod
def __or__(self, rhs: ggplot | Compose) -> Compose:
"""
Add rhs as a column
"""
@abc.abstractmethod
def __truediv__(self, rhs: ggplot | Compose) -> Compose:
"""
Add rhs as a row
"""
def __add__(
self,
rhs: ggplot | Compose | PlotAddable | ComposeAddable,
) -> Compose:
"""
Add rhs to the composition
Parameters
----------
rhs:
What to add to the composition
"""
from plotnine import ggplot
self = deepcopy(self)
if isinstance(rhs, ComposeAddable):
return rhs.__radd__(self)
elif not isinstance(rhs, (ggplot, Compose)):
self.last_plot = self.last_plot + rhs
return self
t1, t2 = type(self).__name__, type(rhs).__name__
msg = f"unsupported operand type(s) for +: '{t1}' and '{t2}'"
raise TypeError(msg)
def __sub__(self, rhs: ggplot | Compose) -> Compose:
"""
Add the rhs onto the composition
Parameters
----------
rhs:
            What to place beside the composition
"""
from plotnine import ggplot
from . import Beside
if not isinstance(rhs, (ggplot, Compose)):
t1, t2 = type(self).__name__, type(rhs).__name__
msg = f"unsupported operand type(s) for -: '{t1}' and '{t2}'"
raise TypeError(msg)
return Beside([self, rhs])
def __and__(self, rhs: PlotAddable) -> Compose:
"""
Add rhs to all plots in the composition
Parameters
----------
rhs:
What to add.
"""
self = deepcopy(self)
for i, item in enumerate(self):
if isinstance(item, Compose):
self[i] = item & rhs
else:
item += copy(rhs)
return self
def __mul__(self, rhs: PlotAddable) -> Compose:
"""
Add rhs to the outermost nesting level of the composition
Parameters
----------
rhs:
What to add.
"""
from plotnine import ggplot
self = deepcopy(self)
for item in self:
if isinstance(item, ggplot):
item += copy(rhs)
return self
def __len__(self) -> int:
"""
Number of operands
"""
return len(self.items)
def __iter__(self) -> Iterator[ggplot | Compose]:
"""
Return an iterable of all the items
"""
return iter(self.items)
@overload
def __getitem__(self, index: int) -> ggplot | Compose: ...
@overload
def __getitem__(self, index: slice) -> list[ggplot | Compose]: ...
def __getitem__(
self,
index: int | slice,
) -> ggplot | Compose | list[ggplot | Compose]:
return self.items[index]
def __setitem__(self, key, value):
self.items[key] = value
def _repr_mimebundle_(self, include=None, exclude=None) -> MimeBundle:
"""
Return dynamic MIME bundle for composition display
"""
ip = get_ipython()
format: FigureFormat = (
get_option("figure_format")
or (ip and ip.config.InlineBackend.get("figure_format"))
or "retina"
)
if format == "retina":
self = deepcopy(self)
self._to_retina()
buf = BytesIO()
self.save(buf, "png" if format == "retina" else format)
figure_size_px = self.last_plot.theme._figure_size_px
return get_mimebundle(buf.getvalue(), format, figure_size_px)
@property
def last_plot(self) -> ggplot:
"""
Last plot added to the composition
"""
from plotnine import ggplot
last_operand = self.items[-1]
if isinstance(last_operand, ggplot):
return last_operand
else:
return last_operand.last_plot
@last_plot.setter
def last_plot(self, plot: ggplot):
"""
Replace the last plot in the composition
"""
from plotnine import ggplot
last_operand = self.items[-1]
if isinstance(last_operand, ggplot):
self.items[-1] = plot
else:
last_operand.last_plot = plot
def __deepcopy__(self, memo):
"""
Deep copy without copying the figure
"""
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
old = self.__dict__
new = result.__dict__
shallow = {"figure", "gridsspec", "__copy"}
for key, item in old.items():
if key in shallow:
new[key] = item
memo[id(new[key])] = new[key]
else:
new[key] = deepcopy(item, memo)
old["__copy"] = result
return result
def _to_retina(self):
from plotnine import ggplot
for item in self:
if isinstance(item, ggplot):
item.theme = item.theme.to_retina()
else:
item._to_retina()
def _create_gridspec(self, figure, nest_into):
"""
Create the gridspec for this composition
"""
from plotnine._mpl.gridspec import p9GridSpec
self.layout._setup(self)
self.gridspec = p9GridSpec.from_layout(
self.layout, figure=figure, nest_into=nest_into
)
def _setup(self) -> Figure:
"""
Setup this instance for the building process
"""
if not hasattr(self, "figure"):
self._create_figure()
return self.figure
def _create_figure(self):
import matplotlib.pyplot as plt
from plotnine import ggplot
from plotnine._mpl.gridspec import p9GridSpec
def _make_plotspecs(
cmp: Compose, parent_gridspec: p9GridSpec | None
) -> Generator[plotspec]:
"""
Return the plot specification for each subplot in the composition
"""
# This gridspec contains a composition group e.g.
# (p2 | p3) of p1 | (p2 | p3)
ss_or_none = parent_gridspec[0] if parent_gridspec else None
cmp._create_gridspec(self.figure, ss_or_none)
# Each subplot in the composition will contain one of:
# 1. A plot
# 2. A plot composition
# 3. Nothing
# Iterating over the gridspec yields the SubplotSpecs for each
# "subplot" in the grid. The SubplotSpec is the handle that
# allows us to set it up for a plot or to nest another gridspec
# in it.
for item, subplot_spec in zip(cmp, cmp.gridspec):
if isinstance(item, ggplot):
yield plotspec(
item,
self.figure,
cmp.gridspec,
subplot_spec,
p9GridSpec(1, 1, self.figure, nest_into=subplot_spec),
)
elif item:
yield from _make_plotspecs(
item,
p9GridSpec(1, 1, self.figure, nest_into=subplot_spec),
)
self.figure = plt.figure()
self.plotspecs = list(_make_plotspecs(self, None))
def _draw_plots(self):
"""
Draw all plots in the composition
"""
for ps in self.plotspecs:
ps.plot.draw()
def show(self):
"""
Display plot in the cells output
This function is called for its side-effects.
"""
# Prevent against any modifications to the users
# ggplot object. Do the copy here as we may/may not
# assign a default theme
self = deepcopy(self)
if is_inline_backend() or is_quarto_environment():
from IPython.display import display
data, metadata = self._repr_mimebundle_()
display(data, metadata=metadata, raw=True)
else:
self.draw(show=True)
def draw(self, *, show: bool = False) -> Figure:
"""
Render the arranged plots
Parameters
----------
show :
Whether to show the plot.
Returns
-------
:
Matplotlib figure
"""
from .._mpl.layout_manager import PlotnineCompositionLayoutEngine
with plot_composition_context(self, show):
figure = self._setup()
self._draw_plots()
figure.set_layout_engine(PlotnineCompositionLayoutEngine(self))
return figure
def save(
self,
filename: str | Path | BytesIO,
format: str | None = None,
dpi: int | None = None,
**kwargs,
):
"""
Save a composition as an image file
Parameters
----------
filename :
            File name, or a binary buffer, to write the plot to.
format :
Image format to use, automatically extract from
file name extension.
dpi :
            DPI to use for raster graphics. If None, defaults to the
            `dpi` of the theme of the first plot.
**kwargs :
These are ignored. Here to "softly" match the API of
`ggplot.save()`.
"""
from plotnine import theme
# To set the dpi, we only need to change the dpi of
# the last plot and theme gets added to the last plot
plot = (self + theme(dpi=dpi)) if dpi else self
figure = plot.draw()
figure.savefig(filename, format=format)
| Compose |
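A short usage sketch of the operators documented above (assumes a plotnine release with composition support; `mtcars` ships in `plotnine.data`):
from plotnine import aes, geom_point, ggplot, theme_minimal
from plotnine.data import mtcars

p1 = ggplot(mtcars, aes("wt", "mpg")) + geom_point()
p2 = ggplot(mtcars, aes("hp", "mpg")) + geom_point()
p3 = ggplot(mtcars, aes("disp", "mpg")) + geom_point()

cmp = (p1 | p2) / p3         # two plots side by side, stacked over a third
cmp = cmp & theme_minimal()  # '&' applies the theme to every plot in the tree
cmp.save("grid.png", dpi=150)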
python | getsentry__sentry | src/sentry/incidents/models/incident.py | {
"start": 5593,
"end": 8391
} | class ____(Model):
"""
An Incident represents the overarching period during an AlertRule's "unhealthy" state.
    An AlertRule can fire multiple IncidentTriggers during an Incident
    (i.e. Critical -> Warning -> Critical), but once the alert resolves,
    the Incident ends.
An AlertRule may have multiple Incidents that correlate with different subscriptions.
TODO:
- UI should be able to handle multiple active incidents
"""
__relocation_scope__ = RelocationScope.Global
objects: ClassVar[IncidentManager] = IncidentManager()
organization = FlexibleForeignKey("sentry.Organization")
projects = models.ManyToManyField(
"sentry.Project", related_name="incidents", through=IncidentProject
)
alert_rule = FlexibleForeignKey("sentry.AlertRule", on_delete=models.PROTECT)
# Incrementing id that is specific to the org.
identifier = models.IntegerField()
# Identifier used to match incoming events from the detection algorithm
detection_uuid = UUIDField(null=True, db_index=True)
status = models.PositiveSmallIntegerField(default=IncidentStatus.OPEN.value)
status_method = models.PositiveSmallIntegerField(
default=IncidentStatusMethod.RULE_TRIGGERED.value
)
type = models.PositiveSmallIntegerField()
title = models.TextField()
# When we suspect the incident actually started
date_started = models.DateTimeField(default=timezone.now)
# When we actually detected the incident
date_detected = models.DateTimeField(default=timezone.now)
date_added = models.DateTimeField(default=timezone.now)
date_closed = models.DateTimeField(null=True)
subscription = FlexibleForeignKey(
"sentry.QuerySubscription", on_delete=models.SET_NULL, null=True
)
class Meta:
app_label = "sentry"
db_table = "sentry_incident"
unique_together = (("organization", "identifier"),)
indexes = (models.Index(fields=("alert_rule", "type", "status")),)
@property
def current_end_date(self) -> datetime:
"""
Returns the current end of the incident. Either the date it was closed,
or the current time if it's still open.
"""
return self.date_closed if self.date_closed else timezone.now()
@property
def duration(self):
return self.current_end_date - self.date_started
def normalize_before_relocation_import(
self, pk_map: PrimaryKeyMap, scope: ImportScope, flags: ImportFlags
) -> int | None:
old_pk = super().normalize_before_relocation_import(pk_map, scope, flags)
if old_pk is None:
return None
# Generate a new UUID, if one exists.
if self.detection_uuid:
self.detection_uuid = uuid4()
return old_pk
| Incident |
python | pandas-dev__pandas | asv_bench/benchmarks/categoricals.py | {
"start": 9348,
"end": 9776
} | class ____:
def setup(self):
N = 10**5
self.ci = pd.CategoricalIndex(np.arange(N)).sort_values()
self.c = self.ci.values
self.key = self.ci.categories[1]
def time_categorical_index_contains(self):
self.ci.searchsorted(self.key)
def time_categorical_contains(self):
self.c.searchsorted(self.key)
from .pandas_vb_common import setup # noqa: F401 isort:skip
| SearchSorted |
python | gevent__gevent | src/gevent/tests/test__socket.py | {
"start": 20635,
"end": 22637
} | class ____(greentest.TestCase):
@greentest.ignores_leakcheck
# Creating new types in the function takes a cycle to cleanup.
def test_wait_timeout(self):
# Issue #635
from gevent import socket as gsocket
class io(object):
callback = None
def start(self, *_args):
gevent.sleep(10)
with self.assertRaises(gsocket.timeout):
gsocket.wait(io(), timeout=0.01) # pylint:disable=no-member
def test_signatures(self):
# https://github.com/gevent/gevent/issues/960
exclude = []
if greentest.PYPY:
# Up through at least PyPy 5.7.1, they define these as
# gethostbyname(host), whereas the official CPython argument name
# is hostname. But cpython doesn't allow calling with keyword args.
# Likewise for gethostbyaddr: PyPy uses host, cpython uses ip_address
exclude.append('gethostbyname')
exclude.append('gethostbyname_ex')
exclude.append('gethostbyaddr')
if sys.version_info[:2] < (3, 11):
# 3.11+ add ``*, all_errors=False``. We allow that on all versions,
# forcing it to a false value if the user sends a true value before
# exception groups exist.
exclude.append('create_connection')
self.assertMonkeyPatchedFuncSignatures('socket', exclude=exclude)
def test_resolve_ipv6_scope_id(self):
from gevent import _socketcommon as SC
if not SC.__socket__.has_ipv6:
self.skipTest("Needs IPv6") # pragma: no cover
if not hasattr(SC.__socket__, 'inet_pton'):
self.skipTest("Needs inet_pton") # pragma: no cover
# A valid IPv6 address, with a scope.
addr = ('2607:f8b0:4000:80e::200e', 80, 0, 9)
# Mock socket
class sock(object):
family = SC.AF_INET6 # pylint:disable=no-member
self.assertIs(addr, SC._resolve_addr(sock, addr))
| TestFunctions |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/methods/test_round.py | {
"start": 223,
"end": 7846
} | class ____:
def test_round_daily(self):
dti = date_range("20130101 09:10:11", periods=5)
result = dti.round("D")
expected = date_range("20130101", periods=5)
tm.assert_index_equal(result, expected)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
result = dti.round("D")
expected = date_range("20130101", periods=5).tz_localize("US/Eastern")
tm.assert_index_equal(result, expected)
result = dti.round("s")
tm.assert_index_equal(result, dti)
@pytest.mark.parametrize(
"freq, error_msg",
[
("YE", "<YearEnd: month=12> is a non-fixed frequency"),
("ME", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
],
)
def test_round_invalid(self, freq, error_msg):
dti = date_range("20130101 09:10:11", periods=5)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
with pytest.raises(ValueError, match=error_msg):
dti.round(freq)
def test_round(self, tz_naive_fixture, unit):
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz, unit=unit)
elt = rng[1]
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz),
Timestamp("2016-01-01 00:00:00", tz=tz),
Timestamp("2016-01-01 01:00:00", tz=tz),
Timestamp("2016-01-01 02:00:00", tz=tz),
Timestamp("2016-01-01 02:00:00", tz=tz),
]
).as_unit(unit)
expected_elt = expected_rng[1]
result = rng.round(freq="h")
tm.assert_index_equal(result, expected_rng)
assert elt.round(freq="h") == expected_elt
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
rng.round(freq="foo")
with pytest.raises(ValueError, match=msg):
elt.round(freq="foo")
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
rng.round(freq="ME")
with pytest.raises(ValueError, match=msg):
elt.round(freq="ME")
def test_round2(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH#14440 & GH#15578
index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz).as_unit("ns")
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz).as_unit("ns")
tm.assert_index_equal(result, expected)
for freq in ["us", "ns"]:
tm.assert_index_equal(index, index.round(freq))
def test_round3(self, tz_naive_fixture):
tz = tz_naive_fixture
index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz).as_unit("ns")
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz).as_unit("ns")
tm.assert_index_equal(result, expected)
def test_round4(self, tz_naive_fixture):
index = DatetimeIndex(["2016-10-17 12:00:00.001501031"], dtype="M8[ns]")
result = index.round("10ns")
expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"], dtype="M8[ns]")
tm.assert_index_equal(result, expected)
ts = "2016-10-17 12:00:00.001501031"
dti = DatetimeIndex([ts], dtype="M8[ns]")
with tm.assert_produces_warning(False):
dti.round("1010ns")
def test_no_rounding_occurs(self, tz_naive_fixture):
# GH 21262
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz, unit="ns")
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz),
Timestamp("2016-01-01 00:02:00", tz=tz),
Timestamp("2016-01-01 00:04:00", tz=tz),
Timestamp("2016-01-01 00:06:00", tz=tz),
Timestamp("2016-01-01 00:08:00", tz=tz),
]
).as_unit("ns")
result = rng.round(freq="2min")
tm.assert_index_equal(result, expected_rng)
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
(["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]),
(["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]),
(
["2117-01-01 00:00:45.000000012"],
"floor",
"10ns",
["2117-01-01 00:00:45.000000010"],
),
(
["1823-01-01 00:00:01.000000012"],
"ceil",
"10ns",
["1823-01-01 00:00:01.000000020"],
),
(["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]),
(["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]),
(["2018-01-01 00:15:00"], "ceil", "15min", ["2018-01-01 00:15:00"]),
(["2018-01-01 00:15:00"], "floor", "15min", ["2018-01-01 00:15:00"]),
(["1823-01-01 03:00:00"], "ceil", "3h", ["1823-01-01 03:00:00"]),
(["1823-01-01 03:00:00"], "floor", "3h", ["1823-01-01 03:00:00"]),
(
("NaT", "1823-01-01 00:00:01"),
"floor",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
(
("NaT", "1823-01-01 00:00:01"),
"ceil",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = DatetimeIndex(list(test_input))
func = getattr(dt, rounder)
result = func(freq)
expected = DatetimeIndex(list(expected))
assert expected.equals(result)
@pytest.mark.parametrize(
"start, index_freq, periods",
[("2018-01-01", "12h", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)],
)
@pytest.mark.parametrize(
"round_freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"12h",
"1D",
],
)
def test_round_int64(self, start, index_freq, periods, round_freq):
dt = date_range(start=start, freq=index_freq, periods=periods, unit="ns")
unit = to_offset(round_freq).nanos
# test floor
result = dt.floor(round_freq)
diff = dt.asi8 - result.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), f"floor not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "floor error"
# test ceil
result = dt.ceil(round_freq)
diff = result.asi8 - dt.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), f"ceil not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "ceil error"
# test round
result = dt.round(round_freq)
diff = abs(result.asi8 - dt.asi8)
mod = result.asi8 % unit
assert (mod == 0).all(), f"round not a {round_freq} multiple"
assert (diff <= unit // 2).all(), "round error"
if unit % 2 == 0:
assert (result.asi8[diff == unit // 2] % 2 == 0).all(), (
"round half to even error"
)
| TestDatetimeIndexRound |
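The half-to-even assertion at the end of `test_round_int64` is the subtle part; a quick sketch of the behavior it guarantees (banker's rounding on exact ties):
import pandas as pd

idx = pd.DatetimeIndex(["2021-01-01 00:00:30", "2021-01-01 00:01:30"])
print(idx.round("min"))
# Both inputs sit exactly halfway between minutes; ties go to the even
# minute, so the result is 00:00:00 and 00:02:00 rather than both
# rounding up.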
python | tox-dev__tox | src/tox/config/source/setup_cfg.py | {
"start": 183,
"end": 598
} | class ____(IniSource):
"""Configuration sourced from a tox.ini file."""
CORE_SECTION = IniSection("tox", "tox")
FILENAME = "setup.cfg"
def __init__(self, path: Path) -> None:
super().__init__(path)
if not self._parser.has_section(self.CORE_SECTION.key):
msg = f"section {self.CORE_SECTION.key} not found"
raise ValueError(msg)
__all__ = ("SetupCfg",)
| SetupCfg |
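A sketch of the file shape this source expects (internal tox API, normally instantiated by tox itself; the constructor raises ValueError unless a [tox] section exists):
from pathlib import Path

# Assumed setup.cfg contents:
#   [tox]
#   envlist = py311
#   [testenv]
#   commands = pytest
src = SetupCfg(Path("setup.cfg"))  # parsed via the IniSource base class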
python | ethereum__web3.py | web3/_utils/empty.py | {
"start": 38,
"end": 132
} | class ____:
def __bool__(self) -> Literal[False]:
return False
empty = Empty()
| Empty |
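The point of `Empty` is a falsy sentinel that is still distinguishable from `None`; a minimal sketch of the pattern (`load_default` is a hypothetical fallback):
def connect(provider=empty):
    if provider is empty:          # caller passed nothing at all
        provider = load_default()  # hypothetical default lookup
    elif provider is None:         # caller explicitly opted out
        raise ValueError("provider disabled")
    return provider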
python | langchain-ai__langchain | libs/langchain/tests/unit_tests/llms/fake_llm.py | {
"start": 312,
"end": 1833
} | class ____(LLM):
"""Fake LLM wrapper for testing purposes."""
queries: Mapping | None = None
sequential_responses: bool | None = False
response_index: int = 0
@model_validator(mode="before")
@classmethod
def check_queries_required(cls, values: dict) -> dict:
if values.get("sequential_response") and not values.get("queries"):
msg = "queries is required when sequential_response is set to True"
raise ValueError(msg)
return values
def get_num_tokens(self, text: str) -> int:
"""Return number of tokens."""
return len(text.split())
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake"
@override
def _call(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
if self.sequential_responses:
return self._get_next_response_in_sequence
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return "foo"
return "bar"
@property
def _identifying_params(self) -> dict[str, Any]:
return {}
@property
def _get_next_response_in_sequence(self) -> str:
queries = cast("Mapping", self.queries)
response = queries[list(queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
| FakeLLM |
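A usage sketch (assumes the standard `Runnable.invoke` surface from langchain-core):
llm = FakeLLM(queries={"ping": "pong"})
assert llm.invoke("ping") == "pong"       # answered from the queries mapping

seq = FakeLLM(queries={"a": "first", "b": "second"}, sequential_responses=True)
assert seq.invoke("anything") == "first"  # served in insertion order
assert seq.invoke("anything") == "second"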
python | huggingface__transformers | src/transformers/models/vivit/modeling_vivit.py | {
"start": 3295,
"end": 7752
} | class ____(nn.Module):
"""
Vivit Embeddings.
Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
"""
def __init__(self, config: VivitConfig):
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.patch_embeddings = VivitTubeletEmbeddings(config)
self.position_embeddings = nn.Parameter(
torch.zeros(1, self.patch_embeddings.num_patches + 1, config.hidden_size)
)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.tubelet_size[1:]
self.config = config
# Adapted from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size[0]
new_width = width // self.patch_size[1]
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
batch_size, num_frames, num_channels, height, width = pixel_values.shape
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
cls_tokens = self.cls_token.tile([batch_size, 1, 1])
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
# add positional encoding to each token
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Vivit
| VivitEmbeddings |
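A shape walk-through of `interpolate_pos_encoding` as a standalone sketch (assumed numbers: a pre-trained 14x14 grid of 196 patch positions with hidden size 768, resized to 16x16):
import torch
import torch.nn.functional as F

num_positions, dim = 196, 768
pos = torch.zeros(1, num_positions + 1, dim)       # [CLS] slot + 196 patches
cls_pos, patch_pos = pos[:, :1], pos[:, 1:]
patch_pos = patch_pos.reshape(1, 14, 14, dim).permute(0, 3, 1, 2)
patch_pos = F.interpolate(patch_pos, size=(16, 16), mode="bicubic",
                          align_corners=False)     # (1, 768, 16, 16)
patch_pos = patch_pos.permute(0, 2, 3, 1).view(1, -1, dim)
print(torch.cat((cls_pos, patch_pos), dim=1).shape)  # torch.Size([1, 257, 768])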
python | Pylons__pyramid | tests/test_registry.py | {
"start": 13740,
"end": 13807
} | class ____(Interface):
pass
@implementer(IDummyEvent)
| IDummyEvent |
python | gevent__gevent | src/gevent/libuv/watcher.py | {
"start": 28626,
"end": 28968
} | class ____(check):
_watcher_skip_ffi = True
def __make_cb(self, func):
stop = self.stop
@functools.wraps(func)
def cb(*args):
stop()
return func(*args)
return cb
def start(self, callback, *args):
return check.start(self, self.__make_cb(callback), *args)
| OneShotCheck |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure/blob/resources.py | {
"start": 1245,
"end": 4337
} | class ____(ConfigurableResource):
"""Resource for interacting with Azure Blob Storage.
Examples:
.. code-block:: python
import os
from dagster import Definitions, asset, EnvVar
from dagster_azure.blob import (
AzureBlobStorageResource,
AzureBlobStorageKeyCredential,
AzureBlobStorageDefaultCredential
)
@asset
def my_table(azure_blob_storage: AzureBlobStorageResource):
with azure_blob_storage.get_client() as blob_storage_client:
response = blob_storage_client.list_containers()
Definitions(
assets=[my_table],
resources={
"azure_blob_storage": AzureBlobStorageResource(
account_url=EnvVar("AZURE_BLOB_STORAGE_ACCOUNT_URL"),
credential=AzureBlobStorageDefaultCredential() if os.getenv("DEV") else
AzureBlobStorageKeyCredential(key=EnvVar("AZURE_BLOB_STORAGE_KEY"))
),
},
)
"""
account_url: str = Field(
description=(
"The URL to the blob storage account. Any other entities included"
" in the URL path (e.g. container or blob) will be discarded. This URL can be optionally"
" authenticated with a SAS token."
),
)
credential: Union[
AzureBlobStorageKeyCredential,
AzureBlobStorageSASTokenCredential,
AzureBlobStorageDefaultCredential,
AzureBlobStorageAnonymousCredential,
] = Field(
discriminator="credential_type",
description=(
"The credential used to authenticate to the storage account. One of:"
" AzureBlobStorageSASTokenCredential,"
" AzureBlobStorageKeyCredential,"
" AzureBlobStorageDefaultCredential,"
" AzureBlobStorageAnonymousCredential"
),
)
@classmethod
def _is_dagster_maintained(cls):
return True
def _raw_credential(self) -> Any:
if self.credential.credential_type == "sas":
return self.credential.token
if self.credential.credential_type == "key":
return self.credential.key
if self.credential.credential_type == "default_azure_credential":
return DefaultAzureCredential(**self.credential.kwargs)
if self.credential.credential_type == "anonymous":
return None
raise Exception(
"Invalid credential type - use one of AzureBlobStorageKeyCredential, "
" AzureBlobStorageSASTokenCredential, AzureBlobStorageDefaultCredential,"
" AzureBlobStorageAnonymousCredential"
)
@contextmanager
def get_client(self) -> Generator[BlobServiceClient, None, None]:
service = BlobServiceClient(account_url=self.account_url, credential=self._raw_credential())
try:
yield service
finally:
service.close()
| AzureBlobStorageResource |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-google/llama_index/readers/google/gmail/base.py | {
"start": 286,
"end": 6491
} | class ____(BaseReader, BaseModel):
"""
Gmail reader.
Reads emails
Args:
max_results (int): Defaults to 10.
query (str): Gmail query. Defaults to None.
service (Any): Gmail service. Defaults to None.
        results_per_page (Optional[int]): Max number of results per page. Defaults to None (uses max_results).
use_iterative_parser (bool): Use iterative parser. Defaults to False.
"""
    query: Optional[str] = None
    use_iterative_parser: bool = False
    max_results: int = 10
    service: Any = None
    results_per_page: Optional[int] = None
def load_data(self) -> List[Document]:
"""Load emails from the user's account."""
from googleapiclient.discovery import build
credentials = self._get_credentials()
if not self.service:
self.service = build("gmail", "v1", credentials=credentials)
messages = self.search_messages()
results = []
for message in messages:
text = message.pop("body")
extra_info = message
results.append(Document(text=text, extra_info=extra_info or {}))
return results
def _get_credentials(self) -> Any:
"""
Get valid user credentials from storage.
The file token.json stores the user's access and refresh tokens, and is
created automatically when the authorization flow completes for the first
time.
Returns:
Credentials, the obtained credential.
"""
import os
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
creds = None
if os.path.exists("token.json"):
creds = Credentials.from_authorized_user_file("token.json", SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
"credentials.json", SCOPES
)
creds = flow.run_local_server(port=8080)
# Save the credentials for the next run
with open("token.json", "w") as token:
token.write(creds.to_json())
return creds
def search_messages(self):
query = self.query
max_results = self.max_results
if self.results_per_page:
max_results = self.results_per_page
results = (
self.service.users()
.messages()
.list(userId="me", q=query, maxResults=int(max_results))
.execute()
)
messages = results.get("messages", [])
if len(messages) < self.max_results:
# paginate if there are more results
while "nextPageToken" in results:
page_token = results["nextPageToken"]
results = (
self.service.users()
.messages()
.list(
userId="me",
q=query,
pageToken=page_token,
maxResults=int(max_results),
)
.execute()
)
messages.extend(results["messages"])
if len(messages) >= self.max_results:
break
result = []
try:
for message in messages:
message_data = self.get_message_data(message)
if not message_data:
continue
result.append(message_data)
except Exception as e:
raise Exception("Can't get message data" + str(e))
return result
def get_message_data(self, message):
message_id = message["id"]
message_data = (
self.service.users()
.messages()
.get(format="raw", userId="me", id=message_id)
.execute()
)
if self.use_iterative_parser:
body = self.extract_message_body_iterative(message_data)
else:
body = self.extract_message_body(message_data)
if not body:
return None
# https://developers.google.com/gmail/api/reference/rest/v1/users.messages
return {
"id": message_data["id"],
"threadId": message_data["threadId"],
"snippet": message_data["snippet"],
"internalDate": message_data["internalDate"],
"body": body,
}
def extract_message_body_iterative(self, message: dict):
if message["raw"]:
body = base64.urlsafe_b64decode(message["raw"].encode("utf-8"))
mime_msg = email.message_from_bytes(body)
else:
mime_msg = message
body_text = ""
if mime_msg.get_content_type() == "text/plain":
plain_text = mime_msg.get_payload(decode=True)
charset = mime_msg.get_content_charset("utf-8")
            body_text = plain_text.decode(charset)
elif mime_msg.get_content_maintype() == "multipart":
msg_parts = mime_msg.get_payload()
for msg_part in msg_parts:
body_text += self.extract_message_body_iterative(msg_part)
return body_text
def extract_message_body(self, message: dict):
from bs4 import BeautifulSoup
try:
body = base64.urlsafe_b64decode(message["raw"].encode("utf-8"))
mime_msg = email.message_from_bytes(body)
# If the message body contains HTML, parse it with BeautifulSoup
if "text/html" in mime_msg:
soup = BeautifulSoup(body, "html.parser")
body = soup.get_text()
return body.decode("utf-8")
except Exception as e:
raise Exception("Can't parse message body" + str(e))
if __name__ == "__main__":
reader = GmailReader(query="from:me after:2023-01-01")
print(reader.load_data())
| GmailReader |
python | scrapy__scrapy | tests/AsyncCrawlerProcess/asyncio_deferred_signal.py | {
"start": 519,
"end": 1180
} | class ____(Spider):
name = "url_spider"
start_urls = ["data:,"]
custom_settings = {
"ITEM_PIPELINES": {UppercasePipeline: 100},
}
def parse(self, response):
yield {"url": response.url}
if __name__ == "__main__":
ASYNCIO_EVENT_LOOP: str | None
try:
ASYNCIO_EVENT_LOOP = sys.argv[1]
except IndexError:
ASYNCIO_EVENT_LOOP = None
process = AsyncCrawlerProcess(
settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
}
)
process.crawl(UrlSpider)
process.start()
| UrlSpider |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/lambda4.py | {
"start": 824,
"end": 899
} | class ____(Protocol):
def __call__(self, *p0: str) -> bool: ...
| Callable3 |
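Any callable with a matching variadic signature satisfies this protocol structurally; a one-line sketch:
def any_nonempty(*parts: str) -> bool:
    return any(parts)

checker: Callable3 = any_nonempty  # type-checks: (*str) -> bool matches __call__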
python | huggingface__transformers | src/transformers/models/glm4v/modeling_glm4v.py | {
"start": 36091,
"end": 40975
} | class ____(Glm4vPreTrainedModel):
config: Glm4vTextConfig
input_modalities = ("text",)
def __init__(self, config: Glm4vTextConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Glm4vTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Glm4vTextRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
@check_model_inputs()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
# torch.jit.trace() doesn't support cache objects in the output
if use_cache and past_key_values is None and not torch.jit.is_tracing():
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
# the hard coded `3` is for temporal, height and width.
if position_ids is None:
position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
elif position_ids.ndim == 2:
position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
# NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions
# where each dim indicates visual spatial positions for temporal/height/width grids.
# There are two scenarios when FA2-like packed masking might be activated.
# 1. User specifically passed packed `position_ids` and no attention mask.
# In this case we expect the useer to create correct position ids for all 3 grids
# and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
# 2. User runs forward with no attention mask and no position ids. In this case, position ids
# are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are
# prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass
# text-only positions will cause incorrect mask construction, do not change `prepare_input_for_generation`
if position_ids.ndim == 3 and position_ids.shape[0] == 4:
text_position_ids = position_ids[0]
position_ids = position_ids[1:]
else:
# If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
text_position_ids = None
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": text_position_ids,
}
# Create the masks
causal_mask = create_causal_mask(**mask_kwargs)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
| Glm4vTextModel |
python | plotly__plotly.py | plotly/graph_objs/bar/_insidetextfont.py | {
"start": 233,
"end": 17164
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "bar"
_path_str = "bar.insidetextfont"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Insidetextfont object
Sets the font used for `text` lying inside the bar.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.bar.Insidetextfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Insidetextfont
"""
super().__init__("insidetextfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.bar.Insidetextfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.Insidetextfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Insidetextfont |
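A usage sketch for the generated class above (standard plotly.graph_objects API; the property can also be passed as a plain dict):
import plotly.graph_objects as go

fig = go.Figure(
    go.Bar(
        x=["a", "b", "c"],
        y=[3, 5, 2],
        text=["3", "5", "2"],
        textposition="inside",
        insidetextfont=dict(family="Arial", size=16, color="white"),
    )
)
fig.write_html("bar.html")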
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_emr_serverless_job.py | {
"start": 1001,
"end": 1790
} | class ____:
def setup_method(self):
self.app_id = "vzwemreks"
self.job_run_id = "job1234"
self.sensor = EmrServerlessJobSensor(
task_id="test_emrcontainer_sensor",
application_id=self.app_id,
job_run_id=self.job_run_id,
aws_conn_id="aws_default",
)
def set_get_job_run_return_value(self, return_value: dict[str, str]):
self.mock_hook = MagicMock()
self.mock_hook.conn.get_job_run.return_value = return_value
self.sensor.hook = self.mock_hook
def assert_get_job_run_was_called_once_with_app_and_run_id(self):
self.mock_hook.conn.get_job_run.assert_called_once_with(
applicationId=self.app_id, jobRunId=self.job_run_id
)
| TestEmrServerlessJobSensor |
python | astropy__astropy | astropy/coordinates/spectral_quantity.py | {
"start": 582,
"end": 12384
} | class ____(SpecificTypeQuantity):
"""
One or more value(s) with spectral units.
The spectral units should be those for frequencies, wavelengths, energies,
wavenumbers, or velocities (interpreted as Doppler velocities relative to a
rest spectral value). The advantage of using this class over the regular
`~astropy.units.Quantity` class is that in `SpectralQuantity`, the
``u.spectral`` equivalency is enabled by default (allowing automatic
conversion between spectral units), and a preferred Doppler rest value and
convention can be stored for easy conversion to/from velocities.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralQuantity`
Spectral axis data values.
unit : unit-like
Unit for the given data.
doppler_rest : `~astropy.units.Quantity` ['speed'], optional
The rest value to use for conversions from/to velocities
doppler_convention : str, optional
The convention to use when converting the spectral data to/from
velocities.
"""
_equivalent_unit = SPECTRAL_UNITS
_include_easy_conversion_members = True
def __new__(
cls, value, unit=None, doppler_rest=None, doppler_convention=None, **kwargs
):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# If we're initializing from an existing SpectralQuantity, keep any
# parameters that aren't being overridden
if doppler_rest is None:
doppler_rest = getattr(value, "doppler_rest", None)
if doppler_convention is None:
doppler_convention = getattr(value, "doppler_convention", None)
obj._doppler_rest = doppler_rest
obj._doppler_convention = doppler_convention
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._doppler_rest = getattr(obj, "_doppler_rest", None)
self._doppler_convention = getattr(obj, "_doppler_convention", None)
def __quantity_subclass__(self, unit):
# Always default to just returning a Quantity, unless we explicitly
# choose to return a SpectralQuantity - even if the units match, we
# want to avoid doing things like adding two SpectralQuantity instances
# together and getting a SpectralQuantity back
if unit is self.unit:
return SpectralQuantity, True
else:
return Quantity, False
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# We always return Quantity except in a few specific cases
result = super().__array_ufunc__(function, method, *inputs, **kwargs)
if (
(
function is np.multiply
or (function is np.true_divide and inputs[0] is self)
)
and result.unit == self.unit
) or (
function in (np.minimum, np.maximum, np.fmax, np.fmin)
and method in ("reduce", "reduceat")
):
result = result.view(self.__class__)
result.__array_finalize__(self)
else:
if result is self:
raise TypeError(
"Cannot store the result of this operation in"
f" {self.__class__.__name__}"
)
if result.dtype.kind == "b":
result = result.view(np.ndarray)
else:
result = result.view(Quantity)
return result
@property
def doppler_rest(self):
"""
The rest value of the spectrum used for transformations to/from
velocity space.
Returns
-------
`~astropy.units.Quantity` ['speed']
Rest value as an astropy `~astropy.units.Quantity` object.
"""
return self._doppler_rest
@doppler_rest.setter
@quantity_input(value=SPECTRAL_UNITS)
def doppler_rest(self, value):
"""
New rest value needed for velocity-space conversions.
Parameters
----------
value : `~astropy.units.Quantity` ['speed']
Rest value.
"""
if self._doppler_rest is not None:
raise AttributeError(
"doppler_rest has already been set, and cannot be changed. Use the"
" ``to`` method to convert the spectral values(s) to use a different"
" rest value"
)
self._doppler_rest = value
@property
def doppler_convention(self):
"""
The defined convention for conversions to/from velocity space.
Returns
-------
str
One of 'optical', 'radio', or 'relativistic' representing the
equivalency used in the unit conversions.
"""
return self._doppler_convention
@doppler_convention.setter
def doppler_convention(self, value):
"""
New velocity convention used for velocity space conversions.
Parameters
----------
        value : str
            One of 'optical', 'radio', or 'relativistic'.
Notes
-----
More information on the equations dictating the transformations can be
found in the astropy documentation [1]_.
References
----------
.. [1] Astropy documentation: https://docs.astropy.org/en/stable/units/equivalencies.html#spectral-doppler-equivalencies
"""
if self._doppler_convention is not None:
raise AttributeError(
"doppler_convention has already been set, and cannot be changed. Use"
" the ``to`` method to convert the spectral values(s) to use a"
" different convention"
)
if value is not None and value not in DOPPLER_CONVENTIONS:
raise ValueError(
"doppler_convention should be one of"
f" {'/'.join(sorted(DOPPLER_CONVENTIONS))}"
)
self._doppler_convention = value
@quantity_input(doppler_rest=SPECTRAL_UNITS)
def to(self, unit, equivalencies=[], doppler_rest=None, doppler_convention=None):
"""
Return a new `~astropy.coordinates.SpectralQuantity` object with the specified unit.
By default, the ``spectral`` equivalency will be enabled, as well as
one of the Doppler equivalencies if converting to/from velocities.
Parameters
----------
unit : unit-like
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package, and should be a spectral unit.
equivalencies : list of `~astropy.units.equivalencies.Equivalency`, optional
A list of equivalence pairs to try if the units are not
directly convertible (along with spectral).
See :ref:`astropy:unit_equivalencies`.
If not provided or ``[]``, spectral equivalencies will be used.
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
doppler_rest : `~astropy.units.Quantity` ['speed'], optional
The rest value used when converting to/from velocities. This will
also be set at an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
doppler_convention : {'relativistic', 'optical', 'radio'}, optional
The Doppler convention used when converting to/from velocities.
This will also be set at an attribute on the output
`~astropy.coordinates.SpectralQuantity`.
Returns
-------
`SpectralQuantity`
New spectral coordinate object with data converted to the new unit.
"""
# Make sure units can be passed as strings
unit = Unit(unit)
# If equivalencies is explicitly set to None, we should just use the
# default Quantity.to with equivalencies also set to None
if equivalencies is None:
result = super().to(unit, equivalencies=None)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
# FIXME: need to consider case where doppler equivalency is passed in
# equivalencies list, or if the u.spectral equivalency is already passed
if doppler_rest is None:
doppler_rest = self._doppler_rest
if doppler_convention is None:
doppler_convention = self._doppler_convention
elif doppler_convention not in DOPPLER_CONVENTIONS:
raise ValueError(
"doppler_convention should be one of"
f" {'/'.join(sorted(DOPPLER_CONVENTIONS))}"
)
if self.unit.is_equivalent(KMS) and unit.is_equivalent(KMS):
# Special case: if the current and final units are both velocity,
# and either the rest value or the convention are different, we
# need to convert back to frequency temporarily.
if doppler_convention is not None and self._doppler_convention is None:
raise ValueError("Original doppler_convention not set")
if doppler_rest is not None and self._doppler_rest is None:
raise ValueError("Original doppler_rest not set")
if doppler_rest is None and doppler_convention is None:
result = super().to(unit, equivalencies=equivalencies)
result = result.view(self.__class__)
result.__array_finalize__(self)
return result
elif (doppler_rest is None) is not (doppler_convention is None):
raise ValueError(
"Either both or neither doppler_rest and doppler_convention should"
" be defined for velocity conversions"
)
vel_equiv1 = DOPPLER_CONVENTIONS[self._doppler_convention](
self._doppler_rest
)
freq = super().to(si.Hz, equivalencies=equivalencies + vel_equiv1)
vel_equiv2 = DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
result = freq.to(unit, equivalencies=equivalencies + vel_equiv2)
else:
additional_equivalencies = eq.spectral()
if self.unit.is_equivalent(KMS) or unit.is_equivalent(KMS):
if doppler_convention is None:
raise ValueError(
"doppler_convention not set, cannot convert to/from velocities"
)
if doppler_rest is None:
raise ValueError(
"doppler_rest not set, cannot convert to/from velocities"
)
additional_equivalencies = (
additional_equivalencies
+ DOPPLER_CONVENTIONS[doppler_convention](doppler_rest)
)
result = super().to(
unit, equivalencies=equivalencies + additional_equivalencies
)
# Since we have to explicitly specify when we want to keep this as a
# SpectralQuantity, we need to convert it back from a Quantity to
# a SpectralQuantity here. Note that we don't use __array_finalize__
# here since we might need to set the output doppler convention and
# rest based on the parameters passed to 'to'
result = result.view(self.__class__)
result.__array_finalize__(self)
result._doppler_convention = doppler_convention
result._doppler_rest = doppler_rest
return result
def to_value(self, unit=None, *args, **kwargs):
if unit is None:
return self.view(np.ndarray)
return self.to(unit, *args, **kwargs).value
| SpectralQuantity |
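For context, a minimal sketch of the velocity equivalencies the `to` method above orchestrates, written against plain astropy.units rather than SpectralQuantity itself; the rest frequency below is only an illustrative value:

import astropy.units as u

rest = 115.2712 * u.GHz                     # illustrative rest frequency
freq = 115.2712 * u.GHz
# The three conventions map onto u.doppler_radio / _optical / _relativistic.
v_radio = freq.to(u.km / u.s, equivalencies=u.doppler_radio(rest))
v_optical = freq.to(u.km / u.s, equivalencies=u.doppler_optical(rest))
print(v_radio, v_optical)                   # both 0 km/s at the rest frequency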
python | langchain-ai__langchain | libs/langchain/langchain_classic/retrievers/document_compressors/cross_encoder.py | {
"start": 38,
"end": 362
} | class ____(ABC):
"""Interface for cross encoder models."""
@abstractmethod
def score(self, text_pairs: list[tuple[str, str]]) -> list[float]:
"""Score pairs' similarity.
Args:
text_pairs: List of pairs of texts.
Returns:
List of scores.
"""
| BaseCrossEncoder |
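A minimal sketch of implementing the interface above with a sentence-transformers cross-encoder; the adapter class is hypothetical and the model name is a common public checkpoint, not anything the source prescribes:

from sentence_transformers import CrossEncoder

class STCrossEncoder(BaseCrossEncoder):
    """Hypothetical adapter: delegates scoring to a HF cross-encoder."""

    def __init__(self, model_name: str = "cross-encoder/ms-marco-MiniLM-L-6-v2"):
        self.model = CrossEncoder(model_name)

    def score(self, text_pairs: list[tuple[str, str]]) -> list[float]:
        # predict() returns one relevance score per (text_a, text_b) pair
        return [float(s) for s in self.model.predict(text_pairs)]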
python | langchain-ai__langchain | libs/langchain/langchain_classic/evaluation/string_distance/base.py | {
"start": 4816,
"end": 9682
} | class ____(StringEvaluator, _RapidFuzzChainMixin):
"""Compute string distances between the prediction and the reference.
Examples:
----------
>>> from langchain_classic.evaluation import StringDistanceEvalChain
>>> evaluator = StringDistanceEvalChain()
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="Mindy is the CEO",
)
Using the `load_evaluator` function:
>>> from langchain_classic.evaluation import load_evaluator
>>> evaluator = load_evaluator("string_distance")
>>> evaluator.evaluate_strings(
prediction="The answer is three",
reference="three",
)
"""
@property
def requires_input(self) -> bool:
"""This evaluator does not require input."""
return False
@property
def requires_reference(self) -> bool:
"""This evaluator does not require a reference."""
return True
@property
def input_keys(self) -> list[str]:
"""Get the input keys.
Returns:
The input keys.
"""
return ["reference", "prediction"]
@property
def evaluation_name(self) -> str:
"""Get the evaluation name.
Returns:
The evaluation name.
"""
return f"{self.distance.value}_distance"
@override
def _call(
self,
inputs: dict[str, Any],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
"""Compute the string distance between the prediction and the reference.
Args:
inputs: The input values.
run_manager: The callback manager.
Returns:
The evaluation results containing the score.
"""
return {"score": self.compute_metric(inputs["reference"], inputs["prediction"])}
@override
async def _acall(
self,
inputs: dict[str, Any],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
"""Compute the string distance between the prediction and the reference.
Args:
inputs: The input values.
run_manager: The callback manager.
Returns:
The evaluation results containing the score.
"""
return {"score": self.compute_metric(inputs["reference"], inputs["prediction"])}
@override
def _evaluate_strings(
self,
*,
prediction: str,
reference: str | None = None,
input: str | None = None,
callbacks: Callbacks = None,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Evaluate the string distance between the prediction and the reference.
Args:
prediction: The prediction string.
reference: The reference string.
input: The input string.
callbacks: The callbacks to use.
tags: The tags to apply.
metadata: The metadata to use.
include_run_info: Whether to include run info in the output.
**kwargs: Additional keyword arguments.
Returns:
The evaluation results containing the score.
"""
result = self(
inputs={"prediction": prediction, "reference": reference},
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
@override
async def _aevaluate_strings(
self,
*,
prediction: str,
reference: str | None = None,
input: str | None = None,
callbacks: Callbacks = None,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Evaluate the string distance between the prediction and the reference.
Args:
prediction: The prediction string.
reference: The reference string.
input: The input string.
callbacks: The callbacks to use.
tags: The tags to apply.
metadata: The metadata to apply.
include_run_info: Whether to include run info in the output.
**kwargs: Additional keyword arguments.
Returns:
The evaluation results containing the score.
"""
result = await self.acall(
inputs={"prediction": prediction, "reference": reference},
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
| StringDistanceEvalChain |
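For reference, a sketch of the kind of rapidfuzz call such a chain ultimately delegates to (a normalized distance of 0 means identical strings):

from rapidfuzz.distance import Levenshtein

score = Levenshtein.normalized_distance("Mindy is the CTO", "Mindy is the CEO")
print(score)  # small but nonzero: a single substituted character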
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E743.py | {
"start": 40,
"end": 99
} | class ____:
def O(self):
pass
def x():
pass
| X |
python | getsentry__sentry | src/sentry/web/frontend/shared_group_details.py | {
"start": 305,
"end": 1273
} | class ____(GenericReactPageView):
def meta_tags(
self, request: HttpRequest, *, share_id: str = "", **kwargs: Any
) -> dict[str, str]:
org_slug = getattr(request, "subdomain", None)
if org_slug:
group = issue_service.get_shared_for_org(slug=org_slug, share_id=share_id)
else:
# Backwards compatibility for Self-hosted and single tenants
group = issue_service.get_shared_for_region(
region_name=settings.SENTRY_MONOLITH_REGION, share_id=share_id
)
if not group:
return {}
return {
"og:type": "website",
"og:title": group.title,
"og:description": group.message,
"og:site_name": "Sentry",
"twitter:card": "summary",
"twitter:site": "@getsentry",
"twitter:title": group.title,
"twitter:description": group.message,
}
| SharedGroupDetailsView |
python | facebookresearch__faiss | tests/test_fast_scan.py | {
"start": 5773,
"end": 8064
} | class ____: # (unittest.TestCase):
def do_loop5_kernel(self, nq, bb):
""" unit test for the accumulation kernel """
nb = bb * 32 # database size
nsp = 24 # number of sub-quantizers
rs = np.random.RandomState(123)
codes = rs.randint(256, size=(nb, nsp // 2)).astype('uint8')
LUT = rs.randint(256, size=(nq, nsp, 16)).astype('uint8')
accu_ref = reference_accu(codes, LUT)
def to_A(x):
return faiss.array_to_AlignedTable(x.ravel())
sp = faiss.swig_ptr
LUT_a = faiss.AlignedTableUint8(LUT.size)
faiss.pq4_pack_LUT(
nq, nsp, sp(LUT),
LUT_a.get()
)
codes_a = faiss.AlignedTableUint8(codes.size)
faiss.pq4_pack_codes(
sp(codes),
nb, nsp, nb, nb, nsp,
codes_a.get()
)
accu_a = faiss.AlignedTableUint16(nq * nb)
accu_a.clear()
faiss.loop5_kernel_accumulate_1_block_to_mem(
nq, nb, nsp, codes_a.get(), LUT_a.get(), accu_a.get()
)
accu = faiss.AlignedTable_to_array(accu_a).reshape(nq, nb)
np.testing.assert_array_equal(accu_ref, accu)
def test_11(self):
self.do_loop5_kernel(1, 1)
def test_21(self):
self.do_loop5_kernel(2, 1)
def test_12(self):
self.do_loop5_kernel(1, 2)
def test_22(self):
self.do_loop5_kernel(2, 2)
##########################################################
# Tests for various IndexPQFastScan implementations
##########################################################
def verify_with_draws(testcase, Dref, Iref, Dnew, Inew):
""" verify a list of results where there are draws in the distances (because
they are integer). """
np.testing.assert_array_almost_equal(Dref, Dnew, decimal=5)
# here we have to be careful because of draws
for i in range(len(Iref)):
if np.all(Iref[i] == Inew[i]): # easy case
continue
# we can deduce nothing about the last line
skip_dis = Dref[i, -1]
for dis in np.unique(Dref):
if dis == skip_dis:
continue
mask = Dref[i, :] == dis
testcase.assertEqual(set(Iref[i, mask]), set(Inew[i, mask]))
| ThisIsNotATestLoop5 |
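`reference_accu` is called in the test above but not shown in this span; a plausible NumPy reconstruction, assuming the low nibble of each packed byte holds the even sub-quantizer's 4-bit code and the high nibble the odd one's:

import numpy as np

def reference_accu(codes, LUT):
    nq, nsp, _ = LUT.shape            # LUT: (nq, nsp, 16) uint8
    nb = codes.shape[0]               # codes: (nb, nsp // 2), two 4-bit codes per byte
    accu = np.zeros((nq, nb), dtype="uint16")
    for i in range(nb):
        for sp in range(0, nsp, 2):
            byte = int(codes[i, sp // 2])
            accu[:, i] += LUT[:, sp, byte & 15]      # low nibble: even sub-quantizer
            accu[:, i] += LUT[:, sp + 1, byte >> 4]  # high nibble: odd sub-quantizer
    return accu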
python | huggingface__transformers | src/transformers/models/electra/modeling_electra.py | {
"start": 29400,
"end": 34642
} | class ____(nn.Module):
r"""
Compute a single vector summary of a sequence's hidden states.
Args:
config ([`ElectraConfig`]):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:
- `"last"` -- Take the last token hidden state (like XLNet)
- `"first"` -- Take the first token hidden state (like Bert)
- `"mean"` -- Take the mean of all tokens hidden states
- `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- `"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
(otherwise to `config.hidden_size`).
- **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
another string or `None` will add no activation.
- **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
- **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.
"""
def __init__(self, config: ElectraConfig):
super().__init__()
self.summary_type = getattr(config, "summary_type", "last")
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = nn.Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
activation_string = getattr(config, "summary_activation", None)
self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity()
self.first_dropout = nn.Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = nn.Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(
self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
) -> torch.FloatTensor:
"""
Compute a single vector summary of a sequence's hidden states.
Args:
hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`):
The hidden states of the last layer.
cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token.
Returns:
`torch.FloatTensor`: The summary of the sequence hidden states.
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(
hidden_states[..., :1, :],
hidden_states.shape[-2] - 1,
dtype=torch.long,
)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
@auto_docstring(
custom_intro="""
ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
| ElectraSequenceSummary |
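A quick smoke test of the summary head above; the ad-hoc namespace stands in for an ElectraConfig and carries only the attributes the constructor actually reads:

import torch
from types import SimpleNamespace

config = SimpleNamespace(
    summary_type="mean",
    summary_use_proj=True,
    summary_proj_to_labels=False,
    hidden_size=32,
    summary_activation="tanh",
    summary_first_dropout=0.1,
    summary_last_dropout=0.1,
)
summary = ElectraSequenceSummary(config)
out = summary(torch.randn(2, 7, 32))  # (batch, seq, hidden) -> (batch, hidden)
print(out.shape)                      # torch.Size([2, 32])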
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cli/configs/bigquery.py | {
"start": 513,
"end": 5575
} | class ____(BaseTargetConfigs):
"""
Target configs contain credentials and
settings specific to BigQuery.
To find valid keys, head to the [BigQuery Profile](
https://docs.getdbt.com/reference/warehouse-profiles/bigquery-profile)
page.
Attributes:
credentials: The credentials to use to authenticate; if there are
duplicate keys between credentials and TargetConfigs,
e.g. schema, an error will be raised.
Examples:
Load stored BigQueryTargetConfigs.
```python
from prefect_dbt.cli.configs import BigQueryTargetConfigs
bigquery_target_configs = BigQueryTargetConfigs.load("BLOCK_NAME")
```
Instantiate BigQueryTargetConfigs.
```python
from prefect_dbt.cli.configs import BigQueryTargetConfigs
from prefect_gcp.credentials import GcpCredentials
credentials = GcpCredentials.load("BLOCK-NAME-PLACEHOLDER")
target_configs = BigQueryTargetConfigs(
schema="schema", # also known as dataset
credentials=credentials,
)
```
"""
_block_type_name = "dbt CLI BigQuery Target Configs"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
_description = "dbt CLI target configs containing credentials and settings, specific to BigQuery." # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
type: Literal["bigquery"] = Field(
default="bigquery", description="The type of target."
)
project: Optional[str] = Field(default=None, description="The project to use.")
credentials: GcpCredentials = Field(
default_factory=GcpCredentials,
description="The credentials to use to authenticate.",
)
def get_configs(self) -> Dict[str, Any]:
"""
Returns the dbt configs specific to BigQuery profile.
Returns:
A configs JSON.
"""
# since GcpCredentials will always define a project
self_copy = self.copy()
if self_copy.project is not None:
self_copy.credentials.project = None
all_configs_json = self._populate_configs_json(
{}, self_copy.model_fields, model=self_copy
)
# decouple prefect-gcp from prefect-dbt
# by mapping all the keys dbt gcp accepts
# https://docs.getdbt.com/reference/warehouse-setups/bigquery-setup
rename_keys = {
# dbt
"type": "type",
"schema": "schema",
"threads": "threads",
# general
"dataset": "schema",
"method": "method",
"project": "project",
# service-account
"service_account_file": "keyfile",
# service-account json
"service_account_info": "keyfile_json",
# oauth secrets
"refresh_token": "refresh_token",
"client_id": "client_id",
"client_secret": "client_secret",
"token_uri": "token_uri",
# optional
"priority": "priority",
"timeout_seconds": "timeout_seconds",
"location": "location",
"maximum_bytes_billed": "maximum_bytes_billed",
"scopes": "scopes",
"impersonate_service_account": "impersonate_service_account",
"execution_project": "execution_project",
}
configs_json = {}
extras = self.extras or {}
for key in all_configs_json.keys():
if key not in rename_keys and key not in extras:
# skip invalid keys
continue
# rename key to something dbt profile expects
dbt_key = rename_keys.get(key) or key
configs_json[dbt_key] = all_configs_json[key]
if "keyfile_json" in configs_json:
configs_json["method"] = "service-account-json"
elif "keyfile" in configs_json:
configs_json["method"] = "service-account"
configs_json["keyfile"] = str(configs_json["keyfile"])
else:
configs_json["method"] = "oauth-secrets"
# through gcloud application-default login
google_credentials = (
self_copy.credentials.get_credentials_from_service_account()
)
if hasattr(google_credentials, "token"):
request = Request()
google_credentials.refresh(request)
configs_json["token"] = google_credentials.token
else:
for key in ("refresh_token", "client_id", "client_secret", "token_uri"):
configs_json[key] = getattr(google_credentials, key)
if "project" not in configs_json:
raise ValueError(
"The keyword, project, must be provided in either "
"GcpCredentials or BigQueryTargetConfigs"
)
return configs_json
| BigQueryTargetConfigs |
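For orientation, the rough shape of the dict `get_configs` hands to dbt for a service-account keyfile setup; every value below is a placeholder, not a prescribed default:

configs = {
    "type": "bigquery",
    "method": "service-account",
    "project": "my-gcp-project",   # placeholder
    "schema": "my_dataset",        # placeholder (dbt's name for the dataset)
    "threads": 4,
    "keyfile": "/path/to/service-account.json",
}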
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 28069,
"end": 28717
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.relu = torch.nn.ReLU()
self.layer = torch.nn.Sequential(
collections.OrderedDict(
[
("linear1", torch.nn.Linear(10, 20)),
("relu1", self.relu),
("linear2", torch.nn.Linear(20, 20)),
("relu2", self.relu),
("linear3", torch.nn.Linear(20, 10)),
("relu3", self.relu),
]
)
)
def forward(self, x):
return self.layer(x)
| SequentialWithDuplicatedModule2 |
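What the fixture exercises: relu1/relu2/relu3 all alias a single ReLU instance, yet the module still runs end to end. A short check:

import torch

m = SequentialWithDuplicatedModule2()
assert m.layer.relu1 is m.layer.relu3      # one ReLU object registered three times
print(m(torch.randn(4, 10)).shape)         # torch.Size([4, 10])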
python | rapidsai__cudf | python/cudf/cudf/core/buffer/spillable_buffer.py | {
"start": 2399,
"end": 12957
} | class ____(BufferOwner):
"""A Buffer that supports spilling memory off the GPU to avoid OOMs.
This buffer supports spilling the represented data to host memory.
Spilling can be done manually by calling `.spill(target="cpu")` but
usually the associated spilling manager triggers spilling based on current
device memory usage see `cudf.core.buffer.spill_manager.SpillManager`.
Unspill is triggered automatically when accessing the data of the buffer.
Whether the buffer is spillable depends on its "exposed" status: we say
that the buffer has been exposed if the device pointer
(integer or void*) has been accessed outside of SpillableBufferOwner.
In this case, we cannot invalidate the device pointer by moving the data
to host.
A buffer can be exposed permanently at creation or by accessing the `.ptr`
property. To avoid this, one can use `.get_ptr()` instead, which supports
exposing the buffer temporarily.
Use the factory function `as_buffer` to create a SpillableBufferOwner
instance.
"""
lock: RLock
_spill_locks: weakref.WeakSet
_last_accessed: float
_ptr_desc: dict[str, Any]
_manager: SpillManager
def _finalize_init(self, ptr_desc: dict[str, Any]) -> None:
"""Finish initialization of the spillable buffer
This implements the common initialization that `from_device_memory`
and `from_host_memory` are missing.
Parameters
----------
ptr_desc : dict
Description of the memory.
"""
from cudf.core.buffer.spill_manager import get_global_manager
self.lock = RLock()
self._spill_locks = weakref.WeakSet()
self._last_accessed = time.monotonic()
self._ptr_desc = ptr_desc
manager = get_global_manager()
if manager is None:
raise ValueError(
f"cannot create {self.__class__} without "
"a global spill manager"
)
self._manager = manager
self._manager.add(self)
@classmethod
def from_device_memory(cls, data: Any, exposed: bool) -> Self:
"""Create a spillabe buffer from device memory.
No data is being copied.
Parameters
----------
data : device-buffer-like
An object implementing the CUDA Array Interface.
exposed : bool
Mark the buffer as permanently exposed (unspillable).
Returns
-------
SpillableBufferOwner
Buffer representing the same device memory as `data`
"""
ret = super().from_device_memory(data, exposed=exposed)
ret._finalize_init(ptr_desc={"type": "gpu"})
return ret
@classmethod
def from_host_memory(cls, data: Any) -> Self:
"""Create a spillabe buffer from host memory.
Data must implement `__array_interface__`, the buffer protocol, and/or
be convertible to a buffer object using `numpy.asanyarray()`
The new buffer is marked as spilled to host memory already.
Raises ValueError if array isn't C-contiguous.
Parameters
----------
data : Any
An object that represents host memory.
Returns
-------
SpillableBufferOwner
Buffer representing a copy of `data`.
"""
# Convert to a memoryview using numpy array, this will not copy data
# in most cases.
data = memoryview(numpy.asanyarray(data))
if not data.c_contiguous:
raise ValueError("Buffer data must be C-contiguous")
data = data.cast("B") # Make sure itemsize==1
# Create an already spilled buffer
ret = cls(ptr=0, size=data.nbytes, owner=None, exposed=False)
ret._finalize_init(ptr_desc={"type": "cpu", "memoryview": data})
return ret
@property
def is_spilled(self) -> bool:
return self._ptr_desc["type"] != "gpu"
def spill(self, target: str = "cpu") -> None:
"""Spill or un-spill this buffer in-place
Parameters
----------
target : str
The target of the spilling.
"""
time_start = time.perf_counter()
with self.lock:
ptr_type = self._ptr_desc["type"]
if ptr_type == target:
return
if not self.spillable:
raise ValueError(
f"Cannot in-place move an unspillable buffer: {self}"
)
if (ptr_type, target) == ("gpu", "cpu"):
with nvtx.annotate(
message="SpillDtoH",
color=_get_color_for_nvtx("SpillDtoH"),
domain="cudf_python-spill",
):
host_mem = host_memory_allocation(self.size)
rmm.pylibrmm.device_buffer.copy_ptr_to_host(
self._ptr, host_mem
)
self._ptr_desc["memoryview"] = host_mem
self._ptr = 0
self._owner = None
elif (ptr_type, target) == ("cpu", "gpu"):
# Notice, this operation is prone to deadlock because the RMM
# allocation might trigger spilling-on-demand which in turn
# trigger a new call to this buffer's `spill()`.
# Therefore, it is important that spilling-on-demand doesn't
# try to unspill an already locked buffer!
with nvtx.annotate(
message="SpillHtoD",
color=_get_color_for_nvtx("SpillHtoD"),
domain="cudf_python-spill",
):
dev_mem = rmm.DeviceBuffer.to_device(
self._ptr_desc.pop("memoryview")
)
self._ptr = dev_mem.ptr
self._owner = dev_mem
assert self._size == dev_mem.size
else:
# TODO: support moving to disk
raise ValueError(f"Unknown target: {target}")
self._ptr_desc["type"] = target
time_end = time.perf_counter()
self._manager.statistics.log_spill(
src=ptr_type,
dst=target,
nbytes=self.size,
time=time_end - time_start,
)
def mark_exposed(self) -> None:
"""Mark the buffer as "exposed" and make it unspillable permanently.
This also unspills the buffer (unspillable buffers cannot be spilled!).
"""
self._manager.spill_to_device_limit()
with self.lock:
if not self.exposed:
self._manager.statistics.log_expose(self)
self.spill(target="gpu")
super().mark_exposed()
self._last_accessed = time.monotonic()
def spill_lock(self, spill_lock: SpillLock) -> None:
"""Spill lock the buffer
Mark the buffer as unspillable while `spill_lock` is alive,
which is tracked by monitoring a weakref to `spill_lock`.
Parameters
----------
spill_lock : SpillLock
The object that defines the scope of the lock.
"""
with self.lock:
self.spill(target="gpu")
self._spill_locks.add(spill_lock)
def get_ptr(self, *, mode: Literal["read", "write"]) -> int:
"""Get a device pointer to the memory of the buffer.
If this is called within an `acquire_spill_lock` context,
a reference to this buffer is added to spill_lock, which
disable spilling of this buffer while in the context.
If this is *not* called within a `acquire_spill_lock` context,
this buffer is marked as unspillable permanently.
Returns
-------
int
The device pointer as an integer
"""
from cudf.core.buffer.utils import get_spill_lock
spill_lock = get_spill_lock()
if spill_lock is None:
self.mark_exposed()
else:
self.spill_lock(spill_lock)
self._last_accessed = time.monotonic()
return self._ptr
def memory_info(self) -> tuple[int, int, str]:
"""Get pointer, size, and device type of this buffer.
Warning, it is not safe to access the pointer value without
spill lock the buffer manually. This method neither exposes
nor spill locks the buffer.
Return
------
int
The memory pointer as an integer (device or host memory)
int
The size of the memory in bytes
str
The device type as a string ("cpu" or "gpu")
"""
if self._ptr_desc["type"] == "gpu":
ptr = self._ptr
elif self._ptr_desc["type"] == "cpu":
ptr = numpy.array(
self._ptr_desc["memoryview"], copy=False
).__array_interface__["data"][0]
return (ptr, self.nbytes, self._ptr_desc["type"])
@property
def spillable(self) -> bool:
return not self.exposed and len(self._spill_locks) == 0
@property
def last_accessed(self) -> float:
return self._last_accessed
@property
def __cuda_array_interface__(self) -> dict:
return {
"data": DelayedPointerTuple(self),
"shape": (self.size,),
"strides": None,
"typestr": "|u1",
"version": 0,
}
def memoryview(
self, *, offset: int = 0, size: int | None = None
) -> memoryview:
size = self._size if size is None else size
with self.lock:
if self.spillable:
self.spill(target="cpu")
return self._ptr_desc["memoryview"][offset : offset + size]
else:
assert self._ptr_desc["type"] == "gpu"
ret = host_memory_allocation(size)
rmm.pylibrmm.device_buffer.copy_ptr_to_host(
self._ptr + offset, ret
)
return ret
def __str__(self) -> str:
if self._ptr_desc["type"] != "gpu":
ptr_info = str(self._ptr_desc)
else:
ptr_info = str(hex(self._ptr))
return (
f"<{self.__class__.__name__} size={format_bytes(self._size)} "
f"spillable={self.spillable} exposed={self.exposed} "
f"num-spill-locks={len(self._spill_locks)} "
f"ptr={ptr_info} owner={self._owner!r}>"
)
| SpillableBufferOwner |
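The spill-lock mechanism above in miniature: a buffer is unspillable exactly while at least one lock object is alive, tracked through weak references. A self-contained sketch of just that pattern (not the cudf API itself):

import weakref

class SpillLock:
    pass

class MiniBuffer:
    def __init__(self):
        self._spill_locks = weakref.WeakSet()

    def spill_lock(self, lock):
        self._spill_locks.add(lock)

    @property
    def spillable(self):
        return len(self._spill_locks) == 0

buf = MiniBuffer()
lock = SpillLock()
buf.spill_lock(lock)
assert not buf.spillable   # locked while `lock` is alive
del lock                   # lock dies (CPython collects immediately)...
assert buf.spillable       # ...and the buffer becomes spillable again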
python | pennersr__django-allauth | allauth/socialaccount/providers/clever/views.py | {
"start": 280,
"end": 1703
} | class ____(OAuth2Adapter):
provider_id = "clever"
access_token_url = "https://clever.com/oauth/tokens" # nosec
authorize_url = "https://clever.com/oauth/authorize"
identity_url = "https://api.clever.com/v3.0/me"
user_details_url = "https://api.clever.com/v3.0/users"
def complete_login(self, request, app, token, **kwargs):
extra_data = self.get_data(token.token)
return self.get_provider().sociallogin_from_response(request, extra_data)
def get_data(self, token):
# Verify the user first
resp = (
get_adapter()
.get_requests_session()
.get(
self.identity_url, headers={"Authorization": "Bearer {}".format(token)}
)
)
if resp.status_code != HTTPStatus.OK:
raise OAuth2Error()
resp = resp.json()
user_id = resp["data"]["id"]
user_details = (
get_adapter()
.get_requests_session()
.get(
"{}/{}".format(self.user_details_url, user_id),
headers={"Authorization": "Bearer {}".format(token)},
)
)
user_details.raise_for_status()
user_details = user_details.json()
return user_details
oauth2_login = OAuth2LoginView.adapter_view(CleverOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(CleverOAuth2Adapter)
| CleverOAuth2Adapter |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_step_function.py | {
"start": 1568,
"end": 4266
} | class ____:
TASK_ID = "step_function_get_execution_output"
@pytest.fixture(autouse=True)
def _setup_test_cases(self):
with mock.patch(
"airflow.providers.amazon.aws.links.step_function.StateMachineExecutionsDetailsLink.persist"
) as executions_details_link:
self.mocked_executions_details_link = executions_details_link
yield
def test_init(self):
op = StepFunctionGetExecutionOutputOperator(
task_id=self.TASK_ID,
execution_arn=EXECUTION_ARN,
aws_conn_id=AWS_CONN_ID,
region_name=REGION_NAME,
verify="/spam/egg.pem",
botocore_config={"read_timeout": 42},
)
assert op.execution_arn == EXECUTION_ARN
assert op.hook.aws_conn_id == AWS_CONN_ID
assert op.hook._region_name == REGION_NAME
assert op.hook._verify == "/spam/egg.pem"
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
op = StepFunctionGetExecutionOutputOperator(task_id=self.TASK_ID, execution_arn=EXECUTION_ARN)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
@mock.patch.object(StepFunctionGetExecutionOutputOperator, "hook")
@pytest.mark.parametrize(
("response", "expected_output"),
[
pytest.param({"output": '{"foo": "bar"}'}, {"foo": "bar"}, id="output"),
pytest.param({"error": '{"spam": "egg"}'}, {"spam": "egg"}, id="error"),
pytest.param({"other": '{"baz": "qux"}'}, None, id="other"),
],
)
def test_execute(self, mocked_hook, mocked_context, response, expected_output):
mocked_hook.describe_execution.return_value = response
op = StepFunctionGetExecutionOutputOperator(
task_id=self.TASK_ID,
execution_arn=EXECUTION_ARN,
aws_conn_id=None,
)
assert op.execute(mocked_context) == expected_output
mocked_hook.describe_execution.assert_called_once_with(EXECUTION_ARN)
self.mocked_executions_details_link.assert_called_once_with(
aws_partition=mock.ANY,
context=mock.ANY,
operator=mock.ANY,
region_name=mock.ANY,
execution_arn=EXECUTION_ARN,
)
def test_template_fields(self):
operator = StepFunctionGetExecutionOutputOperator(
task_id=self.TASK_ID,
execution_arn=EXECUTION_ARN,
aws_conn_id=None,
)
validate_template_fields(operator)
| TestStepFunctionGetExecutionOutputOperator |
python | django__django | tests/contenttypes_tests/operations_migrations/0001_initial.py | {
"start": 43,
"end": 258
} | class ____(migrations.Migration):
operations = [
migrations.CreateModel(
"Foo",
[
("id", models.AutoField(primary_key=True)),
],
),
]
| Migration |
python | celery__celery | celery/exceptions.py | {
"start": 6372,
"end": 6466
} | class ____(CeleryError):
"""Celery is somehow improperly configured."""
| ImproperlyConfigured |
python | getsentry__sentry | src/sentry/utils/http.py | {
"start": 6256,
"end": 6546
} | class ____(HttpRequest):
"""typing-only: to help with hinting for `.subdomain`"""
subdomain: str
def is_using_customer_domain(request: HttpRequest) -> TypeGuard[_HttpRequestWithSubdomain]:
return bool(hasattr(request, "subdomain") and request.subdomain)
| _HttpRequestWithSubdomain |
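How the TypeGuard pays off at a call site, as a sketch (the helper function below is hypothetical): inside the guarded branch a type checker narrows `request` to `_HttpRequestWithSubdomain`, so `.subdomain` is a declared attribute rather than a dynamic access.

from django.http import HttpRequest

def org_slug_for(request: HttpRequest) -> str | None:
    if is_using_customer_domain(request):
        return request.subdomain   # narrowed: attribute is statically known
    return None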
python | pytorch__pytorch | torch/_guards.py | {
"start": 17456,
"end": 18258
} | class ____:
nn_modules: dict[str, torch.nn.Module] = {}
def __init__(self, nn_modules: dict[str, torch.nn.Module]) -> None:
self.nn_modules = nn_modules
def diff(self, other: ModuleContextCheckpointState) -> Optional[set[str]]:
"""
Produces a delta against another ModuleContextCheckpointState.
Returns None if no delta is found, otherwise, return a set() of mismatched
module key names.
"""
r = set(self.nn_modules.keys()).difference(set(other.nn_modules.keys()))
if len(r) == 0:
return None
return r
def __eq__(self, other: object) -> bool:
if not isinstance(other, ModuleContextCheckpointState):
return False
return self.diff(other) is None
| ModuleContextCheckpointState |
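A sketch of the diff contract above, derived directly from the code shown: keys present in `self` but missing from `other` are reported, the comparison is one-directional, and equal key sets compare equal.

import torch

a = ModuleContextCheckpointState({"encoder": torch.nn.Linear(2, 2)})
b = ModuleContextCheckpointState({})
assert a.diff(b) == {"encoder"}   # `a` has a module `b` lacks
assert b.diff(a) is None          # set difference, not symmetric difference
assert ModuleContextCheckpointState({}) == b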
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 325399,
"end": 327414
} | class ____:
@pytest.mark.parametrize('scale, expected',
[(1.0, 2.3283064359965952e-170),
(3.5, 5.987114417447875e-153)])
def test_delta_cdf(self, scale, expected):
# Expected value computed with mpmath:
#
# def burr12sf(x, c, d, scale):
# x = mpmath.mpf(x)
# c = mpmath.mpf(c)
# d = mpmath.mpf(d)
# scale = mpmath.mpf(scale)
# return (mpmath.mp.one + (x/scale)**c)**(-d)
#
# >>> import mpmath
# >>> mpmath.mp.dps = 60
# >>> float(burr12sf(2e5, 4, 8, 1) - burr12sf(4e5, 4, 8, 1))
# 2.3283064359965952e-170
# >>> float(burr12sf(2e5, 4, 8, 3.5) - burr12sf(4e5, 4, 8, 3.5))
# 5.987114417447875e-153
#
delta = stats.burr12._delta_cdf(2e5, 4e5, 4, 8, scale=scale)
assert_allclose(delta, expected, rtol=1e-13)
def test_moments_edge(self):
# gh-18838 reported that burr12 moments could be invalid; see above.
# Check that this is resolved in an edge case where c*d == n, and
# compare the results against those produced by Mathematica, e.g.
# `SinghMaddalaDistribution[2, 2, 1]` at Wolfram Alpha.
c, d = 2, 2
mean = np.pi/4
var = 1 - np.pi**2/16
skew = np.pi**3/(32*var**1.5)
kurtosis = np.nan
ref = [mean, var, skew, kurtosis]
res = stats.burr12(c, d).stats('mvsk')
assert_allclose(res, ref, rtol=1e-14)
# Reference values were computed with mpmath using mp.dps = 80
# and then cast to float.
@pytest.mark.parametrize(
'p, c, d, ref',
[(1e-12, 20, 0.5, 15.848931924611135),
(1e-19, 20, 0.5, 79.43282347242815),
(1e-12, 0.25, 35, 2.0888618213462466),
(1e-80, 0.25, 35, 1360930951.7972188)]
)
def test_isf_near_zero(self, p, c, d, ref):
x = stats.burr12.isf(p, c, d)
assert_allclose(x, ref, rtol=1e-14)
| TestBurr12 |
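Why the private `_delta_cdf` helper tested above exists, in two lines: subtracting CDF values that both round to 1.0 cancels catastrophically, while working on the survival-function side keeps the tail mass (matching the mpmath reference in the docstring).

from scipy import stats

print(stats.burr12.cdf(4e5, 4, 8) - stats.burr12.cdf(2e5, 4, 8))  # 0.0
print(stats.burr12.sf(2e5, 4, 8) - stats.burr12.sf(4e5, 4, 8))    # ~2.3283e-170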
python | mwaskom__seaborn | tests/_stats/test_density.py | {
"start": 278,
"end": 6861
} | class ____:
@pytest.fixture
def df(self, rng):
n = 100
return pd.DataFrame(dict(
x=rng.uniform(0, 7, n).round(),
y=rng.normal(size=n),
color=rng.choice(["a", "b", "c"], n),
alpha=rng.choice(["x", "y"], n),
))
def get_groupby(self, df, orient):
cols = [c for c in df if c != orient]
return GroupBy([*cols, "group"])
def integrate(self, y, x):
y = np.asarray(y)
x = np.asarray(x)
dx = np.diff(x)
return (dx * y[:-1] + dx * y[1:]).sum() / 2
@pytest.mark.parametrize("ori", ["x", "y"])
def test_columns(self, df, ori):
df = df[[ori, "alpha"]]
gb = self.get_groupby(df, ori)
res = KDE()(df, gb, ori, {})
other = {"x": "y", "y": "x"}[ori]
expected = [ori, "alpha", "density", other]
assert list(res.columns) == expected
@pytest.mark.parametrize("gridsize", [20, 30, None])
def test_gridsize(self, df, gridsize):
ori = "y"
df = df[[ori]]
gb = self.get_groupby(df, ori)
res = KDE(gridsize=gridsize)(df, gb, ori, {})
if gridsize is None:
assert_array_equal(res[ori], df[ori])
else:
assert len(res) == gridsize
@pytest.mark.parametrize("cut", [1, 2])
def test_cut(self, df, cut):
ori = "y"
df = df[[ori]]
gb = self.get_groupby(df, ori)
res = KDE(cut=cut, bw_method=1)(df, gb, ori, {})
vals = df[ori]
bw = vals.std()
assert res[ori].min() == pytest.approx(vals.min() - bw * cut, abs=1e-2)
assert res[ori].max() == pytest.approx(vals.max() + bw * cut, abs=1e-2)
@pytest.mark.parametrize("common_grid", [True, False])
def test_common_grid(self, df, common_grid):
ori = "y"
df = df[[ori, "alpha"]]
gb = self.get_groupby(df, ori)
res = KDE(common_grid=common_grid)(df, gb, ori, {})
vals = df["alpha"].unique()
a = res.loc[res["alpha"] == vals[0], ori].to_numpy()
b = res.loc[res["alpha"] == vals[1], ori].to_numpy()
if common_grid:
assert_array_equal(a, b)
else:
assert np.not_equal(a, b).all()
@pytest.mark.parametrize("common_norm", [True, False])
def test_common_norm(self, df, common_norm):
ori = "y"
df = df[[ori, "alpha"]]
gb = self.get_groupby(df, ori)
res = KDE(common_norm=common_norm)(df, gb, ori, {})
areas = (
res.groupby("alpha")
.apply(
lambda x: self.integrate(x["density"], x[ori]),
**groupby_apply_include_groups(False),
)
)
if common_norm:
assert areas.sum() == pytest.approx(1, abs=1e-3)
else:
assert_array_almost_equal(areas, [1, 1], decimal=3)
def test_common_norm_variables(self, df):
ori = "y"
df = df[[ori, "alpha", "color"]]
gb = self.get_groupby(df, ori)
res = KDE(common_norm=["alpha"])(df, gb, ori, {})
def integrate_by_color_and_sum(x):
return (
x.groupby("color")
.apply(
lambda y: self.integrate(y["density"], y[ori]),
**groupby_apply_include_groups(False)
)
.sum()
)
areas = (
res
.groupby("alpha")
.apply(integrate_by_color_and_sum, **groupby_apply_include_groups(False))
)
assert_array_almost_equal(areas, [1, 1], decimal=3)
@pytest.mark.parametrize("param", ["norm", "grid"])
def test_common_input_checks(self, df, param):
ori = "y"
df = df[[ori, "alpha"]]
gb = self.get_groupby(df, ori)
msg = rf"Undefined variable\(s\) passed for KDE.common_{param}"
with pytest.warns(UserWarning, match=msg):
KDE(**{f"common_{param}": ["color", "alpha"]})(df, gb, ori, {})
msg = f"KDE.common_{param} must be a boolean or list of strings"
with pytest.raises(TypeError, match=msg):
KDE(**{f"common_{param}": "alpha"})(df, gb, ori, {})
def test_bw_adjust(self, df):
ori = "y"
df = df[[ori]]
gb = self.get_groupby(df, ori)
res1 = KDE(bw_adjust=0.5)(df, gb, ori, {})
res2 = KDE(bw_adjust=2.0)(df, gb, ori, {})
mad1 = res1["density"].diff().abs().mean()
mad2 = res2["density"].diff().abs().mean()
assert mad1 > mad2
def test_bw_method_scalar(self, df):
ori = "y"
df = df[[ori]]
gb = self.get_groupby(df, ori)
res1 = KDE(bw_method=0.5)(df, gb, ori, {})
res2 = KDE(bw_method=2.0)(df, gb, ori, {})
mad1 = res1["density"].diff().abs().mean()
mad2 = res2["density"].diff().abs().mean()
assert mad1 > mad2
@pytest.mark.skipif(_no_scipy, reason="KDE.cumulative requires scipy")
@pytest.mark.parametrize("common_norm", [True, False])
def test_cumulative(self, df, common_norm):
ori = "y"
df = df[[ori, "alpha"]]
gb = self.get_groupby(df, ori)
res = KDE(cumulative=True, common_norm=common_norm)(df, gb, ori, {})
for _, group_res in res.groupby("alpha"):
assert (group_res["density"].diff().dropna() >= 0).all()
if not common_norm:
assert group_res["density"].max() == pytest.approx(1, abs=1e-3)
def test_cumulative_requires_scipy(self):
if _no_scipy:
err = "Cumulative KDE evaluation requires scipy"
with pytest.raises(RuntimeError, match=err):
KDE(cumulative=True)
@pytest.mark.parametrize("vals", [[], [1], [1] * 5, [1929245168.06679] * 18])
def test_singular(self, df, vals):
df1 = pd.DataFrame({"y": vals, "alpha": ["z"] * len(vals)})
gb = self.get_groupby(df1, "y")
res = KDE()(df1, gb, "y", {})
assert res.empty
df2 = pd.concat([df[["y", "alpha"]], df1], ignore_index=True)
gb = self.get_groupby(df2, "y")
res = KDE()(df2, gb, "y", {})
assert set(res["alpha"]) == set(df["alpha"])
@pytest.mark.parametrize("col", ["y", "weight"])
def test_missing(self, df, col):
val, ori = "xy"
df["weight"] = 1
df = df[[ori, "weight"]].astype(float)
df.loc[:4, col] = np.nan
gb = self.get_groupby(df, ori)
res = KDE()(df, gb, ori, {})
assert self.integrate(res[val], res[ori]) == pytest.approx(1, abs=1e-3)
| TestKDE |
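The `integrate` helper above is just the composite trapezoidal rule, so it agrees with NumPy's built-in implementation; a quick cross-check:

import numpy as np

x = np.linspace(0, np.pi, 101)
y = np.sin(x)
dx = np.diff(x)
manual = (dx * y[:-1] + dx * y[1:]).sum() / 2
# np.trapezoid on NumPy >= 2.0 (np.trapz on older releases)
assert np.isclose(manual, np.trapezoid(y, x))   # both ~1.9998; exact integral is 2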
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-square-free-subsets.py | {
"start": 111,
"end": 1723
} | class ____(object):
def squareFreeSubsets(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def linear_sieve_of_eratosthenes(n): # Time: O(n), Space: O(n)
primes = []
spf = [-1]*(n+1) # the smallest prime factor
for i in range(2, n+1):
if spf[i] == -1:
spf[i] = i
primes.append(i)
for p in primes:
if i*p > n or p > spf[i]:
break
spf[i*p] = p
return primes
MAX_NUM = max(nums)
PRIMES = linear_sieve_of_eratosthenes(MAX_NUM)
MASKS = [0]*(MAX_NUM+1)
for x in range(MAX_NUM+1):
y = x
for i, p in enumerate(PRIMES):
if y%p:
continue
if y%p**2 == 0:
MASKS[x] = 0
break
MASKS[x] |= (1<<i)
y //= p
MOD = 10**9+7
cnt = collections.Counter(nums)
arr = [x for x in cnt if x != 1]
dp = [1]*(1<<len(PRIMES))
for x in arr:
if not MASKS[x]:
continue
for mask in reversed(range(len(dp))):
if MASKS[x]&mask == 0:
dp[mask|MASKS[x]] = (dp[mask|MASKS[x]]+cnt[x]*dp[mask])%MOD
return (dp[-1]*pow(2, cnt[1], MOD)-1)%MOD if 1 in cnt else (dp[-1]-1)%MOD
# Time: O(n + m * 2^p)
# Space: O(m * 2^p)
import collections
# number theory, combinatorics, bitmasks, memoization
| Solution |
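Expected behaviour on the problem's published examples: values 4 = 2^2 carry an empty mask and are skipped, so the counted subsets for the first input are {3}, {5}, and {3, 5}.

s = Solution()
print(s.squareFreeSubsets([3, 4, 4, 5]))  # 3
print(s.squareFreeSubsets([1]))           # 1: only {1} itself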
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_web_search_tool_result_error.py | {
"start": 290,
"end": 437
} | class ____(BaseModel):
error_code: BetaWebSearchToolResultErrorCode
type: Literal["web_search_tool_result_error"]
| BetaWebSearchToolResultError |
python | huggingface__transformers | tests/models/granite_speech/test_modeling_granite_speech.py | {
"start": 7187,
"end": 11121
} | class ____(
ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase
):
"""
Model tester for `GraniteSpeechForConditionalGeneration`.
"""
all_model_classes = (GraniteSpeechForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = {"any-to-any": GraniteSpeechForConditionalGeneration} if is_torch_available() else {}
_is_composite = True
def setUp(self):
self.model_tester = GraniteSpeechForConditionalGenerationModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=GraniteSpeechConfig,
has_text_modality=False,
)
def test_inputs_embeds(self):
# overwrite inputs_embeds tests because we need to delete "input features" for the audio model
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["input_features"]
wte = model.get_input_embeddings()
inputs["inputs_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)
def test_sdpa_can_dispatch_composite_models(self):
# overwrite because Granite Speech is an audio+text model (not vision+text)
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
if not self._is_composite:
self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
for model_class in self.all_model_classes:
# NOTE - currently we only enable alternate attention implementations on
# the encapsulated LLM; in the future, this should be added for the conformer
# encoder as well.
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_sdpa = model_class.from_pretrained(tmpdirname)
model_sdpa = model_sdpa.eval().to(torch_device)
text_attn = "sdpa" if model.language_model._supports_sdpa else "eager"
# `None` as it is the requested one which will be assigned to each sub-config
# Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present)
self.assertTrue(model_sdpa.config._attn_implementation == "sdpa")
self.assertTrue(model.language_model.config._attn_implementation == text_attn)
model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager")
model_eager = model_eager.eval().to(torch_device)
self.assertTrue(model_eager.config._attn_implementation == "eager")
self.assertTrue(model_eager.language_model.config._attn_implementation == "eager")
for name, submodule in model_eager.named_modules():
class_name = submodule.__class__.__name__
if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name:
raise ValueError("The eager model should not have SDPA attention layers")
@pytest.mark.generate
@slow
@unittest.skip(reason="Granite Speech doesn't support SDPA for all backbones")
def test_eager_matches_sdpa_generate(self):
pass
@unittest.skip(reason="GraniteSpeech has no separate base model without a head.")
def test_model_base_model_prefix(self):
pass
| GraniteSpeechForConditionalGenerationModelTest |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_extra/testing.py | {
"start": 1000,
"end": 11975
} | class ____(enum.Enum):
"""Unique type for deprecated parameters."""
DEPRECATED = 1
DEPRECATED = Deprecated.DEPRECATED
def lazy_xp_function(
func: Callable[..., Any],
*,
allow_dask_compute: bool | int = False,
jax_jit: bool = True,
static_argnums: Deprecated = DEPRECATED,
static_argnames: Deprecated = DEPRECATED,
) -> None: # numpydoc ignore=GL07
"""
Tag a function to be tested on lazy backends.
Tag a function so that when any tests are executed with ``xp=jax.numpy`` the
function is replaced with a jitted version of itself, and when it is executed with
``xp=dask.array`` the function will raise if it attempts to materialize the graph.
This will be later expanded to provide test coverage for other lazy backends.
In order for the tag to be effective, the test or a fixture must call
:func:`patch_lazy_xp_functions`.
Parameters
----------
func : callable
Function to be tested.
allow_dask_compute : bool | int, optional
Whether `func` is allowed to internally materialize the Dask graph, or maximum
number of times it is allowed to do so. This is typically triggered by
``bool()``, ``float()``, or ``np.asarray()``.
Set to 1 if you are aware that `func` converts the input parameters to NumPy and
want to let it do so at least for the time being, knowing that it is going to be
extremely detrimental for performance.
If a test needs values higher than 1 to pass, it is a canary that the conversion
to NumPy/bool/float is happening multiple times, which translates to multiple
computations of the whole graph. Short of making the function fully lazy, you
should at least add explicit calls to ``np.asarray()`` early in the function.
*Note:* the counter of `allow_dask_compute` resets after each call to `func`, so
a test function that invokes `func` multiple times should still work with this
parameter set to 1.
Set to True to allow `func` to materialize the graph an unlimited number
of times.
Default: False, meaning that `func` must be fully lazy and never materialize the
graph.
jax_jit : bool, optional
Set to True to replace `func` with a smart variant of ``jax.jit(func)`` after
calling the :func:`patch_lazy_xp_functions` test helper with ``xp=jax.numpy``.
This is the default behaviour.
Set to False if `func` is only compatible with eager (non-jitted) JAX.
Unlike with vanilla ``jax.jit``, all arguments and return types that are not JAX
arrays are treated as static; the function can accept and return arbitrary
wrappers around JAX arrays. This difference is because, in real life, most users
won't wrap the function directly with ``jax.jit`` but rather they will use it
within their own code, which is itself then wrapped by ``jax.jit``, and
internally consume the function's outputs.
In other words, the pattern that is being tested is::
>>> @jax.jit
... def user_func(x):
... y = user_prepares_inputs(x)
... z = func(y, some_static_arg=True)
... return user_consumes(z)
Default: True.
static_argnums :
Deprecated; ignored
static_argnames :
Deprecated; ignored
See Also
--------
patch_lazy_xp_functions : Companion function to call from the test or fixture.
jax.jit : JAX function to compile a function for performance.
Examples
--------
In ``test_mymodule.py``::
from array_api_extra.testing import lazy_xp_function
from mymodule import myfunc
lazy_xp_function(myfunc)
def test_myfunc(xp):
a = xp.asarray([1, 2])
# When xp=jax.numpy, this is similar to `b = jax.jit(myfunc)(a)`
# When xp=dask.array, crash on compute() or persist()
b = myfunc(a)
Notes
-----
In order for this tag to be effective, the test function must be imported into the
test module globals without its namespace; alternatively its namespace must be
declared in a ``lazy_xp_modules`` list in the test module globals.
Example 1::
from mymodule import myfunc
lazy_xp_function(myfunc)
def test_myfunc(xp):
x = myfunc(xp.asarray([1, 2]))
Example 2::
import mymodule
lazy_xp_modules = [mymodule]
lazy_xp_function(mymodule.myfunc)
def test_myfunc(xp):
x = mymodule.myfunc(xp.asarray([1, 2]))
A test function can circumvent this monkey-patching system by using a namespace
outside of the two above patterns. You need to sanitize your code to make sure this
only happens intentionally.
Example 1::
import mymodule
from mymodule import myfunc
lazy_xp_function(myfunc)
def test_myfunc(xp):
a = xp.asarray([1, 2])
b = myfunc(a) # This is wrapped when xp=jax.numpy or xp=dask.array
c = mymodule.myfunc(a) # This is not
Example 2::
import mymodule
class naked:
myfunc = mymodule.myfunc
lazy_xp_modules = [mymodule]
lazy_xp_function(mymodule.myfunc)
def test_myfunc(xp):
a = xp.asarray([1, 2])
b = mymodule.myfunc(a) # This is wrapped when xp=jax.numpy or xp=dask.array
c = naked.myfunc(a) # This is not
"""
if static_argnums is not DEPRECATED or static_argnames is not DEPRECATED:
warnings.warn(
(
"The `static_argnums` and `static_argnames` parameters are deprecated "
"and ignored. They will be removed in a future version."
),
DeprecationWarning,
stacklevel=2,
)
tags = {
"allow_dask_compute": allow_dask_compute,
"jax_jit": jax_jit,
}
try:
func._lazy_xp_function = tags # type: ignore[attr-defined] # pylint: disable=protected-access # pyright: ignore[reportFunctionMemberAccess]
except AttributeError: # @cython.vectorize
_ufuncs_tags[func] = tags
def patch_lazy_xp_functions(
request: pytest.FixtureRequest,
monkeypatch: pytest.MonkeyPatch | None = None,
*,
xp: ModuleType,
) -> contextlib.AbstractContextManager[None]:
"""
Test lazy execution of functions tagged with :func:`lazy_xp_function`.
If ``xp==jax.numpy``, search for all functions which have been tagged with
:func:`lazy_xp_function` in the globals of the module that defines the current test,
as well as in the ``lazy_xp_modules`` list in the globals of the same module,
and wrap them with :func:`jax.jit`. Unwrap them at the end of the test.
If ``xp==dask.array``, wrap the functions with a decorator that disables
``compute()`` and ``persist()`` and ensures that exceptions and warnings are raised
eagerly.
This function should be typically called by your library's `xp` fixture that runs
tests on multiple backends::
@pytest.fixture(params=[
numpy,
array_api_strict,
pytest.param(jax.numpy, marks=pytest.mark.thread_unsafe),
pytest.param(dask.array, marks=pytest.mark.thread_unsafe),
])
def xp(request):
with patch_lazy_xp_functions(request, xp=request.param):
yield request.param
but it can be otherwise be called by the test itself too.
Parameters
----------
request : pytest.FixtureRequest
Pytest fixture, as acquired by the test itself or by one of its fixtures.
monkeypatch : pytest.MonkeyPatch
Deprecated
xp : array_namespace
Array namespace to be tested.
See Also
--------
lazy_xp_function : Tag a function to be tested on lazy backends.
pytest.FixtureRequest : `request` test function parameter.
Notes
-----
This context manager monkey-patches modules and as such is thread unsafe
on Dask and JAX. If you run your test suite with
`pytest-run-parallel <https://github.com/Quansight-Labs/pytest-run-parallel/>`_,
you should mark these backends with ``@pytest.mark.thread_unsafe``, as shown in
the example above.
"""
mod = cast(ModuleType, request.module)
mods = [mod, *cast(list[ModuleType], getattr(mod, "lazy_xp_modules", []))]
to_revert: list[tuple[ModuleType, str, object]] = []
def temp_setattr(mod: ModuleType, name: str, func: object) -> None:
"""
Variant of monkeypatch.setattr, which allows monkey-patching only selected
parameters of a test so that pytest-run-parallel can run on the remainder.
"""
assert hasattr(mod, name)
to_revert.append((mod, name, getattr(mod, name)))
setattr(mod, name, func)
if monkeypatch is not None:
warnings.warn(
(
"The `monkeypatch` parameter is deprecated and will be removed in a "
"future version. "
"Use `patch_lazy_xp_function` as a context manager instead."
),
DeprecationWarning,
stacklevel=2,
)
# Enable using patch_lazy_xp_function not as a context manager
temp_setattr = monkeypatch.setattr # type: ignore[assignment] # pyright: ignore[reportAssignmentType]
def iter_tagged() -> Iterator[
tuple[ModuleType, str, Callable[..., Any], dict[str, Any]]
]:
for mod in mods:
for name, func in mod.__dict__.items():
tags: dict[str, Any] | None = None
with contextlib.suppress(AttributeError):
tags = func._lazy_xp_function # pylint: disable=protected-access
if tags is None:
with contextlib.suppress(KeyError, TypeError):
tags = _ufuncs_tags[func]
if tags is not None:
yield mod, name, func, tags
if is_dask_namespace(xp):
for mod, name, func, tags in iter_tagged():
n = tags["allow_dask_compute"]
if n is True:
n = 1_000_000
elif n is False:
n = 0
wrapped = _dask_wrap(func, n)
temp_setattr(mod, name, wrapped)
elif is_jax_namespace(xp):
for mod, name, func, tags in iter_tagged():
if tags["jax_jit"]:
wrapped = jax_autojit(func)
temp_setattr(mod, name, wrapped)
# We can't just decorate patch_lazy_xp_functions with
# @contextlib.contextmanager because it would not work with the
# deprecated monkeypatch when not used as a context manager.
@contextlib.contextmanager
def revert_on_exit() -> Generator[None]:
try:
yield
finally:
for mod, name, orig_func in to_revert:
setattr(mod, name, orig_func)
return revert_on_exit()
| Deprecated |
python | conda__conda | conda/gateways/connection/adapters/ftp.py | {
"start": 1351,
"end": 9554
} | class ____(BaseAdapter):
"""A Requests Transport Adapter that handles FTP urls."""
def __init__(self):
super().__init__()
# Build a dictionary keyed off the methods we support in upper case.
# The values of this dictionary should be the functions we use to
# send the specific queries.
self.func_table = {
"LIST": self.list,
"RETR": self.retr,
"NLST": self.nlst,
"GET": self.retr,
}
def send(self, request, **kwargs):
"""Sends a PreparedRequest object over FTP. Returns a response object."""
# Get the authentication from the prepared request, if any.
auth = self.get_username_password_from_header(request)
# Next, get the host and the path.
host, port, path = self.get_host_and_path_from_url(request)
# Sort out the timeout.
timeout = kwargs.get("timeout", None)
if not isinstance(timeout, int):
# https://github.com/conda/conda/pull/3392
timeout = 10
# Establish the connection and login if needed.
self.conn = ftplib.FTP()
self.conn.connect(host, port, timeout)
if auth is not None:
self.conn.login(auth[0], auth[1])
else:
self.conn.login()
# Get the method and attempt to find the function to call.
resp = self.func_table[request.method](path, request)
# Return the response.
return resp
def close(self):
"""Dispose of any internal state."""
# Currently this is a no-op.
pass
def list(self, path, request):
"""Executes the FTP LIST command on the given path."""
data = StringIO()
# To ensure the StringIO gets cleaned up, we need to alias its close
# method to the release_conn() method. This is a dirty hack, but there
# you go.
data.release_conn = data.close
self.conn.cwd(path)
code = self.conn.retrbinary("LIST", data_callback_factory(data))
# When that call has finished executing, we'll have all our data.
response = build_text_response(request, data, code)
# Close the connection.
self.conn.close()
return response
def retr(self, path, request):
"""Executes the FTP RETR command on the given path."""
data = BytesIO()
# To ensure the BytesIO gets cleaned up, we need to alias its close
# method. See self.list().
data.release_conn = data.close
code = self.conn.retrbinary("RETR " + path, data_callback_factory(data))
response = build_binary_response(request, data, code)
# Close the connection.
self.conn.close()
return response
def nlst(self, path, request):
"""Executes the FTP NLST command on the given path."""
data = StringIO()
# Alias the close method.
data.release_conn = data.close
self.conn.cwd(path)
code = self.conn.retrbinary("NLST", data_callback_factory(data))
# When that call has finished executing, we'll have all our data.
response = build_text_response(request, data, code)
# Close the connection.
self.conn.close()
return response
def get_username_password_from_header(self, request):
"""Given a PreparedRequest object, reverse the process of adding HTTP
Basic auth to obtain the username and password. Allows the FTP adapter
to piggyback on the basic auth notation without changing the control
flow.
"""
auth_header = request.headers.get("Authorization")
if auth_header:
# The basic auth header is of the form 'Basic xyz'. We want the
# second part. Check that we have the right kind of auth though.
encoded_components = auth_header.split()[:2]
if encoded_components[0] != "Basic":
raise AuthenticationError("Invalid form of Authentication used.")
else:
encoded = encoded_components[1]
# Decode the base64 encoded string.
                decoded = b64decode(encoded).decode("utf-8")  # b64decode returns bytes
# The string is of the form 'username:password'. Split on the
# colon.
                components = decoded.split(":", 1)  # split once so the password may contain ':'
username = components[0]
password = components[1]
return (username, password)
else:
# No auth header. Return None.
return None
def get_host_and_path_from_url(self, request):
"""Given a PreparedRequest object, split the URL in such a manner as to
determine the host and the path. This is a separate method to wrap some
of urlparse's craziness.
"""
url = request.url
parsed = urlparse(url)
path = parsed.path
        # If there is a slash on the front of the path, chuck it.
        if path.startswith("/"):  # also safe when the path is empty
            path = path[1:]
host = parsed.hostname
port = parsed.port or 0
return (host, port, path)
def data_callback_factory(variable):
"""Returns a callback suitable for use by the FTP library. This callback
will repeatedly save data into the variable provided to this function. This
variable should be a file-like structure.
"""
def callback(data):
variable.write(data)
return callback
def build_text_response(request, data, code):
"""Build a response for textual data."""
return build_response(request, data, code, "ascii")
def build_binary_response(request, data, code):
"""Build a response for data whose encoding is unknown."""
return build_response(request, data, code, None)
def build_response(request, data, code, encoding):
"""Builds a response object from the data returned by ftplib, using the
specified encoding.
"""
response = Response()
response.encoding = encoding
# Fill in some useful fields.
response.raw = data
response.url = request.url
response.request = request
response.status_code = get_status_code_from_code_response(code)
# Make sure to seek the file-like raw object back to the start.
response.raw.seek(0)
# Run the response hook.
response = dispatch_hook("response", request.hooks, response)
return response
def get_status_code_from_code_response(code):
r"""Handle complicated code response, even multi-lines.
We get the status code in two ways:
- extracting the code from the last valid line in the response
- getting it from the 3 first digits in the code
After a comparison between the two values,
we can safely set the code or raise a warning.
Examples:
- get_status_code_from_code_response('200 Welcome') == 200
- multi_line_code = '226-File successfully transferred\n226 0.000 seconds'
get_status_code_from_code_response(multi_line_code) == 226
- multi_line_with_code_conflicts = '200-File successfully transferred\n226 0.000 seconds'
get_status_code_from_code_response(multi_line_with_code_conflicts) == 226
For more detail see RFC 959, page 36, on multi-line responses:
https://www.ietf.org/rfc/rfc959.txt
"Thus the format for multi-line replies is that the first line
will begin with the exact required reply code, followed
immediately by a Hyphen, "-" (also known as Minus), followed by
text. The last line will begin with the same code, followed
immediately by Space <SP>, optionally some text, and the Telnet
end-of-line code."
"""
last_valid_line_from_code = [line for line in code.split("\n") if line][-1]
status_code_from_last_line = int(last_valid_line_from_code.split()[0])
status_code_from_first_digits = int(code[:3])
if status_code_from_last_line != status_code_from_first_digits:
log.warning(
"FTP response status code seems to be inconsistent.\n"
"Code received: %s, extracted: %s and %s",
code,
status_code_from_last_line,
status_code_from_first_digits,
)
return status_code_from_last_line
| FTPAdapter |
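A quick, self-contained check of the multi-line reply parsing above. This restates the logic without the module's logger; status_from_reply is a name introduced here for illustration and is not part of conda.
def status_from_reply(code: str) -> int:
    # Mirror of get_status_code_from_code_response, minus the log.warning call.
    last_line = [line for line in code.split("\n") if line][-1]
    from_last_line = int(last_line.split()[0])
    from_first_digits = int(code[:3])
    if from_last_line != from_first_digits:
        print(f"inconsistent FTP reply: {from_first_digits} vs {from_last_line}")
    return from_last_line
assert status_from_reply("200 Welcome") == 200
assert status_from_reply("226-File successfully transferred\n226 0.000 seconds") == 226
assert status_from_reply("200-File transferred\n226 0.000 seconds") == 226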
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/repository_origin.py | {
"start": 418,
"end": 1677
} | class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.String)
repository_location_name = graphene.NonNull(graphene.String)
repository_name = graphene.NonNull(graphene.String)
repository_location_metadata = non_null_list(GrapheneRepositoryMetadata)
class Meta:
name = "RepositoryOrigin"
def __init__(self, origin: RemoteRepositoryOrigin):
super().__init__()
self._origin = check.inst_param(origin, "origin", RemoteRepositoryOrigin)
def resolve_id(self, _graphene_info: ResolveInfo) -> str:
return self._origin.get_id()
def resolve_repository_location_name(self, _graphene_info: ResolveInfo) -> str:
return self._origin.code_location_origin.location_name
def resolve_repository_name(self, _graphene_info: ResolveInfo) -> str:
return self._origin.repository_name
def resolve_repository_location_metadata(
self, _graphene_info: ResolveInfo
) -> Sequence[GrapheneRepositoryMetadata]:
metadata = self._origin.code_location_origin.get_display_metadata()
return [
GrapheneRepositoryMetadata(key=key, value=value)
for key, value in metadata.items()
if value is not None
]
| GrapheneRepositoryOrigin |
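The resolver pattern above (wrap a domain object, then mirror its fields through resolve_* methods) can be reproduced with plain graphene. A minimal sketch with illustrative names, not part of the Dagster API:
import graphene
class GreetingType(graphene.ObjectType):
    message = graphene.NonNull(graphene.String)
    def __init__(self, name: str):
        super().__init__()
        self._name = name  # the wrapped domain value
    def resolve_message(self, _info) -> str:
        # Resolvers read from the wrapped object rather than from graphene state.
        return f"hello, {self._name}"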
python | ansible__ansible | lib/ansible/plugins/action/include_vars.py | {
"start": 541,
"end": 11436
} | class ____(ActionBase):
TRANSFERS_FILES = False
VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions', 'ignore_unknown_extensions']
VALID_FILE_ARGUMENTS = ['file', '_raw_params']
VALID_ALL = ['name', 'hash_behaviour']
_requires_connection = False
def _set_dir_defaults(self):
if not self.depth:
self.depth = 0
if self.files_matching:
self.matcher = re.compile(r'{0}'.format(self.files_matching))
else:
self.matcher = None
if not self.ignore_files:
self.ignore_files = list()
if isinstance(self.ignore_files, str):
self._display.deprecated(
msg="Specifying 'ignore_files' as a string is deprecated.",
version="2.24",
help_text="Use a list of strings instead.",
obj=self.ignore_files,
)
self.ignore_files = self.ignore_files.split()
if not isinstance(self.ignore_files, list):
raise AnsibleError("The 'ignore_files' option must be a list.", obj=self.ignore_files)
def _set_args(self):
""" Set instance variables based on the arguments that were passed """
self.hash_behaviour = self._task.args.get('hash_behaviour', None)
self.return_results_as_name = self._task.args.get('name', None)
self.source_dir = self._task.args.get('dir', None)
self.source_file = self._task.args.get('file', None)
if not self.source_dir and not self.source_file:
self.source_file = self._task.args.get('_raw_params')
if self.source_file:
self.source_file = self.source_file.rstrip('\n')
self.depth = self._task.args.get('depth', None)
self.files_matching = self._task.args.get('files_matching', None)
self.ignore_unknown_extensions = self._task.args.get('ignore_unknown_extensions', False)
self.ignore_files = self._task.args.get('ignore_files', None)
self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)
if not isinstance(self.valid_extensions, list):
raise AnsibleError("The 'extensions' option must be a list.", obj=self.valid_extensions)
def run(self, tmp=None, task_vars=None):
""" Load yml files recursively from a directory.
"""
del tmp # tmp no longer has any effect
if task_vars is None:
task_vars = dict()
self.show_content = True
self.included_files = []
# Validate arguments
dirs = 0
files = 0
for arg in self._task.args:
if arg in self.VALID_DIR_ARGUMENTS:
dirs += 1
elif arg in self.VALID_FILE_ARGUMENTS:
files += 1
elif arg in self.VALID_ALL:
pass
else:
raise AnsibleError(f'{arg} is not a valid option in include_vars', obj=arg)
if dirs and files:
raise AnsibleError("You are mixing file only and dir only arguments, these are incompatible", obj=self._task.args)
# set internal vars from args
self._set_args()
results = dict()
failed = False
if self.source_dir:
self._set_dir_defaults()
self._set_root_dir()
if not path.exists(self.source_dir):
failed = True
err_msg = f"{self.source_dir} directory does not exist"
elif not path.isdir(self.source_dir):
failed = True
err_msg = f"{self.source_dir} is not a directory"
else:
for root_dir, filenames in self._traverse_dir_depth():
failed, err_msg, updated_results = self._load_files_in_dir(root_dir, filenames)
if failed:
break
results.update(updated_results)
else:
try:
self.source_file = self._find_needle('vars', self.source_file)
failed, err_msg, updated_results = (
self._load_files(self.source_file)
)
if not failed:
results.update(updated_results)
except AnsibleError as e:
failed = True
err_msg = to_native(e)
if self.return_results_as_name:
scope = dict()
scope[self.return_results_as_name] = results
results = scope
result = super(ActionModule, self).run(task_vars=task_vars)
if failed:
result['failed'] = failed
result['message'] = err_msg
elif self.hash_behaviour is not None and self.hash_behaviour != C.DEFAULT_HASH_BEHAVIOUR:
merge_hashes = self.hash_behaviour == 'merge'
existing_variables = {k: v for k, v in task_vars.items() if k in results}
results = combine_vars(existing_variables, results, merge=merge_hashes)
result['ansible_included_var_files'] = self.included_files
result['ansible_facts'] = results
result['_ansible_no_log'] = not self.show_content
return result
def _set_root_dir(self):
if self._task._role:
if self.source_dir.split('/')[0] == 'vars':
path_to_use = (
path.join(self._task._role._role_path, self.source_dir)
)
if path.exists(path_to_use):
self.source_dir = path_to_use
else:
path_to_use = (
path.join(
self._task._role._role_path, 'vars', self.source_dir
)
)
self.source_dir = path_to_use
else:
if (origin := self._task._origin) and origin.path: # origin.path is not present for ad-hoc tasks
current_dir = (
"/".join(origin.path.split('/')[:-1])
)
self.source_dir = path.join(current_dir, self.source_dir)
def _log_walk(self, error):
self._display.vvv(f"Issue with walking through {error.filename}: {error}")
def _traverse_dir_depth(self):
""" Recursively iterate over a directory and sort the files in
            alphabetical order. Do not iterate past the set depth.
The default depth is unlimited.
"""
sorted_walk = list(walk(self.source_dir, onerror=self._log_walk, followlinks=True))
sorted_walk.sort(key=lambda x: x[0])
for current_root, current_dir, current_files in sorted_walk:
# Depth 1 is the root, relative_to omits the root
current_depth = len(pathlib.Path(current_root).relative_to(self.source_dir).parts) + 1
if self.depth != 0 and current_depth > self.depth:
continue
current_files.sort()
yield (current_root, current_files)
def _ignore_file(self, filename):
""" Return True if a file matches the list of ignore_files.
Args:
filename (str): The filename that is being matched against.
Returns:
Boolean
"""
for file_type in self.ignore_files:
try:
if re.search(r'{0}$'.format(file_type), filename):
return True
except Exception as ex:
raise AnsibleError(f'Invalid regular expression: {file_type!r}', obj=file_type) from ex
return False
def _is_valid_file_ext(self, source_file):
""" Verify if source file has a valid extension
Args:
source_file (str): The full path of source file or source file.
Returns:
Bool
"""
file_ext = path.splitext(source_file)
return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)
def _load_files(self, filename, validate_extensions=False):
""" Loads a file and converts the output into a valid Python dict.
Args:
filename (str): The source file.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
if validate_extensions and not self._is_valid_file_ext(filename):
failed = True
err_msg = f"{filename!r} does not have a valid extension: {', '.join(self.valid_extensions)}"
else:
data = self._loader.load_from_file(filename, cache='none', trusted_as_template=True)
self.show_content &= not SourceWasEncrypted.is_tagged_on(data)
if data is None: # support empty files, but not falsey values
data = dict()
if not isinstance(data, dict):
failed = True
err_msg = f"{filename!r} must be stored as a dictionary/hash"
else:
self.included_files.append(filename)
results.update(data)
return failed, err_msg, results
def _load_files_in_dir(self, root_dir, var_files):
""" Load the found yml files and update/overwrite the dictionary.
Args:
root_dir (str): The base directory of the list of files that is being passed.
var_files: (list): List of files to iterate over and load into a dictionary.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
for filename in var_files:
stop_iter = False
# Never include main.yml from a role, as that is the default included by the role
if self._task._role:
if path.join(self._task._role._role_path, filename) == path.join(root_dir, 'vars', 'main.yml'):
stop_iter = True
continue
filepath = path.join(root_dir, filename)
if self.files_matching:
if not self.matcher.search(filename):
stop_iter = True
if not stop_iter and not failed:
if self.ignore_unknown_extensions:
if path.exists(filepath) and not self._ignore_file(filename) and self._is_valid_file_ext(filename):
failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
if not failed:
results.update(loaded_data)
else:
if path.exists(filepath) and not self._ignore_file(filename):
failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
if not failed:
results.update(loaded_data)
return failed, err_msg, results
| ActionModule |
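The depth bookkeeping in _traverse_dir_depth is easy to check on its own. A sketch with made-up paths, showing why depth 1 is the root itself:
import pathlib
source_dir = "/tmp/vars"
for current_root in ("/tmp/vars", "/tmp/vars/a", "/tmp/vars/a/b"):
    # relative_to(source_dir) yields '.', 'a', 'a/b' -> 0, 1, 2 parts
    depth = len(pathlib.Path(current_root).relative_to(source_dir).parts) + 1
    print(current_root, "->", depth)  # prints 1, 2, 3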
python | pytorch__pytorch | torch/_dynamo/aot_compile.py | {
"start": 1571,
"end": 2107
} | class ____(pickle.Pickler):
@classmethod
def _unpickle_cell(cls, val: Any) -> Any:
def _() -> Any:
return val
assert _.__closure__ is not None
return _.__closure__[0]
# pyrefly: ignore [bad-override]
def reducer_override(self, obj: Any) -> Any:
if isinstance(obj, type((lambda x: lambda: x)(0).__closure__[0])): # type: ignore[index] # noqa: PLC3002
return type(self)._unpickle_cell, (obj.cell_contents,)
return NotImplemented
@dataclass
| AOTCompilePickler |
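The closure-cell trick used above, both to detect cell objects and to rebuild them on unpickle, works like this; a standalone sketch:
import types
# type of a closure cell; equivalent to types.CellType on Python 3.8+
CellType = type((lambda x: lambda: x)(0).__closure__[0])
assert CellType is types.CellType
def make_cell(val):
    def _():
        return val
    return _.__closure__[0]  # the cell that captured val
cell = make_cell(42)
assert isinstance(cell, CellType) and cell.cell_contents == 42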
python | great-expectations__great_expectations | great_expectations/core/freshness_diagnostics.py | {
"start": 3095,
"end": 3594
} | class ____(_ParentFreshnessDiagnostics):
parent_error_class: ClassVar[Type[GreatExpectationsError]] = ValidationDefinitionNotAddedError
children_error_classes: ClassVar[Tuple[Type[GreatExpectationsError], ...]] = (
ExpectationSuiteNotAddedError,
BatchDefinitionNotAddedError,
)
raise_for_error_class: ClassVar[Type[ResourceFreshnessAggregateError]] = (
ValidationDefinitionRelatedResourcesFreshnessError
)
@dataclass
| ValidationDefinitionFreshnessDiagnostics |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 116319,
"end": 118408
} | class ____(Response):
"""
Response of tasks.clone endpoint.
:param id: ID of the new task
:type id: str
:param new_project: In case the new_project_name was specified returns the
target project details
:type new_project: dict
"""
_service = "tasks"
_action = "clone"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"id": {"description": "ID of the new task", "type": ["string", "null"]},
"new_project": {
"description": "In case the new_project_name was specified returns the target project details",
"properties": {
"id": {
"description": "The ID of the target project",
"type": "string",
},
"name": {
"description": "The name of the target project",
"type": "string",
},
},
"type": ["object", "null"],
},
},
"type": "object",
}
def __init__(self, id: Optional[str] = None, new_project: Optional[dict] = None, **kwargs: Any) -> None:
super(CloneResponse, self).__init__(**kwargs)
self.id = id
self.new_project = new_project
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("new_project")
def new_project(self) -> Optional[dict]:
return self._property_new_project
@new_project.setter
def new_project(self, value: Optional[dict]) -> None:
if value is None:
self._property_new_project = None
return
self.assert_isinstance(value, "new_project", (dict,))
self._property_new_project = value
| CloneResponse |
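Each schema_property above pairs a getter with a setter that accepts either None or a type-checked value. The same pattern in miniature (illustrative only, not the clearml API):
class Record:
    @property
    def id(self):
        return getattr(self, "_property_id", None)
    @id.setter
    def id(self, value):
        if value is None:
            self._property_id = None
            return
        if not isinstance(value, str):
            raise TypeError(f"id must be str, got {type(value).__name__}")
        self._property_id = value
r = Record()
r.id = "task-123"   # accepted
r.id = None         # accepted; clears the field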
python | pytorch__pytorch | torch/_higher_order_ops/cond.py | {
"start": 1260,
"end": 10471
} | class ____(HigherOrderOperator):
def __init__(self):
super().__init__("cond")
def __call__(self, pred, true_fn, false_fn, operands):
validate_subgraph_args_types(operands)
return super().__call__(pred, true_fn, false_fn, operands)
# pyrefly: ignore [bad-override]
def gen_schema(self, pred, true_fn, false_fn, operands):
from torch._higher_order_ops.schema import HopSchemaGenerator
from torch._higher_order_ops.utils import materialize_as_graph
then_gm: torch.fx.GraphModule = materialize_as_graph(true_fn, operands)
else_gm: torch.fx.GraphModule = materialize_as_graph(false_fn, operands)
(
_,
_,
_,
then_mutated_inputs,
then_outputs,
) = check_input_alias_and_mutation_return_outputs(then_gm)
(
_,
_,
_,
else_mutated_inputs,
else_outputs,
) = check_input_alias_and_mutation_return_outputs(else_gm)
mutated_inputs = set(then_mutated_inputs) | set(else_mutated_inputs)
schema_gen = HopSchemaGenerator(self)
schema_gen.add_arg("pred", pred)
schema_gen.add_arg("true_fn", then_gm)
schema_gen.add_arg("false_fn", else_gm)
for idx, arg in enumerate(operands):
schema_gen.add_arg(f"operand{idx}", arg, is_mutated=idx in mutated_inputs)
for out in then_outputs:
schema_gen.add_output(out)
schema_gen.add_schema_tree_spec(pred, true_fn, false_fn, operands)
return schema_gen.gen_schema()
cond_op = CondOp()
@exposed_in("torch")
def cond(
pred: Union[bool, int, float, torch.Tensor],
true_fn: Callable,
false_fn: Callable,
operands: Union[tuple, list] = (),
) -> Any:
r"""
Conditionally applies `true_fn` or `false_fn`.
.. warning::
`torch.cond` is a prototype feature in PyTorch. It has limited support for input and output types.
Please look forward to a more stable implementation in a future version of PyTorch.
Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype
`cond` is structured control flow operator. That is, it is like a Python if-statement,
but has restrictions on `true_fn`, `false_fn`, and `operands` that enable it to be
capturable using torch.compile and torch.export.
Assuming the constraints on `cond`'s arguments are met, `cond` is equivalent to the following::
def cond(pred, true_branch, false_branch, operands):
if pred:
return true_branch(*operands)
else:
return false_branch(*operands)
Args:
pred (Union[bool, torch.Tensor]): A boolean expression or a tensor with one element,
indicating which branch function to apply.
true_fn (Callable): A callable function (a -> b) that is within the
scope that is being traced.
false_fn (Callable): A callable function (a -> b) that is within the
scope that is being traced. The true branch and false branch must
have consistent input and outputs, meaning the inputs have to be
the same, and the outputs have to be the same type and shape. Int
output is also allowed. We'll make the output dynamic by turning it
into a symint.
operands (Tuple of possibly nested dict/list/tuple of torch.Tensor): A tuple of inputs to the
true/false functions. It can be empty if true_fn/false_fn doesn't require input. Defaults to ().
Example::
def true_fn(x: torch.Tensor):
return x.cos()
def false_fn(x: torch.Tensor):
return x.sin()
return cond(x.shape[0] > 4, true_fn, false_fn, (x,))
Restrictions:
- The conditional statement (aka `pred`) must meet one of the following constraints:
- It's a `torch.Tensor` with only one element, and torch.bool dtype
- It's a boolean expression, e.g. `x.shape[0] > 10` or `x.dim() > 1 and x.shape[1] > 10`
- The branch function (aka `true_fn`/`false_fn`) must meet all of the following constraints:
- The function signature must match with operands.
- The function must return a tensor with the same metadata, e.g. shape,
dtype, etc.
- The function cannot have in-place mutations on inputs or global variables.
(Note: in-place tensor operations such as `add_` for intermediate results
are allowed in a branch)
"""
if torch.compiler.is_dynamo_compiling():
return cond_op(pred, true_fn, false_fn, operands)
if isinstance(pred, (bool, int, float)):
# This is the non-strict export case. Strict export and torch.compile are
# handled above in dynamo.
if torch.compiler.is_compiling():
warnings.warn(
"Pred is a Python constant. When used with torch.cond, it specializes on one of the branches."
" If you want torch.cond to preserve two branches, please make the predicate a boolean tensor or a SymBool.",
UserWarning,
stacklevel=2,
)
# This is the eager case. We can just run the true or false branch.
if pred:
return true_fn(*operands)
else:
return false_fn(*operands)
def _validate_input(pred, true_fn, false_fn, operands):
if not isinstance(pred, (bool, torch.Tensor, torch.SymBool)):
raise RuntimeError(f"Expected pred to be bool or tensor, but got {pred}.")
if isinstance(pred, torch.Tensor) and pred.numel() != 1:
raise RuntimeError(
f"Expected pred to be bool or single-element tensor, but got {pred}."
)
if not callable(true_fn) or not callable(false_fn):
raise RuntimeError("Expect both branches to be callable.")
if not isinstance(operands, (tuple, list)) or pytree.tree_any(
lambda t: not isinstance(t, torch.Tensor), operands
):
raise RuntimeError(
"Expect operands to be a tuple of possibly nested dict/list/tuple that only "
f"consists of tensor leaves, but got {operands}."
)
_validate_input(pred, true_fn, false_fn, operands)
if not torch._dynamo.is_dynamo_supported():
raise RuntimeError("torch.cond requires dynamo support.")
# Dynamo is expecting a callable with "__code__" attribute.
# We cannot directly pass cond_op to it. So we wrap it in a dummy function.
def _cond_op_wrapper(*args, **kwargs):
return cond_op(*args, **kwargs)
from torch._higher_order_ops.utils import setup_compilation_env
with setup_compilation_env() as backend:
return torch.compile(_cond_op_wrapper, backend=backend, fullgraph=True)(
pred, true_fn, false_fn, operands
)
def trace_cond(proxy_mode, func_overload, pred, true_fn, false_fn, operands):
assert isinstance(operands, (list, tuple)), (
f"Cond operands must be a list or tuple of tensors and SymInts {operands}"
)
true_graph = reenter_make_fx(true_fn)(*operands)
false_graph = reenter_make_fx(false_fn)(*operands)
true_outs = []
false_outs = []
for node in true_graph.graph.nodes:
if node.op == "output":
true_outs.extend(node.args)
for node in false_graph.graph.nodes:
if node.op == "output":
false_outs.extend(node.args)
flat_true_outs = pytree.arg_tree_leaves(*true_outs)
flat_false_outs = pytree.arg_tree_leaves(*false_outs)
if len(flat_true_outs) != len(flat_false_outs):
raise torch._dynamo.exc.CondOpArgsMismatchError(
f"Expected to return same number of outputs but got:"
f"\n true branch returns {len(flat_true_outs)} item(s)"
f"\n false branch returns {len(flat_false_outs)} item(s)"
)
i, true_name = unique_graph_id(proxy_mode, prefix="true_graph")
false_name = f"false_graph_{i}"
assert not hasattr(proxy_mode.tracer.root, false_name)
proxy_mode.tracer.root.register_module(true_name, true_graph)
proxy_mode.tracer.root.register_module(false_name, false_graph)
args = (pred, true_graph, false_graph, operands)
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, args)
out_proxy = proxy_mode.tracer.create_proxy(
"call_function", func_overload, proxy_args, {}
)
out = func_overload(pred, true_graph, false_graph, operands)
return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer)
@cond_op.py_impl(DispatchKey.CompositeExplicitAutograd)
def cond_op_dense(pred, true_fn, false_fn, operands):
assert all(isinstance(o, (torch.Tensor, int)) for o in operands), (
f"Dense implementation operands must be a list of tensors and ints {operands}"
)
mode = _get_current_dispatch_mode()
assert mode is None, "Mode should never be enabled for CPU/CUDA key"
if pred:
return true_fn(*operands)
else:
return false_fn(*operands)
| CondOp |
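A usage sketch following the docstring's own example; assumes a PyTorch build recent enough to expose torch.cond:
import torch
def true_fn(x):
    return x.cos()
def false_fn(x):
    return x.sin()
x = torch.randn(5)
# pred is a plain bool here, so eager mode simply runs the chosen branch
out = torch.cond(x.shape[0] > 4, true_fn, false_fn, (x,))
assert torch.allclose(out, x.cos())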
python | wandb__wandb | wandb/sdk/artifacts/_generated/run_input_artifacts.py | {
"start": 239,
"end": 325
} | class ____(GQLResult):
project: Optional[RunInputArtifactsProject]
| RunInputArtifacts |
python | doocs__leetcode | solution/0400-0499/0477.Total Hamming Distance/Solution.py | {
"start": 0,
"end": 246
} | class ____:
def totalHammingDistance(self, nums: List[int]) -> int:
ans, n = 0, len(nums)
for i in range(32):
a = sum(x >> i & 1 for x in nums)
b = n - a
ans += a * b
return ans
| Solution |
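The per-bit counting above works because, at each bit position, every number with the bit set differs there from every number with it clear, contributing a * b pairs. A quick brute-force cross-check:
from itertools import combinations
nums = [4, 14, 2]
brute = sum(bin(x ^ y).count("1") for x, y in combinations(nums, 2))
fast = 0
for i in range(32):
    a = sum(x >> i & 1 for x in nums)  # numbers with bit i set
    fast += a * (len(nums) - a)        # pairs that differ at bit i
assert brute == fast == 6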
python | getsentry__sentry | tests/sentry/uptime/autodetect/test_ranking.py | {
"start": 7419,
"end": 7980
} | class ____(UptimeTestCase):
def test(self) -> None:
bucket = datetime(2024, 7, 18, 0, 47)
delete_organization_bucket(bucket)
dummy_org_id = 1487
self.project.organization = Organization(id=dummy_org_id)
self.project.organization_id = dummy_org_id
add_base_url_to_rank(self.project, "https://sentry.io")
assert get_organization_bucket(bucket) == {self.project.organization_id}
delete_organization_bucket(bucket)
assert get_organization_bucket(bucket) == set()
| DeleteOrganizationBucketTest |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_imsi_belong_to_country_code.py | {
"start": 996,
"end": 2081
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.imsi_belong_to_country_code"
condition_value_keys = ("country_code",)
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, country_code, **kwargs):
return column.apply(lambda x: imsi_country_code(x, country_code))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesImsiBelongToCountryCode |
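The imsi_country_code helper is referenced but not shown in this snippet. A plausible sketch, under the assumption that it compares the IMSI's leading 3-digit Mobile Country Code against the given country; the MCC table here is a tiny illustrative subset, not the contrib package's real data:
MCC_BY_COUNTRY = {"DE": {"262"}, "US": {"310", "311", "312", "313", "316"}}
def imsi_country_code(imsi, country_code):
    # An IMSI begins with a 3-digit MCC; match it against the country's codes.
    return str(imsi)[:3] in MCC_BY_COUNTRY.get(country_code, set())
assert imsi_country_code("262073464551236", "DE")
assert not imsi_country_code("262073464551236", "US")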