language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kubernetes-client__python | kubernetes/client/models/v2_horizontal_pod_autoscaler_condition.py | {
"start": 383,
"end": 7759
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V2HorizontalPodAutoscalerCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V2HorizontalPodAutoscalerCondition. # noqa: E501
lastTransitionTime is the last time the condition transitioned from one status to another # noqa: E501
:return: The last_transition_time of this V2HorizontalPodAutoscalerCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V2HorizontalPodAutoscalerCondition.
lastTransitionTime is the last time the condition transitioned from one status to another # noqa: E501
:param last_transition_time: The last_transition_time of this V2HorizontalPodAutoscalerCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V2HorizontalPodAutoscalerCondition. # noqa: E501
message is a human-readable explanation containing details about the transition # noqa: E501
:return: The message of this V2HorizontalPodAutoscalerCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V2HorizontalPodAutoscalerCondition.
message is a human-readable explanation containing details about the transition # noqa: E501
:param message: The message of this V2HorizontalPodAutoscalerCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V2HorizontalPodAutoscalerCondition. # noqa: E501
reason is the reason for the condition's last transition. # noqa: E501
:return: The reason of this V2HorizontalPodAutoscalerCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V2HorizontalPodAutoscalerCondition.
reason is the reason for the condition's last transition. # noqa: E501
:param reason: The reason of this V2HorizontalPodAutoscalerCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V2HorizontalPodAutoscalerCondition. # noqa: E501
status is the status of the condition (True, False, Unknown) # noqa: E501
:return: The status of this V2HorizontalPodAutoscalerCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V2HorizontalPodAutoscalerCondition.
status is the status of the condition (True, False, Unknown) # noqa: E501
:param status: The status of this V2HorizontalPodAutoscalerCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V2HorizontalPodAutoscalerCondition. # noqa: E501
type describes the current condition # noqa: E501
:return: The type of this V2HorizontalPodAutoscalerCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V2HorizontalPodAutoscalerCondition.
type describes the current condition # noqa: E501
:param type: The type of this V2HorizontalPodAutoscalerCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2HorizontalPodAutoscalerCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V2HorizontalPodAutoscalerCondition):
return True
return self.to_dict() != other.to_dict()
| V2HorizontalPodAutoscalerCondition |
python | getsentry__sentry | tests/sentry/seer/endpoints/test_organization_seer_explorer_chat.py | {
"start": 454,
"end": 3868
} | class ____(APITestCase):
def setUp(self):
super().setUp()
self.login_as(user=self.user)
self.url = f"/api/0/organizations/{self.organization.slug}/seer/explorer-chat/"
def test_get_without_run_id_returns_null_session(self) -> None:
response = self.client.get(self.url)
assert response.status_code == 404
assert response.data == {"session": None}
@patch("sentry.seer.endpoints.organization_seer_explorer_chat.SeerExplorerClient")
def test_get_with_run_id_calls_client(self, mock_client_class: MagicMock) -> None:
from sentry.seer.explorer.client_models import SeerRunState
# Mock client response
mock_state = SeerRunState(
run_id=123,
blocks=[],
status="completed",
updated_at="2024-01-01T00:00:00Z",
)
mock_client = MagicMock()
mock_client.get_run.return_value = mock_state
mock_client_class.return_value = mock_client
response = self.client.get(f"{self.url}123/")
assert response.status_code == 200
assert response.data["session"]["run_id"] == 123
assert response.data["session"]["status"] == "completed"
mock_client.get_run.assert_called_once_with(run_id=123)
def test_post_without_query_returns_400(self) -> None:
data: dict[str, Any] = {}
response = self.client.post(self.url, data, format="json")
assert response.status_code == 400
def test_post_with_empty_query_returns_400(self) -> None:
data = {"query": ""}
response = self.client.post(self.url, data, format="json")
assert response.status_code == 400
@patch("sentry.seer.endpoints.organization_seer_explorer_chat.SeerExplorerClient")
def test_post_new_conversation_calls_client(self, mock_client_class: MagicMock):
mock_client = MagicMock()
mock_client.start_run.return_value = 456
mock_client_class.return_value = mock_client
data = {"query": "What is this error about?"}
response = self.client.post(self.url, data, format="json")
assert response.status_code == 200
assert response.data == {"run_id": 456}
# Verify client was called correctly
mock_client_class.assert_called_once_with(self.organization, ANY, is_interactive=True)
mock_client.start_run.assert_called_once_with(
prompt="What is this error about?", on_page_context=None
)
@patch("sentry.seer.endpoints.organization_seer_explorer_chat.SeerExplorerClient")
def test_post_continue_conversation_calls_client(self, mock_client_class: MagicMock) -> None:
mock_client = MagicMock()
mock_client.continue_run.return_value = 789
mock_client_class.return_value = mock_client
data = {
"query": "Follow up question",
"insert_index": 2,
}
response = self.client.post(f"{self.url}789/", data, format="json")
assert response.status_code == 200
assert response.data == {"run_id": 789}
# Verify client was called correctly
mock_client_class.assert_called_once_with(self.organization, ANY, is_interactive=True)
mock_client.continue_run.assert_called_once_with(
run_id=789, prompt="Follow up question", insert_index=2, on_page_context=None
)
| OrganizationSeerExplorerChatEndpointTest |
python | pytorch__pytorch | torchgen/utils.py | {
"start": 13319,
"end": 15479
} | class ____:
"""A helper for constructing the namespace open and close strings for a nested set of namespaces.
e.g. for namespace_str torch::lazy,
prologue:
namespace torch {
namespace lazy {
epilogue:
} // namespace lazy
} // namespace torch
"""
def __init__(
self,
namespace_str: str,
entity_name: str = "",
max_level: int = 2,
) -> None:
# cpp_namespace can be a colon joined string such as torch::lazy
cpp_namespaces = namespace_str.split("::")
assert len(cpp_namespaces) <= max_level, (
f"Codegen doesn't support more than {max_level} level(s) of custom namespace. Got {namespace_str}."
)
self.cpp_namespace_ = namespace_str
self.prologue_ = "\n".join([f"namespace {n} {{" for n in cpp_namespaces])
self.epilogue_ = "\n".join(
[f"}} // namespace {n}" for n in reversed(cpp_namespaces)]
)
self.namespaces_ = cpp_namespaces
self.entity_name_ = entity_name
@staticmethod
def from_namespaced_entity(
namespaced_entity: str,
max_level: int = 2,
) -> NamespaceHelper:
"""
Generate helper from nested namespaces as long as class/function name. E.g.: "torch::lazy::add"
"""
names = namespaced_entity.split("::")
entity_name = names[-1]
namespace_str = "::".join(names[:-1])
return NamespaceHelper(
namespace_str=namespace_str, entity_name=entity_name, max_level=max_level
)
@property
def prologue(self) -> str:
return self.prologue_
@property
def epilogue(self) -> str:
return self.epilogue_
@property
def entity_name(self) -> str:
return self.entity_name_
# Only allow certain level of namespaces
def get_cpp_namespace(self, default: str = "") -> str:
"""
Return the namespace string from joining all the namespaces by "::" (hence no leading "::").
Return default if namespace string is empty.
"""
return self.cpp_namespace_ if self.cpp_namespace_ else default
| NamespaceHelper |
python | apache__airflow | airflow-core/src/airflow/timetables/trigger.py | {
"start": 4776,
"end": 6185
} | class ____(DeltaMixin, _TriggerTimetable):
"""
Timetable that triggers DAG runs according to a cron expression.
This is different from ``DeltaDataIntervalTimetable``, where the delta value
specifies the *data interval* of a DAG run. With this timetable, the data
intervals are specified independently. Also for the same reason, this
timetable kicks off a DAG run immediately at the start of the period,
instead of needing to wait for one data interval to pass.
:param delta: How much time to wait between each run.
:param interval: The data interval of each run. Default is 0.
"""
def __init__(
self,
delta: datetime.timedelta | relativedelta,
*,
interval: datetime.timedelta | relativedelta = datetime.timedelta(),
) -> None:
super().__init__(delta)
self._interval = interval
@classmethod
def deserialize(cls, data: dict[str, Any]) -> Timetable:
return cls(
_deserialize_interval(data["delta"]),
interval=_deserialize_interval(data["interval"]),
)
def serialize(self) -> dict[str, Any]:
return {
"delta": _serialize_interval(self._delta),
"interval": _serialize_interval(self._interval),
}
def _calc_first_run(self) -> DateTime:
return self._align_to_prev(coerce_datetime(utcnow()))
| DeltaTriggerTimetable |
python | doocs__leetcode | solution/2500-2599/2544.Alternating Digit Sum/Solution2.py | {
"start": 0,
"end": 207
} | class ____:
def alternateDigitSum(self, n: int) -> int:
ans, sign = 0, 1
for c in str(n):
x = int(c)
ans += sign * x
sign *= -1
return ans
| Solution |
python | ray-project__ray | ci/ray_ci/test_base.py | {
"start": 212,
"end": 1334
} | class ____(unittest.TestCase):
def setUp(self) -> None:
ci_init()
self.patcher = patch.dict(
os.environ,
{
"RAYCI_CHECKOUT_DIR": "/ray",
"RAYCI_BUILD_ID": "a1b2c3d4",
"RAYCI_WORK_REPO": "rayproject/citemp",
"BUILDKITE_COMMIT": "123456",
"BUILDKITE_BRANCH": "master",
"BUILDKITE_PIPELINE_ID": "123456",
},
)
self.patcher.start()
def tearDown(self) -> None:
self.patcher.stop()
def get_non_default_python(self) -> str:
for version in PYTHON_VERSIONS.keys():
if version not in [DEFAULT_PYTHON_VERSION, DEFAULT_PYTHON_TAG_VERSION]:
return version
raise ValueError(
f"No non-default python version found in {PYTHON_VERSIONS.keys()}"
)
def get_python_version(self, version: str) -> str:
return f"py{version.replace('.', '')}" # 3.x -> py3x
def get_cpp_version(self, version: str) -> str:
return f"cp{version.replace('.', '')}" # 3.x -> cp3x
| RayCITestBase |
python | getsentry__sentry | tests/sentry/snuba/test_subscriptions.py | {
"start": 12448,
"end": 17989
} | class ____(TestCase):
def test(self) -> None:
old_dataset = Dataset.Events
old_query = "level:error"
old_aggregate = "count()"
with self.tasks():
snuba_query = create_snuba_query(
SnubaQuery.Type.ERROR,
old_dataset,
old_query,
old_aggregate,
timedelta(minutes=10),
timedelta(minutes=1),
None,
)
subscription = create_snuba_subscription(self.project, "something", snuba_query)
old_type = SnubaQuery.Type(snuba_query.type)
dataset = Dataset.Transactions
query = "level:warning"
aggregate = "count_unique(tags[sentry:user])"
time_window = timedelta(minutes=20)
resolution = timedelta(minutes=2)
subscription = QuerySubscription.objects.get(id=subscription.id)
subscription_id = subscription.subscription_id
snuba_query.update(
type=SnubaQuery.Type.PERFORMANCE.value,
dataset=dataset.value,
query=query,
time_window=int(time_window.total_seconds()),
resolution=int(resolution.total_seconds()),
environment=self.environment,
aggregate=aggregate,
)
assert subscription_id is not None
update_snuba_subscription(subscription, old_type, old_dataset, old_aggregate, old_query)
assert subscription.status == QuerySubscription.Status.UPDATING.value
assert subscription.subscription_id == subscription_id
assert subscription.snuba_query.dataset == dataset.value
assert subscription.snuba_query.query == query
assert subscription.snuba_query.aggregate == aggregate
assert subscription.snuba_query.time_window == int(time_window.total_seconds())
assert subscription.snuba_query.resolution == int(resolution.total_seconds())
def test_with_task(self) -> None:
with self.tasks():
old_dataset = Dataset.Events
old_query = "level:error"
old_aggregate = "count()"
snuba_query = create_snuba_query(
SnubaQuery.Type.ERROR,
old_dataset,
old_query,
old_aggregate,
timedelta(minutes=10),
timedelta(minutes=1),
None,
)
subscription = create_snuba_subscription(self.project, "something", snuba_query)
old_type = SnubaQuery.Type(snuba_query.type)
dataset = Dataset.Transactions
query = "level:warning"
aggregate = "count_unique(tags[sentry:user])"
time_window = timedelta(minutes=20)
resolution = timedelta(minutes=2)
subscription = QuerySubscription.objects.get(id=subscription.id)
subscription_id = subscription.subscription_id
assert subscription_id is not None
snuba_query.update(
type=SnubaQuery.Type.PERFORMANCE.value,
dataset=dataset.value,
query=query,
time_window=int(time_window.total_seconds()),
resolution=int(resolution.total_seconds()),
environment=self.environment,
aggregate=aggregate,
)
update_snuba_subscription(subscription, old_type, old_dataset, old_aggregate, old_query)
subscription = QuerySubscription.objects.get(id=subscription.id)
assert subscription.status == QuerySubscription.Status.ACTIVE.value
assert subscription.subscription_id is not None
assert subscription.subscription_id != subscription_id
def test_perf_metric_to_transaction(self) -> None:
with self.tasks():
old_dataset = Dataset.PerformanceMetrics
old_query = ""
old_aggregate = "count()"
snuba_query = create_snuba_query(
SnubaQuery.Type.PERFORMANCE,
old_dataset,
old_query,
old_aggregate,
timedelta(minutes=10),
timedelta(minutes=1),
None,
)
subscription = create_snuba_subscription(self.project, "something", snuba_query)
old_type = SnubaQuery.Type(snuba_query.type)
dataset = Dataset.Transactions
query = "level:warning"
aggregate = "count()"
time_window = timedelta(minutes=20)
resolution = timedelta(minutes=2)
subscription = QuerySubscription.objects.get(id=subscription.id)
subscription_id = subscription.subscription_id
assert subscription_id is not None
snuba_query.update(
type=SnubaQuery.Type.PERFORMANCE.value,
dataset=dataset.value,
query=query,
time_window=int(time_window.total_seconds()),
resolution=int(resolution.total_seconds()),
environment=self.environment,
aggregate=aggregate,
)
update_snuba_subscription(subscription, old_type, old_dataset, old_aggregate, old_query)
subscription = QuerySubscription.objects.get(id=subscription.id)
assert subscription.status == QuerySubscription.Status.ACTIVE.value
assert subscription.subscription_id is not None
assert subscription.subscription_id != subscription_id
| UpdateSnubaSubscriptionTest |
python | pandas-dev__pandas | pandas/core/computation/scope.py | {
"start": 484,
"end": 2691
} | class ____(ChainMap[_KT, _VT]):
"""
Variant of ChainMap that allows direct updates to inner scopes.
Only works when all passed mapping are mutable.
"""
def __setitem__(self, key: _KT, value: _VT) -> None:
for mapping in self.maps:
if key in mapping:
mapping[key] = value
return
self.maps[0][key] = value
def __delitem__(self, key: _KT) -> None:
"""
Raises
------
KeyError
If `key` doesn't exist.
"""
for mapping in self.maps:
if key in mapping:
del mapping[key]
return
raise KeyError(key)
def ensure_scope(
level: int, global_dict=None, local_dict=None, resolvers=(), target=None
) -> Scope:
"""Ensure that we are grabbing the correct scope."""
return Scope(
level + 1,
global_dict=global_dict,
local_dict=local_dict,
resolvers=resolvers,
target=target,
)
def _replacer(x) -> str:
"""
Replace a number with its hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin)
def _raw_hex_id(obj) -> str:
"""Return the padded hexadecimal id of ``obj``."""
# interpret as a pointer since that's what really what id returns
packed = struct.pack("@P", id(obj))
return "".join([_replacer(x) for x in packed])
DEFAULT_GLOBALS = {
"Timestamp": Timestamp,
"datetime": datetime.datetime,
"True": True,
"False": False,
"list": list,
"tuple": tuple,
"inf": np.inf,
"Inf": np.inf,
}
def _get_pretty_string(obj) -> str:
"""
Return a prettier version of obj.
Parameters
----------
obj : object
Object to pretty print
Returns
-------
str
Pretty print object repr
"""
sio = StringIO()
pprint.pprint(obj, stream=sio)
return sio.getvalue()
| DeepChainMap |
python | kamyu104__LeetCode-Solutions | Python/distinct-numbers-in-each-subarray.py | {
"start": 50,
"end": 573
} | class ____(object):
def distinctNumbers(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
result = []
count = collections.Counter()
for i, num in enumerate(nums):
count[num] += 1
if i >= k:
count[nums[i-k]] -= 1
if not count[nums[i-k]]:
del count[nums[i-k]]
if i+1 >= k:
result.append(len(count))
return result
| Solution |
python | networkx__networkx | networkx/algorithms/tests/test_boundary.py | {
"start": 246,
"end": 3334
} | class ____:
"""Unit tests for the :func:`~networkx.node_boundary` function."""
def test_null_graph(self):
"""Tests that the null graph has empty node boundaries."""
null = nx.null_graph()
assert nx.node_boundary(null, []) == set()
assert nx.node_boundary(null, [], []) == set()
assert nx.node_boundary(null, [1, 2, 3]) == set()
assert nx.node_boundary(null, [1, 2, 3], [4, 5, 6]) == set()
assert nx.node_boundary(null, [1, 2, 3], [3, 4, 5]) == set()
def test_path_graph(self):
P10 = cnlti(nx.path_graph(10), first_label=1)
assert nx.node_boundary(P10, []) == set()
assert nx.node_boundary(P10, [], []) == set()
assert nx.node_boundary(P10, [1, 2, 3]) == {4}
assert nx.node_boundary(P10, [4, 5, 6]) == {3, 7}
assert nx.node_boundary(P10, [3, 4, 5, 6, 7]) == {2, 8}
assert nx.node_boundary(P10, [8, 9, 10]) == {7}
assert nx.node_boundary(P10, [4, 5, 6], [9, 10]) == set()
def test_complete_graph(self):
K10 = cnlti(nx.complete_graph(10), first_label=1)
assert nx.node_boundary(K10, []) == set()
assert nx.node_boundary(K10, [], []) == set()
assert nx.node_boundary(K10, [1, 2, 3]) == {4, 5, 6, 7, 8, 9, 10}
assert nx.node_boundary(K10, [4, 5, 6]) == {1, 2, 3, 7, 8, 9, 10}
assert nx.node_boundary(K10, [3, 4, 5, 6, 7]) == {1, 2, 8, 9, 10}
assert nx.node_boundary(K10, [4, 5, 6], []) == set()
assert nx.node_boundary(K10, K10) == set()
assert nx.node_boundary(K10, [1, 2, 3], [3, 4, 5]) == {4, 5}
def test_petersen(self):
"""Check boundaries in the petersen graph
cheeger(G,k)=min(|bdy(S)|/|S| for |S|=k, 0<k<=|V(G)|/2)
"""
def cheeger(G, k):
return min(len(nx.node_boundary(G, nn)) / k for nn in combinations(G, k))
P = nx.petersen_graph()
assert cheeger(P, 1) == pytest.approx(3.00, abs=1e-2)
assert cheeger(P, 2) == pytest.approx(2.00, abs=1e-2)
assert cheeger(P, 3) == pytest.approx(1.67, abs=1e-2)
assert cheeger(P, 4) == pytest.approx(1.00, abs=1e-2)
assert cheeger(P, 5) == pytest.approx(0.80, abs=1e-2)
def test_directed(self):
"""Tests the node boundary of a directed graph."""
G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)])
S = {0, 1}
boundary = nx.node_boundary(G, S)
expected = {2}
assert boundary == expected
def test_multigraph(self):
"""Tests the node boundary of a multigraph."""
G = nx.MultiGraph(list(nx.cycle_graph(5).edges()) * 2)
S = {0, 1}
boundary = nx.node_boundary(G, S)
expected = {2, 4}
assert boundary == expected
def test_multidigraph(self):
"""Tests the edge boundary of a multidigraph."""
edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]
G = nx.MultiDiGraph(edges * 2)
S = {0, 1}
boundary = nx.node_boundary(G, S)
expected = {2}
assert boundary == expected
| TestNodeBoundary |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis30.py | {
"start": 315,
"end": 1399
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis30.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [69200896, 69215360]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
chart.set_x_axis({"position_axis": "on_tick"})
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | nryoung__algorithms | algorithms/data_structures/union_find_by_rank.py | {
"start": 444,
"end": 2276
} | class ____:
def __init__(self, N):
if type(N) != int:
raise TypeError("size must be integer")
if N < 0:
raise ValueError("N cannot be a negative integer")
self.__parent = []
self.__rank = []
self.__N = N
for i in range(0, N):
self.__parent.append(i)
self.__rank.append(0)
def make_set(self, x):
if type(x) != int:
raise TypeError("x must be integer")
if x != self.__N:
raise ValueError(
"a new element must have index {0}".format(self.__N)
)
self.__parent.append(x)
self.__rank.append(0)
self.__N = self.__N + 1
def union(self, x, y):
self.__validate_ele(x)
self.__validate_ele(y)
x_root = self.find(x)
y_root = self.find(y)
if x_root == y_root:
return
# x and y are not already in same set. Merge them
if self.__rank[x_root] < self.__rank[y_root]:
self.__parent[x_root] = y_root
elif self.__rank[x_root] > self.__rank[y_root]:
self.__parent[y_root] = x_root
else:
self.__parent[y_root] = x_root
self.__rank[x_root] = self.__rank[x_root] + 1
def find(self, x):
self.__validate_ele(x)
if self.__parent[x] == x:
return x
else:
return self.find(self.__parent[x])
def is_connected(self, x, y):
self.__validate_ele(x)
self.__validate_ele(y)
return self.find(x) == self.find(y)
def __validate_ele(self, x):
if type(x) != int:
raise TypeError("{0} is not an integer".format(x))
if x < 0 or x >= self.__N:
raise ValueError("{0} is not in [0,{1})".format(x, self.__N))
| UnionFindByRank |
python | doocs__leetcode | solution/2400-2499/2446.Determine if Two Events Have Conflict/Solution.py | {
"start": 0,
"end": 158
} | class ____:
def haveConflict(self, event1: List[str], event2: List[str]) -> bool:
return not (event1[0] > event2[1] or event1[1] < event2[0])
| Solution |
python | openai__openai-python | src/openai/_module_client.py | {
"start": 2929,
"end": 3072
} | class ____(LazyProxy["Embeddings"]):
@override
def __load__(self) -> Embeddings:
return _load_client().embeddings
| EmbeddingsProxy |
python | ray-project__ray | python/ray/_common/tests/test_utils.py | {
"start": 4571,
"end": 6510
} | class ____:
"""Tests for the load_class utility function."""
def test_load_builtin_class(self):
"""Test loading a builtin class."""
list_class = load_class("builtins.list")
assert list_class is list, "Should load the builtin list class"
def test_load_module(self):
"""Test loading a module."""
path_module = load_class("os.path")
import os.path
assert path_module is os.path, "Should load os.path module"
def test_load_function(self):
"""Test loading a function from a module."""
makedirs_func = load_class("os.makedirs")
assert makedirs_func is os.makedirs, "Should load os.makedirs function"
def test_load_standard_library_class(self):
"""Test loading a standard library class."""
temp_dir_class = load_class("tempfile.TemporaryDirectory")
assert (
temp_dir_class is tempfile.TemporaryDirectory
), "Should load TemporaryDirectory class"
def test_load_nested_module_class(self):
"""Test loading a class from a nested module."""
datetime_class = load_class("datetime.datetime")
import datetime
assert (
datetime_class is datetime.datetime
), "Should load datetime.datetime class"
def test_invalid_path_error(self):
"""Test error handling for invalid paths."""
with pytest.raises(ValueError, match="valid path like mymodule.provider_class"):
load_class("invalid")
def test_nonexistent_module_error(self):
"""Test error handling for nonexistent modules."""
with pytest.raises((ImportError, ModuleNotFoundError)):
load_class("nonexistent_module.SomeClass")
def test_nonexistent_attribute_error(self):
"""Test error handling for nonexistent attributes."""
with pytest.raises(AttributeError):
load_class("os.NonexistentClass")
| TestLoadClass |
python | jina-ai__jina | jina/helper.py | {
"start": 29656,
"end": 30200
} | class ____:
"""
This context manager guarantees, that the :method:``__exit__`` of the
sub context is called, even when there is an Exception in the
:method:``__enter__``.
:param sub_context: The context, that should be taken care of.
"""
def __init__(self, sub_context):
self.sub_context = sub_context
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.sub_context.__exit__(exc_type, exc_val, exc_tb)
| CatchAllCleanupContextManager |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 7368,
"end": 7955
} | class ____(object):
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
| lazy_attribute |
python | mlflow__mlflow | mlflow/entities/trace_location.py | {
"start": 2289,
"end": 4197
} | class ____(TraceLocationBase):
"""
Represents the location of a Databricks Unity Catalog (UC) schema.
Args:
catalog_name: The name of the Unity Catalog catalog name.
schema_name: The name of the Unity Catalog schema.
"""
catalog_name: str
schema_name: str
# These table names are set by the backend
_otel_spans_table_name: str | None = _UC_SCHEMA_DEFAULT_SPANS_TABLE_NAME
_otel_logs_table_name: str | None = _UC_SCHEMA_DEFAULT_LOGS_TABLE_NAME
@property
def schema_location(self) -> str:
return f"{self.catalog_name}.{self.schema_name}"
@property
def full_otel_spans_table_name(self) -> str | None:
if self._otel_spans_table_name:
return f"{self.catalog_name}.{self.schema_name}.{self._otel_spans_table_name}"
@property
def full_otel_logs_table_name(self) -> str | None:
if self._otel_logs_table_name:
return f"{self.catalog_name}.{self.schema_name}.{self._otel_logs_table_name}"
def to_dict(self) -> dict[str, Any]:
d = {
"catalog_name": self.catalog_name,
"schema_name": self.schema_name,
}
if self._otel_spans_table_name:
d["otel_spans_table_name"] = self._otel_spans_table_name
if self._otel_logs_table_name:
d["otel_logs_table_name"] = self._otel_logs_table_name
return d
@classmethod
def from_dict(cls, d: dict[str, Any]) -> "UCSchemaLocation":
location = cls(
catalog_name=d["catalog_name"],
schema_name=d["schema_name"],
)
if otel_spans_table_name := d.get("otel_spans_table_name"):
location._otel_spans_table_name = otel_spans_table_name
if otel_logs_table_name := d.get("otel_logs_table_name"):
location._otel_logs_table_name = otel_logs_table_name
return location
| UCSchemaLocation |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_details.py | {
"start": 12923,
"end": 16727
} | class ____(UserDetailsTest):
method = "put"
@fixture(autouse=True)
def _activate_staff_mode(self) -> Generator[None]:
with override_options({"staff.ga-rollout": True}):
yield
def test_staff_can_change_is_active(self) -> None:
self.user.update(is_active=True)
self.login_as(user=self.staff_user, staff=True)
resp = self.get_success_response(
self.user.id,
isActive="false",
)
assert resp.data["id"] == str(self.user.id)
user = User.objects.get(id=self.user.id)
assert not user.is_active
def test_staff_with_permission_can_change_is_active(self) -> None:
self.user.update(is_active=True)
UserPermission.objects.create(user=self.staff_user, permission="users.admin")
self.login_as(user=self.staff_user, staff=True)
resp = self.get_success_response(
self.user.id,
isActive="false",
)
assert resp.data["id"] == str(self.user.id)
user = User.objects.get(id=self.user.id)
assert not user.is_active
def test_staff_cannot_add_superuser(self) -> None:
self.user.update(is_superuser=False)
self.login_as(user=self.staff_user, staff=True)
resp = self.get_error_response(
self.user.id,
isSuperuser="true",
status_code=403,
)
assert resp.data["detail"] == "Missing required permission to add superuser."
user = User.objects.get(id=self.user.id)
assert not user.is_superuser
def test_staff_cannot_add_staff(self) -> None:
self.user.update(is_staff=False)
self.login_as(user=self.staff_user, staff=True)
resp = self.get_error_response(
self.user.id,
isStaff="true",
status_code=403,
)
assert resp.data["detail"] == "Missing required permission to add admin."
user = User.objects.get(id=self.user.id)
assert not user.is_staff
def test_superuser_cannot_add_superuser_or_staff_with_feature_flag(self) -> None:
self.user.update(is_staff=False)
self.login_as(user=self.superuser, superuser=True)
resp = self.get_error_response(
self.user.id,
isStaff="true",
status_code=403,
)
assert resp.data["detail"] == "Missing required permission to add admin."
resp = self.get_error_response(
self.user.id,
isSuperuser="true",
status_code=403,
)
assert resp.data["detail"] == "Missing required permission to add superuser."
user = User.objects.get(id=self.user.id)
assert not user.is_staff
assert not user.is_superuser
def test_staff_with_permission_can_add_superuser(self) -> None:
self.user.update(is_superuser=False)
UserPermission.objects.create(user=self.staff_user, permission="users.admin")
self.login_as(user=self.staff_user, staff=True)
resp = self.get_success_response(
self.user.id,
isSuperuser="true",
)
assert resp.data["id"] == str(self.user.id)
user = User.objects.get(id=self.user.id)
assert user.is_superuser
def test_staff_with_permission_can_add_staff(self) -> None:
self.user.update(is_staff=False)
UserPermission.objects.create(user=self.staff_user, permission="users.admin")
self.login_as(user=self.staff_user, staff=True)
resp = self.get_success_response(
self.user.id,
isStaff="true",
)
assert resp.data["id"] == str(self.user.id)
user = User.objects.get(id=self.user.id)
assert user.is_staff
@control_silo_test
| UserDetailsStaffUpdateTest |
python | altair-viz__altair | altair/utils/deprecation.py | {
"start": 4786,
"end": 5620
} | class ____:
def __init__(self) -> None:
self._warned: dict[LiteralString, Literal[True]] = {}
self._lock = threading.Lock()
def __contains__(self, key: LiteralString, /) -> bool:
with self._lock:
return key in self._warned
def hit(self, key: LiteralString, /) -> None:
with self._lock:
self._warned[key] = True
def clear(self) -> None:
with self._lock:
self._warned.clear()
_warnings_monitor = _WarningsMonitor()
def _warn_once(
msg: LiteralString, /, *, category: type[AltairDeprecationWarning], stacklevel: int
) -> None:
global _warnings_monitor
if msg in _warnings_monitor:
return
else:
_warnings_monitor.hit(msg)
warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
| _WarningsMonitor |
python | google__pytype | pytype/tools/traces/traces_test.py | {
"start": 10384,
"end": 11137
} | class ____(MatchAstTestCase):
def test_modulo(self):
matches = self._get_traces("""
v = "hello %s"
print(v % "world")
""", ast.BinOp)
self.assertTracesEqual(matches, [((2, 6), _BINMOD_OP, "__mod__", ("str",))])
def test_modulo_multiline_string(self):
matches = self._get_traces("""
('%s'
'%s' %
('hello',
'world'))
""", ast.BinOp)
self.assertTracesEqual(matches, [((1, 1), _BINMOD_OP, "__mod__", ("str",))])
def test_format_multiline_string(self):
matches = self._get_traces("""
('%s'
'%s' %
(__any_object__,
__any_object__))
""", ast.BinOp)
self.assertTracesEqual(
matches, [((1, 1), _FORMAT_OP, "__mod__", ("str",))])
| MatchBinOpTest |
python | joke2k__faker | faker/providers/job/el_GR/__init__.py | {
"start": 42,
"end": 16458
} | class ____(BaseProvider):
jobs = [
"Αγγειοπλάστης",
"Αγιογράφος Βυζαντινών Εικόνων και Τοιχογραφιών",
"Αγρονόμος Τοπογράφος Μηχανικός",
"Αγρότης Καλλιεργητής",
"Αεροναυπηγός Μηχανικός",
"Αεροσυνοδός",
"Αθλητικός Δημοσιογράφος – Ρεπόρτερ",
"Αισθητικός - Ειδικός Εφαρμογών Αισθητικής",
"Αισθητικός Αρωματοθεραπείας",
"Αισθητικός Επαγγελματικού Μακιγιάζ Θεάτρου και Σκηνής",
"Αισθητικός Ποδολογίας και Καλλωπισμού Νυχιών",
"Τεχνίτης Περιποίησης Νυχιών",
"Ακροβάτης",
"Ακτινοδιαγνώστης",
"Αλουμινάς",
"Αναισθησιολόγος",
"Αναλογιστής",
"Ανθοκόμος",
"Αξιωματικός Μηχανικός Ναυσιπλοΐας",
"Αξιωματικός Νοσηλευτικής",
"Αξιωματικός Πολεμικής Αεροπορίας",
"Αξιωματικός Στρατού Ξηράς",
"Αξιωματικός Σωμάτων Ενόπλων Δυνάμεων",
"Αξιωματικός Φαρμακοποιός",
"Αργυροχρυσοχόος",
"Αρτοποιός",
"Αρχαιολόγος",
"Αρχειοθέτης",
"Αρχιμάγειρας",
"Αρχισυντάκτης",
"Αρχιτέκτονας",
"Αστυνομικός",
"Ασφαλιστής",
"Βιβλιοδέτης",
"Βιβλιοθηκονόμος",
"Βιολόγος",
"Βιοπληροφορικός",
"Βοηθός Αναισθησιολόγου",
"Βοηθός Αρτοποιίας – Ζαχαροπλαστικής",
"Βοηθός Ιατρικών και Βιολογικών Εργαστηρίων, Βιοχημικού και Μικροβιολογικού Εργαστηρίου",
"Βοηθός Ιατρικών και Βιολογικών Εργαστηρίων, Αιματολογικού Εργαστηρίου",
"Βοηθός Κομμωτή",
"Βοηθός Λογιστή",
"Βοηθός Μαγειρικής Τέχνης",
"Βοηθός Νοσηλευτή Μονάδας Εντατικής Θεραπείας",
"Βοηθός Νοσηλευτή Τραυματολογίας",
"Βοηθός Νοσηλευτή Χειρουργείου",
"Βοηθός Οδοντιάτρου",
"Βοηθός Φαρμακοποιού",
"Βοηθός Φυσικοθεραπευτή σε Ιαματικές Πηγές και Λουτρά",
"Βοηθός Φυσικοθεραπευτή - Υπάλληλος Εργαστηρίου Φυσικοθεραπείας",
"Βοτανολόγος",
"Βρεφοκόμος",
"Γεωγράφος",
"Γεωλόγος",
"Γεωπόνος Φυτικής Παραγωγής",
"Γεωργός Βιολογικής Γεωργίας",
"Γεωτεχνολόγος",
"Γεωτρυπανιστής",
"Γεωφυσικός",
"Γλωσσολόγος",
"Γλύπτης",
"Γουνοποιός",
"Γραμματέας",
"Γραμματέας Νομικών Επαγγελμάτων",
"Γραφίστας",
"Γραφολόγος",
"Δάσκαλος",
"Δασολόγος",
"Δασοφύλακας - Ειδικός Δασικής Προστασίας",
"Δημοσιογράφος Συντάκτης - Ρεπόρτερ Δημοσιογραφίας",
"Διαδικτυακός Διαφημιστής",
"Διαιτητής Αθλήματος",
"Διαιτολόγος - Ειδικός Εφαρμογών Διαιτητικής",
"Διακοσμητής - Τεχνικός Διακόσμησης",
"Διαφημιστής",
"Διαχειριστής Logistics",
"Διαχειριστής Τεχνολογίας Internet",
"Διερμηνέας",
"Διευθυντής Eπιχειρήσεων",
"Διευθύνων Επιχειρηματίας",
"Δικαστής",
"Δικαστικός Επιμελητής",
"Δικηγόρος",
"Διοικητικό Στέλεχος Αθλητισμού",
"Διοικητικό και Οικονομικό Στέλεχος Επιχειρήσεων",
"Διοικητικός Υπάλληλος",
"Διπλωμάτης",
"Εγκληματολόγος",
"Εθνολόγος - Λαογράφος",
"Ειδικός 3D Animation",
"Ειδικός Αεροπορικών Εταιριών",
"Ειδικός Αργυροχρυσοχοΐας",
"Ειδικός Ασφαλιστικών Εργασιών",
"Ειδικός Γεωγραφικών Πληροφοριακών Συστημάτων",
"Ειδικός Διοίκησης Πολιτισμικών Μονάδων",
"Ειδικός Εκπαίδευσης Ενηλίκων",
"Ειδικός Εμπορίας, Διαφήμισης και Προώθησης Προϊόντων",
"Ειδικός Εμπορικών Επιχειρήσεων",
"Ειδικός Ενδυματολογίας και Σχεδίασης Μόδας",
"Ειδικός Επιστήμων Θάλασσας",
"Ειδικός Ηλεκτρονικής Σχεδίασης Εντύπου",
"Ειδικός Ιατρικών Συσκευών Ακτινοθεραπείας",
"Ειδικός Ιατρικών Συσκευών Απεικονίσεων",
"Ειδικός Ιατρικών και Βιολογικών Εργαστηρίων Αιματολογικού Εργαστηρίου",
"Ειδικός Κινηματογράφου",
"Ειδικός Μάρκετινγκ",
"Ειδικός Μουσικής με Τεχνολογία Η/Υ",
"Ειδικός Ναυτιλιακής Κατεύθυνσης",
"Ειδικός Νοσηλευτικής Ατόμων με Ψυχικές Παθήσεις",
"Ειδικός Νοσηλευτικής Μονάδων Εντατικής Θεραπείας",
"Ειδικός Νοσηλευτικής Ογκολογικών Παθήσεων",
"Ειδικός Νοσηλευτικής Τραυματολογίας",
"Ειδικός Ξενοδοχειακής Ψυχαγωγίας",
"Ειδικός Ξενοδοχειακών Υπηρεσιών και Τροφοδοσίας",
"Ειδικός Οδοντοτεχνικής Ορθοδοντικής",
"Ειδικός Οδοντοτεχνικής Πορσελάνης",
"Ειδικός Οπτικών και Ηχητικών Εφέ",
"Ειδικός Παθολόγος Λόγου και Ομιλίας",
"Ειδικός Πληροφοριακών Συστημάτων με Πολυμέσα",
"Ειδικός Προσχολικής Αγωγής Δραστηριοτήτων Δημιουργίας και Έκφρασης",
"Ειδικός Προσχολικής Αγωγής Ημερήσιας Φροντίδας Παιδιών με Ειδικές Ανάγκες",
"Ειδικός Πρόληψης και Θεραπείας Εξαρτημένων Ατόμων",
"Ειδικός Πωλήσεων",
"Ειδικός Στατιστικής Έρευνας Αγοράς και Δημοσκοπήσεων – Στέλεχος Στατιστικής Δημοσκόπησης",
"Ειδικός Συναρμολόγησης Εικόνας (Μοντέρ) - Ηλεκτρονική Επεξεργασία Εικόνας",
"Ειδικός Σχεδίασης Διαδραστικών και Βιομηχανικών Προϊόντων και Συστημάτων",
"Ειδικός Σχεδίου και Μάρκετινγκ Μόδας",
"Ειδικός Σχεδιασμού Αστικής και Περιφερειακής Ανάπτυξης",
"Ειδικός Σχεδιασμού Επίπλου",
"Ειδικός Τροφοδοσίας-Σίτισης",
"Υδροθεραπευτής - Λουτροθεραπευτής",
"Ειδικός Φοροτεχνικού Γραφείου",
"Ειδικός Φρουρός",
"Ειδικός Ψηφιακών Μορφών Τέχνης",
"Εικονογράφος - Σκιτσογράφος",
"Εικονολήπτης",
"Εισαγγελέας",
"Εκδότης Βιβλίων",
"Εκκλησιαστική και Πολιτιστική Κατάρτιση",
"Εκπαιδευτής Ζώων",
"Εκπαιδευτής Υποψήφιων Οδηγών Αυτοκινήτων και Μοτοσικλετών",
"Εκπαιδευτικός Ειδικής Αγωγής",
"Εκτελωνιστής και Πράκτορας Μεταφορών",
"Εκτιμητής Έργων Τέχνης",
"Εκτιμητής και Εκπλειστηριαστής",
"Εκφωνητής",
"Ελαιοχρωματιστής",
"Ελεγκτής Εναέριας Κυκλοφορίας",
"Ελεγκτής Ολικής Ποιότητας",
"Ενδυματολόγος",
"Επαγγελματίας Αθλητής",
"Επαγγελματίας Δύτης",
"Επαγγελματικό Μακιγιάζ",
"Επιθεωρητής κτιρίων και πυρασφάλειας",
"Επικοινωνιολόγος",
"Επιμελητής Πτήσεων",
"Επιπλοποιός",
"Επισκέπτης Υγείας",
"Επισκευαστής Λαμαρινών Αυτοκινήτων-Οχημάτων",
"Επισκευαστής Πλαστικών Σκαφών",
"Επιστήμων Πληροφορικής και Η/Υ",
"Επόπτης Δημόσιας Υγείας",
"Εργοθεραπευτής",
"Ζαχαροπλάστης",
"Ζωγράφος",
"Ζωολόγος",
"Ηθοποιός",
"Ηλεκτρολόγος",
"Ηλεκτρολόγος Μηχανικός και Μηχανικός Η/Υ",
"Ηλεκτρονικός",
"Ηχολήπτης",
"Θεατρολόγος",
"Θεολόγος-Ιεροκήρυκας",
"Ιατρικός Επισκέπτης",
"Ιατροδικαστής",
"Ιατρός",
"Ιερέας",
"Ιεροψάλτης",
"Ιστορικός",
"Ιστορικός Τέχνης",
"Ιχθυοκαλλιεργητής",
"Ιχθυοπαθολόγος",
"Καθηγητής Γυμναστικής",
"Καθηγητής Δευτεροβάθμιας Εκπαίδευσης",
"Καθηγητής Πρωτοβάθμιας Εκπαίδευσης",
"Καθηγητής Τριτοβάθμιας Εκπαίδευσης",
"Καλλιτέχνης",
"Καμαριέρης - Καμαριέρα",
"Κασκαντέρ",
"Κατασκευαστής Ειδών Ενδυμασίας",
"Κατασκευαστής Υποδημάτων",
"Κεραμίστας",
"Κηπουρός - Ανθοκηπουρός - Κηποτεχνικός",
"Κλειδαράς",
"Κλιματολόγος",
"Κλωστοϋφαντουργός",
"Κλόουν - Γελοτοποιός",
"Κοινωνικός Ανθρωπολόγος",
"Κοινωνικός Λειτουργός",
"Κοινωνιολόγος",
"Κομμωτής – Τεχνικός Περιποίησης Κόμης",
"Κορνιζοποιός",
"Κοσμετολόγος",
"Κρεοπώλης",
"Κριτικός Τέχνης",
"Κτηματομεσίτης",
"Μεσίτης Αστικών Συμβάσεων",
"Διαχειριστής Ακίνητης Περιουσίας",
"Κτηνίατρος",
"Κτηνοτρόφος",
"Κόπτης Ενδυμάτων",
"Κόφτης Υποδημάτων",
"Λατόμος",
"Λιθογράφος Ψηφιακής Τεχνολογίας",
"Λιμενεργάτης",
"Λιμενικός",
"Λογιστής - Ειδικός Μηχανογραφημένου Λογιστηρίου",
"Λογοθεραπευτής",
"Λογοτέχνης",
"Μαθηματικός",
"Μαιευτής – Μαία",
"Μακιγιέρ/Μακιγιέζ",
"Μαρμαράς",
"Μελισσοκόμος",
"Μεσίτης Ναυτιλιακών Συμβάσεων",
"Μεταφραστής",
"Μετεωρολόγος",
"Μηχανικός Αεροσκαφών",
"Μηχανικός Αυτοκινήτων",
"Μηχανικός Διαχείρισης Ενεργειακών Πόρων",
"Μηχανικός Εμπορικού Ναυτικού",
"Μηχανικός Επιστήμης και Τεχνολογίας των Υλικών",
"Μηχανικός Η/Υ, Τηλεπικοινωνιών και Δικτύων",
"Μηχανικός Μεταλλείων",
"Μηχανικός Ορυκτών Πόρων",
"Μηχανικός Παραγωγής και Διοίκησης",
"Μηχανικός Περιβάλλοντος",
"Μηχανικός Πληροφοριακών και Επικοινωνιακών Συστημάτων",
"Μηχανικός Πλοίων",
"Μηχανικός Πολεμικής Αεροπορίας",
"Μηχανικός Πολεμικού Ναυτικού",
"Μηχανικός Τηλεπικοινωνιών",
"Μηχανικός Χωροταξίας και Περιφερειακής Ανάπτυξης",
"Μηχανοδηγός Τρένου",
"Μηχανολόγος Μηχανικός",
"Μηχανολόγος Μηχανικός Βιομηχανίας",
"Μηχανολόγος Μηχανικός Συστημάτων Αυτόματου Ελέγχου και Ρομποτικής",
"Μηχανοσυνθέτης Αεροσκαφών",
"Μικροβιολόγος",
"Μουσειολόγος Μουσειογράφος και Σχεδιαστής Εκθέσεων",
"Μουσικολόγος",
"Μουσικός",
"Μουσικός Ενορχηστρωτής",
"Μουσικός Επιμελητής",
"Μόνιμος Υπαξιωματικός Αεροπορίας",
"Μόνιμος Υπαξιωματικός Ναυτικού",
"Μόνιμος Υπαξιωματικός Στρατού Ξηράς",
"Νανοτεχνολόγος",
"Ναυαγοσώστης",
"Ναυπηγός Μηχανικός",
"Ναύτης",
"Νηπιαγωγός",
"Νοσοκόμα Παίδων",
"Νοσοκόμος-Νοσηλευτής",
"Ντετέκτιβ",
"Ξεναγός",
"Ξενοδοχοϋπάλληλος",
"Ξυλουργός",
"Οδηγός Αυτοκινήτου Αγώνων",
"Οδηγός Οχημάτων Δημοσίας Χρήσης",
"Οδηγός ΤΑΞΙ",
"Οδοντίατρος",
"Οδοντοτεχνίτης",
"Οικονομολόγος",
"Οινολόγος",
"Οπτικός",
"Οργανοποιός",
"Οργανωτής Συνεδρίων, Εκθέσεων και Εκδηλώσεων",
"Οργανωτικός Συντονιστής Παραγωγής - Παραγωγός",
"Οφθαλμίατρος",
"Παιδίατρος",
"Παραγωγός Κινηματογράφου και Τηλεόρασης",
"Παραγωγός Προϊόντων Αλευρόμυλων, Παραγωγή Αμύλων και Προϊόντων Αμύλου",
"Παραγωγός Ραδιοφωνικών Εκπομπών",
"Πατωματζής (Επαγγέλματα Οικοδομής)",
"Περιβαλλοντολόγος",
"Πιλοποιός",
"Πιλότος",
"Πλέκτης",
"Πλακάς",
"Πλανόδιος Πωλητής",
"Πλοίαρχος",
"Πλοηγός Σκαφών Αναψυχής",
"Πολιτικός Επιστήμονας",
"Πολιτικός Μηχανικός",
"Πολιτικός Συντάκτης",
"Προγραμματιστής Συστημάτων και Εφαρμογών Η/Υ",
"Προπονητής Αθλήματος",
"Προπονητής Αντισφαίρισης",
"Προπονητής Γυμναστικής με Βάρη",
"Προπονητής Ελεύθερης Γυμναστικής",
"Προπονητής Καλαθοσφαίρισης",
"Προπονητής Κλασικού Αθλητισμού",
"Προπονητής Κολύμβησης",
"Προπονητής Πετοσφαίρισης",
"Προπονητής Ποδοσφαίρου",
"Πυροσβέστης",
"Πωλητής",
"Πωλητής Οικολογικών Τροφίμων και άλλων Ειδών",
"Ράφτης",
"Ρεσεψιονίστ Ξενοδοχείου",
"Στέλεχος Υποδοχής Ξενοδοχείου",
"Σεισμολόγος",
"Σεναριογράφος",
"Σερβιτόρος",
"Σερβιτόρος Ποτών - Μπάρμαν",
"Σιδεράς - Σιδηρουργός",
"Σιδερωτής",
"Σκηνογράφος",
"Σκηνοθέτης Τηλεόρασης",
"Σοβατζής",
"Στέλεχος Marketing",
"Στέλεχος Διεθνούς Εμπορίου",
"Στέλεχος Εκδοτικών Επιχειρήσεων",
"Στέλεχος Κοστολόγησης στη Βιομηχανία - Βιοτεχνία",
"Στέλεχος Μικρομεσαίων Επιχειρήσεων",
"Στέλεχος Τουρισμού",
"Στέλεχος Τραπεζικών Εργασιών",
"Στέλεχος Υπηρεσιών Ασφαλείας",
"Στέλεχος Υπηρεσιών Εφοδιαστικής Αλυσίδας",
"Στέλεχος Χρηματιστηριακών Εργασιών",
"Στατιστικολόγος",
"Στρατιωτικός Ψυχολόγος",
"Στυλίστας",
"Συμβολαιογράφος",
"Συνοδός Εδάφους",
"Συντηρητής Αρχαιοτήτων και Έργων Τέχνης",
"Τεχνικός Συντήρησης Έργων Τέχνης",
"Σφουγγαράς",
"Σχεδιαστής Επίπλου",
"Σχεδιαστής Κινουμένων Σχεδίων",
"Σχεδιαστής Μέσω Συστημάτων Η/Υ",
"Σχεδιαστής Πολυμέσων - Ειδικός Πολυμέσων",
"Σχεδιαστής Υποδημάτων",
"Σχολικός Σύμβουλος",
"Σύμβουλος Επιχειρήσεων",
"Σύμβουλος Σχολικού και Επαγγελματικού Προσανατολισμού",
"Ταμίας",
"Ταξιδιωτικός Συνοδός και Συνοδός δρυμών",
"Τουριστικός Συνοδός",
"Ταπετσέρης",
"Ταπητουργός",
"Ταχυδρόμος",
"Χειριστής Τηλεφωνικού Κέντρου",
"Τεχνίτης Αδαμαντοκοπής Μαρμάρων",
"Τεχνίτης Αλιείας - Ιχθυοπαραγωγής - Τεχνολόγος Υδατοκαλλιεργητής",
"Τεχνίτης Δερμάτινων Ειδών",
"Τεχνίτης Φαρμάκων - Καλλυντικών - Τεχνικός Φαρμάκων Καλλυντικών και Παρεμφερών Προϊόντων",
"Τεχνίτης Ψάθας - Καλαθοποιίας",
"Τεχνίτης Ψηφιδωτών",
"Τεχνικός - Ειδικός Εφαρμογών Πληροφορικής",
"Τεχνικός Αερίων Καυσίμων",
"Τεχνικός Αεροσκαφών - Ειδικός Μηχανοσύνθετων Αεροσκαφών",
"Τεχνικός Αμπελουργίας - Οινοτεχνίας",
"Τεχνικός Ανελκυστήρων",
"Τεχνικός Αρδεύσεων",
"Τεχνικός Αρτοποιίας - Ζαχαροπλαστικής",
"Τεχνικός Ασφαλείας Δικτύων Η/Υ - Τεχνικός Δικτύων Υπολογιστών",
"Τεχνικός Αυτοκινήτων Οχημάτων",
"Τεχνικός Αυτοματισμών",
"Τεχνικός Διαχείρισης Συστημάτων και Παροχής Υπηρεσιών Intranet - Internet",
"Τεχνικός Διαχείρισης και Ανακύκλωσης Αποβλήτων",
"Τεχνικός Δομικών Έργων",
"Τεχνικός Ελέγχου Βιομηχανικού και Εργασιακού Περιβάλλοντος",
"Τεχνικός Ελέγχου Ρύπανσης και Εγκαταστάσεων Αντιρρύπανσης",
"Τεχνικός Ελέγχου Υλικών",
"Τεχνικός Επεξεργασίας Γάλακτος",
"Τεχνικός Εργαλειομηχανών",
"Τεχνικός Εφαρμογών Ιατρικής Πληροφορικής",
"Τεχνικός Εφαρμογών Πληροφορικής",
"Τεχνικός Εφαρμογών Πληροφορικής, Δικτύων και Αυτοματισμού Γραφείου",
"Τεχνικός Θέρμανσης και Υδραυλικών Εγκαταστάσεων",
"Τεχνικός Κοσμήματος Παραγωγής",
"Τεχνικός Μηχανών Θαλάσσης και Αναψυχής",
"Τεχνικός Οργάνων Μετρήσεων",
"Τεχνικός Ποτοποιίας - Αποσταγματοποιίας",
"Τεχνικός Υπαξιωματικός Αεροπορίας",
"Τεχνικός Ψηφιακής Φωτογραφίας και CD-ROM",
"Τεχνολόγος - Δασοπόνος Διαχείρισης Φυσικών Πόρων",
"Τεχνολόγος Ακτινολογίας Ακτινοθεραπείας",
"Τεχνολόγος Αντιρρύπανσης",
"Τεχνολόγος Βιομηχανικού Σχεδιασμού",
"Τεχνολόγος Ενεργειακής Τεχνικής",
"Τεχνολόγος Ιατρικών Οργάνων",
"Τεχνολόγος Ιχθυοκαλλιέργειας",
"Τεχνολόγος Ορυχείων",
"Τεχνολόγος Πετρελαίου",
"Τεχνολόγος Τηλεϊατρικής",
"Τεχνολόγος Τροφίμων",
"Τεχνολόγος Φυτικής Παραγωγής",
"Τζακάς",
"Τζαμάς",
"Τηλεπαρουσιαστής",
"Τηλεφωνητής",
"Τουριστικός Πράκτορας",
"Τραγουδιστής",
"Τυπογράφος",
"Υαλουργός",
"Υδραυλικός",
"Υλοτόμος",
"Υπάλληλος Εξυπηρέτησης Πελατών",
"Υπάλληλος Τουριστικού Γραφείου – Ειδικός Τουριστικού Πρακτορείου",
"Υπάλληλος Τράπεζας",
"Υπαξιωματικός Ενόπλων Δυνάμεων",
"Υπεύθυνος Ανθρώπινου Δυναμικού",
"Υπεύθυνος Γραφείου Τύπου",
"Υπεύθυνος Δημοσίων Σχέσεων",
"Υπεύθυνος Προμηθειών και Διαχείρισης Αποθήκης",
"Φαρμακοποιός",
"Φιλόλογος",
"Φοντοποιός",
"Φυσικός",
"Φυσιοθεραπευτής",
"Φωτογράφος - Τεχνικός Λήψης Φωτογραφίας - Έγχρωμη Φωτογραφία",
"Φωτομοντέλο - Μανεκέν",
"Φύλακας Ασφαλείας",
"Φύλακας Μουσείων και Αρχαιολογικών Χώρων",
"Χαράκτης",
"Χειριστές Βαρέων Μηχανημάτων και Αγροτικών Μηχανημάτων",
"Χειριστής Μηχανημάτων Κοπής Ξύλου",
"Χειρούργος",
"Χημικός",
"Χημικός Μηχανικός",
"Χορευτής",
"Χορογράφος",
"Χρηματιστής",
"Χρηματοοικονομικός Σύμβουλος",
"Χτίστης",
"Ψαράς",
"Ψυκτικός - Τεχνικός Εγκαταστάσεων Ψύξης Αερισμού και Κλιματισμού",
"Ψυχίατρος",
"Ψυχολόγος",
"Ωκεανογράφος",
"Ωρολογοποιός",
]
| Provider |
python | Netflix__metaflow | metaflow/plugins/argo/argo_workflows.py | {
"start": 2209,
"end": 2327
} | class ____(MetaflowException):
headline = "Argo Workflows sensor clean up error"
| ArgoWorkflowsSensorCleanupException |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 19774,
"end": 25057
} | class ____(GoogleCloudBaseOperator):
"""
Creates a lake resource within a lake.
:param project_id: Required. The ID of the Google Cloud project that the lake belongs to.
:param region: Required. The ID of the Google Cloud region that the lake belongs to.
:param lake_id: Required. Lake identifier.
:param body: Required. The Request body contains an instance of Lake.
:param validate_only: Optional. Only validate the request, but do not perform mutations. The default is
false.
:param api_version: The version of the api that will be requested for example 'v1'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag informing should the Dataplex lake be created asynchronously.
This is useful for long-running creating lakes.
"""
template_fields = (
"project_id",
"lake_id",
"body",
"validate_only",
"impersonation_chain",
)
template_fields_renderers = {"body": "json"}
operator_extra_links = (DataplexLakeLink(),)
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
body: dict[str, Any],
validate_only: bool | None = None,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
asynchronous: bool = False,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.body = body
self.validate_only = validate_only
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.asynchronous = asynchronous
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"lake_id": self.lake_id,
"region": self.region,
"project_id": self.project_id,
}
def execute(self, context: Context) -> dict:
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating Dataplex lake %s", self.lake_id)
try:
operation = hook.create_lake(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
body=self.body,
validate_only=self.validate_only,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if not self.asynchronous:
self.log.info("Waiting for Dataplex lake %s to be created", self.lake_id)
lake = hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Lake %s created successfully", self.lake_id)
else:
is_done = operation.done()
self.log.info("Is operation done already? %s", is_done)
return is_done
except HttpError as err:
if err.resp.status not in (409, "409"):
raise
self.log.info("Lake %s already exists", self.lake_id)
# Wait for lake to be ready
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
lake = hook.get_lake(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if lake["state"] != "CREATING":
break
time.sleep(time_to_wait)
DataplexLakeLink.persist(context=context)
return Lake.to_dict(lake)
| DataplexCreateLakeOperator |
python | django-import-export__django-import-export | import_export/forms.py | {
"start": 843,
"end": 2314
} | class ____(FieldNamePrefixMixin, forms.Form):
# id attr is explicitly declared because js logic uses the id
resource = forms.ChoiceField(
label=_("Resource"),
choices=(),
required=False,
widget=forms.Select(attrs={"id": "id_resource"}),
)
format = forms.ChoiceField(
label=_("Format"),
choices=(),
)
def __init__(self, formats, resources, **kwargs):
super().__init__(**kwargs)
self._init_resources(resources)
self._init_formats(formats)
def _init_resources(self, resources):
if not resources:
raise ValueError("no defined resources")
self.fields["resource"].choices = [
(i, resource.get_display_name()) for i, resource in enumerate(resources)
]
if len(resources) == 1:
self.fields["resource"].widget = forms.HiddenInput()
self.initial["resource"] = "0"
def _init_formats(self, formats):
if not formats:
raise ValueError("invalid formats list")
choices = [(str(i), f().get_title()) for i, f in enumerate(formats)]
if len(formats) == 1:
field = self.fields["format"]
field.value = formats[0]().get_title()
field.initial = 0
field.widget.attrs["readonly"] = True
if len(formats) > 1:
choices.insert(0, ("", "---"))
self.fields["format"].choices = choices
| ImportExportFormBase |
python | django__django | tests/utils_tests/test_datastructures.py | {
"start": 9453,
"end": 9736
} | class ____(SimpleTestCase):
def test_dictwrapper(self):
def f(x):
return "*%s" % x
d = DictWrapper({"a": "a"}, f, "xx_")
self.assertEqual(
"Normal: %(a)s. Modified: %(xx_a)s" % d, "Normal: a. Modified: *a"
)
| DictWrapperTests |
python | getsentry__sentry | tests/apidocs/endpoints/scim/test_group_details.py | {
"start": 184,
"end": 3834
} | class ____(APIDocsTestCase, SCIMTestCase):
def setUp(self) -> None:
super().setUp()
member_user = self.create_user()
self.member = self.create_member(user=member_user, organization=self.organization)
self.team = self.create_team(
organization=self.organization, members=[self.user, member_user]
)
self.url = reverse(
"sentry-api-0-organization-scim-team-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"team_id_or_slug": self.team.id,
},
)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_delete(self) -> None:
response = self.client.delete(self.url)
request = RequestFactory().delete(self.url)
self.validate_schema(request, response)
def test_patch_rename(self) -> None:
patch_data = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [
{
"op": "replace",
"value": {
"id": self.team.id,
"displayName": "newName",
},
}
],
}
response = self.client.patch(self.url, patch_data)
request = RequestFactory().patch(self.url, patch_data)
self.validate_schema(request, response)
def test_patch_replace(self) -> None:
newmember = self.create_member(user=self.create_user(), organization=self.organization)
patch_data = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [
{
"op": "replace",
"path": "members",
"value": [
{
"value": newmember.id,
"display": "test2.user@okta.local",
},
],
}
],
}
response = self.client.patch(self.url, patch_data)
request = RequestFactory().patch(self.url, patch_data)
self.validate_schema(request, response)
def test_patch_add_member(self) -> None:
newmember = self.create_member(user=self.create_user(), organization=self.organization)
patch_data = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [
{
"op": "add",
"path": "members",
"value": [
{
"value": newmember.id,
"display": newmember.email,
}
],
},
],
}
response = self.client.patch(self.url, patch_data)
request = RequestFactory().patch(self.url, patch_data)
self.validate_schema(request, response)
def test_patch_remove_member(self) -> None:
patch_data = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": [
{
"op": "remove",
"path": f'members[value eq "{self.member.id}"]',
}
],
}
response = self.client.patch(self.url, patch_data)
request = RequestFactory().patch(self.url, patch_data)
self.validate_schema(request, response)
| SCIMTeamDetailsDocs |
python | run-llama__llama_index | llama-index-core/tests/memory/test_memory_blocks_base.py | {
"start": 1288,
"end": 1812
} | class ____(BaseMemoryBlock[List[ChatMessage]]):
"""Memory block that returns chat messages."""
async def _aget(
self, messages: List[ChatMessage], **kwargs: Any
) -> List[ChatMessage]:
return [
ChatMessage(role="user", content="Historical user message"),
ChatMessage(role="assistant", content="Historical assistant response"),
]
async def _aput(self, messages: List[ChatMessage]) -> None:
# Just a no-op for testing
pass
| ChatMessagesMemoryBlock |
python | huggingface__transformers | src/transformers/models/aria/modeling_aria.py | {
"start": 37285,
"end": 43965
} | class ____(AriaPreTrainedModel):
_checkpoint_conversion_mapping = {
r"^language_model.model": "language_model",
}
def __init__(self, config: AriaConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = AriaProjector(config)
self.language_model = AutoModel.from_config(config.text_config)
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
pixel_mask: Optional[torch.FloatTensor] = None,
vision_feature_layer: int = -1,
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
pixel_mask (`torch.FloatTensor]`, *optional*):
The tensors corresponding to the input image mask.
vision_feature_layer (`Union[int, list[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
patch_attention_mask = self._create_patch_attention_mask(pixel_mask)
image_outputs = self.vision_tower(
pixel_values, patch_attention_mask=patch_attention_mask, output_hidden_states=True
)
image_attn_mask = None
if patch_attention_mask is not None:
flattened_mask = patch_attention_mask.flatten(1)
image_attn_mask = torch.logical_not(flattened_mask)
selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
image_features = self.multi_modal_projector(selected_image_feature, attn_mask=image_attn_mask)
return image_features
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0] * image_features.shape[1]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, AriaModelOutputWithPast]:
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
# 2. Merge text and images
if pixel_values is not None and inputs_embeds.shape[1] != 1:
image_features = self.get_image_features(
pixel_values=pixel_values,
pixel_mask=pixel_mask,
vision_feature_layer=self.config.vision_feature_layer,
)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return AriaModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values if use_cache else None,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
def _create_patch_attention_mask(self, pixel_mask):
if pixel_mask is None:
return None
patches_subgrid = pixel_mask.unfold(
dimension=1,
size=self.vision_tower.config.patch_size,
step=self.vision_tower.config.patch_size,
)
patches_subgrid = patches_subgrid.unfold(
dimension=2,
size=self.vision_tower.config.patch_size,
step=self.vision_tower.config.patch_size,
)
return (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
@auto_docstring(
custom_intro="""
Aria model for conditional generation tasks.
This model combines a vision tower, a multi-modal projector, and a language model
to perform tasks that involve both image and text inputs.
"""
)
| AriaModel |
python | spack__spack | lib/spack/spack/bootstrap/core.py | {
"start": 10042,
"end": 23696
} | class ____(Bootstrapper):
"""Install the software needed during bootstrapping from sources."""
def __init__(self, conf) -> None:
super().__init__(conf)
self.last_search: Optional[QueryInfo] = None
self.config_scope_name = f"bootstrap_source-{uuid.uuid4()}"
def try_import(self, module: str, abstract_spec_str: str) -> bool:
info: QueryInfo = {}
if _try_import_from_store(module, abstract_spec_str, query_info=info):
self.last_search = info
return True
tty.debug(f"Bootstrapping {module} from sources")
# If we compile code from sources detecting a few build tools
# might reduce compilation time by a fair amount
_add_externals_if_missing()
# Try to build and install from sources
with spack_python_interpreter():
if module == "clingo":
bootstrapper = ClingoBootstrapConcretizer(configuration=spack.config.CONFIG)
concrete_spec = bootstrapper.concretize()
else:
abstract_spec = spack.spec.Spec(
abstract_spec_str + " ^" + spec_for_current_python()
)
concrete_spec = spack.concretize.concretize_one(abstract_spec)
msg = "[BOOTSTRAP MODULE {0}] Try installing '{1}' from sources"
tty.debug(msg.format(module, abstract_spec_str))
# Install the spec that should make the module importable
with spack.config.override(self.mirror_scope):
PackageInstaller(
[concrete_spec.package],
fail_fast=True,
root_policy="source_only",
dependencies_policy="source_only",
).install()
if _try_import_from_store(module, query_spec=concrete_spec, query_info=info):
self.last_search = info
return True
return False
def try_search_path(self, executables: Tuple[str], abstract_spec_str: str) -> bool:
info: QueryInfo = {}
if _executables_in_store(executables, abstract_spec_str, query_info=info):
self.last_search = info
return True
tty.debug(f"Bootstrapping {abstract_spec_str} from sources")
# If we compile code from sources detecting a few build tools
# might reduce compilation time by a fair amount
_add_externals_if_missing()
concrete_spec = spack.concretize.concretize_one(abstract_spec_str)
msg = "[BOOTSTRAP] Try installing '{0}' from sources"
tty.debug(msg.format(abstract_spec_str))
with spack.config.override(self.mirror_scope):
PackageInstaller([concrete_spec.package], fail_fast=True).install()
if _executables_in_store(executables, concrete_spec, query_info=info):
self.last_search = info
return True
return False
def create_bootstrapper(conf: ConfigDictionary):
"""Return a bootstrap object built according to the configuration argument"""
btype = conf["type"]
return _bootstrap_methods[btype](conf)
def source_is_enabled(conf: ConfigDictionary) -> bool:
"""Returns true if the source is not enabled for bootstrapping"""
return spack.config.get("bootstrap:trusted").get(conf["name"], False)
def ensure_module_importable_or_raise(module: str, abstract_spec: Optional[str] = None):
"""Make the requested module available for import, or raise.
This function tries to import a Python module in the current interpreter
using, in order, the methods configured in bootstrap.yaml.
If none of the methods succeed, an exception is raised. The function exits
on first success.
Args:
module: module to be imported in the current interpreter
abstract_spec: abstract spec that might provide the module. If not
given it defaults to "module"
Raises:
ImportError: if the module couldn't be imported
"""
# If we can import it already, that's great
tty.debug(f"[BOOTSTRAP MODULE {module}] Try importing from Python")
if _python_import(module):
return
abstract_spec = abstract_spec or module
exception_handler = GroupedExceptionHandler()
for current_config in bootstrapping_sources():
if not source_is_enabled(current_config):
continue
with exception_handler.forward(current_config["name"], Exception):
if create_bootstrapper(current_config).try_import(module, abstract_spec):
return
msg = f'cannot bootstrap the "{module}" Python module '
if abstract_spec:
msg += f'from spec "{abstract_spec}" '
if not exception_handler:
msg += ": no bootstrapping sources are enabled"
elif spack.error.debug or spack.error.SHOW_BACKTRACE:
msg += exception_handler.grouped_message(with_tracebacks=True)
else:
msg += exception_handler.grouped_message(with_tracebacks=False)
msg += "\nRun `spack --backtrace ...` for more detailed errors"
raise ImportError(msg)
def ensure_executables_in_path_or_raise(
executables: list,
abstract_spec: str,
cmd_check: Optional[Callable[[spack.util.executable.Executable], bool]] = None,
):
"""Ensure that some executables are in path or raise.
Args:
executables (list): list of executables to be searched in the PATH,
in order. The function exits on the first one found.
abstract_spec (str): abstract spec that provides the executables
cmd_check (object): callable predicate that takes a
``spack.util.executable.Executable`` command and validate it. Should return
``True`` if the executable is acceptable, ``False`` otherwise.
Can be used to, e.g., ensure a suitable version of the command before
accepting for bootstrapping.
Raises:
RuntimeError: if the executables cannot be ensured to be in PATH
Return:
Executable object
"""
cmd = spack.util.executable.which(*executables)
if cmd:
if not cmd_check or cmd_check(cmd):
return cmd
executables_str = ", ".join(executables)
exception_handler = GroupedExceptionHandler()
for current_config in bootstrapping_sources():
if not source_is_enabled(current_config):
continue
with exception_handler.forward(current_config["name"], Exception):
current_bootstrapper = create_bootstrapper(current_config)
if current_bootstrapper.try_search_path(executables, abstract_spec):
# Additional environment variables needed
concrete_spec, cmd = (
current_bootstrapper.last_search["spec"],
current_bootstrapper.last_search["command"],
)
assert cmd is not None, "expected an Executable"
cmd.add_default_envmod(
spack.user_environment.environment_modifications_for_specs(
concrete_spec, set_package_py_globals=False
)
)
return cmd
msg = f"cannot bootstrap any of the {executables_str} executables "
if abstract_spec:
msg += f'from spec "{abstract_spec}" '
if not exception_handler:
msg += ": no bootstrapping sources are enabled"
elif spack.error.debug or spack.error.SHOW_BACKTRACE:
msg += exception_handler.grouped_message(with_tracebacks=True)
else:
msg += exception_handler.grouped_message(with_tracebacks=False)
msg += "\nRun `spack --backtrace ...` for more detailed errors"
raise RuntimeError(msg)
def _add_externals_if_missing() -> None:
search_list = [
# clingo
"cmake",
"bison",
# GnuPG
"gawk",
# develop deps
"git",
]
if IS_WINDOWS:
search_list.append("winbison")
externals = spack.detection.by_path(search_list)
# System git is typically deprecated, so mark as non-buildable to force it as external
non_buildable_externals = {k: externals.pop(k) for k in ("git",) if k in externals}
spack.detection.update_configuration(externals, scope="bootstrap", buildable=True)
spack.detection.update_configuration(
non_buildable_externals, scope="bootstrap", buildable=False
)
def clingo_root_spec() -> str:
"""Return the root spec used to bootstrap clingo"""
return _root_spec("clingo-bootstrap@spack+python")
def ensure_clingo_importable_or_raise() -> None:
"""Ensure that the clingo module is available for import."""
ensure_module_importable_or_raise(module="clingo", abstract_spec=clingo_root_spec())
def gnupg_root_spec() -> str:
"""Return the root spec used to bootstrap GnuPG"""
root_spec_name = "win-gpg" if IS_WINDOWS else "gnupg"
return _root_spec(f"{root_spec_name}@2.3:")
def ensure_gpg_in_path_or_raise() -> None:
"""Ensure gpg or gpg2 are in the PATH or raise."""
return ensure_executables_in_path_or_raise(
executables=["gpg2", "gpg"], abstract_spec=gnupg_root_spec()
)
def patchelf_root_spec() -> str:
"""Return the root spec used to bootstrap patchelf"""
# 0.13.1 is the last version not to require C++17.
return _root_spec("patchelf@0.13.1:")
def verify_patchelf(patchelf: "spack.util.executable.Executable") -> bool:
"""Older patchelf versions can produce broken binaries, so we
verify the version here.
Arguments:
patchelf: patchelf executable
"""
out = patchelf("--version", output=str, error=os.devnull, fail_on_error=False).strip()
if patchelf.returncode != 0:
return False
parts = out.split(" ")
if len(parts) < 2:
return False
try:
version = spack.version.Version(parts[1])
except ValueError:
return False
return version >= spack.version.Version("0.13.1")
def ensure_patchelf_in_path_or_raise() -> spack.util.executable.Executable:
"""Ensure patchelf is in the PATH or raise."""
# The old concretizer is not smart and we're doing its job: if the latest patchelf
# does not concretize because the compiler doesn't support C++17, we try to
# concretize again with an upperbound @:13.
try:
return ensure_executables_in_path_or_raise(
executables=["patchelf"], abstract_spec=patchelf_root_spec(), cmd_check=verify_patchelf
)
except RuntimeError:
return ensure_executables_in_path_or_raise(
executables=["patchelf"],
abstract_spec=_root_spec("patchelf@0.13.1:0.13"),
cmd_check=verify_patchelf,
)
def ensure_winsdk_external_or_raise() -> None:
"""Ensure the Windows SDK + WGL are available on system
If both of these package are found, the Spack user or bootstrap
configuration (depending on where Spack is running)
will be updated to include all versions and variants detected.
If either the WDK or WSDK are not found, this method will raise
a RuntimeError.
**NOTE:** This modifies the Spack config in the current scope,
either user or environment depending on the calling context.
This is different from all other current bootstrap dependency
checks.
"""
if set(["win-sdk", "wgl"]).issubset(spack.config.get("packages").keys()):
return
externals = spack.detection.by_path(["win-sdk", "wgl"])
if not set(["win-sdk", "wgl"]) == externals.keys():
missing_packages_lst = []
if "wgl" not in externals:
missing_packages_lst.append("wgl")
if "win-sdk" not in externals:
missing_packages_lst.append("win-sdk")
missing_packages = " & ".join(missing_packages_lst)
raise RuntimeError(
f"Unable to find the {missing_packages}, please install these packages via the Visual "
"Studio installer before proceeding with Spack or provide the path to a non standard "
"install with 'spack external find --path'"
)
# wgl/sdk are not required for bootstrapping Spack, but
# are required for building anything non trivial
# add to user config so they can be used by subsequent Spack ops
spack.detection.update_configuration(externals, buildable=False)
def ensure_core_dependencies() -> None:
"""Ensure the presence of all the core dependencies."""
if sys.platform.lower() == "linux":
ensure_patchelf_in_path_or_raise()
ensure_gpg_in_path_or_raise()
ensure_clingo_importable_or_raise()
def all_core_root_specs() -> List[str]:
"""Return a list of all the core root specs that may be used to bootstrap Spack"""
return [clingo_root_spec(), gnupg_root_spec(), patchelf_root_spec()]
def bootstrapping_sources(scope: Optional[str] = None):
"""Return the list of configured sources of software for bootstrapping Spack
Args:
scope: if a valid configuration scope is given, return the
list only from that scope
"""
source_configs = spack.config.get("bootstrap:sources", default=None, scope=scope)
source_configs = source_configs or []
list_of_sources = []
for entry in source_configs:
current = copy.copy(entry)
metadata_dir = spack.util.path.canonicalize_path(entry["metadata"])
metadata_yaml = os.path.join(metadata_dir, METADATA_YAML_FILENAME)
try:
with open(metadata_yaml, encoding="utf-8") as stream:
current.update(spack.util.spack_yaml.load(stream))
list_of_sources.append(current)
except OSError:
pass
return list_of_sources
| SourceBootstrapper |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/descriptors.py | {
"start": 17136,
"end": 17675
} | class ____(DifferentiableAOTInput):
"""The input is a plain input, corresponding to a particular positional index.
Note that AOTInput is always relative to a function with a *flat* calling convention,
e.g., as accepted by `aot_module_simplified`. There are some AOTAutograd APIs that
flatten pytrees, and we don't record PyTree key paths from the flattening (but we
could and should!)
"""
idx: int
def expr(self) -> str:
return f"args[{self.idx}]"
@dataclasses.dataclass(frozen=True)
| PlainAOTInput |
python | apache__airflow | airflow-core/src/airflow/timetables/base.py | {
"start": 3502,
"end": 4956
} | class ____(NamedTuple):
"""
Information to schedule a DagRun.
Instances of this will be returned by timetables when they are asked to
schedule a DagRun creation.
"""
run_after: DateTime
"""The earliest time this DagRun is created and its tasks scheduled.
This **MUST** be "aware", i.e. contain timezone information.
"""
data_interval: DataInterval
"""The data interval this DagRun to operate over."""
@classmethod
def exact(cls, at: DateTime) -> DagRunInfo:
"""Represent a run on an exact time."""
return cls(run_after=at, data_interval=DataInterval.exact(at))
@classmethod
def interval(cls, start: DateTime, end: DateTime) -> DagRunInfo:
"""
Represent a run on a continuous schedule.
In such a schedule, each data interval starts right after the previous
one ends, and each run is scheduled right after the interval ends. This
applies to all schedules prior to AIP-39 except ``@once`` and ``None``.
"""
return cls(run_after=end, data_interval=DataInterval(start, end))
@property
def logical_date(self: DagRunInfo) -> DateTime:
"""
Infer the logical date to represent a DagRun.
This replaces ``execution_date`` in Airflow 2.1 and prior. The idea is
essentially the same, just a different name.
"""
return self.data_interval.start
@runtime_checkable
| DagRunInfo |
python | dagster-io__dagster | python_modules/dagster/dagster/components/component/component_scaffolder.py | {
"start": 79,
"end": 404
} | class ____(Scaffolder):
def scaffold(self, request: ScaffoldRequest) -> None:
# This will be deleted once all components are converted to the new ComponentScaffolder API
from dagster.components.component_scaffolding import scaffold_component
scaffold_component(request, {})
| DefaultComponentScaffolder |
python | Netflix__metaflow | metaflow/plugins/aws/batch/batch.py | {
"start": 1063,
"end": 1139
} | class ____(MetaflowException):
headline = "AWS Batch error"
| BatchException |
python | tensorflow__tensorflow | tensorflow/python/autograph/converters/control_flow_test.py | {
"start": 15372,
"end": 17510
} | class ____(ControlFlowTestBase):
def test_basic(self):
def f(l):
s1 = 0
s2 = 0
for e in l:
s1 += e
s2 += e * e
return s1, s2
self.assertTransformedResult(f, constant_op.constant([1, 3]), (4, 10))
empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
self.assertTransformedResult(f, empty_vector, (0, 0))
def test_single_output(self):
def f(l):
s = 0
for e in l:
s += e
return s
self.assertTransformedResult(f, constant_op.constant([1, 3]), 4)
empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
self.assertTransformedResult(f, empty_vector, 0)
def test_iterated_expression(self):
eval_count = [0]
def count_evals(x):
eval_count[0] += 1
return x
def f(n):
s = 0
for e in count_evals(range(n)):
s += e
return s
tr = self.transform(f, control_flow)
self.assertEqual(tr(5), 10)
self.assertEqual(eval_count[0], 1)
def test_composite_state_initialized_in_loop(self):
class TestClass(object):
pass
def f(n, x):
tc = TestClass()
for i in n:
if i == 0:
tc.x = x
else:
tc.x = tc.x + i
return tc.x
self.assertTransformedResult(f, (range(5), constant_op.constant(10)), 20)
tr = self.transform(f, control_flow)
with self.assertRaisesRegex(
ValueError, "'tc.x' must be defined before the loop"):
tr(constant_op.constant(list(range(5))), 0)
def test_tuple_unpacking(self):
def f(x_list):
z = constant_op.constant(0) # pylint:disable=undefined-variable
for i, x in enumerate(x_list):
z = z + x + i
return z
self.assertTransformedResult(f, [3, 3], 7)
def test_with_comprehension_in_body(self):
def f(l, n):
s = constant_op.constant(list(range(n)))
for _ in l:
s += constant_op.constant([a for a in range(n)])
return s
self.assertTransformedResult(f, (constant_op.constant([1, 2, 3]), 5),
np.array(range(5)) * 4)
| ForStatementTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/schema.py | {
"start": 3789,
"end": 5360
} | class ____:
"""Helper to compare SQL structures based on compare()"""
def __init__(self, target):
self.target = target
def __eq__(self, other):
return self.target.compare(other)
def __ne__(self, other):
return not self.target.compare(other)
def _truncate_name(dialect, name):
if len(name) > dialect.max_identifier_length:
return (
name[0 : max(dialect.max_identifier_length - 6, 0)]
+ "_"
+ hex(hash(name) % 64)[2:]
)
else:
return name
def pep435_enum(name):
# Implements PEP 435 in the minimal fashion needed by SQLAlchemy
__members__ = OrderedDict()
def __init__(self, name, value, alias=None):
self.name = name
self.value = value
self.__members__[name] = self
value_to_member[value] = self
setattr(self.__class__, name, self)
if alias:
self.__members__[alias] = self
setattr(self.__class__, alias, self)
value_to_member = {}
@classmethod
def get(cls, value):
return value_to_member[value]
someenum = type(
name,
(object,),
{"__members__": __members__, "__init__": __init__, "get": get},
)
# getframe() trick for pickling I don't understand courtesy
# Python namedtuple()
try:
module = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError):
pass
if module is not None:
someenum.__module__ = module
return someenum
| eq_clause_element |
python | Netflix__metaflow | test/unit/inheritance/flows/comprehensive_diamond_base.py | {
"start": 206,
"end": 685
} | class ____(FlowSpec):
"""First branch: parameters and config"""
param_a = Parameter("param_a", help="Parameter from BaseA", default=100)
config_a = Config("config_a", default_value={"branch": "A", "priority": 1})
@retry(times=2)
@step
def start(self):
"""Start step from BaseA"""
print(f"Start from BaseA: param_a={self.param_a}")
self.value_a = self.param_a * self.config_a.get("priority", 1)
self.next(self.process)
| BaseA |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_web_vitals_detection.py | {
"start": 82,
"end": 1068
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-web-vitals-detection"
method = "get"
def setUp(self):
super().setUp()
self.login_as(user=self.user)
@patch("sentry.api.endpoints.project_web_vitals_detection.dispatch_detection_for_project_ids")
def test_get_success(self, mock_dispatch):
mock_dispatch.return_value = {self.project.id: {"success": True}}
response = self.get_success_response(
self.organization.slug, self.project.slug, status_code=202
)
assert response.status_code == 202
assert response.data == {"status": "dispatched"}
mock_dispatch.assert_called_once_with([self.project.id])
def test_get_requires_project_access(self):
other_user = self.create_user()
self.login_as(user=other_user)
response = self.get_error_response(self.organization.slug, self.project.slug)
assert response.status_code == 403
| ProjectWebVitalsDetectionTest |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 14214,
"end": 14698
} | class ____(WrapperLine):
wrapper: PythonWrapperCodegen
graph: GraphLowering
def __post_init__(self) -> None:
self.wrapper.push_computed_sizes(self.wrapper.computed_sizes)
def codegen(self, code: IndentedBuffer) -> None:
self.wrapper.push_codegened_graph(self.graph)
code.do_indent()
def codegen_fx(self, converter: FxConverter) -> FxConversionFunc:
return converter._generate_enter_subgraph
@dataclasses.dataclass
| EnterSubgraphLine |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-right-side-view.py | {
"start": 154,
"end": 692
} | class ____(object):
# @param root, a tree node
# @return a list of integers
def rightSideView(self, root):
result = []
self.rightSideViewDFS(root, 1, result)
return result
def rightSideViewDFS(self, node, depth, result):
if not node:
return
if depth > len(result):
result.append(node.val)
self.rightSideViewDFS(node.right, depth+1, result)
self.rightSideViewDFS(node.left, depth+1, result)
# BFS solution
# Time: O(n)
# Space: O(n)
| Solution |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/module_long_help/package.py | {
"start": 217,
"end": 650
} | class ____(Package):
"""Package to test long description message generated in modulefile.
Message too long is wrapped over multiple lines."""
homepage = "http://www.spack.llnl.gov"
url = "http://www.spack.llnl.gov/module-long-help-1.0.tar.gz"
version("1.0", "0123456789abcdef0123456789abcdef")
def setup_run_environment(self, env: EnvironmentModifications) -> None:
env.set("FOO", "bar")
| ModuleLongHelp |
python | Netflix__metaflow | metaflow/flowspec.py | {
"start": 9218,
"end": 45594
} | class ____(metaclass=FlowSpecMeta):
"""
Main class from which all Flows should inherit.
Attributes
----------
index
input
"""
# Attributes that are not saved in the datastore when checkpointing.
# Name starting with '__', methods, functions and Parameters do not need
# to be listed.
_EPHEMERAL = {
"_EPHEMERAL",
"_NON_PARAMETERS",
"_datastore",
"_cached_input",
"_graph",
"_flow_state",
"_steps",
"index",
"input",
}
# When checking for parameters, we look at dir(self) but we want to exclude
# attributes that are definitely not parameters and may be expensive to
# compute (like anything related to the `foreach_stack`). We don't need to exclude
# names starting with `_` as those are already excluded from `_get_parameters`.
_NON_PARAMETERS = {"cmd", "foreach_stack", "index", "input", "script_name", "name"}
def __init__(self, use_cli=True):
"""
Construct a FlowSpec
Parameters
----------
use_cli : bool, default True
Set to True if the flow is invoked from __main__ or the command line
"""
self.name = self.__class__.__name__
self._datastore = None
self._transition = None
self._cached_input = {}
if use_cli:
with parameters.flow_context(self.__class__) as _:
from . import cli
cli.main(self)
@property
def script_name(self) -> str:
"""
[Legacy function - do not use. Use `current` instead]
Returns the name of the script containing the flow
Returns
-------
str
A string containing the name of the script
"""
fname = inspect.getfile(self.__class__)
if fname.endswith(".pyc"):
fname = fname[:-1]
return os.path.basename(fname)
@property
def _flow_decorators(self):
# Backward compatible method to access flow decorators
return self._flow_state[FlowStateItems.FLOW_DECORATORS]
@property
def _flow_mutators(self):
return self._flow_state[FlowStateItems.FLOW_MUTATORS]
@classmethod
def _check_parameters(cls, config_parameters=False):
seen = set()
for _, param in cls._get_parameters():
if param.IS_CONFIG_PARAMETER != config_parameters:
continue
norm = param.name.lower()
if norm in seen:
raise MetaflowException(
"Parameter *%s* is specified twice. "
"Note that parameter names are "
"case-insensitive." % param.name
)
seen.add(norm)
@classmethod
def _process_config_decorators(cls, config_options, process_configs=True):
if cls._configs_processed:
debug.userconf_exec("Mutating step/flow decorators already processed")
return None
cls._configs_processed = True
# Fast path for no user configurations
if not process_configs or (
not cls._flow_state[FlowStateItems.FLOW_MUTATORS]
and all(len(step.config_decorators) == 0 for step in cls._steps)
):
# Process parameters to allow them to also use config values easily
for var, param in cls._get_parameters():
if isinstance(param, ConfigValue) or param.IS_CONFIG_PARAMETER:
continue
param.init(not process_configs)
return None
debug.userconf_exec("Processing mutating step/flow decorators")
# We need to convert all the user configurations from DelayedEvaluationParameters
# to actual values so they can be used as is in the mutators.
# We, however, need to make sure _get_parameters still works properly so
# we store what was a config and has been set to a specific value.
# This is safe to do for now because all other uses of _get_parameters typically
# do not rely on the variable itself but just the parameter.
to_save_configs = []
cls._check_parameters(config_parameters=True)
for var, param in cls._get_parameters():
if not param.IS_CONFIG_PARAMETER:
continue
# Note that a config with no default and not required will be None
val = config_options.get(param.name.replace("-", "_").lower())
if isinstance(val, DelayedEvaluationParameter):
val = val()
# We store the value as well so that in _set_constants, we don't try
# to recompute (no guarantee that it is stable)
param._store_value(val)
to_save_configs.append((var, param))
debug.userconf_exec("Setting config %s to %s" % (var, str(val)))
setattr(cls, var, val)
cls._flow_state[FlowStateItems.SET_CONFIG_PARAMETERS] = to_save_configs
# Run all the decorators. We first run the flow-level decorators
# and then the step level ones to maintain a consistent order with how
# other decorators are run.
for deco in cls._flow_state[FlowStateItems.FLOW_MUTATORS]:
if isinstance(deco, FlowMutator):
inserted_by_value = [deco.decorator_name] + (deco.inserted_by or [])
mutable_flow = MutableFlow(
cls,
pre_mutate=True,
statically_defined=deco.statically_defined,
inserted_by=inserted_by_value,
)
# Sanity check to make sure we are applying the decorator to the right
# class
if not deco._flow_cls == cls and not issubclass(cls, deco._flow_cls):
raise MetaflowInternalError(
"FlowMutator registered on the wrong flow -- "
"expected %s but got %s"
% (deco._flow_cls.__name__, cls.__name__)
)
debug.userconf_exec(
"Evaluating flow level decorator %s (pre-mutate)"
% deco.__class__.__name__
)
deco.pre_mutate(mutable_flow)
else:
raise MetaflowInternalError(
"A non FlowMutator found in flow custom decorators"
)
for step in cls._steps:
for deco in step.config_decorators:
if isinstance(deco, StepMutator):
inserted_by_value = [deco.decorator_name] + (deco.inserted_by or [])
debug.userconf_exec(
"Evaluating step level decorator %s for %s (pre-mutate)"
% (deco.__class__.__name__, step.name)
)
deco.pre_mutate(
MutableStep(
cls,
step,
pre_mutate=True,
statically_defined=deco.statically_defined,
inserted_by=inserted_by_value,
)
)
else:
raise MetaflowInternalError(
"A non StepMutator found in step custom decorators"
)
# Process parameters to allow them to also use config values easily
for var, param in cls._get_parameters():
if param.IS_CONFIG_PARAMETER:
continue
param.init()
# Set the current flow class we are in (the one we just created)
parameters.replace_flow_context(cls)
# Re-calculate class level attributes after modifying the class
cls._init_graph()
return cls
def _set_constants(self, graph, kwargs, config_options):
from metaflow.decorators import (
flow_decorators,
) # To prevent circular dependency
# Persist values for parameters and other constants (class level variables)
# only once. This method is called before persist_constants is called to
# persist all values set using setattr
self._check_parameters(config_parameters=False)
seen = set()
self._success = True
parameters_info = []
for var, param in self._get_parameters():
seen.add(var)
if param.IS_CONFIG_PARAMETER:
# Use computed value if already evaluated, else get from config_options
val = param._computed_value or config_options.get(param.name)
else:
val = kwargs[param.name.replace("-", "_").lower()]
# Support for delayed evaluation of parameters.
if isinstance(val, DelayedEvaluationParameter):
val = val()
val = val.split(param.separator) if val and param.separator else val
if isinstance(val, ConfigValue):
# We store config values as dict so they are accessible with older
# metaflow clients. It also makes it easier to access.
val = val.to_dict()
setattr(self, var, val)
parameters_info.append({"name": var, "type": param.__class__.__name__})
# Do the same for class variables which will be forced constant as modifications
# to them don't propagate well since we create a new process for each step and
# re-read the flow file
constants_info = []
for var in dir(self.__class__):
if var[0] == "_" or var in self._NON_PARAMETERS or var in seen:
continue
val = getattr(self.__class__, var)
if isinstance(val, (MethodType, FunctionType, property, type)):
continue
constants_info.append({"name": var, "type": type(val).__name__})
setattr(self, var, val)
# We store the DAG information as an artifact called _graph_info
steps_info, graph_structure = graph.output_steps()
graph_info = {
"file": os.path.basename(os.path.abspath(sys.argv[0])),
"parameters": parameters_info,
"constants": constants_info,
"steps": steps_info,
"graph_structure": graph_structure,
"doc": graph.doc,
"decorators": [
{
"name": deco.name,
"attributes": to_pod(deco.attributes),
"statically_defined": deco.statically_defined,
"inserted_by": deco.inserted_by,
}
for deco in flow_decorators(self)
if not deco.name.startswith("_")
]
+ [
{
"name": deco.__class__.__name__,
"attributes": {},
"statically_defined": deco.statically_defined,
"inserted_by": deco.inserted_by,
}
for deco in self._flow_state[FlowStateItems.FLOW_MUTATORS]
],
"extensions": extension_info(),
}
self._graph_info = graph_info
@classmethod
def _get_parameters(cls):
cached = cls._flow_state[FlowStateItems.CACHED_PARAMETERS]
returned = set()
if cached is not None:
for set_config in cls._flow_state[FlowStateItems.SET_CONFIG_PARAMETERS]:
returned.add(set_config[0])
yield set_config[0], set_config[1]
for var in cached:
if var not in returned:
yield var, getattr(cls, var)
return
build_list = []
for set_config in cls._flow_state[FlowStateItems.SET_CONFIG_PARAMETERS]:
returned.add(set_config[0])
yield set_config[0], set_config[1]
for var in dir(cls):
if var[0] == "_" or var in cls._NON_PARAMETERS:
continue
try:
val = getattr(cls, var)
except:
continue
if isinstance(val, Parameter) and var not in returned:
build_list.append(var)
yield var, val
cls._flow_state[FlowStateItems.CACHED_PARAMETERS] = build_list
def _set_datastore(self, datastore):
self._datastore = datastore
def __iter__(self):
"""
[Legacy function - do not use]
Iterate over all steps in the Flow
Returns
-------
Iterator[graph.DAGNode]
Iterator over the steps in the flow
"""
return iter(self._steps)
def __getattr__(self, name: str):
if self._datastore and name in self._datastore:
# load the attribute from the datastore...
x = self._datastore[name]
# ...and cache it in the object for faster access
setattr(self, name, x)
return x
else:
raise AttributeError("Flow %s has no attribute '%s'" % (self.name, name))
def cmd(self, cmdline, input={}, output=[]):
"""
[Legacy function - do not use]
"""
return cmd_with_io.cmd(cmdline, input=input, output=output)
@property
def index(self) -> Optional[int]:
"""
The index of this foreach branch.
In a foreach step, multiple instances of this step (tasks) will be executed,
one for each element in the foreach. This property returns the zero based index
of the current task. If this is not a foreach step, this returns None.
If you need to know the indices of the parent tasks in a nested foreach, use
`FlowSpec.foreach_stack`.
Returns
-------
int, optional
Index of the task in a foreach step.
"""
if self._foreach_stack:
return self._foreach_stack[-1].index
@property
def input(self) -> Optional[Any]:
"""
The value of the foreach artifact in this foreach branch.
In a foreach step, multiple instances of this step (tasks) will be executed,
one for each element in the foreach. This property returns the element passed
to the current task. If this is not a foreach step, this returns None.
If you need to know the values of the parent tasks in a nested foreach, use
`FlowSpec.foreach_stack`.
Returns
-------
object, optional
Input passed to the foreach task.
"""
return self._find_input()
def foreach_stack(self) -> Optional[List[Tuple[int, int, Any]]]:
"""
Returns the current stack of foreach indexes and values for the current step.
Use this information to understand what data is being processed in the current
foreach branch. For example, considering the following code:
```
@step
def root(self):
self.split_1 = ['a', 'b', 'c']
self.next(self.nest_1, foreach='split_1')
@step
def nest_1(self):
self.split_2 = ['d', 'e', 'f', 'g']
self.next(self.nest_2, foreach='split_2'):
@step
def nest_2(self):
foo = self.foreach_stack()
```
`foo` will take the following values in the various tasks for nest_2:
```
[(0, 3, 'a'), (0, 4, 'd')]
[(0, 3, 'a'), (1, 4, 'e')]
...
[(0, 3, 'a'), (3, 4, 'g')]
[(1, 3, 'b'), (0, 4, 'd')]
...
```
where each tuple corresponds to:
- The index of the task for that level of the loop.
- The number of splits for that level of the loop.
- The value for that level of the loop.
Note that the last tuple returned in a task corresponds to:
- 1st element: value returned by `self.index`.
- 3rd element: value returned by `self.input`.
Returns
-------
List[Tuple[int, int, Any]]
An array describing the current stack of foreach steps.
"""
return [
(frame.index, frame.num_splits, self._find_input(stack_index=i))
for i, frame in enumerate(self._foreach_stack)
]
def _find_input(self, stack_index=None):
if stack_index is None:
stack_index = len(self._foreach_stack) - 1
if stack_index in self._cached_input:
return self._cached_input[stack_index]
elif self._foreach_stack:
# NOTE this is obviously an O(n) operation which also requires
# downloading the whole input data object in order to find the
# right split. One can override this method with a more efficient
# input data handler if this is a problem.
frame = self._foreach_stack[stack_index]
try:
var = getattr(self, frame.var)
except AttributeError:
# this is where AttributeError happens:
# [ foreach x ]
# [ foreach y ]
# [ inner ]
# [ join y ] <- call self.foreach_stack here,
# self.x is not available
self._cached_input[stack_index] = None
else:
try:
self._cached_input[stack_index] = var[frame.index]
except TypeError:
# __getitem__ not supported, fall back to an iterator
self._cached_input[stack_index] = next(
islice(var, frame.index, frame.index + 1)
)
return self._cached_input[stack_index]
def merge_artifacts(
self,
inputs: Inputs,
exclude: Optional[List[str]] = None,
include: Optional[List[str]] = None,
) -> None:
"""
Helper function for merging artifacts in a join step.
This function takes all the artifacts coming from the branches of a
join point and assigns them to self in the calling step. Only artifacts
not set in the current step are considered. If, for a given artifact, different
values are present on the incoming edges, an error will be thrown and the artifacts
that conflict will be reported.
As a few examples, in the simple graph: A splitting into B and C and joining in D:
```
A:
self.x = 5
self.y = 6
B:
self.b_var = 1
self.x = from_b
C:
self.x = from_c
D:
merge_artifacts(inputs)
```
In D, the following artifacts are set:
- `y` (value: 6), `b_var` (value: 1)
- if `from_b` and `from_c` are the same, `x` will be accessible and have value `from_b`
- if `from_b` and `from_c` are different, an error will be thrown. To prevent this error,
you need to manually set `self.x` in D to a merged value (for example the max) prior to
calling `merge_artifacts`.
Parameters
----------
inputs : Inputs
Incoming steps to the join point.
exclude : List[str], optional, default None
If specified, do not consider merging artifacts with a name in `exclude`.
Cannot specify if `include` is also specified.
include : List[str], optional, default None
If specified, only merge artifacts specified. Cannot specify if `exclude` is
also specified.
Raises
------
MetaflowException
This exception is thrown if this is not called in a join step.
UnhandledInMergeArtifactsException
This exception is thrown in case of unresolved conflicts.
MissingInMergeArtifactsException
This exception is thrown in case an artifact specified in `include` cannot
be found.
"""
include = include or []
exclude = exclude or []
node = self._graph[self._current_step]
if node.type != "join":
msg = (
"merge_artifacts can only be called in a join and step *{step}* "
"is not a join".format(step=self._current_step)
)
raise MetaflowException(msg)
if len(exclude) > 0 and len(include) > 0:
msg = "`exclude` and `include` are mutually exclusive in merge_artifacts"
raise MetaflowException(msg)
to_merge = {}
unresolved = []
for inp in inputs:
# available_vars is the list of variables from inp that should be considered
if include:
available_vars = (
(var, sha)
for var, sha in inp._datastore.items()
if (var in include) and (not hasattr(self, var))
)
else:
available_vars = (
(var, sha)
for var, sha in inp._datastore.items()
if (var not in exclude)
and (not hasattr(self, var))
and (var not in INTERNAL_ARTIFACTS_SET)
)
for var, sha in available_vars:
_, previous_sha = to_merge.setdefault(var, (inp, sha))
if previous_sha != sha:
# We have a conflict here
unresolved.append(var)
# Check if everything in include is present in to_merge
missing = []
for v in include:
if v not in to_merge and not hasattr(self, v):
missing.append(v)
if unresolved:
# We have unresolved conflicts, so we do not set anything and error out
msg = (
"Step *{step}* cannot merge the following artifacts due to them "
"having conflicting values:\n[{artifacts}].\nTo remedy this issue, "
"be sure to explicitly set those artifacts (using "
"self.<artifact_name> = ...) prior to calling merge_artifacts.".format(
step=self._current_step, artifacts=", ".join(unresolved)
)
)
raise UnhandledInMergeArtifactsException(msg, unresolved)
if missing:
msg = (
"Step *{step}* specifies that [{include}] should be merged but "
"[{missing}] are not present.\nTo remedy this issue, make sure "
"that the values specified in only come from at least one branch".format(
step=self._current_step,
include=", ".join(include),
missing=", ".join(missing),
)
)
raise MissingInMergeArtifactsException(msg, missing)
# If things are resolved, we pass down the variables from the input datastores
for var, (inp, _) in to_merge.items():
self._datastore.passdown_partial(inp._datastore, [var])
def _validate_ubf_step(self, step_name):
join_list = self._graph[step_name].out_funcs
if len(join_list) != 1:
msg = (
"UnboundedForeach is supported only over a single node, "
"not an arbitrary DAG. Specify a single `join` node"
" instead of multiple:{join_list}.".format(join_list=join_list)
)
raise InvalidNextException(msg)
join_step = join_list[0]
join_node = self._graph[join_step]
join_type = join_node.type
if join_type != "join":
msg = (
"UnboundedForeach found for:{node} -> {join}."
" The join type isn't valid.".format(node=step_name, join=join_step)
)
raise InvalidNextException(msg)
def _get_foreach_item_value(self, item: Any):
"""
Get the unique value for the item in the foreach iterator. If no suitable value
is found, return the value formatted by reprlib, which is at most 30 characters long.
Parameters
----------
item : Any
The item to get the value from.
Returns
-------
str
The value to use for the item.
"""
def _is_primitive_type(item):
return (
isinstance(item, basestring)
or isinstance(item, int)
or isinstance(item, float)
or isinstance(item, bool)
)
value = item if _is_primitive_type(item) else reprlib.Repr().repr(item)
return basestring(value)[:MAXIMUM_FOREACH_VALUE_CHARS]
def next(self, *dsts: Callable[..., None], **kwargs) -> None:
"""
Indicates the next step to execute after this step has completed.
This statement should appear as the last statement of each step, except
the end step.
There are several valid formats to specify the next step:
- Straight-line connection: `self.next(self.next_step)` where `next_step` is a method in
the current class decorated with the `@step` decorator.
- Static fan-out connection: `self.next(self.step1, self.step2, ...)` where `stepX` are
methods in the current class decorated with the `@step` decorator.
- Foreach branch:
```
self.next(self.foreach_step, foreach='foreach_iterator')
```
In this situation, `foreach_step` is a method in the current class decorated with the
`@step` decorator and `foreach_iterator` is a variable name in the current class that
evaluates to an iterator. A task will be launched for each value in the iterator and
each task will execute the code specified by the step `foreach_step`.
- Switch statement:
```
self.next({"case1": self.step_a, "case2": self.step_b}, condition='condition_variable')
```
In this situation, `step_a` and `step_b` are methods in the current class decorated
with the `@step` decorator and `condition_variable` is a variable name in the current
class. The value of the condition variable determines which step to execute. If the
value doesn't match any of the dictionary keys, a RuntimeError is raised.
Parameters
----------
dsts : Callable[..., None]
One or more methods annotated with `@step`.
Raises
------
InvalidNextException
Raised if the format of the arguments does not match one of the ones given above.
"""
step = self._current_step
foreach = kwargs.pop("foreach", None)
num_parallel = kwargs.pop("num_parallel", None)
condition = kwargs.pop("condition", None)
if kwargs:
kw = next(iter(kwargs))
msg = (
"Step *{step}* passes an unknown keyword argument "
"'{invalid}' to self.next().".format(step=step, invalid=kw)
)
raise InvalidNextException(msg)
# check: next() is called only once
if self._transition is not None:
msg = (
"Multiple self.next() calls detected in step *{step}*. "
"Call self.next() only once.".format(step=step)
)
raise InvalidNextException(msg)
# check: switch case using condition
if condition is not None:
if len(dsts) != 1 or not isinstance(dsts[0], dict) or not dsts[0]:
msg = (
"Step *{step}* has an invalid self.next() transition. "
"When using 'condition', the transition must be to a single, "
"non-empty dictionary mapping condition values to step methods.".format(
step=step
)
)
raise InvalidNextException(msg)
if not isinstance(condition, basestring):
msg = (
"Step *{step}* has an invalid self.next() transition. "
"The argument to 'condition' must be a string.".format(step=step)
)
raise InvalidNextException(msg)
if foreach is not None or num_parallel is not None:
msg = (
"Step *{step}* has an invalid self.next() transition. "
"Switch statements cannot be combined with foreach or num_parallel.".format(
step=step
)
)
raise InvalidNextException(msg)
switch_cases = dsts[0]
# Validate that condition variable exists
try:
condition_value = getattr(self, condition)
except AttributeError:
msg = (
"Condition variable *self.{var}* in step *{step}* "
"does not exist. Make sure you set self.{var} in this step.".format(
step=step, var=condition
)
)
raise InvalidNextException(msg)
if condition_value not in switch_cases:
available_cases = list(switch_cases.keys())
raise RuntimeError(
f"Switch condition variable '{condition}' has value '{condition_value}' "
f"which is not in the available cases: {available_cases}"
)
# Get the chosen step and set transition directly
chosen_step_func = switch_cases[condition_value]
# Validate that the chosen step exists
try:
name = chosen_step_func.__func__.__name__
except:
msg = (
"Step *{step}* specifies a switch transition that is not a function. "
"Make sure the value in the dictionary is a method "
"of the Flow class.".format(step=step)
)
raise InvalidNextException(msg)
if not hasattr(self, name):
msg = (
"Step *{step}* specifies a switch transition to an "
"unknown step, *{name}*.".format(step=step, name=name)
)
raise InvalidNextException(msg)
self._transition = ([name], None)
return
# Check for an invalid transition: a dictionary used without a 'condition' parameter.
if len(dsts) == 1 and isinstance(dsts[0], dict):
msg = (
"Step *{step}* has an invalid self.next() transition. "
"Dictionary argument requires 'condition' parameter.".format(step=step)
)
raise InvalidNextException(msg)
# check: all destinations are methods of this object
funcs = []
for i, dst in enumerate(dsts):
try:
name = dst.__func__.__name__
except:
msg = (
"In step *{step}* the {arg}. argument in self.next() is "
"not a function. Make sure all arguments in self.next() "
"are methods of the Flow class.".format(step=step, arg=i + 1)
)
raise InvalidNextException(msg)
if not hasattr(self, name):
msg = (
"Step *{step}* specifies a self.next() transition to an "
"unknown step, *{name}*.".format(step=step, name=name)
)
raise InvalidNextException(msg)
funcs.append(name)
if num_parallel is not None and num_parallel >= 1:
if len(dsts) > 1:
raise InvalidNextException(
"Only one destination allowed when num_parallel used in self.next()"
)
foreach = "_parallel_ubf_iter"
self._parallel_ubf_iter = ParallelUBF(num_parallel)
# check: foreach is valid
if foreach:
if not isinstance(foreach, basestring):
msg = (
"Step *{step}* has an invalid self.next() transition. "
"The argument to 'foreach' must be a string.".format(step=step)
)
raise InvalidNextException(msg)
if len(dsts) != 1:
msg = (
"Step *{step}* has an invalid self.next() transition. "
"Specify exactly one target for 'foreach'.".format(step=step)
)
raise InvalidNextException(msg)
try:
foreach_iter = getattr(self, foreach)
except:
msg = (
"Foreach variable *self.{var}* in step *{step}* "
"does not exist. Check your variable.".format(
step=step, var=foreach
)
)
raise InvalidNextException(msg)
self._foreach_values = None
if issubclass(type(foreach_iter), UnboundedForeachInput):
self._unbounded_foreach = True
self._foreach_num_splits = None
self._validate_ubf_step(funcs[0])
else:
try:
if INCLUDE_FOREACH_STACK:
self._foreach_values = []
for item in foreach_iter:
value = self._get_foreach_item_value(item)
self._foreach_values.append(value)
self._foreach_num_splits = len(self._foreach_values)
else:
self._foreach_num_splits = sum(1 for _ in foreach_iter)
except Exception as e:
msg = (
"Foreach variable *self.{var}* in step *{step}* "
"is not iterable. Please check details: {err}".format(
step=step, var=foreach, err=str(e)
)
)
raise InvalidNextException(msg)
if self._foreach_num_splits == 0:
msg = (
"Foreach iterator over *{var}* in step *{step}* "
"produced zero splits. Check your variable.".format(
step=step, var=foreach
)
)
raise InvalidNextException(msg)
self._foreach_var = foreach
# check: non-keyword transitions are valid
if foreach is None and condition is None:
if len(dsts) < 1:
msg = (
"Step *{step}* has an invalid self.next() transition. "
"Specify at least one step function as an argument in "
"self.next().".format(step=step)
)
raise InvalidNextException(msg)
self._transition = (funcs, foreach)
def __str__(self):
step_name = getattr(self, "_current_step", None)
if step_name:
index = ",".join(str(idx) for idx, _, _ in self.foreach_stack())
if index:
inp = self.input
if inp is None:
return "<flow %s step %s[%s]>" % (self.name, step_name, index)
else:
inp = str(inp)
if len(inp) > 20:
inp = inp[:20] + "..."
return "<flow %s step %s[%s] (input: %s)>" % (
self.name,
step_name,
index,
inp,
)
else:
return "<flow %s step %s>" % (self.name, step_name)
else:
return "<flow %s>" % self.name
def __getstate__(self):
raise MetaflowException(
"Flows can't be serialized. Maybe you tried "
"to assign *self* or one of the *inputs* "
"to an attribute? Instead of serializing the "
"whole flow, you should choose specific "
"attributes, e.g. *input.some_var*, to be "
"stored."
)
| FlowSpec |
python | pytorch__pytorch | torch/testing/_internal/distributed/distributed_test.py | {
"start": 9466,
"end": 9824
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.module = UnusedParamTwoLinLayerNet()
def forward(self, x):
predictions = self.module(x)
loss = (predictions[0] + predictions[1]).sum()
return {
"predictions": predictions,
"loss": loss,
}
| DictOutputModule |
python | marshmallow-code__marshmallow | tests/test_deserialization.py | {
"start": 2493,
"end": 59790
} | class ____:
def test_float_field_deserialization(self):
field = fields.Float()
assert math.isclose(field.deserialize("12.3"), 12.3)
assert math.isclose(field.deserialize(12.3), 12.3)
@pytest.mark.parametrize("in_val", ["bad", "", {}, True, False])
def test_invalid_float_field_deserialization(self, in_val):
field = fields.Float()
with pytest.raises(ValidationError) as excinfo:
field.deserialize(in_val)
assert excinfo.value.args[0] == "Not a valid number."
def test_float_field_overflow(self):
field = fields.Float()
with pytest.raises(ValidationError) as excinfo:
field.deserialize(2**1024)
assert excinfo.value.args[0] == "Number too large."
def test_integer_field_deserialization(self):
field = fields.Integer()
assert field.deserialize("42") == 42
with pytest.raises(ValidationError) as excinfo:
field.deserialize("42.0")
assert excinfo.value.args[0] == "Not a valid integer."
with pytest.raises(ValidationError):
field.deserialize("bad")
assert excinfo.value.args[0] == "Not a valid integer."
with pytest.raises(ValidationError):
field.deserialize({})
assert excinfo.value.args[0] == "Not a valid integer."
def test_strict_integer_field_deserialization(self):
field = fields.Integer(strict=True)
assert field.deserialize(42) == 42
with pytest.raises(ValidationError) as excinfo:
field.deserialize(42.0)
assert excinfo.value.args[0] == "Not a valid integer."
with pytest.raises(ValidationError) as excinfo:
field.deserialize(decimal.Decimal("42.0"))
assert excinfo.value.args[0] == "Not a valid integer."
with pytest.raises(ValidationError) as excinfo:
field.deserialize("42")
assert excinfo.value.args[0] == "Not a valid integer."
def test_decimal_field_deserialization(self):
m1 = 12
m2 = "12.355"
m3 = decimal.Decimal(1)
m4 = 3.14
m5 = "abc"
m6 = [1, 2]
field = fields.Decimal()
assert isinstance(field.deserialize(m1), decimal.Decimal)
assert field.deserialize(m1) == decimal.Decimal(12)
assert isinstance(field.deserialize(m2), decimal.Decimal)
assert field.deserialize(m2) == decimal.Decimal("12.355")
assert isinstance(field.deserialize(m3), decimal.Decimal)
assert field.deserialize(m3) == decimal.Decimal(1)
assert isinstance(field.deserialize(m4), decimal.Decimal)
assert field.deserialize(m4).as_tuple() == (0, (3, 1, 4), -2)
with pytest.raises(ValidationError) as excinfo:
field.deserialize(m5)
assert excinfo.value.args[0] == "Not a valid number."
with pytest.raises(ValidationError) as excinfo:
field.deserialize(m6)
assert excinfo.value.args[0] == "Not a valid number."
def test_decimal_field_with_places(self):
m1 = 12
m2 = "12.355"
m3 = decimal.Decimal(1)
m4 = "abc"
m5 = [1, 2]
field = fields.Decimal(1)
assert isinstance(field.deserialize(m1), decimal.Decimal)
assert field.deserialize(m1) == decimal.Decimal(12)
assert isinstance(field.deserialize(m2), decimal.Decimal)
assert field.deserialize(m2) == decimal.Decimal("12.4")
assert isinstance(field.deserialize(m3), decimal.Decimal)
assert field.deserialize(m3) == decimal.Decimal(1)
with pytest.raises(ValidationError) as excinfo:
field.deserialize(m4)
assert excinfo.value.args[0] == "Not a valid number."
with pytest.raises(ValidationError) as excinfo:
field.deserialize(m5)
assert excinfo.value.args[0] == "Not a valid number."
def test_decimal_field_with_places_and_rounding(self):
m1 = 12
m2 = "12.355"
m3 = decimal.Decimal(1)
m4 = "abc"
m5 = [1, 2]
field = fields.Decimal(1, decimal.ROUND_DOWN)
assert isinstance(field.deserialize(m1), decimal.Decimal)
assert field.deserialize(m1) == decimal.Decimal(12)
assert isinstance(field.deserialize(m2), decimal.Decimal)
assert field.deserialize(m2) == decimal.Decimal("12.3")
assert isinstance(field.deserialize(m3), decimal.Decimal)
assert field.deserialize(m3) == decimal.Decimal(1)
with pytest.raises(ValidationError):
field.deserialize(m4)
with pytest.raises(ValidationError):
field.deserialize(m5)
def test_decimal_field_deserialization_string(self):
m1 = 12
m2 = "12.355"
m3 = decimal.Decimal(1)
m4 = "abc"
m5 = [1, 2]
field = fields.Decimal(as_string=True)
assert isinstance(field.deserialize(m1), decimal.Decimal)
assert field.deserialize(m1) == decimal.Decimal(12)
assert isinstance(field.deserialize(m2), decimal.Decimal)
assert field.deserialize(m2) == decimal.Decimal("12.355")
assert isinstance(field.deserialize(m3), decimal.Decimal)
assert field.deserialize(m3) == decimal.Decimal(1)
with pytest.raises(ValidationError):
field.deserialize(m4)
with pytest.raises(ValidationError):
field.deserialize(m5)
def test_decimal_field_special_values(self):
m1 = "-NaN"
m2 = "NaN"
m3 = "-sNaN"
m4 = "sNaN"
m5 = "-Infinity"
m6 = "Infinity"
m7 = "-0"
field = fields.Decimal(places=2, allow_nan=True)
m1d = field.deserialize(m1)
assert isinstance(m1d, decimal.Decimal)
assert m1d.is_qnan()
assert not m1d.is_signed()
m2d = field.deserialize(m2)
assert isinstance(m2d, decimal.Decimal)
assert m2d.is_qnan()
assert not m2d.is_signed()
m3d = field.deserialize(m3)
assert isinstance(m3d, decimal.Decimal)
assert m3d.is_qnan()
assert not m3d.is_signed()
m4d = field.deserialize(m4)
assert isinstance(m4d, decimal.Decimal)
assert m4d.is_qnan()
assert not m4d.is_signed()
m5d = field.deserialize(m5)
assert isinstance(m5d, decimal.Decimal)
assert m5d.is_infinite()
assert m5d.is_signed()
m6d = field.deserialize(m6)
assert isinstance(m6d, decimal.Decimal)
assert m6d.is_infinite()
assert not m6d.is_signed()
m7d = field.deserialize(m7)
assert isinstance(m7d, decimal.Decimal)
assert m7d.is_zero()
assert m7d.is_signed()
def test_decimal_field_special_values_not_permitted(self):
m1 = "-NaN"
m2 = "NaN"
m3 = "-sNaN"
m4 = "sNaN"
m5 = "-Infinity"
m6 = "Infinity"
m7 = "-0"
field = fields.Decimal(places=2)
with pytest.raises(ValidationError) as excinfo:
field.deserialize(m1)
assert str(excinfo.value.args[0]) == (
"Special numeric values (nan or infinity) are not permitted."
)
with pytest.raises(ValidationError):
field.deserialize(m2)
with pytest.raises(ValidationError):
field.deserialize(m3)
with pytest.raises(ValidationError):
field.deserialize(m4)
with pytest.raises(ValidationError):
field.deserialize(m5)
with pytest.raises(ValidationError):
field.deserialize(m6)
m7d = field.deserialize(m7)
assert isinstance(m7d, decimal.Decimal)
assert m7d.is_zero()
assert m7d.is_signed()
@pytest.mark.parametrize("allow_nan", (None, False, True))
@pytest.mark.parametrize("value", ("nan", "-nan", "inf", "-inf"))
def test_float_field_allow_nan(self, value, allow_nan):
if allow_nan is None:
# Test default case is False
field = fields.Float()
else:
field = fields.Float(allow_nan=allow_nan)
if allow_nan is True:
res = field.deserialize(value)
assert isinstance(res, float)
if value.endswith("nan"):
assert math.isnan(res)
else:
assert res == float(value)
else:
with pytest.raises(ValidationError) as excinfo:
field.deserialize(value)
assert str(excinfo.value.args[0]) == (
"Special numeric values (nan or infinity) are not permitted."
)
def test_string_field_deserialization(self):
field = fields.String()
assert field.deserialize("foo") == "foo"
assert field.deserialize(b"foo") == "foo"
# https://github.com/marshmallow-code/marshmallow/issues/231
with pytest.raises(ValidationError) as excinfo:
field.deserialize(42)
assert excinfo.value.args[0] == "Not a valid string."
with pytest.raises(ValidationError):
field.deserialize({})
def test_boolean_field_deserialization(self):
field = fields.Boolean()
assert field.deserialize(True) is True
assert field.deserialize(False) is False
assert field.deserialize("True") is True
assert field.deserialize("False") is False
assert field.deserialize("true") is True
assert field.deserialize("false") is False
assert field.deserialize("1") is True
assert field.deserialize("0") is False
assert field.deserialize("on") is True
assert field.deserialize("ON") is True
assert field.deserialize("On") is True
assert field.deserialize("off") is False
assert field.deserialize("OFF") is False
assert field.deserialize("Off") is False
assert field.deserialize("y") is True
assert field.deserialize("Y") is True
assert field.deserialize("yes") is True
assert field.deserialize("YES") is True
assert field.deserialize("Yes") is True
assert field.deserialize("n") is False
assert field.deserialize("N") is False
assert field.deserialize("no") is False
assert field.deserialize("NO") is False
assert field.deserialize("No") is False
assert field.deserialize(1) is True
assert field.deserialize(0) is False
with pytest.raises(ValidationError) as excinfo:
field.deserialize({})
assert excinfo.value.args[0] == "Not a valid boolean."
with pytest.raises(ValidationError) as excinfo:
field.deserialize(42)
with pytest.raises(ValidationError) as excinfo:
field.deserialize("invalid-string")
def test_boolean_field_deserialization_with_custom_truthy_values(self):
class MyBoolean(fields.Boolean):
truthy = {"yep"}
field = MyBoolean()
assert field.deserialize("yep") is True
field2 = fields.Boolean(truthy=("yep",))
assert field2.deserialize("yep") is True
assert field2.deserialize(False) is False
@pytest.mark.parametrize("in_val", ["notvalid", 123])
def test_boolean_field_deserialization_with_custom_truthy_values_invalid(
self, in_val
):
class MyBoolean(fields.Boolean):
truthy = {"yep"}
field = MyBoolean()
with pytest.raises(ValidationError) as excinfo:
field.deserialize(in_val)
expected_msg = "Not a valid boolean."
assert str(excinfo.value.args[0]) == expected_msg
field2 = fields.Boolean(truthy={"yep"})
with pytest.raises(ValidationError) as excinfo:
field2.deserialize(in_val)
expected_msg = "Not a valid boolean."
assert str(excinfo.value.args[0]) == expected_msg
field3 = MyBoolean(error_messages={"invalid": "bad input"})
with pytest.raises(ValidationError) as excinfo:
field3.deserialize(in_val)
assert str(excinfo.value.args[0]) == "bad input"
def test_boolean_field_deserialization_with_empty_truthy(self):
field = fields.Boolean(truthy=set())
assert field.deserialize("yep") is True
assert field.deserialize(True) is True
assert field.deserialize(False) is False
def test_boolean_field_deserialization_with_custom_falsy_values(self):
field = fields.Boolean(falsy=("nope",))
assert field.deserialize("nope") is False
assert field.deserialize(True) is True
def test_field_toggle_show_invalid_value_in_error_message(self):
error_messages = {"invalid": "Not valid: {input}"}
boolfield = fields.Boolean(error_messages=error_messages)
with pytest.raises(ValidationError) as excinfo:
boolfield.deserialize("notabool")
assert str(excinfo.value.args[0]) == "Not valid: notabool"
numfield = fields.Float(error_messages=error_messages)
with pytest.raises(ValidationError) as excinfo:
numfield.deserialize("notanum")
assert str(excinfo.value.args[0]) == "Not valid: notanum"
intfield = fields.Integer(error_messages=error_messages)
with pytest.raises(ValidationError) as excinfo:
intfield.deserialize("notanint")
assert str(excinfo.value.args[0]) == "Not valid: notanint"
date_error_messages = {"invalid": "Not a valid {obj_type}: {input}"}
datefield = fields.DateTime(error_messages=date_error_messages)
with pytest.raises(ValidationError) as excinfo:
datefield.deserialize("notadate")
assert str(excinfo.value.args[0]) == "Not a valid datetime: notadate"
@pytest.mark.parametrize(
"in_value",
[
"not-a-datetime",
42,
True,
False,
0,
"",
[],
"2018",
"2018-01",
dt.datetime.now().strftime("%H:%M:%S %Y-%m-%d"),
dt.datetime.now().strftime("%m-%d-%Y %H:%M:%S"),
],
)
def test_invalid_datetime_deserialization(self, in_value):
field = fields.DateTime()
with pytest.raises(ValidationError, match="Not a valid datetime."):
field.deserialize(in_value)
def test_custom_date_format_datetime_field_deserialization(self):
# Datetime string with format "%H:%M:%S.%f %Y-%m-%d"
datestring = "10:11:12.123456 2019-01-02"
# Deserialization should fail when datestring is not of same format
field = fields.DateTime(format="%d-%m-%Y %H:%M:%S")
with pytest.raises(ValidationError, match="Not a valid datetime."):
field.deserialize(datestring)
field = fields.DateTime(format="%H:%M:%S.%f %Y-%m-%d")
assert field.deserialize(datestring) == dt.datetime(
2019, 1, 2, 10, 11, 12, 123456
)
field = fields.NaiveDateTime(format="%H:%M:%S.%f %Y-%m-%d")
assert field.deserialize(datestring) == dt.datetime(
2019, 1, 2, 10, 11, 12, 123456
)
field = fields.AwareDateTime(format="%H:%M:%S.%f %Y-%m-%d")
with pytest.raises(ValidationError, match="Not a valid aware datetime."):
field.deserialize(datestring)
@pytest.mark.parametrize("fmt", ["rfc", "rfc822"])
@pytest.mark.parametrize(
("value", "expected", "aware"),
[
(
"Sun, 10 Nov 2013 01:23:45 -0000",
dt.datetime(2013, 11, 10, 1, 23, 45),
False,
),
(
"Sun, 10 Nov 2013 01:23:45 +0000",
dt.datetime(2013, 11, 10, 1, 23, 45, tzinfo=dt.timezone.utc),
True,
),
(
"Sun, 10 Nov 2013 01:23:45 -0600",
dt.datetime(2013, 11, 10, 1, 23, 45, tzinfo=central),
True,
),
],
)
def test_rfc_datetime_field_deserialization(self, fmt, value, expected, aware):
field = fields.DateTime(format=fmt)
assert field.deserialize(value) == expected
field = fields.NaiveDateTime(format=fmt)
if aware:
with pytest.raises(ValidationError, match="Not a valid naive datetime."):
field.deserialize(value)
else:
assert field.deserialize(value) == expected
field = fields.AwareDateTime(format=fmt)
if not aware:
with pytest.raises(ValidationError, match="Not a valid aware datetime."):
field.deserialize(value)
else:
assert field.deserialize(value) == expected
@pytest.mark.parametrize("fmt", ["iso", "iso8601"])
@pytest.mark.parametrize(
("value", "expected", "aware"),
[
("2013-11-10T01:23:45", dt.datetime(2013, 11, 10, 1, 23, 45), False),
(
"2013-11-10T01:23:45+00:00",
dt.datetime(2013, 11, 10, 1, 23, 45, tzinfo=dt.timezone.utc),
True,
),
(
# Regression test for https://github.com/marshmallow-code/marshmallow/issues/1251
"2013-11-10T01:23:45.123+00:00",
dt.datetime(2013, 11, 10, 1, 23, 45, 123000, tzinfo=dt.timezone.utc),
True,
),
(
"2013-11-10T01:23:45.123456+00:00",
dt.datetime(2013, 11, 10, 1, 23, 45, 123456, tzinfo=dt.timezone.utc),
True,
),
(
"2013-11-10T01:23:45-06:00",
dt.datetime(2013, 11, 10, 1, 23, 45, tzinfo=central),
True,
),
],
)
def test_iso_datetime_field_deserialization(self, fmt, value, expected, aware):
field = fields.DateTime(format=fmt)
assert field.deserialize(value) == expected
field = fields.NaiveDateTime(format=fmt)
if aware:
with pytest.raises(ValidationError, match="Not a valid naive datetime."):
field.deserialize(value)
else:
assert field.deserialize(value) == expected
field = fields.AwareDateTime(format=fmt)
if not aware:
with pytest.raises(ValidationError, match="Not a valid aware datetime."):
field.deserialize(value)
else:
assert field.deserialize(value) == expected
@pytest.mark.parametrize(
("fmt", "value", "expected"),
[
("timestamp", 1384043025, dt.datetime(2013, 11, 10, 0, 23, 45)),
("timestamp", "1384043025", dt.datetime(2013, 11, 10, 0, 23, 45)),
("timestamp", 1384043025.12, dt.datetime(2013, 11, 10, 0, 23, 45, 120000)),
(
"timestamp",
1384043025.123456,
dt.datetime(2013, 11, 10, 0, 23, 45, 123456),
),
("timestamp", 1, dt.datetime(1970, 1, 1, 0, 0, 1)),
("timestamp_ms", 1384043025000, dt.datetime(2013, 11, 10, 0, 23, 45)),
("timestamp_ms", 1000, dt.datetime(1970, 1, 1, 0, 0, 1)),
],
)
def test_timestamp_field_deserialization(self, fmt, value, expected):
field = fields.DateTime(format=fmt)
assert field.deserialize(value) == expected
# By default, a datetime from a timestamp is never aware.
field = fields.NaiveDateTime(format=fmt)
assert field.deserialize(value) == expected
field = fields.AwareDateTime(format=fmt)
with pytest.raises(ValidationError, match="Not a valid aware datetime."):
field.deserialize(value)
# But it can be added by providing a default.
field = fields.AwareDateTime(format=fmt, default_timezone=central)
expected_aware = expected.replace(tzinfo=central)
assert field.deserialize(value) == expected_aware
@pytest.mark.parametrize("fmt", ["timestamp", "timestamp_ms"])
@pytest.mark.parametrize(
"in_value",
["", "!@#", -1],
)
def test_invalid_timestamp_field_deserialization(self, fmt, in_value):
field = fields.DateTime(format=fmt)
with pytest.raises(ValidationError, match="Not a valid datetime."):
field.deserialize(in_value)
# Regression test for https://github.com/marshmallow-code/marshmallow/pull/2102
@pytest.mark.parametrize("fmt", ["timestamp", "timestamp_ms"])
@pytest.mark.parametrize(
"mock_fromtimestamp", [MockDateTimeOSError, MockDateTimeOverflowError]
)
def test_oversized_timestamp_field_deserialization(self, fmt, mock_fromtimestamp):
with patch("datetime.datetime", mock_fromtimestamp):
field = fields.DateTime(format=fmt)
with pytest.raises(ValidationError, match="Not a valid datetime."):
field.deserialize(99999999999999999)
@pytest.mark.parametrize(
    ("fmt", "timezone", "value", "expected"),
    [
        ("iso", None, "2013-11-10T01:23:45", dt.datetime(2013, 11, 10, 1, 23, 45)),
        (
            "iso",
            dt.timezone.utc,
            "2013-11-10T01:23:45+00:00",
            dt.datetime(2013, 11, 10, 1, 23, 45),
        ),
        (
            "iso",
            central,
            "2013-11-10T01:23:45-03:00",
            dt.datetime(2013, 11, 9, 22, 23, 45),
        ),
        (
            "rfc",
            None,
            "Sun, 10 Nov 2013 01:23:45 -0000",
            dt.datetime(2013, 11, 10, 1, 23, 45),
        ),
        (
            "rfc",
            dt.timezone.utc,
            "Sun, 10 Nov 2013 01:23:45 +0000",
            dt.datetime(2013, 11, 10, 1, 23, 45),
        ),
        (
            "rfc",
            central,
            "Sun, 10 Nov 2013 01:23:45 -0300",
            dt.datetime(2013, 11, 9, 22, 23, 45),
        ),
    ],
)
def test_naive_datetime_with_timezone(self, fmt, timezone, value, expected):
    """NaiveDateTime with ``timezone`` set: aware inputs are apparently
    converted into that zone before tzinfo is dropped (expected values
    shift accordingly) — behavior inferred from the expectations here."""
    field = fields.NaiveDateTime(format=fmt, timezone=timezone)
    assert field.deserialize(value) == expected
@pytest.mark.parametrize("timezone", (dt.timezone.utc, central))
@pytest.mark.parametrize(
    ("fmt", "value"),
    [("iso", "2013-11-10T01:23:45"), ("rfc", "Sun, 10 Nov 2013 01:23:45")],
)
def test_aware_datetime_default_timezone(self, fmt, timezone, value):
    """A naive input string gains ``default_timezone`` as its tzinfo
    when deserialized by AwareDateTime."""
    field = fields.AwareDateTime(format=fmt, default_timezone=timezone)
    assert field.deserialize(value) == dt.datetime(
        2013, 11, 10, 1, 23, 45, tzinfo=timezone
    )
def test_time_field_deserialization(self):
    """ISO-formatted time strings round-trip through fields.Time,
    both with and without microseconds."""
    field = fields.Time()
    # A whole-second value and one carrying microseconds.
    for expected in (dt.time(1, 23, 45), dt.time(1, 23, 45, 6789)):
        deserialized = field.deserialize(expected.isoformat())
        assert isinstance(deserialized, dt.time)
        assert_time_equal(deserialized, expected)
@pytest.mark.parametrize("in_data", ["badvalue", "", [], 42])
def test_invalid_time_field_deserialization(self, in_data):
    """Non-time strings and non-string types raise the standard error."""
    field = fields.Time()
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(in_data)
    assert excinfo.value.args[0] == "Not a valid time."
def test_custom_time_format_time_field_deserialization(self):
    """A custom strptime-style format is honored; a mismatching format
    string makes deserialization fail."""
    # Time string with format "%f.%S:%M:%H"
    timestring = "123456.12:11:10"
    # Deserialization should fail when timestring is not of same format
    field = fields.Time(format="%S:%M:%H")
    with pytest.raises(ValidationError, match="Not a valid time."):
        field.deserialize(timestring)
    field = fields.Time(format="%f.%S:%M:%H")
    assert field.deserialize(timestring) == dt.time(10, 11, 12, 123456)
@pytest.mark.parametrize("fmt", ["iso", "iso8601", None])
@pytest.mark.parametrize(
    ("value", "expected"),
    [
        ("01:23:45", dt.time(1, 23, 45)),
        ("01:23:45.123", dt.time(1, 23, 45, 123000)),
        ("01:23:45.123456", dt.time(1, 23, 45, 123456)),
        (
            "01:23:45+01:00",
            dt.time(1, 23, 45, tzinfo=dt.timezone(dt.timedelta(seconds=3600))),
        ),
    ],
)
def test_iso_time_field_deserialization(self, fmt, value, expected):
    """ISO time parsing is the default and handles fractional seconds
    and UTC offsets; "iso" and "iso8601" are aliases."""
    if fmt is None:
        field = fields.Time()
    else:
        field = fields.Time(format=fmt)
    assert field.deserialize(value) == expected
def test_invalid_timedelta_precision(self):
    """An unknown precision name is rejected at field construction."""
    with pytest.raises(ValueError, match="The precision must be one of: weeks,"):
        fields.TimeDelta("invalid")
def test_timedelta_field_deserialization(self):
    """TimeDelta interprets a numeric input as a count of the configured
    precision unit (default: seconds) and normalizes it into a
    dt.timedelta. Fractional inputs are preserved down to microsecond
    resolution; sub-microsecond parts are truncated."""
    # Default precision is seconds; numeric strings are accepted.
    field = fields.TimeDelta()
    result = field.deserialize("42")
    assert isinstance(result, dt.timedelta)
    assert result.days == 0
    assert result.seconds == 42
    assert result.microseconds == 0
    # Fractional seconds become microseconds.
    field = fields.TimeDelta()
    result = field.deserialize("42.9")
    assert isinstance(result, dt.timedelta)
    assert result.days == 0
    assert result.seconds == 42
    assert result.microseconds == 900000
    # 100000 s == 1 day + 13600 s.
    field = fields.TimeDelta(fields.TimeDelta.SECONDS)
    result = field.deserialize(100000)
    assert result.days == 1
    assert result.seconds == 13600
    assert result.microseconds == 0
    # Negative values are allowed.
    field = fields.TimeDelta(fields.TimeDelta.DAYS)
    result = field.deserialize("-42")
    assert isinstance(result, dt.timedelta)
    assert result.days == -42
    assert result.seconds == 0
    assert result.microseconds == 0
    # Microsecond counts normalize across the seconds boundary ...
    field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
    result = field.deserialize(10**6 + 1)
    assert isinstance(result, dt.timedelta)
    assert result.days == 0
    assert result.seconds == 1
    assert result.microseconds == 1
    # ... and across the day boundary.
    field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
    result = field.deserialize(86400 * 10**6 + 1)
    assert isinstance(result, dt.timedelta)
    assert result.days == 1
    assert result.seconds == 0
    assert result.microseconds == 1
    # Float input with default (seconds) precision.
    field = fields.TimeDelta()
    result = field.deserialize(12.9)
    assert isinstance(result, dt.timedelta)
    assert result.days == 0
    assert result.seconds == 12
    assert result.microseconds == 900000
    # Coarser units: weeks, hours, minutes, milliseconds.
    field = fields.TimeDelta(fields.TimeDelta.WEEKS)
    result = field.deserialize(1)
    assert isinstance(result, dt.timedelta)
    assert result.days == 7
    assert result.seconds == 0
    assert result.microseconds == 0
    field = fields.TimeDelta(fields.TimeDelta.HOURS)
    result = field.deserialize(25)
    assert isinstance(result, dt.timedelta)
    assert result.days == 1
    assert result.seconds == 3600
    assert result.microseconds == 0
    field = fields.TimeDelta(fields.TimeDelta.MINUTES)
    result = field.deserialize(1441)
    assert isinstance(result, dt.timedelta)
    assert result.days == 1
    assert result.seconds == 60
    assert result.microseconds == 0
    field = fields.TimeDelta(fields.TimeDelta.MILLISECONDS)
    result = field.deserialize(123456)
    assert isinstance(result, dt.timedelta)
    assert result.days == 0
    assert result.seconds == 123
    assert result.microseconds == 456000
    # Fractional totals: dividing total_seconds() by the unit's length
    # recovers the input (modulo microsecond truncation).
    total_microseconds_value = 322.0
    field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
    result = field.deserialize(total_microseconds_value)
    assert isinstance(result, dt.timedelta)
    unit_value = dt.timedelta(microseconds=1).total_seconds()
    assert math.isclose(
        result.total_seconds() / unit_value, total_microseconds_value
    )
    # Sub-microsecond fractions are truncated (floor), not rounded.
    total_microseconds_value = 322.12345
    field = fields.TimeDelta(fields.TimeDelta.MICROSECONDS)
    result = field.deserialize(total_microseconds_value)
    assert isinstance(result, dt.timedelta)
    unit_value = dt.timedelta(microseconds=1).total_seconds()
    assert math.isclose(
        result.total_seconds() / unit_value, math.floor(total_microseconds_value)
    )
    total_milliseconds_value = 322.223
    field = fields.TimeDelta(fields.TimeDelta.MILLISECONDS)
    result = field.deserialize(total_milliseconds_value)
    assert isinstance(result, dt.timedelta)
    unit_value = dt.timedelta(milliseconds=1).total_seconds()
    assert math.isclose(
        result.total_seconds() / unit_value, total_milliseconds_value
    )
    total_seconds_value = 322.223
    field = fields.TimeDelta(fields.TimeDelta.SECONDS)
    result = field.deserialize(total_seconds_value)
    assert isinstance(result, dt.timedelta)
    assert math.isclose(result.total_seconds(), total_seconds_value)
    total_minutes_value = 322.223
    field = fields.TimeDelta(fields.TimeDelta.MINUTES)
    result = field.deserialize(total_minutes_value)
    assert isinstance(result, dt.timedelta)
    unit_value = dt.timedelta(minutes=1).total_seconds()
    assert math.isclose(result.total_seconds() / unit_value, total_minutes_value)
    total_hours_value = 322.223
    field = fields.TimeDelta(fields.TimeDelta.HOURS)
    result = field.deserialize(total_hours_value)
    assert isinstance(result, dt.timedelta)
    unit_value = dt.timedelta(hours=1).total_seconds()
    assert math.isclose(result.total_seconds() / unit_value, total_hours_value)
    total_days_value = 322.223
    field = fields.TimeDelta(fields.TimeDelta.DAYS)
    result = field.deserialize(total_days_value)
    assert isinstance(result, dt.timedelta)
    unit_value = dt.timedelta(days=1).total_seconds()
    assert math.isclose(result.total_seconds() / unit_value, total_days_value)
    total_weeks_value = 322.223
    field = fields.TimeDelta(fields.TimeDelta.WEEKS)
    result = field.deserialize(total_weeks_value)
    assert isinstance(result, dt.timedelta)
    unit_value = dt.timedelta(weeks=1).total_seconds()
    assert math.isclose(result.total_seconds() / unit_value, total_weeks_value)
@pytest.mark.parametrize("in_value", ["", "badvalue", [], 9999999999])
def test_invalid_timedelta_field_deserialization(self, in_value):
    """Non-numeric input — and a DAYS count too large for dt.timedelta —
    raises the standard error."""
    field = fields.TimeDelta(fields.TimeDelta.DAYS)
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(in_value)
    assert excinfo.value.args[0] == "Not a valid period of time."
@pytest.mark.parametrize("format", (None, "%Y-%m-%d"))
def test_date_field_deserialization(self, format):  # noqa: A002
    """ISO date strings deserialize to dt.date (not datetime), whether
    the format is defaulted or given explicitly."""
    field = fields.Date(format=format)
    d = dt.date(2014, 8, 21)
    iso_date = d.isoformat()
    result = field.deserialize(iso_date)
    # type() check (not isinstance) guards against a datetime sneaking in.
    assert type(result) is dt.date
    assert_date_equal(result, d)
@pytest.mark.parametrize(
    "in_value", ["", 123, [], dt.date(2014, 8, 21).strftime("%d-%m-%Y")]
)
def test_invalid_date_field_deserialization(self, in_value):
    """Wrong types and non-ISO date strings are rejected."""
    field = fields.Date()
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(in_value)
    msg = "Not a valid date."
    assert excinfo.value.args[0] == msg
def test_dict_field_deserialization(self):
    """An untyped Dict returns an equal but distinct dict; non-mapping
    input is rejected."""
    data = {"foo": "bar"}
    field = fields.Dict()
    load = field.deserialize(data)
    assert load == {"foo": "bar"}
    # Check load is a distinct object
    load["foo"] = "baz"
    assert data["foo"] == "bar"
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize("baddict")
    assert excinfo.value.args[0] == "Not a valid mapping type."
def test_structured_dict_value_deserialization(self):
    """Value errors are reported per-key under a "value" sub-key, and
    valid entries survive in ``valid_data`` (the failing list becomes
    empty, not dropped)."""
    field = fields.Dict(values=fields.List(fields.Str))
    assert field.deserialize({"foo": ["bar", "baz"]}) == {"foo": ["bar", "baz"]}
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize({"foo": [1, 2], "bar": "baz", "ham": ["spam"]})
    assert excinfo.value.args[0] == {
        "foo": {"value": {0: ["Not a valid string."], 1: ["Not a valid string."]}},
        "bar": {"value": ["Not a valid list."]},
    }
    assert excinfo.value.valid_data == {"foo": [], "ham": ["spam"]}
def test_structured_dict_key_deserialization(self):
    """Key errors are reported per original key under a "key" sub-key;
    entries with valid keys survive in ``valid_data``."""
    field = fields.Dict(keys=fields.Str)
    assert field.deserialize({"foo": "bar"}) == {"foo": "bar"}
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize({1: "bar", "foo": "baz"})
    assert excinfo.value.args[0] == {1: {"key": ["Not a valid string."]}}
    assert excinfo.value.valid_data == {"foo": "baz"}
def test_structured_dict_key_value_deserialization(self):
    """Key and value errors combine per entry; key validators (email +
    regexp) each contribute their own message."""
    field = fields.Dict(
        keys=fields.Str(
            validate=[validate.Email(), validate.Regexp(r".*@test\.com$")]
        ),
        values=fields.Decimal,
    )
    assert field.deserialize({"foo@test.com": 1}) == {
        "foo@test.com": decimal.Decimal(1)
    }
    # Both the key and the value fail.
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize({1: "bar"})
    assert excinfo.value.args[0] == {
        1: {"key": ["Not a valid string."], "value": ["Not a valid number."]}
    }
    # Only the value fails.
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize({"foo@test.com": "bar"})
    assert excinfo.value.args[0] == {
        "foo@test.com": {"value": ["Not a valid number."]}
    }
    assert excinfo.value.valid_data == {}
    # Only the key fails.
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize({1: 1})
    assert excinfo.value.args[0] == {1: {"key": ["Not a valid string."]}}
    assert excinfo.value.valid_data == {}
    # Both key validators fail, plus the value.
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize({"foo": "bar"})
    assert excinfo.value.args[0] == {
        "foo": {
            "key": [
                "Not a valid email address.",
                "String does not match expected pattern.",
            ],
            "value": ["Not a valid number."],
        }
    }
    assert excinfo.value.valid_data == {}
def test_url_field_deserialization(self):
    """Absolute URLs pass through unchanged; malformed and relative URLs
    are rejected by default."""
    field = fields.Url()
    assert field.deserialize("https://duckduckgo.com") == "https://duckduckgo.com"
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize("badurl")
    assert excinfo.value.args[0][0] == "Not a valid URL."
    # Relative URLS not allowed by default
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize("/foo/bar")
    assert excinfo.value.args[0][0] == "Not a valid URL."
# regression test for https://github.com/marshmallow-code/marshmallow/issues/1400
def test_url_field_non_list_validators(self):
    """Validators passed as a tuple (not a list) must still be applied."""
    url_field = fields.Url(validate=(validate.Length(min=16),))
    too_short = "https://abc.def"
    with pytest.raises(ValidationError, match="Shorter than minimum length"):
        url_field.deserialize(too_short)
def test_relative_url_field_deserialization(self):
    """relative=True lets a path-only URL pass through unchanged."""
    relative_url = "/foo/bar"
    assert fields.Url(relative=True).deserialize(relative_url) == relative_url
def test_url_field_schemes_argument(self):
    """Non-default schemes (here ws://) are rejected unless listed in
    the ``schemes`` argument."""
    field = fields.URL()
    url = "ws://test.test"
    with pytest.raises(ValidationError):
        field.deserialize(url)
    field2 = fields.URL(schemes={"http", "https", "ws"})
    assert field2.deserialize(url) == url
def test_email_field_deserialization(self):
    """Valid addresses pass through; invalid ones and extra validators
    raise with their respective messages."""
    field = fields.Email()
    assert field.deserialize("foo@bar.com") == "foo@bar.com"
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize("invalidemail")
    assert excinfo.value.args[0][0] == "Not a valid email address."
    # User-supplied validators run in addition to the email check.
    field = fields.Email(validate=[validate.Length(min=12)])
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize("foo@bar.com")
    assert excinfo.value.args[0][0] == "Shorter than minimum length 12."
# regression test for https://github.com/marshmallow-code/marshmallow/issues/1400
def test_email_field_non_list_validators(self):
    """Validators passed as a tuple (not a list) must still be applied."""
    email_field = fields.Email(validate=(validate.Length(min=9),))
    with pytest.raises(ValidationError, match="Shorter than minimum length"):
        email_field.deserialize("a@bc.com")
def test_function_field_deserialization_is_noop_by_default(self):
    """Without a deserialize callable, Function passes input through untouched."""
    field = fields.Function(lambda x: None)
    # Strings and ints alike come back unchanged.
    for value in ("foo", 42):
        assert field.deserialize(value) == value
def test_function_field_deserialization_with_callable(self):
    """The deserialize callable transforms the loaded value."""
    upper_field = fields.Function(lambda x: None, deserialize=lambda val: val.upper())
    assert upper_field.deserialize("foo") == "FOO"
def test_function_field_deserialization_missing_with_length_validator(self):
    """When the deserialize callable returns ``missing``, validators
    must be skipped and ``missing`` returned as-is."""
    field = fields.Function(
        deserialize=lambda value: value.get("some-key", missing),
        validate=validate.Length(min=0),
    )
    assert field.deserialize({}) is missing
def test_function_field_passed_deserialize_only_is_load_only(self):
    """Supplying only a deserialize callable marks the field load-only."""
    load_only_field = fields.Function(deserialize=lambda val: val.upper())
    assert load_only_field.load_only is True
def test_function_field_passed_deserialize_and_serialize_is_not_load_only(self):
    """Providing both callables keeps the field usable in both
    directions (load_only stays False)."""
    field = fields.Function(
        serialize=lambda val: val.lower(), deserialize=lambda val: val.upper()
    )
    assert field.load_only is False
def test_uuid_field_deserialization(self):
    """UUID field accepts a string, an existing uuid.UUID, or 16 raw bytes."""
    field = fields.UUID()
    # String form round-trips back to the same textual value.
    uuid_str = str(uuid.uuid4())
    from_str = field.deserialize(uuid_str)
    assert isinstance(from_str, uuid.UUID)
    assert str(from_str) == uuid_str
    # An existing UUID instance deserializes to an equal UUID.
    uuid_obj = uuid.uuid4()
    from_obj = field.deserialize(uuid_obj)
    assert isinstance(from_obj, uuid.UUID)
    assert from_obj == uuid_obj
    # Raw 16 bytes are interpreted as the UUID's bytes.
    raw = b"]\xc7wW\x132O\xf9\xa5\xbe\x13\x1f\x02\x18\xda\xbf"
    from_bytes = field.deserialize(raw)
    assert isinstance(from_bytes, uuid.UUID)
    assert from_bytes.bytes == raw
@pytest.mark.parametrize("in_value", ["malformed", 123, [], b"tooshort"])
def test_invalid_uuid_deserialization(self, in_value):
    """Malformed strings, wrong types, and byte strings shorter than 16
    bytes are all rejected."""
    field = fields.UUID()
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(in_value)
    assert excinfo.value.args[0] == "Not a valid UUID."
def test_ip_field_deserialization(self):
    """The generic IP field yields IPv4Address or IPv6Address depending
    on the input string."""
    field = fields.IP()
    ipv4_str = "140.82.118.3"
    result = field.deserialize(ipv4_str)
    assert isinstance(result, ipaddress.IPv4Address)
    assert str(result) == ipv4_str
    ipv6_str = "2a00:1450:4001:824::200e"
    result = field.deserialize(ipv6_str)
    assert isinstance(result, ipaddress.IPv6Address)
    assert str(result) == ipv6_str
@pytest.mark.parametrize(
    "in_value",
    ["malformed", 123, b"\x01\x02\03", "192.168", "192.168.0.1/24", "ff::aa:1::2"],
)
def test_invalid_ip_deserialization(self, in_value):
    """Bad types, truncated addresses, CIDR notation, and malformed IPv6
    are rejected by the plain-address field."""
    field = fields.IP()
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(in_value)
    assert excinfo.value.args[0] == "Not a valid IP address."
def test_ipv4_field_deserialization(self):
    """A dotted-quad string becomes an ipaddress.IPv4Address."""
    dotted_quad = "140.82.118.3"
    parsed = fields.IPv4().deserialize(dotted_quad)
    assert isinstance(parsed, ipaddress.IPv4Address)
    assert str(parsed) == dotted_quad
@pytest.mark.parametrize(
    "in_value",
    [
        "malformed",
        123,
        b"\x01\x02\03",
        "192.168",
        "192.168.0.1/24",
        "2a00:1450:4001:81d::200e",
    ],
)
def test_invalid_ipv4_deserialization(self, in_value):
    """IPv4 rejects bad input and — unlike the generic IP field — also
    rejects a well-formed IPv6 address."""
    field = fields.IPv4()
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(in_value)
    assert excinfo.value.args[0] == "Not a valid IPv4 address."
def test_ipv6_field_deserialization(self):
    """An IPv6 string becomes an ipaddress.IPv6Address."""
    addr = "2a00:1450:4001:824::200e"
    parsed = fields.IPv6().deserialize(addr)
    assert isinstance(parsed, ipaddress.IPv6Address)
    assert str(parsed) == addr
def test_ipinterface_field_deserialization(self):
    """CIDR-notation strings deserialize to IPv4Interface or
    IPv6Interface depending on the address family."""
    field = fields.IPInterface()
    ipv4interface_str = "140.82.118.3/24"
    result = field.deserialize(ipv4interface_str)
    assert isinstance(result, ipaddress.IPv4Interface)
    assert str(result) == ipv4interface_str
    ipv6interface_str = "2a00:1450:4001:824::200e/128"
    result = field.deserialize(ipv6interface_str)
    assert isinstance(result, ipaddress.IPv6Interface)
    assert str(result) == ipv6interface_str
@pytest.mark.parametrize(
    "in_value",
    [
        "malformed",
        123,
        b"\x01\x02\03",
        "192.168",
        "192.168.0.1/33",
        "ff::aa:1::2",
        "2a00:1450:4001:824::200e/129",
    ],
)
def test_invalid_ipinterface_deserialization(self, in_value):
    """Bad types, malformed addresses, and out-of-range prefix lengths
    (/33 for v4, /129 for v6) are rejected."""
    field = fields.IPInterface()
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(in_value)
    assert excinfo.value.args[0] == "Not a valid IP interface."
def test_ipv4interface_field_deserialization(self):
    """A v4 CIDR string becomes an ipaddress.IPv4Interface."""
    field = fields.IPv4Interface()
    ipv4interface_str = "140.82.118.3/24"
    result = field.deserialize(ipv4interface_str)
    assert isinstance(result, ipaddress.IPv4Interface)
    assert str(result) == ipv4interface_str
@pytest.mark.parametrize(
    "in_value",
    [
        "malformed",
        123,
        b"\x01\x02\03",
        "192.168",
        "192.168.0.1/33",
        "2a00:1450:4001:81d::200e",
        "2a00:1450:4001:824::200e/129",
    ],
)
def test_invalid_ipv4interface_deserialization(self, in_value):
    """IPv4Interface rejects bad input, v6 addresses/interfaces, and
    out-of-range prefixes."""
    field = fields.IPv4Interface()
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(in_value)
    assert excinfo.value.args[0] == "Not a valid IPv4 interface."
def test_ipv6interface_field_deserialization(self):
    """A v6 CIDR string becomes an ipaddress.IPv6Interface."""
    field = fields.IPv6Interface()
    ipv6interface_str = "2a00:1450:4001:824::200e/128"
    result = field.deserialize(ipv6interface_str)
    assert isinstance(result, ipaddress.IPv6Interface)
    assert str(result) == ipv6interface_str
@pytest.mark.parametrize(
    "in_value",
    [
        "malformed",
        123,
        b"\x01\x02\03",
        "ff::aa:1::2",
        "192.168.0.1",
        "192.168.0.1/24",
        "2a00:1450:4001:824::200e/129",
    ],
)
def test_invalid_ipv6interface_deserialization(self, in_value):
    """IPv6Interface rejects bad input, v4 addresses/interfaces, and
    out-of-range prefixes."""
    field = fields.IPv6Interface()
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(in_value)
    assert excinfo.value.args[0] == "Not a valid IPv6 interface."
def test_enum_field_by_symbol_deserialization(self):
    """By default, Enum deserializes by member name (symbol)."""
    symbol_field = fields.Enum(GenderEnum)
    assert symbol_field.deserialize("male") == GenderEnum.male
def test_enum_field_by_symbol_invalid_value(self):
    """Unknown member names produce an error listing the valid symbols."""
    field = fields.Enum(GenderEnum)
    with pytest.raises(
        ValidationError, match="Must be one of: male, female, non_binary."
    ):
        field.deserialize("dummy")
def test_enum_field_by_symbol_not_string(self):
    """By-symbol lookup requires a string; other types fail the string
    check before the enum lookup."""
    field = fields.Enum(GenderEnum)
    with pytest.raises(ValidationError, match="Not a valid string."):
        field.deserialize(12)
def test_enum_field_by_value_true_deserialization(self):
    """by_value=True matches against each member's value, whatever its
    type (str here, int there)."""
    field = fields.Enum(HairColorEnum, by_value=True)
    assert field.deserialize("black hair") == HairColorEnum.black
    field2 = fields.Enum(GenderEnum, by_value=True)
    assert field2.deserialize(1) == GenderEnum.male
def test_enum_field_by_value_field_deserialization(self):
    """by_value may also be a field (class or instance) that first
    deserializes the raw input before the value lookup — including a
    Date field with a custom format."""
    field = fields.Enum(HairColorEnum, by_value=fields.String)
    assert field.deserialize("black hair") == HairColorEnum.black
    field2 = fields.Enum(GenderEnum, by_value=fields.Integer)
    assert field2.deserialize(1) == GenderEnum.male
    field3 = fields.Enum(DateEnum, by_value=fields.Date(format="%d/%m/%Y"))
    assert field3.deserialize("29/02/2004") == DateEnum.date_1
def test_enum_field_by_value_true_invalid_value(self):
    """With by_value=True, unknown values produce an error listing the
    valid member values."""
    field = fields.Enum(HairColorEnum, by_value=True)
    with pytest.raises(
        ValidationError,
        match="Must be one of: black hair, brown hair, blond hair, red hair.",
    ):
        field.deserialize("dummy")
    field2 = fields.Enum(GenderEnum, by_value=True)
    with pytest.raises(ValidationError, match="Must be one of: 1, 2, 3."):
        field2.deserialize(12)
def test_enum_field_by_value_field_invalid_value(self):
    """With a by_value field, well-typed but unknown values still raise
    the "Must be one of" error — including formatted dates."""
    field = fields.Enum(HairColorEnum, by_value=fields.String)
    with pytest.raises(
        ValidationError,
        match="Must be one of: black hair, brown hair, blond hair, red hair.",
    ):
        field.deserialize("dummy")
    field2 = fields.Enum(GenderEnum, by_value=fields.Integer)
    with pytest.raises(ValidationError, match="Must be one of: 1, 2, 3."):
        field2.deserialize(12)
    field3 = fields.Enum(DateEnum, by_value=fields.Date(format="%d/%m/%Y"))
    with pytest.raises(
        ValidationError, match="Must be one of: 29/02/2004, 29/02/2008, 29/02/2012."
    ):
        field3.deserialize("28/02/2004")
def test_enum_field_by_value_true_wrong_type(self):
    """With by_value=True there is no inner field to type-check, so a
    wrong-typed input falls through to the "Must be one of" error."""
    field = fields.Enum(HairColorEnum, by_value=True)
    with pytest.raises(
        ValidationError,
        match="Must be one of: black hair, brown hair, blond hair, red hair.",
    ):
        field.deserialize("dummy")
    field = fields.Enum(GenderEnum, by_value=True)
    with pytest.raises(ValidationError, match="Must be one of: 1, 2, 3."):
        field.deserialize(12)
def test_enum_field_by_value_field_wrong_type(self):
    """With a by_value field, the inner field's own type error fires
    before the enum lookup (string/integer/date messages)."""
    field = fields.Enum(HairColorEnum, by_value=fields.String)
    with pytest.raises(ValidationError, match="Not a valid string."):
        field.deserialize(12)
    field = fields.Enum(GenderEnum, by_value=fields.Integer)
    with pytest.raises(ValidationError, match="Not a valid integer."):
        field.deserialize("dummy")
    field = fields.Enum(DateEnum, by_value=fields.Date(format="%d/%m/%Y"))
    with pytest.raises(ValidationError, match="Not a valid date."):
        field.deserialize("30/02/2004")
def test_deserialization_function_must_be_callable(self):
    """A non-callable ``deserialize`` argument fails at construction."""
    with pytest.raises(TypeError):
        fields.Function(lambda x: None, deserialize="notvalid")
def test_method_field_deserialization_is_noop_by_default(self):
    """A Method field with only a serialize method passes loaded input
    through unchanged."""
    class MiniUserSchema(Schema):
        uppername = fields.Method("uppercase_name")
        def uppercase_name(self, obj):
            return obj.upper()
    s = MiniUserSchema()
    assert s.fields["uppername"].deserialize("steve") == "steve"
def test_deserialization_method(self):
    """The ``deserialize`` method name is resolved on the schema and
    applied on load."""
    class MiniUserSchema(Schema):
        uppername = fields.Method("uppercase_name", deserialize="lowercase_name")
        def uppercase_name(self, obj):
            return obj.name.upper()
        def lowercase_name(self, value):
            return value.lower()
    s = MiniUserSchema()
    assert s.fields["uppername"].deserialize("STEVE") == "steve"
def test_deserialization_method_must_be_a_method(self):
    """Referencing a non-existent method name fails when the schema is
    instantiated, not later at load time."""
    class BadSchema(Schema):
        uppername = fields.Method("uppercase_name", deserialize="lowercase_name")
    with pytest.raises(AttributeError):
        BadSchema()
def test_method_field_deserialize_only(self):
    """A Method field may specify only a deserialize method."""
    class MethodDeserializeOnly(Schema):
        name = fields.Method(deserialize="lowercase_name")
        def lowercase_name(self, value):
            return value.lower()
    assert MethodDeserializeOnly().load({"name": "ALEC"})["name"] == "alec"
def test_datetime_list_field_deserialization(self):
    """A List(DateTime) deserializes each ISO string back to a datetime,
    covering naive and UTC-aware inputs."""
    dtimes = dt.datetime.now(), dt.datetime.now(), dt.datetime.now(dt.timezone.utc)
    dstrings = [each.isoformat() for each in dtimes]
    field = fields.List(fields.DateTime())
    result = field.deserialize(dstrings)
    assert all(isinstance(each, dt.datetime) for each in result)
    for actual, expected in zip(result, dtimes, strict=True):
        assert_date_equal(actual, expected)
def test_list_field_deserialize_invalid_item(self):
    """Item errors are keyed by their index in the input list."""
    field = fields.List(fields.DateTime)
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(["badvalue"])
    assert excinfo.value.args[0] == {0: ["Not a valid datetime."]}
    field = fields.List(fields.Str())
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(["good", 42])
    assert excinfo.value.args[0] == {1: ["Not a valid string."]}
def test_list_field_deserialize_multiple_invalid_items(self):
    """All failing items are collected, each under its own index."""
    field = fields.List(
        fields.Int(
            validate=validate.Range(10, 20, error="Value {input} not in range")
        )
    )
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize([10, 5, 25])
    assert len(excinfo.value.args[0]) == 2
    assert excinfo.value.args[0][1] == ["Value 5 not in range"]
    assert excinfo.value.args[0][2] == ["Value 25 not in range"]
@pytest.mark.parametrize("value", ["notalist", 42, {}])
def test_list_field_deserialize_value_that_is_not_a_list(self, value):
    """Strings, scalars, and dicts are not accepted as list input."""
    field = fields.List(fields.Str())
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(value)
    assert excinfo.value.args[0] == "Not a valid list."
def test_datetime_int_tuple_field_deserialization(self):
    """A heterogeneous Tuple deserializes each position with its own
    field and returns a tuple (not a list)."""
    dtime = dt.datetime.now()
    data = dtime.isoformat(), 42
    field = fields.Tuple([fields.DateTime(), fields.Integer()])
    result = field.deserialize(data)
    assert isinstance(result, tuple)
    assert len(result) == 2
    for val, type_, true_val in zip(
        result, (dt.datetime, int), (dtime, 42), strict=True
    ):
        assert isinstance(val, type_)
        assert val == true_val
def test_tuple_field_deserialize_invalid_item(self):
    """Position errors are keyed by their index, like List."""
    field = fields.Tuple([fields.DateTime])
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(["badvalue"])
    assert excinfo.value.args[0] == {0: ["Not a valid datetime."]}
    field = fields.Tuple([fields.Str(), fields.Integer()])
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(["good", "bad"])
    assert excinfo.value.args[0] == {1: ["Not a valid integer."]}
def test_tuple_field_deserialize_multiple_invalid_items(self):
    """All failing positions are collected, each under its own index."""
    validator = validate.Range(10, 20, error="Value {input} not in range")
    field = fields.Tuple(
        [
            fields.Int(validate=validator),
            fields.Int(validate=validator),
            fields.Int(validate=validator),
        ]
    )
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize([10, 5, 25])
    assert len(excinfo.value.args[0]) == 2
    assert excinfo.value.args[0][1] == ["Value 5 not in range"]
    assert excinfo.value.args[0][2] == ["Value 25 not in range"]
@pytest.mark.parametrize("value", ["notalist", 42, {}])
def test_tuple_field_deserialize_value_that_is_not_a_collection(self, value):
    """Strings, scalars, and dicts are not accepted as tuple input."""
    field = fields.Tuple([fields.Str()])
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize(value)
    assert excinfo.value.args[0] == "Not a valid tuple."
def test_tuple_field_deserialize_invalid_length(self):
    """Input length must match the number of declared positions."""
    field = fields.Tuple([fields.Str(), fields.Str()])
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize([42])
    assert excinfo.value.args[0] == "Length must be 2."
def test_constant_field_deserialization(self):
    """Constant ignores the input and always yields its fixed value."""
    constant_field = fields.Constant("something")
    assert constant_field.deserialize("whatever") == "something"
def test_constant_is_always_included_in_deserialized_data(self):
    """The constant appears in load output whether or not the key is in
    the input, and overrides any supplied value."""
    class MySchema(Schema):
        foo = fields.Constant(42)
    sch = MySchema()
    assert sch.load({})["foo"] == 42
    assert sch.load({"foo": 24})["foo"] == 42
def test_field_deserialization_with_user_validator_function(self):
    """A predicate validator passes/fails deserialization with the
    generic "Invalid value." message."""
    field = fields.String(validate=predicate(lambda s: s.lower() == "valid"))
    assert field.deserialize("Valid") == "Valid"
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize("invalid")
    assert excinfo.value.args[0][0] == "Invalid value."
    assert type(excinfo.value) is ValidationError
def test_field_deserialization_with_user_validator_that_raises_error_with_list(
    self,
):
    """A validator may raise ValidationError with a list of messages;
    they surface unchanged in the schema's error dict."""
    def validator(val):
        raise ValidationError(["err1", "err2"])
    class MySchema(Schema):
        foo = fields.Raw(validate=validator)
    errors = MySchema().validate({"foo": 42})
    assert errors["foo"] == ["err1", "err2"]
def test_field_deserialization_with_validator_with_nonascii_input(self):
    """Non-ASCII input must not break error raising (historic py2
    encoding regression guard)."""
    def validate(val):
        raise ValidationError("oops")
    field = fields.String(validate=validate)
    with pytest.raises(ValidationError) as excinfo:
        field.deserialize("привет")
    assert type(excinfo.value) is ValidationError
def test_field_deserialization_with_user_validators(self):
    """Validators may be supplied as a list, a tuple, or a generator;
    all collection types are applied on deserialization.

    Fix: renamed the misspelled local ``m_colletion_type`` to
    ``m_collection_type`` (pure local rename, no behavior change).
    """
    validators_gen = (
        func
        for func in (
            predicate(lambda s: s.lower() == "valid"),
            predicate(lambda s: s.lower()[::-1] == "dilav"),
        )
    )
    # One field per supported validator-collection type.
    m_collection_type = [
        fields.String(
            validate=[
                predicate(lambda s: s.lower() == "valid"),
                predicate(lambda s: s.lower()[::-1] == "dilav"),
            ]
        ),
        fields.String(
            validate=(
                predicate(lambda s: s.lower() == "valid"),
                predicate(lambda s: s.lower()[::-1] == "dilav"),
            )
        ),
        fields.String(validate=validators_gen),
    ]
    for field in m_collection_type:
        assert field.deserialize("Valid") == "Valid"
        with pytest.raises(ValidationError, match="Invalid value."):
            field.deserialize("invalid")
@pytest.mark.parametrize(
    ("field", "value"),
    (
        pytest.param(fields.List(fields.String()), ["foo", "bar"], id="List"),
        pytest.param(
            fields.Tuple((fields.String(), fields.Integer())),
            ("foo", 42),
            id="Tuple",
        ),
        pytest.param(fields.String(), "valid", id="String"),
        pytest.param(fields.UUID(), uuid.uuid4(), id="UUID"),
        pytest.param(fields.Integer(), 42, id="Integer"),
        pytest.param(fields.Float(), 42.3, id="Float"),
        pytest.param(fields.Decimal(), decimal.Decimal("42.3"), id="Decimal"),
        pytest.param(fields.Boolean(), True, id="Boolean"),
        pytest.param(fields.DateTime(), dt.datetime(2014, 8, 21), id="DateTime"),
        pytest.param(fields.Time(), dt.time(10, 15), id="Time"),
        pytest.param(fields.Date(), dt.date(2014, 8, 21), id="Date"),
        pytest.param(fields.TimeDelta(), dt.timedelta(days=1), id="TimeDelta"),
        pytest.param(fields.Dict(), {"foo": "bar"}, id="Dict"),
        pytest.param(fields.Url(), "https://mallow.com", id="Url"),
        pytest.param(fields.Email(), "barbara37@example.net", id="Email"),
        pytest.param(fields.IP(), ipaddress.IPv4Address("67.60.134.65"), id="IP"),
        pytest.param(
            fields.IPv4(), ipaddress.IPv4Address("55.81.158.106"), id="IPv4"
        ),
        pytest.param(
            fields.IPv6(),
            ipaddress.IPv6Address("89f4:41b6:b97e:ad48:8480:1fda:a811:d1a5"),
            id="IPv6",
        ),
        pytest.param(fields.Enum(GenderEnum), GenderEnum.non_binary, id="Enum"),
    ),
)
def test_fields_accept_internal_types(self, field, value):
    """Every field accepts an already-deserialized value of its own
    internal type and returns it equal to the input."""
    assert field.deserialize(value) == value
# No custom deserialization behavior, so a dict is returned
| TestFieldDeserialization |
python | kamyu104__LeetCode-Solutions | Python/final-array-state-after-k-multiplication-operations-ii.py | {
"start": 3102,
"end": 4000
} | class ____(object):
def getFinalState(self, nums, k, multiplier):
"""
:type nums: List[int]
:type k: int
:type multiplier: int
:rtype: List[int]
"""
MOD = 10**9+7
if multiplier == 1:
return nums
min_heap = [(x, i) for i, x in enumerate(nums)]
heapq.heapify(min_heap)
mx = max(nums)
for k in reversed(xrange(1, k+1)):
if min_heap[0][0]*multiplier > mx:
break
x, i = heapq.heappop(min_heap)
heapq.heappush(min_heap, (x*multiplier, i))
else:
k = 0
vals = sorted(min_heap)
q, r = divmod(k, len(nums))
m = pow(multiplier, q, MOD)
result = [0]*len(nums)
for idx, (x, i) in enumerate(vals):
result[i] = x*m*(multiplier if idx < r else 1)%MOD
return result
| Solution3 |
python | django__django | tests/foreign_object/models/empty_join.py | {
"start": 2842,
"end": 3094
} | class ____(StartsWithRelation):
"""
This model is designed to yield no join conditions and
raise an exception in ``Join.as_sql()``.
"""
def get_extra_restriction(self, alias, related_alias):
return None
| BrokenContainsRelation |
python | huggingface__transformers | src/transformers/models/vilt/modeling_vilt.py | {
"start": 50102,
"end": 53502
} | class ____(ViltPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.vilt = ViltModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[TokenClassifierOutput, tuple[torch.FloatTensor]]:
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
labels (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.vilt(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
pixel_values=pixel_values,
pixel_mask=pixel_mask,
inputs_embeds=inputs_embeds,
image_embeds=image_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
text_input_size = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output[:, :text_input_size])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# move labels to correct device to enable PP
labels = labels.to(logits.device)
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"ViltForImageAndTextRetrieval",
"ViltForImagesAndTextClassification",
"ViltForTokenClassification",
"ViltForMaskedLM",
"ViltForQuestionAnswering",
"ViltLayer",
"ViltModel",
"ViltPreTrainedModel",
]
| ViltForTokenClassification |
python | pytorch__pytorch | torch/_higher_order_ops/triton_kernel_wrap.py | {
"start": 42687,
"end": 56093
} | class ____(HigherOrderOperator):
def __init__(self) -> None:
super().__init__("triton_kernel_wrapper_functional", cacheable=True)
def __call__(
self,
kernel_idx: int,
constant_args_idx: int,
grid: list["TritonGridType"],
tma_descriptor_metadata: TMADescriptorMetadata,
kwargs: dict[str, Any],
tensors_to_clone: list[str],
) -> dict[str, Any]:
return super().__call__(
kernel_idx=kernel_idx,
constant_args_idx=constant_args_idx,
grid=grid,
tma_descriptor_metadata=tma_descriptor_metadata,
kwargs=kwargs,
tensors_to_clone=tensors_to_clone,
)
triton_kernel_wrapper_functional = TritonKernelWrapperFunctional()
@triton_kernel_wrapper_mutation.py_impl(DispatchKey.CompositeExplicitAutograd)
def triton_kernel_wrapper_mutation_dense(
*,
kernel_idx: int,
constant_args_idx: int,
grid: list["TritonGridType"],
tma_descriptor_metadata: TMADescriptorMetadata,
kwargs: dict[str, Any],
) -> None:
from torch._inductor.codegen.wrapper import user_defined_kernel_grid_fn_code
kernel = kernel_side_table.get_kernel(kernel_idx)
constant_args = kernel_side_table.get_constant_args(constant_args_idx)
if len(grid) == 1:
grid_fn = grid[0]
else:
fn_name, code = user_defined_kernel_grid_fn_code(
# pyrefly: ignore [missing-attribute]
kernel.fn.__name__,
# pyrefly: ignore [missing-attribute]
kernel.configs,
grid,
)
namespace: dict[str, Any] = {}
exec(code, namespace)
grid_fn = namespace[fn_name]
if tma_descriptor_metadata:
# as we need to launch the kernel here, we "unwrap" the
# tma_descriptor_metadata, create the TMA descriptors
# from it, and replace the tensors in the kwargs by the
# corresponding TMA descriptors before launching
kwargs = kwargs.copy()
for k, v in tma_descriptor_metadata.items():
tensor = kwargs[k]
if (exp_meta := maybe_unpack_tma_experimental_metadata(v)) is not None:
from triton.tools.experimental_descriptor import ( # noqa: F401
create_1d_tma_descriptor,
create_2d_tma_descriptor,
)
dims, block_dims, element_size = exp_meta
create_tma_descriptor = (
create_1d_tma_descriptor
if len(dims) == 1
else create_2d_tma_descriptor
)
kwargs[k] = create_tma_descriptor(
tensor.data_ptr(),
*dims,
*block_dims,
element_size,
)
else:
stable_meta = maybe_unpack_tma_stable_metadata(v)
assert stable_meta is not None
from triton.tools.tensor_descriptor import TensorDescriptor
block_shape = stable_meta[0]
# pyrefly: ignore # bad-argument-type
kwargs[k] = TensorDescriptor.from_tensor(tensor, block_shape)
# move as many positional arguments from dicts to args as we
# can to circumvent the bug with the kwargs and pre_/post_hook:
# https://github.com/triton-lang/triton/issues/5082
# TODO: remove this when the Triton issue above is fixed
args = []
# copy kwargs and constant_args here to
# avoid mutating the original inputs
kwargs = kwargs.copy()
constant_args = constant_args.copy()
# pyrefly: ignore [missing-attribute]
for name in kernel.arg_names:
if name in kwargs:
args.append(kwargs.pop(name))
elif name in constant_args:
args.append(constant_args.pop(name))
else:
break
# pyrefly: ignore [index-error]
kernel[grid_fn](*args, **kwargs, **constant_args)
@triton_kernel_wrapper_mutation.py_impl(FakeTensorMode)
def triton_kernel_wrapper_mutation_fake_tensor_mode(
mode: FakeTensorMode,
*,
kernel_idx: int,
constant_args_idx: int,
grid: list["TritonGridType"],
tma_descriptor_metadata: TMADescriptorMetadata,
kwargs: dict[str, Any],
) -> None:
with mode:
return None
@triton_kernel_wrapper_mutation.py_impl(DispatchKey.Meta)
def _(
*,
kernel_idx: int,
constant_args_idx: int,
grid: list["TritonGridType"],
tma_descriptor_metadata: TMADescriptorMetadata,
kwargs: dict[str, Any],
) -> None:
return None
def trace_triton_kernel_wrapper(
proxy_mode: ProxyTorchDispatchMode,
func_overload: Callable[..., Any],
node_args: dict[str, Any],
) -> Optional[dict[str, Any]]:
with disable_proxy_modes_tracing():
out = func_overload(**node_args)
proxy_args = pytree.tree_map(
proxy_mode.tracer.unwrap_proxy, # type: ignore[union-attr]
node_args,
)
out_proxy = proxy_mode.tracer.create_proxy(
"call_function",
func_overload,
(),
proxy_args,
name=func_overload.__name__ + "_proxy",
)
ret = track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer)
return ret
@triton_kernel_wrapper_mutation.py_impl(ProxyTorchDispatchMode)
def triton_kernel_wrapper_mutation_proxy_torch_dispatch_mode(
mode: ProxyTorchDispatchMode,
*,
kernel_idx: int,
constant_args_idx: int,
grid: list["TritonGridType"],
tma_descriptor_metadata: TMADescriptorMetadata,
kwargs: dict[str, Any],
) -> None:
trace_triton_kernel_wrapper(
mode,
triton_kernel_wrapper_mutation,
{
"kernel_idx": kernel_idx,
"constant_args_idx": constant_args_idx,
"grid": grid,
"tma_descriptor_metadata": tma_descriptor_metadata,
"kwargs": kwargs,
},
)
return None
def get_mutated_tensors(
kernel_idx: int,
constant_args_idx: int,
kwargs: dict[str, Any],
tma_descriptor_metadata: TMADescriptorMetadata,
) -> list[str]:
kernel = kernel_side_table.get_kernel(kernel_idx)
constant_args = kernel_side_table.get_constant_args(constant_args_idx)
return identify_mutated_tensors(
kernel, {**kwargs, **constant_args}, tma_descriptor_metadata
)
@triton_kernel_wrapper_mutation.py_functionalize_impl
def triton_kernel_wrapper_mutation_functionalize(
ctx: "BaseFunctionalizeAPI",
kernel_idx: int,
constant_args_idx: int,
grid: list["TritonGridType"],
tma_descriptor_metadata: TMADescriptorMetadata,
kwargs: dict[str, Any],
) -> None:
unwrapped_kwargs = ctx.unwrap_tensors(kwargs) # type: ignore[arg-type]
# TODO(oulgen): Preexisting bug, if two kernel inputs are views of each
# other, and one gets mutated in kernel, and later another gets mutated,
# they are no longer equal. Fix this by graph breaking on this condition
# earlier in dynamo.
tensors_to_clone = get_mutated_tensors(
kernel_idx, constant_args_idx, unwrapped_kwargs, tma_descriptor_metadata
)
with ctx.redispatch_to_next():
unwrapped_outputs = triton_kernel_wrapper_functional(
kernel_idx=kernel_idx,
constant_args_idx=constant_args_idx,
grid=grid,
tma_descriptor_metadata=tma_descriptor_metadata,
kwargs=unwrapped_kwargs,
tensors_to_clone=tensors_to_clone,
)
assert set(unwrapped_outputs.keys()).issubset(set(kwargs.keys()))
for key, output_arg in unwrapped_outputs.items():
if not isinstance(output_arg, Tensor):
continue
input_arg = kwargs[key]
assert isinstance(input_arg, Tensor)
ctx.replace(input_arg, output_arg)
# indicate that above replace is hidden from autograd
ctx.mark_mutation_hidden_from_autograd(input_arg)
ctx.commit_update(input_arg)
ctx.sync(input_arg)
return None
@triton_kernel_wrapper_functional.py_impl(DispatchKey.CompositeExplicitAutograd)
def triton_kernel_wrapper_functional_dense(
*,
kernel_idx: int,
constant_args_idx: int,
grid: list["TritonGridType"],
tma_descriptor_metadata: TMADescriptorMetadata,
kwargs: dict[str, Any],
tensors_to_clone: list[str],
) -> dict[str, Any]:
# TODO(oulgen): For performance reasons, we want to ensure that these
# `clone_preserve_strides` calls are never executed at runtime
# (inductor should always optimize them away).
# Requires https://github.com/pytorch/pytorch/issues/109240
kwargs = {
key: (clone_preserve_strides(val) if key in tensors_to_clone else val)
for key, val in kwargs.items()
}
triton_kernel_wrapper_mutation(
kernel_idx=kernel_idx,
constant_args_idx=constant_args_idx,
grid=grid,
tma_descriptor_metadata=tma_descriptor_metadata,
kwargs=kwargs,
)
return {key: val for key, val in kwargs.items() if key in tensors_to_clone}
@triton_kernel_wrapper_functional.py_impl(FakeTensorMode)
def triton_kernel_wrapper_functional_fake_tensor_mode(
mode: FakeTensorMode,
*,
kernel_idx: int,
constant_args_idx: int,
grid: list["TritonGridType"],
tma_descriptor_metadata: TMADescriptorMetadata,
kwargs: dict[str, Any],
tensors_to_clone: list[str],
) -> dict[str, Any]:
# TODO(oulgen): For performance reasons, we want to ensure that these
# `clone_preserve_strides` calls are never executed at runtime
# (inductor should always optimize them away).
# Requires https://github.com/pytorch/pytorch/issues/109240
with mode:
return {
key: clone_preserve_strides(val)
for key, val in kwargs.items()
if key in tensors_to_clone
}
@triton_kernel_wrapper_functional.py_impl(ProxyTorchDispatchMode)
def triton_kernel_wrapper_functional_proxy_torch_dispatch_mode(
mode: ProxyTorchDispatchMode,
*,
kernel_idx: int,
constant_args_idx: int,
grid: list["TritonGridType"],
tma_descriptor_metadata: TMADescriptorMetadata,
kwargs: dict[str, Any],
tensors_to_clone: list[str],
) -> dict[str, Any]:
ret = trace_triton_kernel_wrapper(
mode,
triton_kernel_wrapper_functional,
{
"kernel_idx": kernel_idx,
"constant_args_idx": constant_args_idx,
"grid": grid,
"tma_descriptor_metadata": tma_descriptor_metadata,
"kwargs": kwargs,
"tensors_to_clone": tensors_to_clone,
},
)
assert ret is not None
return ret
@triton_kernel_wrapper_functional.py_functionalize_impl
def triton_kernel_wrapper_functional_functionalize(
ctx: "BaseFunctionalizeAPI",
kernel_idx: int,
constant_args_idx: int,
grid: list["TritonGridType"],
tma_descriptor_metadata: TMADescriptorMetadata,
kwargs: dict[str, Any],
tensors_to_clone: list[str],
) -> dict[str, Any]:
unwrapped_kwargs = ctx.unwrap_tensors(kwargs) # type: ignore[arg-type]
with ctx.redispatch_to_next():
outputs = triton_kernel_wrapper_functional(
kernel_idx=kernel_idx,
constant_args_idx=constant_args_idx,
grid=grid,
tma_descriptor_metadata=tma_descriptor_metadata,
kwargs=unwrapped_kwargs,
tensors_to_clone=tensors_to_clone,
)
return ctx.wrap_tensors(outputs) # type: ignore[return-value,arg-type]
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.PythonDispatcher) # type: ignore[attr-defined]
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.PythonTLSSnapshot) # type: ignore[attr-defined]
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.ADInplaceOrView)
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.BackendSelect)
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutocastCPU) # type: ignore[attr-defined]
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutocastCUDA) # type: ignore[attr-defined]
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutogradCUDA)
triton_kernel_wrapper_mutation.fallthrough(DispatchKey.AutogradCPU)
triton_kernel_wrapper_functional.fallthrough(DispatchKey.PythonDispatcher) # type: ignore[attr-defined]
triton_kernel_wrapper_functional.fallthrough(DispatchKey.PythonTLSSnapshot) # type: ignore[attr-defined]
triton_kernel_wrapper_functional.fallthrough(DispatchKey.ADInplaceOrView)
triton_kernel_wrapper_functional.fallthrough(DispatchKey.BackendSelect)
triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutocastCPU) # type: ignore[attr-defined]
triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutocastCUDA) # type: ignore[attr-defined]
triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCUDA)
triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCUDA)
triton_kernel_wrapper_functional.fallthrough(DispatchKey.AutogradCPU)
# Adds SAC support for triton ops
redirect_to_mode(triton_kernel_wrapper_mutation, _CachingTorchDispatchMode)
redirect_to_mode(triton_kernel_wrapper_mutation, _CachedTorchDispatchMode)
###############################################################################
# The "TritonHOPifier": a class that transforms a call to a triton kernel into
# a call to the triton_kernel_wrapper_mutation HOP.
| TritonKernelWrapperFunctional |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-zulip/llama_index/readers/zulip/base.py | {
"start": 228,
"end": 2420
} | class ____(BaseReader):
"""Zulip reader."""
def __init__(
self,
zulip_email: str,
zulip_domain: str,
earliest_date: Optional[datetime] = None,
latest_date: Optional[datetime] = None,
) -> None:
import zulip
"""Initialize with parameters."""
# Read the Zulip token from the environment variable
zulip_token = os.environ.get("ZULIP_TOKEN")
if zulip_token is None:
raise ValueError("ZULIP_TOKEN environment variable not set.")
# Initialize Zulip client with provided parameters
self.client = zulip.Client(
api_key=zulip_token, email=zulip_email, site=zulip_domain
)
def _read_stream(self, stream_name: str, reverse_chronological: bool) -> str:
"""Read a stream."""
params = {
"narrow": [{"operator": "stream", "operand": stream_name}],
"anchor": "newest",
"num_before": 100,
"num_after": 0,
}
response = self.client.get_messages(params)
messages = response["messages"]
if reverse_chronological:
messages.reverse()
return " ".join([message["content"] for message in messages])
def load_data(
self, streams: List[str], reverse_chronological: bool = True
) -> List[Document]:
"""Load data from the input streams."""
# Load data logic here
data = []
for stream_name in streams:
stream_content = self._read_stream(stream_name, reverse_chronological)
data.append(
Document(text=stream_content, extra_info={"stream": stream_name})
)
return data
def get_all_streams(self) -> list:
# Fetch all streams
response = self.client.get_streams()
streams_data = response["streams"]
# Collect the stream IDs
return [stream["name"] for stream in streams_data]
if __name__ == "__main__":
reader = ZulipReader(
zulip_email="ianita-bot@plurigrid.zulipchat.com",
zulip_domain="plurigrid.zulipchat.com",
)
logging.info(reader.load_data(reader.get_all_streams()))
| ZulipReader |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass10.py | {
"start": 410,
"end": 508
} | class ____(type):
@classmethod
def meta_method(cls) -> None: ...
MyMeta.meta_method()
| MyMeta |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_use_orig_params.py | {
"start": 28508,
"end": 33753
} | class ____(FSDPTest):
"""Tests original parameter access."""
@property
def world_size(self):
# Force a world size of 2 since the tests hard code to the FSDP
# sharding strategy to check sharded parameter parity
return 2
@skip_if_lt_x_gpu(2)
def test_access_params_after_forward(self):
"""
Tests that accessing the original parameters after the forward but
before the backward. Notably, this is not supported when
``use_orig_params=False``. However, for ``True``, FSDP exposes the
(flattened) sharded original parameters, making it possible.
"""
self.run_subtests(
{
"sharding_strategy": [
ShardingStrategy.NO_SHARD,
ShardingStrategy.FULL_SHARD,
ShardingStrategy.SHARD_GRAD_OP,
],
},
self._test_access_params_after_forward,
)
def _test_access_params_after_forward(
self,
sharding_strategy: ShardingStrategy,
):
# NOTE: This test needs to be changed if the FSDP sharding algorithm
# changes. It is still valuable until such a change to sanity check the
# `use_orig_params=True` implementation.
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
torch.manual_seed(42)
# 5 * 5 = 25 numel -> pad to 26 -> 13 on each rank
self.lin1 = nn.Linear(5, 5, bias=False)
# 5 * 7 + (1) + 7 = 43 numel -> pad to 44 -> 22 on each rank,
# where the (1) is from intra-`FlatParameter` alignment padding
# 22 of weight on rank 0; 13 of weight, 1 alignment padding,
# and 7 of bias on rank 1
self.lin2 = nn.Linear(5, 7)
def forward(self, x: torch.Tensor) -> torch.Tensor:
z = self.lin1(x)
z = nn.functional.relu(z)
z = self.lin2(z)
return z
def get_input(self, device: torch.device) -> tuple[torch.Tensor, ...]:
return (torch.randn((2, 5)).to(device),)
def get_loss(self, inp, out):
return out.sum()
def check_parameter_parity(
ddp_model: DDP, fsdp_model: FSDP, between_fwd_and_bwd: bool
):
assert self.rank in (
0,
1,
), f"Expects world size of 2 but got {self.world_size}"
for (n1, p1), (n2, p2) in zip(
ddp_model.module.named_parameters(),
fsdp_model.named_parameters(),
):
self.assertEqual(n1, clean_tensor_name(n2))
if sharding_strategy == ShardingStrategy.NO_SHARD:
# For `NO_SHARD`, do nothing since the original parameters
# are unflattened
pass
elif (
between_fwd_and_bwd
and sharding_strategy in NO_RESHARD_AFTER_FORWARD_STRATEGIES
):
# For no reshard after forward strategies, do nothing since
# FSDP did not use sharded views after forward
pass
# Otherwise, case on the parameter (see the model definition)
elif n1 == "lin1.weight":
if self.rank == 0:
p1 = p1.flatten()[:13]
elif self.rank == 1:
p1 = p1.flatten()[13:]
elif n1 == "lin2.weight":
if self.rank == 0:
p1 = p1.flatten()[:22]
elif self.rank == 1:
p1 = p1.flatten()[22:]
elif n1 == "lin2.bias":
if self.rank == 0:
p1 = torch.empty(0, device=p1.device)
elif self.rank == 1:
p1 = p1.flatten()
torch.testing.assert_close(p1, p2)
ddp_model = DDP(Model().to(device=device_type), device_ids=[self.rank])
fsdp_model = FSDP(
Model().to(device=device_type),
sharding_strategy=sharding_strategy,
auto_wrap_policy=always_wrap_policy,
use_orig_params=True,
)
LR = 1e-2
ddp_optim = torch.optim.Adam(ddp_model.parameters(), lr=LR)
fsdp_optim = torch.optim.Adam(fsdp_model.parameters(), lr=LR)
device = torch.device(device_type)
inp = fsdp_model.get_input(device)
ddp_out = ddp_model(*inp)
fsdp_out = fsdp_model(*inp)
check_parameter_parity(ddp_model, fsdp_model, True)
ddp_loss = ddp_model.module.get_loss(inp, ddp_out)
fsdp_loss = fsdp_model.get_loss(inp, fsdp_out)
ddp_loss.backward()
fsdp_loss.backward()
ddp_optim.step()
fsdp_optim.step()
check_parameter_parity(ddp_model, fsdp_model, False)
inp = fsdp_model.get_input(device)
ddp_out = ddp_model(*inp)
fsdp_out = fsdp_model(*inp)
check_parameter_parity(ddp_model, fsdp_model, True)
| TestFSDPUseOrigParamsParamAccess |
python | pandas-dev__pandas | pandas/tests/series/methods/test_truncate.py | {
"start": 148,
"end": 2307
} | class ____:
def test_truncate_datetimeindex_tz(self):
# GH 9243
idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific")
s = Series(range(len(idx)), index=idx)
with pytest.raises(TypeError, match="Cannot compare tz-naive"):
# GH#36148 as of 2.0 we require tzawareness compat
s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4))
lb = idx[1]
ub = idx[3]
result = s.truncate(lb.to_pydatetime(), ub.to_pydatetime())
expected = Series([1, 2, 3], index=idx[1:4])
tm.assert_series_equal(result, expected)
def test_truncate_periodindex(self):
# GH 17717
idx1 = pd.PeriodIndex(
[pd.Period("2017-09-02"), pd.Period("2017-09-02"), pd.Period("2017-09-03")]
)
series1 = Series([1, 2, 3], index=idx1)
result1 = series1.truncate(after="2017-09-02")
expected_idx1 = pd.PeriodIndex(
[pd.Period("2017-09-02"), pd.Period("2017-09-02")]
)
tm.assert_series_equal(result1, Series([1, 2], index=expected_idx1))
idx2 = pd.PeriodIndex(
[pd.Period("2017-09-03"), pd.Period("2017-09-02"), pd.Period("2017-09-03")]
)
series2 = Series([1, 2, 3], index=idx2)
result2 = series2.sort_index().truncate(after="2017-09-02")
expected_idx2 = pd.PeriodIndex([pd.Period("2017-09-02")])
tm.assert_series_equal(result2, Series([2], index=expected_idx2))
def test_truncate_one_element_series(self):
# GH 35544
series = Series([0.1], index=pd.DatetimeIndex(["2020-08-04"]))
before = pd.Timestamp("2020-08-02")
after = pd.Timestamp("2020-08-04")
result = series.truncate(before=before, after=after)
# the input Series and the expected Series are the same
tm.assert_series_equal(result, series)
def test_truncate_index_only_one_unique_value(self):
# GH 42365
obj = Series(0, index=date_range("2021-06-30", "2021-06-30")).repeat(5)
truncated = obj.truncate("2021-06-28", "2021-07-01")
tm.assert_series_equal(truncated, obj)
| TestTruncate |
python | jazzband__tablib | tests/test_tablib.py | {
"start": 60283,
"end": 61908
} | class ____(BaseTestCase):
def test_latex_export(self):
"""LaTeX export"""
expected = """\
% Note: add \\usepackage{booktabs} to your preamble
%
\\begin{table}[!htbp]
\\centering
\\caption{Founders}
\\begin{tabular}{lrr}
\\toprule
first\\_name & last\\_name & gpa \\\\
\\cmidrule(r){1-1} \\cmidrule(lr){2-2} \\cmidrule(l){3-3}
John & Adams & 90 \\\\
George & Washington & 67 \\\\
Thomas & Jefferson & 50 \\\\
\\bottomrule
\\end{tabular}
\\end{table}
"""
output = self.founders.latex
self.assertEqual(output, expected)
def test_latex_export_empty_dataset(self):
self.assertIsNotNone(tablib.Dataset().latex)
def test_latex_export_no_headers(self):
d = tablib.Dataset()
d.append(('one', 'two', 'three'))
self.assertIn('one', d.latex)
def test_latex_export_caption(self):
d = tablib.Dataset()
d.append(('one', 'two', 'three'))
self.assertNotIn('caption', d.latex)
d.title = 'Title'
self.assertIn('\\caption{Title}', d.latex)
def test_latex_export_none_values(self):
headers = ['foo', None, 'bar']
d = tablib.Dataset(['foo', None, 'bar'], headers=headers)
output = d.latex
self.assertIn('foo', output)
self.assertNotIn('None', output)
def test_latex_escaping(self):
d = tablib.Dataset(['~', '^'])
output = d.latex
self.assertNotIn('~', output)
self.assertIn('textasciitilde', output)
self.assertNotIn('^', output)
self.assertIn('textasciicircum', output)
| LatexTests |
python | mwaskom__seaborn | seaborn/_core/properties.py | {
"start": 10179,
"end": 10606
} | class ____(IntervalProperty):
"""Size (diameter) of a point mark, in points, with scaling by area."""
_default_range = 2, 8 # TODO use rcparams?
def _forward(self, values):
"""Square native values to implement linear scaling of point area."""
return np.square(values)
def _inverse(self, values):
"""Invert areal values back to point diameter."""
return np.sqrt(values)
| PointSize |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/deprecated2.py | {
"start": 311,
"end": 406
} | class ____: ...
# This should generate an error if reportDeprecated is enabled.
ClassA()
| ClassA |
python | kamyu104__LeetCode-Solutions | Python/super-ugly-number.py | {
"start": 3009,
"end": 3646
} | class ____(object):
def nthSuperUglyNumber(self, n, primes):
"""
:type n: int
:type primes: List[int]
:rtype: int
"""
ugly_number = 0
heap = []
heapq.heappush(heap, 1)
for p in primes:
heapq.heappush(heap, p)
for _ in xrange(n):
ugly_number = heapq.heappop(heap)
for i in xrange(len(primes)):
if ugly_number % primes[i] == 0:
for j in xrange(i + 1):
heapq.heappush(heap, ugly_number * primes[j])
break
return ugly_number
| Solution5 |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 8321,
"end": 8612
} | class ____(graphene.Interface):
"""Interface indicating that a run failed to terminate."""
run = graphene.NonNull(GrapheneRun)
message = graphene.NonNull(graphene.String)
class Meta:
name = "TerminatePipelineExecutionFailure"
| GrapheneTerminatePipelineExecutionFailure |
python | pytest-dev__pytest-rerunfailures | src/pytest_rerunfailures.py | {
"start": 13702,
"end": 14183
} | class ____(StatusDB):
def __init__(self):
super().__init__()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setblocking(1)
def _sock_recv(self, conn) -> str:
buf = b""
while True:
b = conn.recv(1)
if b == self.delim:
break
buf += b
return buf.decode()
def _sock_send(self, conn, msg: str):
conn.send(msg.encode() + self.delim)
| SocketDB |
python | walkccc__LeetCode | solutions/1034. Coloring A Border/1034.py | {
"start": 0,
"end": 1050
} | class ____:
def colorBorder(
self,
grid: list[list[int]],
r0: int,
c0: int,
color: int
) -> list[list[int]]:
def dfs(i: int, j: int, startColor: int) -> None:
if i < 0 or i == len(grid) or j < 0 or j == len(grid[0]):
return
if grid[i][j] != startColor:
return
grid[i][j] = -startColor
dfs(i + 1, j, startColor)
dfs(i - 1, j, startColor)
dfs(i, j + 1, startColor)
dfs(i, j - 1, startColor)
# If this cell is already on the boarder, it must be painted later.
if i == 0 or i == len(grid) - 1 or j == 0 or j == len(grid[0]) - 1:
return
if (abs(grid[i + 1][j]) == startColor and
abs(grid[i - 1][j]) == startColor and
abs(grid[i][j + 1]) == startColor and
abs(grid[i][j - 1]) == startColor):
grid[i][j] = startColor
dfs(r0, c0, grid[r0][c0])
for i, row in enumerate(grid):
for j, num in enumerate(row):
if num < 0:
grid[i][j] = color
return grid
| Solution |
python | pappasam__jedi-language-server | jedi_language_server/notebook_utils.py | {
"start": 1765,
"end": 8188
} | class ____:
"""Maps positions between individual notebook cells and the concatenated notebook document."""
def __init__(
self,
notebook_document: NotebookDocument,
cells: List[TextDocument],
):
self._document = notebook_document
self._cells = cells
# Construct helper data structures.
self._cell_by_uri: Dict[str, TextDocument] = {}
self._cell_line_range_by_uri: Dict[str, range] = {}
start_line = 0
for index, cell in enumerate(self._cells):
end_line = start_line + len(cell.lines)
self._cell_by_uri[cell.uri] = cell
self._cell_line_range_by_uri[cell.uri] = range(
start_line, end_line
)
start_line = end_line
@property
def notebook_source(self) -> str:
"""Concatenated notebook source."""
return "\n".join(cell.source for cell in self._cells)
@property
def notebook_uri(self) -> str:
"""The notebook document's URI."""
return self._document.uri
def notebook_position(
self, cell_uri: str, cell_position: Position
) -> Position:
"""Convert a cell position to a concatenated notebook position."""
line = (
self._cell_line_range_by_uri[cell_uri].start + cell_position.line
)
return Position(line=line, character=cell_position.character)
def notebook_range(self, cell_uri: str, cell_range: Range) -> Range:
"""Convert a cell range to a concatenated notebook range."""
start = self.notebook_position(cell_uri, cell_range.start)
end = self.notebook_position(cell_uri, cell_range.end)
return Range(start=start, end=end)
def cell_position(
self, notebook_position: Position
) -> Optional[DocumentPosition]:
"""Convert a concatenated notebook position to a cell position."""
for cell in self._cells:
line_range = self._cell_line_range_by_uri[cell.uri]
if notebook_position.line in line_range:
line = notebook_position.line - line_range.start
return DocumentPosition(
uri=cell.uri,
position=Position(
line=line, character=notebook_position.character
),
)
return None
def cell_range(self, notebook_range: Range) -> Optional[Location]:
"""Convert a concatenated notebook range to a cell range.
Returns a `Location` to identify the cell that the range is in.
"""
start = self.cell_position(notebook_range.start)
if start is None:
return None
end = self.cell_position(notebook_range.end)
if end is None:
return None
if start.uri != end.uri:
return None
return Location(
uri=start.uri, range=Range(start=start.position, end=end.position)
)
def cell_location(self, notebook_location: Location) -> Optional[Location]:
"""Convert a concatenated notebook location to a cell location."""
if notebook_location.uri != self._document.uri:
return None
return self.cell_range(notebook_location.range)
def cell_index(self, cell_uri: str) -> Optional[int]:
"""Get the index of a cell by its URI."""
for index, cell in enumerate(self._cells):
if cell.uri == cell_uri:
return index
return None
def cell_text_document_edits(
self, text_document_edit: TextDocumentEdit
) -> Iterable[TextDocumentEdit]:
"""Convert a concatenated notebook text document edit to cell text document edits."""
if text_document_edit.text_document.uri != self._document.uri:
return
# Convert edits in the concatenated notebook to per-cell edits, grouped by cell URI.
edits_by_uri: Dict[
str, List[Union[TextEdit, AnnotatedTextEdit, SnippetTextEdit]]
] = defaultdict(list)
for text_edit in text_document_edit.edits:
location = self.cell_range(text_edit.range)
if location is None:
continue
new_edit = attrs.evolve(text_edit, range=location.range)
edits_by_uri[location.uri].append(new_edit)
# Yield per-cell text document edits.
for uri, edits in edits_by_uri.items():
cell = self._cell_by_uri[uri]
version = 0 if cell.version is None else cell.version
yield TextDocumentEdit(
text_document=OptionalVersionedTextDocumentIdentifier(
uri=cell.uri, version=version
),
edits=edits,
)
def text_document_or_cell_locations(
workspace: Workspace, locations: Optional[List[Location]]
) -> Optional[List[Location]]:
"""Convert concatenated notebook locations to cell locations, leaving text document locations as-is."""
if locations is None:
return None
results = []
for location in locations:
mapper = notebook_coordinate_mapper(
workspace, notebook_uri=location.uri
)
if mapper is not None:
cell_location = mapper.cell_location(location)
if cell_location is not None:
location = cell_location
results.append(location)
return results if results else None
def cell_filename(
workspace: Workspace,
cell_uri: str,
) -> str:
"""Get the filename (used in diagnostics) for a cell URI."""
mapper = notebook_coordinate_mapper(workspace, cell_uri=cell_uri)
if mapper is None:
raise ValueError(
f"Notebook document not found for cell URI: {cell_uri}"
)
index = mapper.cell_index(cell_uri)
assert index is not None
return f"cell {index + 1}"
T_ls = TypeVar("T_ls", bound=LanguageServer)
T_params = TypeVar(
"T_params",
CallHierarchyPrepareParams,
CodeActionParams,
ColorPresentationParams,
CompletionParams,
DefinitionParams,
DocumentHighlightParams,
DocumentOnTypeFormattingParams,
HoverParams,
InlayHintParams,
InlineValueParams,
PrepareRenameParams,
ReferenceParams,
RenameParams,
SemanticTokensRangeParams,
SignatureHelpParams,
TextDocumentPositionParams,
)
T = TypeVar("T")
| NotebookCoordinateMapper |
python | numba__numba | numba/tests/test_errorhandling.py | {
"start": 14985,
"end": 15968
} | class ____(unittest.TestCase):
"""
Test that error messages handle file paths with curly braces (issue #10094)
"""
def test_placeholders_with_positional_args(self):
# used on typeinfer: placeholders with positional args
problematic_path = (
r"C:\\Users\\"
r"{fa977bf3384160bce9243175b380be8}"
r"\\file.py"
)
fmt = "Error at {0}"
result = errors._format_msg(fmt, (problematic_path,), {})
expected = f"Error at {problematic_path}"
self.assertEqual(result, expected)
def test_preformatted_string_no_args(self):
# used on compiler_machinery: preformatted string without args
name_with_braces = "{abc123}"
fmt = f"Pass {name_with_braces}"
result = errors._format_msg(fmt, (), {})
expected = f"Pass {name_with_braces}"
self.assertEqual(result, expected)
if __name__ == '__main__':
unittest.main()
| TestCurlyBracesInPaths |
python | apache__airflow | providers/google/src/airflow/providers/google/suite/hooks/calendar.py | {
"start": 1180,
"end": 9000
} | class ____(GoogleBaseHook):
"""
Interact with Google Calendar via Google Cloud connection.
Reading and writing cells in Google Sheet: https://developers.google.com/calendar/api/v3/reference
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param api_version: API Version. For example v3
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
def __init__(
self,
api_version: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
self._conn = None
def get_conn(self) -> Any:
"""
Retrieve connection to Google Calendar.
:return: Google Calendar services object.
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build("calendar", self.api_version, http=http_authorized, cache_discovery=False)
return self._conn
def get_events(
self,
calendar_id: str = "primary",
i_cal_uid: str | None = None,
max_attendees: int | None = None,
max_results: int | None = None,
order_by: str | None = None,
private_extended_property: str | None = None,
q: str | None = None,
shared_extended_property: str | None = None,
show_deleted: bool | None = False,
show_hidden_invitation: bool | None = False,
single_events: bool | None = False,
sync_token: str | None = None,
time_max: datetime | None = None,
time_min: datetime | None = None,
time_zone: str | None = None,
updated_min: datetime | None = None,
) -> list:
"""
Get events from Google Calendar from a single calendar_id.
https://developers.google.com/calendar/api/v3/reference/events/list
:param calendar_id: The Google Calendar ID to interact with
:param i_cal_uid: Optional. Specifies event ID in the ``iCalendar`` format in the response.
:param max_attendees: Optional. If there are more than the specified number of attendees,
only the participant is returned.
:param max_results: Optional. Maximum number of events returned on one result page.
Incomplete pages can be detected by a non-empty ``nextPageToken`` field in the response.
By default, the value is 250 events. The page size can never be larger than 2500 events
:param order_by: Optional. Acceptable values are ``"startTime"`` or "updated"
:param private_extended_property: Optional. Extended properties constraint specified as
``propertyName=value``. Matches only private properties. This parameter might be repeated
multiple times to return events that match all given constraints.
:param q: Optional. Free text search.
:param shared_extended_property: Optional. Extended properties constraint specified as
``propertyName=value``. Matches only shared properties. This parameter might be repeated
multiple times to return events that match all given constraints.
:param show_deleted: Optional. False by default
:param show_hidden_invitation: Optional. False by default
:param single_events: Optional. False by default
:param sync_token: Optional. Token obtained from the ``nextSyncToken`` field returned
:param time_max: Optional. Upper bound (exclusive) for an event's start time to filter by.
Default is no filter
:param time_min: Optional. Lower bound (exclusive) for an event's end time to filter by.
Default is no filter
:param time_zone: Optional. Time zone used in response. Default is calendars time zone.
:param updated_min: Optional. Lower bound for an event's last modification time
"""
service = self.get_conn()
page_token = None
events = []
while True:
response = (
service.events()
.list(
calendarId=calendar_id,
iCalUID=i_cal_uid,
maxAttendees=max_attendees,
maxResults=max_results,
orderBy=order_by,
pageToken=page_token,
privateExtendedProperty=private_extended_property,
q=q,
sharedExtendedProperty=shared_extended_property,
showDeleted=show_deleted,
showHiddenInvitations=show_hidden_invitation,
singleEvents=single_events,
syncToken=sync_token,
timeMax=time_max,
timeMin=time_min,
timeZone=time_zone,
updatedMin=updated_min,
)
.execute(num_retries=self.num_retries)
)
events.extend(response["items"])
page_token = response.get("nextPageToken")
if not page_token:
break
return events
def create_event(
self,
event: dict[str, Any],
calendar_id: str = "primary",
conference_data_version: int | None = 0,
max_attendees: int | None = None,
send_notifications: bool | None = False,
send_updates: str | None = "false",
supports_attachments: bool | None = False,
) -> dict:
"""
Create event on the specified calendar.
https://developers.google.com/calendar/api/v3/reference/events/insert.
:param calendar_id: The Google Calendar ID to interact with
:param conference_data_version: Optional. Version number of conference data
supported by the API client.
:param max_attendees: Optional. If there are more than the specified number of attendees,
only the participant is returned.
:param send_notifications: Optional. Default is False
:param send_updates: Optional. Default is "false". Acceptable values as "all", "none",
``"externalOnly"``
https://developers.google.com/calendar/api/v3/reference/events#resource
"""
if "start" not in event or "end" not in event:
raise AirflowException(
f"start and end must be specified in the event body while creating an event. API docs:"
f"https://developers.google.com/calendar/api/{self.api_version}/reference/events/insert "
)
service = self.get_conn()
response = (
service.events()
.insert(
calendarId=calendar_id,
conferenceDataVersion=conference_data_version,
maxAttendees=max_attendees,
sendNotifications=send_notifications,
sendUpdates=send_updates,
supportsAttachments=supports_attachments,
body=event,
)
.execute(num_retries=self.num_retries)
)
return response
| GoogleCalendarHook |
python | mwaskom__seaborn | tests/_marks/test_line.py | {
"start": 5716,
"end": 8282
} | class ____:
def test_xy_data(self):
x = [1, 5, 3, np.nan, 2]
y = [1, 4, 2, 5, 3]
g = [1, 2, 1, 1, 2]
p = Plot(x=x, y=y, group=g).add(Paths()).plot()
lines, = p._figure.axes[0].collections
verts = lines.get_paths()[0].vertices.T
assert_array_equal(verts[0], [1, 3, np.nan])
assert_array_equal(verts[1], [1, 2, np.nan])
verts = lines.get_paths()[1].vertices.T
assert_array_equal(verts[0], [5, 2])
assert_array_equal(verts[1], [4, 3])
def test_set_properties(self):
x = y = [1, 2, 3]
m = Paths(color=".737", linewidth=1, linestyle=(3, 1))
p = Plot(x=x, y=y).add(m).plot()
lines, = p._figure.axes[0].collections
assert same_color(lines.get_color().squeeze(), m.color)
assert lines.get_linewidth().item() == m.linewidth
assert lines.get_dashes()[0] == (0, list(m.linestyle))
def test_mapped_properties(self):
x = y = [1, 2, 3, 4]
g = ["a", "a", "b", "b"]
p = Plot(x=x, y=y, color=g, linewidth=g, linestyle=g).add(Paths()).plot()
lines, = p._figure.axes[0].collections
assert not np.array_equal(lines.get_colors()[0], lines.get_colors()[1])
assert lines.get_linewidths()[0] != lines.get_linewidth()[1]
assert lines.get_linestyle()[0] != lines.get_linestyle()[1]
def test_color_with_alpha(self):
x = y = [1, 2, 3]
m = Paths(color=(.2, .6, .9, .5))
p = Plot(x=x, y=y).add(m).plot()
lines, = p._figure.axes[0].collections
assert same_color(lines.get_colors().squeeze(), m.color)
def test_color_and_alpha(self):
x = y = [1, 2, 3]
m = Paths(color=(.2, .6, .9), alpha=.5)
p = Plot(x=x, y=y).add(m).plot()
lines, = p._figure.axes[0].collections
assert same_color(lines.get_colors().squeeze(), to_rgba(m.color, m.alpha))
def test_capstyle(self):
x = y = [1, 2]
rc = {"lines.solid_capstyle": "projecting"}
with mpl.rc_context(rc):
p = Plot(x, y).add(Paths()).plot()
lines = p._figure.axes[0].collections[0]
assert lines.get_capstyle() == "projecting"
p = Plot(x, y).add(Paths(linestyle="--")).plot()
lines = p._figure.axes[0].collections[0]
assert lines.get_capstyle() == "projecting"
p = Plot(x, y).add(Paths({"capstyle": "butt"})).plot()
lines = p._figure.axes[0].collections[0]
assert lines.get_capstyle() == "butt"
| TestPaths |
python | sqlalchemy__sqlalchemy | test/orm/test_transaction.py | {
"start": 40501,
"end": 42343
} | class ____(FixtureTest):
"""test the behavior for [ticket:2452] - rollback on begin_nested()
only expires objects tracked as being modified in that transaction.
"""
run_inserts = None
__sparse_driver_backend__ = True
def _run_test(self, update_fn):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
with fixture_session() as s:
u1 = User(name="u1")
u2 = User(name="u2")
s.add_all([u1, u2])
s.commit()
u1.name
u2.name
trans = s._transaction
assert trans is not None
s.begin_nested()
update_fn(s, u2)
eq_(u2.name, "u2modified")
s.rollback()
assert s._transaction is None
assert "name" not in u1.__dict__
assert "name" not in u2.__dict__
eq_(u2.name, "u2")
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint(self):
def update_fn(s, u2):
u2.name = "u2modified"
self._run_test(update_fn)
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint_agg_upd_eval(self):
User = self.classes.User
def update_fn(s, u2):
s.query(User).filter_by(name="u2").update(
dict(name="u2modified"), synchronize_session="evaluate"
)
self._run_test(update_fn)
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint_agg_upd_fetch(self):
User = self.classes.User
def update_fn(s, u2):
s.query(User).filter_by(name="u2").update(
dict(name="u2modified"), synchronize_session="fetch"
)
self._run_test(update_fn)
| CleanSavepointTest |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/components.py | {
"start": 44615,
"end": 45459
} | class ____(_JSONViewer, UserComponent):
"""
A component for displaying JSON data with syntax highlighting and collapsible sections.
This component provides a rich view of JSON data with proper formatting, syntax highlighting,
and the ability to collapse/expand sections for better readability.
Example:
```python
from metaflow.cards import JSONViewer, EventsTimeline
from metaflow import current
# Use in events timeline
events = EventsTimeline(title="API Calls")
events.update({
"action": "api_request",
"endpoint": "/users",
"payload": JSONViewer({"user_id": 123, "fields": ["name", "email"]})
})
# Use standalone
data = {"config": {"debug": True, "timeout": 30}}
current.card.append(JSONViewer(data, collapsible=True))
```
"""
pass
| JSONViewer |
python | ray-project__ray | python/ray/dag/tests/experimental/test_torch_tensor_transport.py | {
"start": 14598,
"end": 17665
} | class ____:
"""Tests worker to worker tensor transport with GPU device."""
@pytest.mark.parametrize("gpu_device", ["gpu", "cuda"])
def test_src_cpu_tensor_dst_cpu_node(self, ray_start_regular, gpu_device):
sender = Actor.remote()
receiver = Actor.remote()
ref = run_worker_to_worker_dag(sender, receiver, gpu_device, "cpu")
with pytest.raises(
RayTaskError, match="RuntimeError: No CUDA GPUs are available"
):
ray.get(ref)
@pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
def test_src_cpu_tensor_dst_gpu_node(self, ray_start_regular):
sender = Actor.remote()
receiver = Actor.options(num_gpus=1).remote()
ref = run_worker_to_worker_dag(sender, receiver, "cuda", "cpu")
assert ray.get(ref) == "cuda:0"
@pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
def test_src_gpu_tensor_dst_cpu_node(self, ray_start_regular):
sender = Actor.options(num_gpus=1).remote()
receiver = Actor.remote()
ref = run_worker_to_worker_dag(sender, receiver, "cuda", "cuda")
with pytest.raises(
RayTaskError, match="RuntimeError: No CUDA GPUs are available"
):
ray.get(ref)
@pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
@pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 2}], indirect=True)
def test_src_gpu_tensor_dst_gpu_node(self, ray_start_regular):
sender = Actor.options(num_gpus=1).remote()
receiver = Actor.options(num_gpus=1).remote()
with pytest.raises(
ValueError,
match="accelerator transport is not supported with CPU target device.",
):
run_worker_to_worker_dag(sender, receiver, "cpu", "cpu")
@pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
def test_src_mix_tensors_dst_cpu_node(self, ray_start_regular):
sender = Actor.options(num_gpus=1).remote()
receiver = Actor.options().remote()
ref = run_worker_to_worker_dag(
sender,
receiver,
"cuda",
{"cpu_tensor": "cpu", "gpu_tensor": "cuda"},
is_dict=True,
)
with pytest.raises(
RayTaskError, match="RuntimeError: No CUDA GPUs are available"
):
ray.get(ref)
@pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
@pytest.mark.parametrize("ray_start_regular", [{"num_cpus": 2}], indirect=True)
@pytest.mark.parametrize("gpu_device", ["gpu", "cuda"])
def test_src_mix_tensors_dst_gpu_node(self, ray_start_regular, gpu_device):
sender = Actor.options(num_gpus=1).remote()
receiver = Actor.options(num_gpus=1).remote()
ref = run_worker_to_worker_dag(
sender,
receiver,
gpu_device,
{"cpu_tensor": "cpu", "gpu_tensor": "cuda"},
is_dict=True,
)
assert ray.get(ref) == {"cpu_tensor": "cuda:0", "gpu_tensor": "cuda:0"}
| TestWorkerToWorkerDeviceGPU |
python | astropy__astropy | astropy/cosmology/_src/traits/darkmatter.py | {
"start": 348,
"end": 1463
} | class ____:
"""The cosmology has attributes and methods for the dark matter density.
This trait provides an ``Odm`` method that returns the dark matter
density parameter (i.e., total matter minus baryons) at redshift ``z``.
"""
Odm0: float | np.floating
"""Omega dark matter: dark matter density/critical density at z=0."""
inv_efunc: Callable[[NDArray[Any]], NDArray[Any]]
def Odm(self, z: Quantity | ArrayLike, /) -> FArray:
"""Return the density parameter for dark matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
Odm : ndarray
The density of dark matter relative to the critical density at
each redshift.
"""
z = aszarr(z)
return self.Odm0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
| DarkMatterComponent |
python | doocs__leetcode | solution/0500-0599/0583.Delete Operation for Two Strings/Solution.py | {
"start": 0,
"end": 546
} | class ____:
def minDistance(self, word1: str, word2: str) -> int:
m, n = len(word1), len(word2)
f = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
f[i][0] = i
for j in range(1, n + 1):
f[0][j] = j
for i, a in enumerate(word1, 1):
for j, b in enumerate(word2, 1):
if a == b:
f[i][j] = f[i - 1][j - 1]
else:
f[i][j] = min(f[i - 1][j], f[i][j - 1]) + 1
return f[m][n]
| Solution |
python | weaviate__weaviate-python-client | weaviate/collections/classes/generative.py | {
"start": 45000,
"end": 45720
} | class ____(BaseModel):
prompt: str
image_properties: Optional[List[str]]
images: Optional[Iterable[str]]
metadata: bool = False
debug: bool = False
def _to_grpc(
self, provider: _GenerativeConfigRuntime
) -> generative_pb2.GenerativeSearch.Single:
return generative_pb2.GenerativeSearch.Single(
prompt=self.prompt,
debug=self.debug,
queries=[
provider._to_grpc(
_GenerativeConfigRuntimeOptions(
self.metadata, self.images, self.image_properties
)
)
],
)
GroupedTask = _GroupedTask
SinglePrompt = _SinglePrompt
| _SinglePrompt |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 624,
"end": 1021
} | class ____(HTTPError):
"""Base exception for errors caused within a pool."""
def __init__(self, pool: ConnectionPool, message: str) -> None:
self.pool = pool
self._message = message
super().__init__(f"{pool}: {message}")
def __reduce__(self) -> _TYPE_REDUCE_RESULT:
# For pickling purposes.
return self.__class__, (None, self._message)
| PoolError |
python | astropy__astropy | astropy/cosmology/_src/flrw/base.py | {
"start": 3997,
"end": 43290
} | class ____(
Cosmology,
# Traits
BaryonComponent,
TotalComponent,
CriticalDensity,
MatterComponent,
CurvatureComponent,
DarkEnergyComponent,
HubbleParameter,
PhotonComponent,
NeutrinoComponent,
ScaleFactor,
DarkMatterComponent,
TemperatureCMB,
):
"""An isotropic and homogeneous (Friedmann-Lemaitre-Robertson-Walker) cosmology.
This is an abstract base class -- you cannot instantiate examples of this
class, but must work with one of its subclasses, such as
:class:`~astropy.cosmology.LambdaCDM` or :class:`~astropy.cosmology.wCDM`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0. Note that this does not include massive
neutrinos.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Notes
-----
Class instances are immutable -- you cannot change the parameters' values.
That is, all of the above attributes (except meta) are read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
H0: Parameter = Parameter(
doc="Hubble constant at z=0.",
unit="km/(s Mpc)",
fvalidate="scalar",
)
Om0: Parameter = Parameter(
doc="Omega matter; matter density/critical density at z=0.",
fvalidate="non-negative",
)
Ode0: Parameter = ParameterOde0.clone()
Tcmb0: Parameter = Parameter(
default=0.0 * u.K,
doc="Temperature of the CMB at z=0.",
unit="Kelvin",
fvalidate="scalar",
)
Neff: Parameter = Parameter(
default=3.04,
doc="Number of effective neutrino species.",
fvalidate="non-negative",
)
m_nu: Parameter = Parameter(
default=0.0 * u.eV,
doc="Mass of neutrino species.",
unit="eV",
equivalencies=u.mass_energy(),
)
Ob0: Parameter = Parameter(
default=0.0,
doc="Omega baryon; baryonic matter density/critical density at z=0.",
)
def __post_init__(self) -> None:
# Compute neutrino parameters:
if self.m_nu is None:
nu_info = NeutrinoInfo(
n_nu=0,
neff_per_nu=None,
has_massive_nu=False,
n_massive_nu=0,
n_massless_nu=0,
nu_y=None,
nu_y_list=None,
)
else:
n_nu = floor(self.Neff)
massive = np.nonzero(self.m_nu.value > 0)[0]
has_massive_nu = massive.size > 0
n_massive_nu = len(massive)
# Compute Neutrino Omega and total relativistic component for massive
# neutrinos. We also store a list version, since that is more efficient
# to do integrals with (perhaps surprisingly! But small python lists
# are more efficient than small NumPy arrays).
if has_massive_nu:
nu_y = (self.m_nu[massive] / (kB_evK * self.Tnu0)).to_value(u.one)
nu_y_list = nu_y.tolist()
else:
nu_y = nu_y_list = None
nu_info = NeutrinoInfo(
n_nu=n_nu,
# We share Neff between the neutrinos equally. In detail this is not
# correct. See NeutrinoInfo for more info.
neff_per_nu=self.Neff / n_nu,
# Now figure out if we have massive neutrinos to deal with, and if
# so, get the right number of masses. It is worth keeping track of
# massless ones separately (since they are easy to deal with, and a
# common use case is to have only one massive neutrino).
has_massive_nu=has_massive_nu,
n_massive_nu=n_massive_nu,
n_massless_nu=n_nu - n_massive_nu,
nu_y=nu_y,
nu_y_list=nu_y_list,
)
self._nu_info: NeutrinoInfo
object.__setattr__(self, "_nu_info", nu_info)
# Subclasses should override this reference if they provide
# more efficient scalar versions of inv_efunc.
object.__setattr__(self, "_inv_efunc_scalar", self.inv_efunc)
object.__setattr__(self, "_inv_efunc_scalar_args", ())
# ---------------------------------------------------------------
# Parameter details
@m_nu.validator
def m_nu(self, param: Parameter, value: Any) -> FArray | None:
"""Validate neutrino masses to right value, units, and shape.
There are no neutrinos if floor(Neff) or Tcmb0 are 0. The number of
neutrinos must match floor(Neff). Neutrino masses cannot be
negative.
"""
# Check if there are any neutrinos
if (n_nu := floor(self.Neff)) == 0 or self.Tcmb0.value == 0:
return None # None, regardless of input
# Validate / set units
value = validate_with_unit(self, param, value)
# Check values and data shapes
if value.shape not in ((), (n_nu,)):
raise ValueError(
"unexpected number of neutrino masses — "
f"expected {n_nu}, got {len(value)}."
)
elif np.any(value.value < 0):
raise ValueError("invalid (negative) neutrino mass encountered.")
# scalar -> array
if value.isscalar:
value = np.full_like(value, value, shape=n_nu)
return value
# ---------------------------------------------------------------
# Baryons
@Ob0.validator
def Ob0(self, param: Parameter, value: Any) -> float:
"""Validate baryon density to a non-negative float > matter density."""
value = validate_non_negative(self, param, value)
if value > self.Om0:
raise ValueError(
"baryonic density can not be larger than total matter density."
)
return value
# ---------------------------------------------------------------
# Critical Density
@cached_property
def critical_density0(self) -> u.Quantity:
r"""Critical mass density at z=0.
The critical density is the density of the Universe at which the Universe is
flat. It is defined as :math:`\rho_{\text{crit}} = 3 H_0^2 / (8 \pi G)`.
"""
return (3 * self.H0**2 / (8 * pi * const.G)).cgs
# ---------------------------------------------------------------
# Curvature
@cached_property
def Ok0(self) -> float | np.floating:
"""Omega curvature; the effective curvature density/critical density at z=0."""
return 1.0 - self.Om0 - self.Ode0 - self.Ogamma0 - self.Onu0
@property
def is_flat(self) -> bool:
"""Return `bool`; `True` if the cosmology is globally flat."""
return bool((self.Ok0 == 0.0) and (self.Otot0 == 1.0))
# ---------------------------------------------------------------
# Dark Matter
@cached_property
def Odm0(self) -> float:
"""Omega dark matter; dark matter density/critical density at z=0."""
return self.Om0 - self.Ob0
# ---------------------------------------------------------------
# Hubble Parameter
def efunc(self, z: u.Quantity | ArrayLike, /) -> FArray:
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
E : array
The redshift scaling of the Hubble constant.
Defined such that :math:`H(z) = H_0 E(z)`.
Notes
-----
It is not necessary to override this method, but if de_density_scale
takes a particularly simple form, it may be advantageous to.
"""
Or = self.Ogamma0 + (
self.Onu0
if not self._nu_info.has_massive_nu
else self.Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(
zp1**2 * ((Or * zp1 + self.Om0) * zp1 + self.Ok0)
+ self.Ode0 * self.de_density_scale(z)
)
def inv_efunc(self, z: u.Quantity | ArrayLike, /) -> FArray:
"""Inverse of ``efunc``.
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
E : array
The redshift scaling of the inverse Hubble constant.
"""
# Avoid the function overhead by repeating code
Or = self.Ogamma0 + (
self.Onu0
if not self._nu_info.has_massive_nu
else self.Ogamma0 * self.nu_relative_density(z)
)
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (
zp1**2 * ((Or * zp1 + self.Om0) * zp1 + self.Ok0)
+ self.Ode0 * self.de_density_scale(z)
) ** (-0.5)
# ---------------------------------------------------------------
# properties
@property
def Otot0(self) -> float:
"""Omega total; the total density/critical density at z=0."""
return self.Om0 + self.Ogamma0 + self.Onu0 + self.Ode0 + self.Ok0
# ---------------------------------------------------------------
# Neutrino - implementing NeutrinoComponent abstract methods
@property
def has_massive_nu(self) -> bool:
"""Does this cosmology have at least one massive neutrino species?"""
if self.Tnu0.value == 0:
return False
return self._nu_info.has_massive_nu
@cached_property
def Onu0(self) -> float:
"""Omega nu; the density/critical density of neutrinos at z=0."""
if self._nu_info.has_massive_nu:
return self.Ogamma0 * self.nu_relative_density(0)
else:
# This case is particularly simple, so do it directly The 0.2271...
# is 7/8 (4/11)^(4/3) -- the temperature bit ^4 (blackbody energy
# density) times 7/8 for FD vs. BE statistics.
return NEUTRINO_FERMI_DIRAC_CORRECTION * self.Neff * self.Ogamma0
def nu_relative_density(self, z: u.Quantity | ArrayLike) -> FArray:
r"""Neutrino density function relative to the energy density in photons.
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
f : array
The neutrino density scaling factor relative to the density in
photons at each redshift.
Notes
-----
The density in neutrinos is given by
.. math::
\rho_{\nu} \left(a\right) = 0.2271 \, N_{eff} \,
f\left(m_{\nu} a / T_{\nu 0} \right) \,
\rho_{\gamma} \left( a \right)
where
.. math::
f \left(y\right) = \frac{120}{7 \pi^4}
\int_0^{\infty} \, dx \frac{x^2 \sqrt{x^2 + y^2}}
{e^x + 1}
assuming that all neutrino species have the same mass.
If they have different masses, a similar term is calculated for each
one. Note that ``f`` has the asymptotic behavior :math:`f(0) = 1`. This
method returns :math:`0.2271 f` using an analytical fitting formula
given in Komatsu et al. 2011, ApJS 192, 18.
"""
# Note that there is also a scalar-z-only cython implementation of
# this in scalar_inv_efuncs.pyx, so if you find a problem in this
# you need to update there too.
# The massive and massless contribution must be handled separately
# But check for common cases first
z = aszarr(z)
if not self._nu_info.has_massive_nu:
return NEUTRINO_FERMI_DIRAC_CORRECTION * self.Neff * np.ones_like(z)
curr_nu_y = self._nu_info.nu_y / (1.0 + np.expand_dims(z, axis=-1))
rel_mass_per = (1.0 + (KOMATSU_K * curr_nu_y) ** KOMATSU_P) ** KOMATSU_INVP
rel_mass = rel_mass_per.sum(-1) + self._nu_info.n_massless_nu
return NEUTRINO_FERMI_DIRAC_CORRECTION * self._nu_info.neff_per_nu * rel_mass
# ---------------------------------------------------------------
# Photon
@cached_property
def Ogamma0(self) -> float:
"""Omega gamma; the density/critical density of photons at z=0."""
# photon density from Tcmb
return a_B_c2 * self.Tcmb0.value**4 / self.critical_density0.value
# ---------------------------------------------------------------
def Otot(self, z: u.Quantity | ArrayLike, /) -> FArray:
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshifts.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
Otot : array
The total density relative to the critical density at each redshift.
"""
return self.Om(z) + self.Ogamma(z) + self.Onu(z) + self.Ode(z) + self.Ok(z)
# Odm is provided by the DarkMatterComponent trait
# Ogamma is provided by the PhotonComponent trait
# Onu, Tnu, and nu_relative_density are provided by NeutrinoComponent trait
def _lookback_time_integrand_scalar(self, z: float, /) -> float:
"""Integrand of the lookback time (equation 30 of [1]_).
Returns
-------
I : float
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
return self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args) / (z + 1.0)
def lookback_time_integrand(self, z: u.Quantity | ArrayLike, /) -> FArray:
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
I : array
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return self.inv_efunc(z) / (z + 1.0)
def _abs_distance_integrand_scalar(self, z: u.Quantity | ArrayLike, /) -> float:
"""Integrand of the absorption distance (eq. 4, [1]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
Returns
-------
dX : float
The integrand for the absorption distance (dimensionless).
References
----------
.. [1] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
"""
return (z + 1.0) ** 2 * self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args)
def abs_distance_integrand(self, z: u.Quantity | ArrayLike, /) -> FArray:
"""Integrand of the absorption distance (eq. 4, [1]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
dX : array
The integrand for the absorption distance (dimensionless).
References
----------
.. [1] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
"""
z = aszarr(z)
return (z + 1.0) ** 2 * self.inv_efunc(z)
def lookback_time(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
.. versionchanged:: 7.0
Passing z as a keyword argument is deprecated.
.. versionchanged:: 8.0
z must be a positional argument.
Returns
-------
t : Quantity ['time']
Lookback time in Gyr to each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a lookback time.
"""
return self._lookback_time(z)
def _lookback_time(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
Returns
-------
t : Quantity ['time']
Lookback time in Gyr to each input redshift.
"""
return self.hubble_time * self._integral_lookback_time(z)
@vectorize_redshift_method
def _integral_lookback_time(self, z: u.Quantity | ArrayLike, /) -> FArray:
    """Lookback time to redshift ``z``, in units of the Hubble time.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    t : ndarray
        Lookback time to each input redshift in Hubble time units.
    """
    # quad returns (value, abserr); only the integral value is needed.
    value, _abserr = quad(self._lookback_time_integrand_scalar, 0, z)
    return value
def lookback_distance(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    """Light-travel-time distance in Mpc to redshift ``z``.

    Simply ``c * lookback_time``; useful e.g. for the proper distance
    between two redshifts, such as the mean free path to ionizing
    radiation.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    d : Quantity ['length']
        Lookback distance in Mpc
    """
    travel = self.lookback_time(z) * const.c
    return travel.to(u.Mpc)
def age(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    """Age of the universe in Gyr at redshift ``z``.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    t : Quantity ['time']
        The age of the universe in Gyr at each input redshift.

    See Also
    --------
    z_at_value : Find the redshift corresponding to an age.
    """
    # Delegate to the internal implementation hook.
    return self._age(z)
def _age(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
"""Age of the universe in Gyr at redshift ``z``.
This internal function exists to be re-defined for optimizations.
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
Returns
-------
t : Quantity ['time']
The age of the universe in Gyr at each input redshift.
"""
return self.hubble_time * self._integral_age(z)
@vectorize_redshift_method
def _integral_age(self, z: u.Quantity | ArrayLike, /) -> FArray:
    """Age of the universe at ``z`` in Hubble-time units, by explicit integration.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    t : array
        The age of the universe at each input redshift in Hubble time units.

    See Also
    --------
    z_at_value : Find the redshift corresponding to an age.
    """
    # Integrate the lookback-time integrand from z out to infinity.
    value, _abserr = quad(self._lookback_time_integrand_scalar, z, inf)
    return value
# ---------------------------------------------------------------
# Comoving distance
@overload
def comoving_distance(self, z: _InputT, /) -> u.Quantity: ...
@overload
def comoving_distance(self, z: _InputT, z2: _InputT, /) -> u.Quantity: ...
def comoving_distance(self, z: _InputT, z2: _InputT | None = None, /) -> u.Quantity:
    r"""Comoving line-of-sight distance :math:`d_c(z1, z2)` in Mpc.

    The comoving distance along the line-of-sight between two objects
    remains constant with time for objects in the Hubble flow.

    Parameters
    ----------
    z, z2 : Quantity ['redshift']
        Input redshifts. With one argument ``z``, the distance
        :math:`d_c(0, z)` is returned; with two arguments ``z1, z2``,
        the distance :math:`d_c(z_1, z_2)`.

    Returns
    -------
    Quantity ['length']
        Comoving distance in Mpc between each input redshift.
    """
    # The single-argument form measures from redshift zero.
    if z2 is None:
        lower, upper = 0.0, z
    else:
        lower, upper = z, z2
    return self._comoving_distance_z1z2(lower, upper)
def _comoving_distance_z1z2(
self, z1: u.Quantity | ArrayLike, z2: u.Quantity | ArrayLike, /
) -> u.Quantity:
"""Comoving line-of-sight distance in Mpc between redshifts ``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like
Input redshift.
Returns
-------
d : Quantity ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._integral_comoving_distance_z1z2(z1, z2)
def _integral_comoving_distance_z1z2(
self, z1: u.Quantity | ArrayLike, z2: u.Quantity | ArrayLike, /
) -> u.Quantity:
"""Comoving line-of-sight distance (Mpc) between objects at redshifts z1 and z2.
The comoving distance along the line-of-sight between two objects remains
constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'] or array-like
Input redshifts.
Returns
-------
|Quantity| ['length']
Comoving distance in Mpc between each input redshift.
"""
return self.hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2) # fmt: skip
@vectorize_redshift_method(nin=2)
def _integral_comoving_distance_z1z2_scalar(
    self, z1: u.Quantity | ArrayLike, z2: u.Quantity | ArrayLike, /
) -> FArray:
    """Dimensionless comoving line-of-sight distance between ``z1`` and ``z2``.

    Parameters
    ----------
    z1, z2 : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    d : array
        Comoving distance in Mpc between each input redshift.
    """
    # Integrate 1/E(z) from z1 to z2; quad returns (value, abserr).
    value, _abserr = quad(
        self._inv_efunc_scalar, z1, z2, args=self._inv_efunc_scalar_args
    )
    return value
# ---------------------------------------------------------------
def comoving_transverse_distance(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    r"""Comoving transverse distance in Mpc at a given redshift.

    The transverse comoving distance at redshift ``z`` corresponding to
    an angular separation of 1 radian. Equal to the comoving distance
    when :math:`\Omega_k` is zero (as in the current concordance
    Lambda-CDM model).

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    d : Quantity ['length']
        Comoving transverse distance in Mpc at each input redshift.

    Notes
    -----
    This quantity is also called the 'proper motion distance' in some texts.
    """
    # Measured from redshift zero.
    return self._comoving_transverse_distance_z1z2(0, z)
def _comoving_transverse_distance_z1z2(
self, z1: u.Quantity | ArrayLike, z2: u.Quantity | ArrayLike, /
) -> u.Quantity:
r"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z2`` as
seen from redshift ``z1`` corresponding to an angular separation of
1 radian. This is the same as the comoving distance if :math:`\Omega_k`
is zero (as in the current concordance Lambda-CDM model).
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like
Input redshifts.
Returns
-------
d : Quantity ['length']
Comoving transverse distance in Mpc between input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
Ok0 = self.Ok0
dc = self._comoving_distance_z1z2(z1, z2)
if Ok0 == 0:
return dc
sqrtOk0 = sqrt(abs(Ok0))
dh = self.hubble_distance
if Ok0 > 0:
return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
else:
return dh / sqrtOk0 * sin(sqrtOk0 * dc.value / dh.value)
def angular_diameter_distance(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    """Angular diameter distance in Mpc at a given redshift.

    The proper (sometimes called 'physical') transverse distance
    corresponding to an angle of 1 radian for an object at redshift
    ``z`` ([1]_, [2]_, [3]_).

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    d : Quantity ['length']
        Angular diameter distance in Mpc at each input redshift.

    References
    ----------
    .. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 421-424.
    .. [2] Weedman, D. (1986). Quasar astronomy, pp 65-67.
    .. [3] Peebles, P. (1993). Principles of Physical Cosmology, pp 325-327.
    """
    redshift = aszarr(z)
    # d_A = d_M / (1 + z)
    return self.comoving_transverse_distance(redshift) / (redshift + 1.0)
def luminosity_distance(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    """Luminosity distance in Mpc at redshift ``z``.

    The distance to use when converting between the bolometric flux from
    an object at redshift ``z`` and its bolometric luminosity [1]_.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    d : Quantity ['length']
        Luminosity distance in Mpc at each input redshift.

    See Also
    --------
    z_at_value : Find the redshift corresponding to a luminosity distance.

    References
    ----------
    .. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
    """
    redshift = aszarr(z)
    # d_L = (1 + z) * d_M
    return (redshift + 1.0) * self.comoving_transverse_distance(redshift)
def angular_diameter_distance_z1z2(
    self, z1: u.Quantity | ArrayLike, z2: u.Quantity | ArrayLike
) -> u.Quantity:
    """Angular diameter distance between objects at 2 redshifts.

    Useful for gravitational lensing, e.g. computing the angular
    diameter distance between a lensed galaxy and the foreground lens.

    Parameters
    ----------
    z1, z2 : Quantity-like ['redshift'], array-like
        Input redshifts. For most practical applications such as
        gravitational lensing, ``z2`` should be larger than ``z1``. The
        method will work for ``z2 < z1``; however, this will return
        negative distances.

    Returns
    -------
    d : Quantity ['length']
        The angular diameter distance between each input redshift pair.
        Returns scalar if input is scalar, array else-wise.
    """
    z1, z2 = aszarr(z1), aszarr(z2)
    # Warn (but proceed) when any pair is out of order: the distance
    # comes out negative in that case.
    if np.any(z2 < z1):
        warnings.warn(
            f"Second redshift(s) z2 ({z2}) is less than first "
            f"redshift(s) z1 ({z1}).",
            AstropyUserWarning,
        )
    transverse = self._comoving_transverse_distance_z1z2(z1, z2)
    return transverse / (z2 + 1.0)
@vectorize_redshift_method
def absorption_distance(self, z: u.Quantity | ArrayLike, /) -> FArray:
    """Absorption distance at redshift ``z`` (eq. 4, [1]_).

    Used to calculate the number of objects with some cross section of
    absorption and number density intersecting a sightline per unit
    redshift path [1]_.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    X : array
        Absorption distance (dimensionless) at each input redshift.

    References
    ----------
    .. [1] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
    """
    # quad returns (value, abserr); only the integral value is needed.
    distance, _abserr = quad(self._abs_distance_integrand_scalar, 0, z)
    return distance
def distmod(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    """Distance modulus at redshift ``z``.

    The distance modulus is defined as the (apparent magnitude -
    absolute magnitude) for an object at redshift ``z``.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    distmod : Quantity
        Distance modulus at each input redshift, in magnitudes.

    See Also
    --------
    z_at_value : Find the redshift corresponding to a distance modulus.
    """
    # Luminosity distance is in Mpc. The absolute value guards against
    # certain obscure closed cosmologies where the distance can be
    # negative -- which is okay because it enters as the square.
    modulus = 5.0 * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
    return u.Quantity(modulus, u.mag)
def comoving_volume(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    r"""Comoving volume in cubic Mpc at redshift ``z``.

    This is the volume of the universe encompassed by redshifts less
    than ``z``. For :math:`\Omega_k = 0` it is a sphere of radius
    `comoving_distance`, but it is less intuitive if :math:`\Omega_k`
    is not zero.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    V : Quantity ['volume']
        Comoving volume in :math:`Mpc^3` at each input redshift.
    """
    curvature = self.Ok0
    if curvature == 0:
        # Flat case: a plain sphere of radius d_C.
        return 4.0 / 3.0 * pi * self.comoving_distance(z) ** 3

    # Curved cases; work on raw .value floats for speed.
    hub = self.hubble_distance.value
    trans = self.comoving_transverse_distance(z).value
    prefactor = 4.0 * pi * hub**3 / (2.0 * curvature) * u.Mpc**3
    ratio = trans / hub
    middle = ratio * np.sqrt(1 + curvature * ratio**2)
    arg = sqrt(abs(curvature)) * trans / hub
    inv_sqrt = 1.0 / sqrt(abs(curvature))
    if curvature > 0:
        return prefactor * (middle - inv_sqrt * np.arcsinh(arg))
    else:
        return prefactor * (middle - inv_sqrt * np.arcsin(arg))
def differential_comoving_volume(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    """Differential comoving volume at redshift z.

    Useful for calculating the effective comoving volume. For example,
    allows integration over a comoving volume with a sensitivity
    function that changes with redshift. The total comoving volume is
    obtained by integrating this quantity to redshift ``z`` and
    multiplying by a solid angle.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    dV : Quantity
        Differential comoving volume per redshift per steradian at each
        input redshift.
    """
    transverse = self.comoving_transverse_distance(z)
    # dV/dz/dOmega = d_H * d_M^2 / E(z), per steradian.
    return self.hubble_distance * (transverse**2.0) / (self.efunc(z) << u.steradian)
def kpc_comoving_per_arcmin(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    """Separation in transverse comoving kpc equal to an arcmin at redshift ``z``.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    d : Quantity ['length']
        The distance in comoving kpc corresponding to an arcmin at each
        input redshift.
    """
    transverse_kpc = self.comoving_transverse_distance(z).to(u.kpc)
    return transverse_kpc / RAD_IN_ARCMIN
def kpc_proper_per_arcmin(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    """Separation in transverse proper kpc equal to an arcminute at redshift ``z``.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    d : Quantity ['length']
        The distance in proper kpc corresponding to an arcmin at each
        input redshift.
    """
    proper_kpc = self.angular_diameter_distance(z).to(u.kpc)
    return proper_kpc / RAD_IN_ARCMIN
def arcsec_per_kpc_comoving(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    """Angular separation in arcsec equal to a comoving kpc at redshift ``z``.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    theta : Quantity ['angle']
        The angular separation in arcsec corresponding to a comoving kpc
        at each input redshift.
    """
    comoving_kpc = self.comoving_transverse_distance(z).to(u.kpc)
    return RAD_IN_ARCSEC / comoving_kpc
def arcsec_per_kpc_proper(self, z: u.Quantity | ArrayLike, /) -> u.Quantity:
    """Angular separation in arcsec corresponding to a proper kpc at redshift ``z``.

    Parameters
    ----------
    z : Quantity-like ['redshift'], array-like
        Input redshift.

    Returns
    -------
    theta : Quantity ['angle']
        The angular separation in arcsec corresponding to a proper kpc
        at each input redshift.
    """
    proper_kpc = self.angular_diameter_distance(z).to(u.kpc)
    return RAD_IN_ARCSEC / proper_kpc
@dataclass_decorator
| FLRW |
python | getsentry__sentry | src/sentry/event_manager.py | {
"start": 12553,
"end": 98679
} | class ____:
"""
Handles normalization in both the store endpoint and the save task. The
intention is to swap this class out with a reimplementation in Rust.
"""
def __init__(
    self,
    data: MutableMapping[str, Any],
    version: str = "5",
    project: Project | None = None,
    grouping_config: GroupingConfig | None = None,
    client_ip: str | None = None,
    user_agent: str | None = None,
    auth: Any | None = None,
    key: Any | None = None,
    content_encoding: str | None = None,
    is_renormalize: bool = False,
    remove_other: bool | None = None,
    project_config: Any | None = None,
    sent_at: datetime | None = None,
):
    """Hold an event payload plus the request context needed to normalize and save it.

    The grouping config is resolved with the precedence:
    explicit ``grouping_config`` argument > ``project_config`` > ``project``.

    NOTE(review): ``content_encoding`` is accepted but never stored or read
    here -- presumably kept for API compatibility; confirm with callers.
    """
    self._data: MutableMapping[str, Any] = data
    self.version = version
    self._project = project
    # if not explicitly specified try to get the grouping from project_config
    if grouping_config is None and project_config is not None:
        config = project_config.config
        grouping_config = config.get("grouping_config")
    # if we still don't have a grouping also try the project
    if grouping_config is None and project is not None:
        grouping_config = get_grouping_config_dict_for_project(project)
    self._grouping_config = grouping_config
    self._client_ip = client_ip
    self._user_agent = user_agent
    self._auth = auth
    self._key = key
    self._is_renormalize = is_renormalize
    self._remove_other = remove_other
    # Flipped to True once normalization runs; guards against double-normalizing.
    self._normalized = False
    self.project_config = project_config
    self.sent_at = sent_at
def normalize(self, project_id: int | None = None) -> None:
    """Run payload normalization, recording its duration as a metric."""
    duration_timer = metrics.timer("events.store.normalize.duration")
    with duration_timer:
        self._normalize_impl(project_id=project_id)
def _normalize_impl(self, project_id: int | None = None) -> None:
    """Normalize ``self._data`` in place via Relay's Rust ``StoreNormalizer``.

    Raises
    ------
    RuntimeError
        If the manager was constructed with a different project than the one
        passed here, or if normalization already ran once.
    """
    if self._project and project_id and project_id != self._project.id:
        raise RuntimeError(
            "Initialized EventManager with one project ID and called save() with another one"
        )

    if self._normalized:
        raise RuntimeError("Already normalized")

    self._normalized = True

    # Imported here rather than at module top -- presumably to defer loading
    # the native extension until normalization is actually needed.
    from sentry_relay.processing import StoreNormalizer

    rust_normalizer = StoreNormalizer(
        project_id=self._project.id if self._project else project_id,
        client_ip=self._client_ip,
        client=self._auth.client if self._auth else None,
        key_id=str(self._key.id) if self._key else None,
        grouping_config=self._grouping_config,
        protocol_version=str(self.version) if self.version is not None else None,
        is_renormalize=self._is_renormalize,
        remove_other=self._remove_other,
        normalize_user_agent=True,
        sent_at=self.sent_at.isoformat() if self.sent_at is not None else None,
        json_dumps=orjson.dumps,
        **DEFAULT_STORE_NORMALIZER_ARGS,
    )

    # Remember the incoming type so it can be restored after normalization.
    pre_normalize_type = self._data.get("type")
    self._data = rust_normalizer.normalize_event(dict(self._data), json_loads=orjson.loads)
    # XXX: This is a hack to make generic events work (for now?). I'm not sure whether we should
    # include this in the rust normalizer, since we don't want people sending us these via the
    # sdk.
    if pre_normalize_type in ("generic", "feedback"):
        self._data["type"] = pre_normalize_type
def get_data(self) -> MutableMapping[str, Any]:
    """Return the event payload currently held by this manager."""
    return self._data
@sentry_sdk.trace
def save(
    self,
    project_id: int | None = None,
    project: Project | None = None,
    raw: bool = False,
    assume_normalized: bool = False,
    start_time: float | None = None,
    cache_key: str | None = None,
    skip_send_first_transaction: bool = False,
    attachments: list[CachedAttachment] | None = None,
) -> Event:
    """
    After normalizing and processing an event, save adjacent models such as
    releases and environments to postgres and write the event into
    eventstream. From there it will be picked up by Snuba and
    post-processing.

    We re-insert events with duplicate IDs into Snuba, which is responsible
    for deduplicating events. Since deduplication in Snuba is on the primary
    key (based on event ID, project ID and day), events with same IDs are only
    deduplicated if their timestamps fall on the same day. The latest event
    always wins and overwrites the value of events received earlier in that day.

    Since we increment counters and frequencies here before events get inserted
    to eventstream these numbers may be larger than the total number of
    events if we receive duplicate event IDs that fall on the same day
    (that do not hit cache first).
    """
    if project is None:
        assert project_id is not None
        project = resolve_project(project_id)
    projects = {project.id: project}

    # Normalize if needed
    if not self._normalized:
        if not assume_normalized:
            self.normalize(project_id=project.id)
        self._normalized = True

    # The per-event working state shared by all pipeline helpers.
    job: dict[str, Any] = {
        "data": self._data,
        "project_id": project.id,
        "raw": raw,
        "start_time": start_time,
    }

    # After calling _pull_out_data we get some keys in the job like the platform
    _pull_out_data([job], projects)

    # Sometimes projects get created without a platform (e.g. through the API), in which case we
    # attempt to set it based on the first event
    _set_project_platform_if_needed(project, job["event"])

    # Transactions and generic (issue-platform) events have dedicated save
    # pipelines; everything else goes through the error-event pipeline.
    event_type = self._data.get("type")
    if event_type == "transaction":
        job["data"]["project"] = project.id
        jobs = save_transaction_events([job], projects, skip_send_first_transaction)
        return jobs[0]["event"]
    elif event_type == "generic":
        job["data"]["project"] = project.id
        jobs = save_generic_events([job], projects)
        return jobs[0]["event"]
    else:
        project = job["event"].project
        job["in_grouping_transition"] = is_in_transition(project)
        metric_tags = {
            "platform": job["event"].platform or "unknown",
            "sdk": normalized_sdk_tag_from_event(job["event"].data),
            "in_transition": job["in_grouping_transition"],
            "split_enhancements": get_enhancements_version(project) == 3,
        }
        # This metric allows differentiating from all calls to the `event_manager.save` metric
        # and adds support for differentiating based on platforms
        with metrics.timer("event_manager.save_error_events", tags=metric_tags):
            return self.save_error_events(
                project, job, projects, metric_tags, attachments or [], raw, cache_key
            )
@sentry_sdk.tracing.trace
def save_error_events(
    self,
    project: Project,
    job: Job,
    projects: ProjectsMapping,
    metric_tags: MutableTags,
    attachments: list[CachedAttachment],
    raw: bool = False,
    cache_key: str | None = None,
) -> Event:
    """Run the error-event save pipeline for a single job.

    Resolves releases/users/tags, assigns the event to a group, persists
    adjacent models and the nodestore payload, inserts into eventstream,
    and records outcome/latency metrics.

    Raises
    ------
    HashDiscarded
        Re-raised after discarding the event when grouping matched a
        tombstoned hash.
    """
    jobs = [job]

    is_reprocessed = is_reprocessed_event(job["data"])

    _get_or_create_release_many(jobs, projects)
    _get_event_user_many(jobs, projects)

    # Resolve the project key used to send the event, if any.
    job["project_key"] = None
    if job["key_id"] is not None:
        try:
            job["project_key"] = ProjectKey.objects.get_from_cache(id=job["key_id"])
        except ProjectKey.DoesNotExist:
            pass

    _derive_plugin_tags_many(jobs, projects)
    _derive_interface_tags_many(jobs)
    _derive_client_error_sampling_rate(jobs, projects)

    try:
        group_info = assign_event_to_group(event=job["event"], job=job, metric_tags=metric_tags)
    except HashDiscarded as e:
        # The event matched a discarded (tombstoned) hash: record the hit,
        # drop the event and its attachments, and propagate the exception.
        if features.has("organizations:grouptombstones-hit-counter", project.organization):
            increment_group_tombstone_hit_counter(
                getattr(e, "tombstone_id", None), job["event"]
            )
        discard_event(job, attachments)
        raise

    if not group_info:
        return job["event"]

    # store a reference to the group id to guarantee validation of isolation
    # XXX(markus): No clue what this does
    job["event"].data.bind_ref(job["event"])

    _get_or_create_environment_many(jobs, projects)
    _get_or_create_group_environment_many(jobs)
    _get_or_create_release_associated_models(jobs, projects)
    _increment_release_associated_counts_many(jobs, projects)
    _get_or_create_group_release_many(jobs)
    _tsdb_record_all_metrics(jobs)

    if attachments:
        attachments = filter_attachments_for_group(attachments, job)

    # XXX: DO NOT MUTATE THE EVENT PAYLOAD AFTER THIS POINT
    _materialize_event_metrics(jobs)

    # Account stored attachment bytes in the event metrics.
    for attachment in attachments:
        key = f"bytes.stored.{attachment.type}"
        old_bytes = job["event_metrics"].get(key) or 0
        job["event_metrics"][key] = old_bytes + attachment.size

    _nodestore_save_many(jobs=jobs, app_feature="errors")

    if not raw:
        if not project.first_event:
            project.update(first_event=job["event"].datetime)
            first_event_received.send_robust(
                project=project, event=job["event"], sender=Project
            )

        if has_event_minified_stack_trace(job["event"]):
            set_project_flag_and_signal(
                project,
                "has_minified_stack_trace",
                first_event_with_minified_stack_trace_received,
                event=job["event"],
            )

    if is_reprocessed:
        safe_execute(
            reprocessing2.buffered_delete_old_primary_hash,
            project_id=job["event"].project_id,
            group_id=reprocessing2.get_original_group_id(job["event"]),
            event_id=job["event"].event_id,
            datetime=job["event"].datetime,
            old_primary_hash=reprocessing2.get_original_primary_hash(job["event"]),
            current_primary_hash=job["event"].get_primary_hash(),
        )

    _eventstream_insert_many(jobs)

    # Do this last to ensure signals get emitted even if connection to the
    # file store breaks temporarily.
    #
    # We do not need this for reprocessed events as for those we update the
    # group_id on existing models in post_process_group, which already does
    # this because of indiv. attachments.
    if not is_reprocessed and attachments:
        save_attachments(cache_key, attachments, job)

    metric_tags = {"from_relay": str("_relay_processed" in job["data"])}

    metrics.timing(
        "events.latency",
        job["received_timestamp"] - job["recorded_timestamp"],
        tags=metric_tags,
    )
    metrics.distribution(
        "events.size.data.post_save", job["event"].size, tags=metric_tags, unit="byte"
    )
    metrics.incr(
        "events.post_save.normalize.errors",
        amount=len(job["data"].get("errors") or ()),
        tags=metric_tags,
    )

    _track_outcome_accepted_many(jobs)

    self._data = job["event"].data.data
    return job["event"]
@sentry_sdk.tracing.trace
def _pull_out_data(jobs: Sequence[Job], projects: ProjectsMapping) -> None:
    """
    Update every job in the list with required information and store it in the nodestore.

    A bunch of (probably) CPU bound stuff.
    """
    for job in jobs:
        job["project_id"] = int(job["project_id"])

        data = job["data"]

        # Pull the toplevel data we're interested in
        transaction_name = data.get("transaction")
        if transaction_name:
            transaction_name = force_str(transaction_name)
        job["transaction"] = transaction_name

        key_id = None if data is None else data.get("key_id")
        if key_id is not None:
            key_id = int(key_id)
        job["key_id"] = key_id

        job["logger_name"] = logger_name = data.get("logger")
        job["level"] = level = data.get("level")
        job["release"] = data.get("release")
        job["dist"] = data.get("dist")
        job["environment"] = environment = data.get("environment")
        job["recorded_timestamp"] = data.get("timestamp")
        # Stores the event in the nodestore
        job["event"] = event = _get_event_instance(job["data"], project_id=job["project_id"])
        # Overwrite the data key with the event's updated data
        job["data"] = data = event.data.data

        event._project_cache = project = projects[job["project_id"]]
        job["category"] = index_data_category(data.get("type"), project.organization)
        job["platform"] = event.platform

        # Some of the data that are toplevel attributes are duplicated
        # into tags (logger, level, environment, transaction). These are
        # different from legacy attributes which are normalized into tags
        # ahead of time (site, server_name).
        setdefault_path(data, "tags", value=[])
        set_tag(data, "level", level)
        if logger_name:
            set_tag(data, "logger", logger_name)
        if environment:
            set_tag(data, "environment", environment)
        if transaction_name:
            set_tag(data, "transaction", transaction_name)

        # NOTE(review): strftime("%s") is a non-portable (glibc) extension and
        # ignores tzinfo; confirm `received` is always present in practice so
        # this fallback rarely runs.
        job["received_timestamp"] = job["event"].data.get("received") or float(
            job["event"].datetime.strftime("%s")
        )
        job["groups"] = []
def _set_project_platform_if_needed(project: Project, event: Event) -> None:
    """Backfill the project's platform from an incoming event if it is unset."""
    if project.platform:
        return

    # Skip unknown platforms and demo/sample events.
    if event.platform not in VALID_PLATFORMS or event.get_tag("sample_event") == "yes":
        return

    try:
        # Re-check "platform unset" inside the UPDATE filter so a concurrent
        # writer cannot be overwritten.
        updated = Project.objects.filter(
            Q(id=project.id) & (Q(platform__isnull=True) | Q(platform=""))
        ).update(platform=event.platform)

        if updated:
            create_system_audit_entry(
                organization=project.organization,
                target_object=project.id,
                event=audit_log.get_event_id("PROJECT_EDIT"),
                data={**project.get_audit_log_data(), "platform": event.platform},
            )
            metrics.incr(
                "issues.infer_project_platform.success",
                sample_rate=1.0,
                tags={
                    "reason": "new_project" if not project.first_event else "backfill",
                    "platform": event.platform,
                },
            )
    except Exception:
        # Best-effort: platform inference must never break event saving.
        logger.exception("Failed to infer and set project platform")
@sentry_sdk.tracing.trace
def _get_or_create_release_many(jobs: Sequence[Job], projects: ProjectsMapping) -> None:
    """Resolve (or create) the Release for each job and materialize release/dist tags.

    Jobs without a ``release`` in their payload are skipped. On success the raw
    ``release``/``dist`` tags are replaced by the canonical ``sentry:release``
    and ``sentry:dist`` tags.
    """
    for job in jobs:
        data = job["data"]
        if not data.get("release"):
            # No release on this event: skip it but keep processing the rest
            # of the batch (a bare `return` here would wrongly abort every
            # remaining job).
            continue

        project = projects[job["project_id"]]
        date = job["event"].datetime
        try:
            release = Release.get_or_create(
                project=project,
                version=data["release"],
                date_added=date,
            )
        except ValidationError:
            logger.exception(
                "Failed creating Release due to ValidationError",
                extra={"project": project, "version": data["release"]},
            )
            release = None

        job["release"] = release
        if not release:
            # Release creation failed: move on to the next job rather than
            # abandoning the whole batch.
            continue

        # Don't allow a conflicting 'release' tag
        pop_tag(data, "release")
        set_tag(data, "sentry:release", release.version)

        if data.get("dist"):
            job["dist"] = release.add_dist(data["dist"], date)
            # don't allow a conflicting 'dist' tag
            pop_tag(job["data"], "dist")
            set_tag(job["data"], "sentry:dist", job["dist"].name)
@sentry_sdk.tracing.trace
def _get_event_user_many(jobs: Sequence[Job], projects: ProjectsMapping) -> None:
    """Resolve the event user for each job and materialize the sentry:user tag."""
    for job in jobs:
        payload = job["data"]
        event_user = _get_event_user(projects[job["project_id"]], payload)

        if event_user:
            # Replace the raw "user" tag with the canonical sentry:user tag.
            pop_tag(payload, "user")
            set_tag(payload, "sentry:user", event_user.tag_value)

        job["user"] = event_user
@sentry_sdk.tracing.trace
def _derive_plugin_tags_many(jobs: Sequence[Job], projects: ProjectsMapping) -> None:
    """Let project plugins contribute tags, without overriding user-provided ones."""
    # XXX: We ought to inline or remove this one for sure
    plugin_map = {
        project.id: plugins.for_project(project, version=None) for project in projects.values()
    }

    for job in jobs:
        for plugin in plugin_map[job["project_id"]]:
            extra_tags = safe_execute(plugin.get_tags, job["event"])
            if not extra_tags:
                continue
            payload = job["data"]
            for tag_key, tag_value in extra_tags:
                # plugins should not override user provided tags
                if get_tag(payload, tag_key) is None:
                    set_tag(payload, tag_key, tag_value)
def _derive_interface_tags_many(jobs: Sequence[Job]) -> None:
    """Collect tags contributed by event interfaces and drop ephemeral interface data."""
    # XXX: We ought to inline or remove this one for sure
    for job in jobs:
        payload = job["data"]
        for _path, interface in job["event"].interfaces.items():
            for tag_key, tag_value in interface.iter_tags():
                set_tag(payload, tag_key, tag_value)

            # Ephemeral interface data is not persisted with the event.
            if interface.ephemeral:
                payload.pop(interface.path, None)
def _derive_client_error_sampling_rate(jobs: Sequence[Job], projects: ProjectsMapping) -> None:
    """Copy a client-reported error sampling rate into the payload for allowlisted projects.

    The rate is read from ``contexts.error_sampling.client_sample_rate`` and
    stored as ``data["sample_rate"]`` when it is a number in (0, 1].
    """
    for job in jobs:
        # Only projects on the explicit option allowlist participate.
        if job["project_id"] in options.get("issues.client_error_sampling.project_allowlist"):
            try:
                client_sample_rate = (
                    job["data"]
                    .get("contexts", {})
                    .get("error_sampling", {})
                    .get("client_sample_rate")
                )
                # NOTE(review): isinstance(..., (int, float)) also accepts
                # bool (a subclass of int); confirm whether True/False should
                # be rejected here.
                if client_sample_rate is not None and isinstance(client_sample_rate, (int, float)):
                    if 0 < client_sample_rate <= 1:
                        job["data"]["sample_rate"] = client_sample_rate
                    else:
                        # Out-of-range rates are reported but not applied.
                        logger.warning(
                            "Client sent invalid error sample_rate outside valid range (0-1)",
                            extra={
                                "project_id": job["project_id"],
                                "client_sample_rate": client_sample_rate,
                            },
                        )
                        metrics.incr("issues.client_error_sampling.invalid_range")
            except (KeyError, TypeError, AttributeError):
                # Malformed contexts payloads are ignored (best-effort).
                pass
def _materialize_metadata_many(jobs: Sequence[Job]) -> None:
    """Freeze event type, metadata, and derived attributes into each job's payload."""
    for job in jobs:
        # we want to freeze not just the metadata and type in but also the
        # derived attributes. The reason for this is that we push this
        # data into kafka for snuba processing and our postprocessing
        # picks up the data right from the snuba topic. For most usage
        # however the data is dynamically overridden by Event.title and
        # Event.location (See Event.as_dict)
        #
        # We also need to ensure the culprit is accurately reflected at
        # the point of metadata materialization as we need to ensure that
        # processing happens before.
        data = job["data"]
        event_type = get_event_type(data)
        event_metadata = event_type.get_metadata(data)
        # Keep a copy on the job for later consumers (e.g. attachment metrics).
        job["event_metadata"] = dict(event_metadata)
        data.update(materialize_metadata(data, event_type, event_metadata))
        job["culprit"] = data["culprit"]
def _get_group_processing_kwargs(job: Job) -> dict[str, Any]:
    """
    Pull together all the metadata used when creating a group or updating a group's metadata based
    on a new event.
    Note: Must be called *after* grouping has run, because the grouping process can affect the title
    (by setting `main_exception_id` or by setting the title directly using a custom fingerprint
    rule).
    """
    # Ensure `job["event_metadata"]` and `job["culprit"]` are populated.
    _materialize_metadata_many([job])
    event_data = job["event"].data
    event_metadata = job["event_metadata"]
    group_metadata = materialize_metadata(
        event_data,
        # In principle the group gets the same metadata as the event, so common
        # attributes can be defined in eventtypes.
        get_event_type(event_data),
        event_metadata,
    )
    group_metadata["last_received"] = job["received_timestamp"]
    kwargs = {
        "data": group_metadata,
        "platform": job["platform"],
        "message": job["event"].search_message,
        "logger": job["logger_name"],
        "level": LOG_LEVELS_MAP.get(job["level"]),
        "last_seen": job["event"].datetime,
        "first_seen": job["event"].datetime,
        "active_at": job["event"].datetime,
        "culprit": job["culprit"],
    }
    # Only set `first_release` when the event actually has one, so group
    # creation can default it otherwise.
    if job["release"]:
        kwargs["first_release"] = job["release"]
    return kwargs
@sentry_sdk.tracing.trace
def _get_or_create_environment_many(jobs: Sequence[Job], projects: ProjectsMapping) -> None:
    """Resolve each job's environment *name* into an Environment model
    instance, replacing the value in place."""
    for job in jobs:
        job["environment"] = Environment.get_or_create(
            project=projects[job["project_id"]], name=job["environment"]
        )
@sentry_sdk.tracing.trace
def _get_or_create_group_environment_many(jobs: Sequence[Job]) -> None:
    """Fan out to `_get_or_create_group_environment` for every job."""
    for job in jobs:
        _get_or_create_group_environment(
            job["environment"], job["release"], job["groups"], job["event"].datetime
        )
def _get_or_create_group_environment(
    environment: Environment,
    release: Release | None,
    groups: Sequence[GroupInfo],
    event_datetime: datetime,
) -> None:
    """Ensure a GroupEnvironment row exists for every group, recording on each
    GroupInfo whether this event was the first seen in that environment."""
    for info in groups:
        _, created = GroupEnvironment.get_or_create(
            group_id=info.group.id,
            environment_id=environment.id,
            defaults={"first_release": release or None, "first_seen": event_datetime},
        )
        info.is_new_group_environment = created
def _get_or_create_release_associated_models(
    jobs: Sequence[Job], projects: ProjectsMapping
) -> None:
    """Ensure ReleaseEnvironment / ReleaseProjectEnvironment rows exist for
    every job that carries a release."""
    # XXX: This is possibly unnecessarily detached from
    # _get_or_create_release_many, but we do not want to destroy order of
    # execution right now
    for job in jobs:
        release = job["release"]
        if not release:
            continue
        common_kwargs = dict(
            project=projects[job["project_id"]],
            release=release,
            environment=job["environment"],
            datetime=job["event"].datetime,
        )
        ReleaseEnvironment.get_or_create(**common_kwargs)
        ReleaseProjectEnvironment.get_or_create(**common_kwargs)
def _increment_release_associated_counts_many(
    jobs: Sequence[Job], projects: ProjectsMapping
) -> None:
    """Fan out to `_increment_release_associated_counts` for every job."""
    for job in jobs:
        _increment_release_associated_counts(
            projects[job["project_id"]], job["environment"], job["release"], job["groups"]
        )
def _increment_release_associated_counts(
    project: Project,
    environment: Environment,
    release: Release | None,
    groups: Sequence[GroupInfo],
) -> None:
    """Buffer-increment new-group counters on ReleaseProject and
    ReleaseProjectEnvironment for groups this event newly created."""
    if not release:
        return
    new_group_count = sum(1 for info in groups if info.is_new)
    new_group_env_count = sum(1 for info in groups if info.is_new_group_environment)
    if new_group_count:
        buffer_incr(
            ReleaseProject,
            {"new_groups": new_group_count},
            {"release_id": release.id, "project_id": project.id},
        )
    if new_group_env_count:
        buffer_incr(
            ReleaseProjectEnvironment,
            {"new_issues_count": new_group_env_count},
            {
                "project_id": project.id,
                "release_id": release.id,
                "environment_id": environment.id,
            },
        )
def _get_or_create_group_release_many(jobs: Sequence[Job]) -> None:
    """Fan out to `_get_or_create_group_release` for every job."""
    for job in jobs:
        _get_or_create_group_release(
            job["environment"], job["release"], job["event"], job["groups"]
        )
def _get_or_create_group_release(
    environment: Environment,
    release: Release | None,
    event: BaseEvent,
    groups: Sequence[GroupInfo],
) -> None:
    """Attach a GroupRelease record to each group when the event has a release."""
    if not release:
        return
    for info in groups:
        info.group_release = GroupRelease.get_or_create(
            group=info.group,
            release=release,
            environment=environment,
            datetime=event.datetime,
        )
def _tsdb_record_all_metrics(jobs: Sequence[Job]) -> None:
    """
    Do all tsdb-related things for save_event in here s.t. we can potentially
    put everything in a single redis pipeline someday.
    """
    # XXX: validate whether anybody actually uses those metrics
    for job in jobs:
        # Batched per job: counters, per-key frequencies, and distinct-value sets.
        incrs = []
        frequencies = []
        records = []
        incrs.append((TSDBModel.project, job["project_id"]))
        event = job["event"]
        release = job["release"]
        environment = job["environment"]
        user = job["user"]
        for group_info in job["groups"]:
            incrs.append((TSDBModel.group, group_info.group.id))
            frequencies.append(
                (
                    TSDBModel.frequent_environments_by_group,
                    {group_info.group.id: {environment.id: 1}},
                )
            )
            if group_info.group_release:
                frequencies.append(
                    (
                        TSDBModel.frequent_releases_by_group,
                        {group_info.group.id: {group_info.group_release.id: 1}},
                    )
                )
            if user:
                records.append(
                    (TSDBModel.users_affected_by_group, group_info.group.id, (user.tag_value,))
                )
        if release:
            incrs.append((TSDBModel.release, release.id))
        if user:
            project_id = job["project_id"]
            records.append((TSDBModel.users_affected_by_project, project_id, (user.tag_value,)))
        # Flush each batch with the event's timestamp and environment.
        if incrs:
            tsdb.backend.incr_multi(incrs, timestamp=event.datetime, environment_id=environment.id)
        if records:
            tsdb.backend.record_multi(
                records, timestamp=event.datetime, environment_id=environment.id
            )
        if frequencies:
            tsdb.backend.record_frequency_multi(frequencies, timestamp=event.datetime)
def _nodestore_save_many(jobs: Sequence[Job], app_feature: str) -> None:
    """Persist each job's event payload to nodestore, attaching the cached
    `unprocessed` payload as a subkey for error events, and (when
    `app_feature` is set) record stored bytes for COGS accounting."""
    inserted_time = datetime.now(timezone.utc).timestamp()
    for job in jobs:
        # Write the event to Nodestore
        subkeys = {}
        event = job["event"]
        # We only care about `unprocessed` for error events
        if event.get_event_type() not in ("transaction", "generic") and job["groups"]:
            unprocessed = event_processing_store.get(
                cache_key_for_event({"project": event.project_id, "event_id": event.event_id}),
                unprocessed=True,
            )
            if unprocessed is not None:
                subkeys["unprocessed"] = unprocessed
        if app_feature:
            # Fall back to 0 bytes if size metrics weren't collected upstream.
            event_size = 0
            event_metrics = job.get("event_metrics")
            if event_metrics:
                event_size = event_metrics.get("bytes.stored.event", 0)
            record(
                resource_id=settings.COGS_EVENT_STORE_LABEL,
                app_feature=app_feature,
                amount=event_size,
                usage_type=UsageUnit.BYTES,
            )
        job["event"].data["nodestore_insert"] = inserted_time
        job["event"].data.save(subkeys=subkeys)
def _eventstream_insert_many(jobs: Sequence[Job]) -> None:
    """Publish each saved event (plus its group-state flags) to the
    eventstream for snuba/post-processing consumption."""
    for job in jobs:
        if job["event"].project_id == settings.SENTRY_PROJECT:
            metrics.incr(
                "internal.captured.eventstream_insert",
                tags={"event_type": job["event"].data.get("type") or "null"},
            )
        # XXX: Temporary hack so that we keep this group info working for error issues. We'll need
        # to change the format of eventstream to be able to handle data for multiple groups
        if not job["groups"]:
            group_states: list[GroupState] | None = None
            is_new = False
            is_regression = False
            is_new_group_environment = False
        else:
            # error issues
            group_info = job["groups"][0]
            is_new = group_info.is_new
            is_regression = group_info.is_regression
            is_new_group_environment = group_info.is_new_group_environment
            # performance issues with potentially multiple groups to a transaction
            group_states = [
                {
                    "id": gi.group.id,
                    "is_new": gi.is_new,
                    "is_regression": gi.is_regression,
                    "is_new_group_environment": gi.is_new_group_environment,
                }
                for gi in job["groups"]
                if gi is not None
            ]
        # Skip running grouping for "transaction" events:
        primary_hash = (
            None if job["data"].get("type") == "transaction" else job["event"].get_primary_hash()
        )
        eventstream.backend.insert(
            event=job["event"],
            is_new=is_new,
            is_regression=is_regression,
            is_new_group_environment=is_new_group_environment,
            primary_hash=primary_hash,
            received_timestamp=job["received_timestamp"],
            # We are choosing to skip consuming the event back
            # in the eventstream if it's flagged as raw.
            # This means that we want to publish the event
            # through the event stream, but we don't care
            # about post processing and handling the commit.
            skip_consume=job.get("raw", False),
            group_states=group_states,
        )
def _track_outcome_accepted_many(jobs: Sequence[Job]) -> None:
    """Record an ACCEPTED outcome (quantity 1) for every saved event."""
    for job in jobs:
        event = job["event"]
        outcome_aggregator.track_outcome_aggregated(
            org_id=event.project.organization_id,
            project_id=job["project_id"],
            key_id=job["key_id"],
            outcome=Outcome.ACCEPTED,
            reason=None,
            timestamp=to_datetime(job["start_time"]),
            category=job["category"],
            quantity=1,
        )
def _get_event_instance(data: MutableMapping[str, Any], project_id: int) -> Event:
    """Build an Event object from raw payload data with no group assignment,
    skipping renormalization since the payload was already normalized."""
    return eventstore.backend.create_event(
        project_id=project_id,
        event_id=data["event_id"],
        group_id=None,
        data=EventDict(data, skip_renormalization=True),
    )
def _get_event_user(project: Project, data: Mapping[str, Any]) -> EventUser | None:
    """Timed wrapper around `_get_event_user_impl`; the timer's tags are
    filled in by the implementation."""
    with metrics.timer("event_manager.get_event_user") as metrics_tags:
        return _get_event_user_impl(project, data, metrics_tags)
def _get_event_user_impl(
    project: Project, data: Mapping[str, Any], metrics_tags: MutableTags
) -> EventUser | None:
    """Build an EventUser from the event's `user` context, or return None when
    the event carries no user data.

    Records whether user data was present in `metrics_tags` and discards a
    syntactically invalid IP address instead of failing.
    """
    user_info = data.get("user")
    if not user_info:
        metrics_tags["event_has_user"] = "false"
        return None
    metrics_tags["event_has_user"] = "true"
    ip_address = user_info.get("ip_address")
    # Drop values that don't parse as an IPv4/IPv6 address; falsy values
    # (None, "") pass through unchanged.
    if ip_address:
        try:
            ipaddress.ip_address(str(ip_address))
        except ValueError:
            ip_address = None
    return EventUser(
        project_id=project.id,
        user_ident=user_info.get("id"),
        email=user_info.get("email"),
        username=user_info.get("username"),
        ip_address=ip_address,
        name=user_info.get("name"),
    )
def get_event_type(data: Mapping[str, Any]) -> EventType:
    """Instantiate the EventType implementation named by ``data["type"]``,
    falling back to the default event type."""
    type_name = data.get("type", "default")
    return eventtypes.get(type_name)()
# Materialized event/group metadata: `type`, `culprit`, `metadata`, `title`, `location`.
EventMetadata = dict[str, Any]
def materialize_metadata(
    data: Mapping[str, Any], event_type: EventType, event_metadata: dict[str, Any]
) -> EventMetadata:
    """Returns the materialized metadata to be merged with group or
    event data. This currently produces the keys `type`, `culprit`,
    `metadata`, `title` and `location`.

    NOTE: mutates `event_metadata` in place (merges `data["metadata"]` into it).
    """
    # XXX(markus): Ideally this wouldn't take data or event_type, and instead
    # calculate culprit + type from event_metadata
    # Don't clobber existing metadata
    try:
        event_metadata.update(data.get("metadata", {}))
    except TypeError:
        # On a small handful of occasions, the line above has errored with `TypeError: 'NoneType'
        # object is not iterable`, even though it's clear from looking at the local variable values
        # in the event in Sentry that this shouldn't be possible.
        logger.exception(
            "Non-None being read as None",
            extra={
                "data is None": data is None,
                "event_metadata is None": event_metadata is None,
                "data.get": data.get,
                "event_metadata.update": event_metadata.update,
                "data.get('metadata', {})": data.get("metadata", {}),
            },
        )
    return {
        "type": event_type.key,
        "culprit": get_culprit(data),
        "metadata": event_metadata,
        "title": event_type.get_title(event_metadata),
        "location": event_type.get_location(event_metadata),
    }
def get_culprit(data: Mapping[str, Any]) -> str:
    """Helper to calculate the default culprit, preferring an explicit
    culprit, then the transaction name, then a generated fallback."""
    culprit = data.get("culprit") or data.get("transaction") or generate_culprit(data) or ""
    return str(force_str(culprit))
@sentry_sdk.tracing.trace
def assign_event_to_group(
    event: Event,
    job: Job,
    metric_tags: MutableTags,
) -> GroupInfo | None:
    """Find or create the group for `event`.

    Tries, in order: the primary grouping config, the secondary (transition)
    config, a Seer match, and finally new-group creation. On success the event
    and job are updated with the resulting group; returns None only when the
    matched group turned out to be a non-error type (hash collision).
    """
    project = event.project
    secondary = NULL_GROUPHASH_INFO
    # Try looking for an existing group using the current grouping config
    primary = get_hashes_and_grouphashes(job, run_primary_grouping, metric_tags)
    # If we've found one, great. No need to do any more calculations
    if primary.existing_grouphash:
        group_info = handle_existing_grouphash(job, primary.existing_grouphash, primary.grouphashes)
        result = "found_primary"
        maybe_send_seer_for_new_model_training(event, primary.existing_grouphash, primary.variants)
    # If we haven't, try again using the secondary config. (If there is no secondary config, or
    # we're out of the transition period, we'll get back the empty `NULL_GROUPHASH_INFO`.)
    else:
        secondary = get_hashes_and_grouphashes(job, maybe_run_secondary_grouping, metric_tags)
        all_grouphashes = primary.grouphashes + secondary.grouphashes
        if secondary.existing_grouphash:
            group_info = handle_existing_grouphash(
                job, secondary.existing_grouphash, all_grouphashes
            )
            result = "found_secondary"
            maybe_send_seer_for_new_model_training(
                event, secondary.existing_grouphash, secondary.variants
            )
        # If we still haven't found a group, ask Seer for a match (if enabled for the event's platform)
        else:
            seer_matched_grouphash = maybe_check_seer_for_matching_grouphash(
                event, primary.grouphashes[0], primary.variants, all_grouphashes
            )
            if seer_matched_grouphash:
                group_info = handle_existing_grouphash(job, seer_matched_grouphash, all_grouphashes)
            # If we *still* haven't found a group into which to put the event, create a new group
            else:
                group_info = create_group_with_grouphashes(job, all_grouphashes)
            result = "no_match"
    # From here on out, we're just doing housekeeping
    # Background grouping is a way for us to get performance metrics for a new
    # config without having it actually affect on how events are grouped. It runs
    # either before or after the main grouping logic, depending on the option value.
    maybe_run_background_grouping(project, job)
    record_hash_calculation_metrics(
        project, primary.config, primary.hashes, secondary.config, secondary.hashes, result
    )
    # Now that we've used the current and possibly secondary grouping config(s) to calculate the
    # hashes, we're free to perform a config update if needed. Future events will use the new
    # config, but will also be grandfathered into the current config for a set period, so as not to
    # erroneously create new groups.
    update_or_set_grouping_config_if_needed(project, "ingest")
    # The only way there won't be group info is we matched to a performance, cron, replay, or
    # other-non-error-type group because of a hash collision - exceedingly unlikely, and not
    # something we've ever observed, but theoretically possible.
    if group_info:
        event.group = group_info.group
        job["groups"] = [group_info]
    return group_info
@sentry_sdk.tracing.trace
def get_hashes_and_grouphashes(
    job: Job,
    hash_calculation_function: Callable[
        [Project, Job, MutableTags],
        tuple[GroupingConfig, list[str], dict[str, BaseVariant]],
    ],
    metric_tags: MutableTags,
) -> GroupHashInfo:
    """
    Calculate hashes for the job's event, create corresponding `GroupHash` entries if they don't yet
    exist, and determine if there's an existing group associated with any of the hashes.
    If the callback determines that it doesn't need to run its calculations (as may be the case with
    secondary grouping), this will return an empty list of grouphashes (so iteration won't break)
    and Nones for everything else (i.e. `NULL_GROUPHASH_INFO`).
    """
    event = job["event"]
    project = event.project
    # These will come back as Nones if the calculation decides it doesn't need to run
    grouping_config, hashes, variants = hash_calculation_function(project, job, metric_tags)
    if hashes:
        grouphashes = get_or_create_grouphashes(
            event, project, variants, hashes, grouping_config["id"]
        )
        # A non-None result here means some grouphash already has a group.
        existing_grouphash = find_grouphash_with_group(grouphashes)
        return GroupHashInfo(grouping_config, variants, hashes, grouphashes, existing_grouphash)
    else:
        return NULL_GROUPHASH_INFO
@sentry_sdk.tracing.trace
def handle_existing_grouphash(
    job: Job,
    existing_grouphash: GroupHash,
    all_grouphashes: list[GroupHash],
) -> GroupInfo | None:
    """
    Handle the case where an incoming event matches an existing group, by assigning the event to the
    group, updating the group metadata with data from the event, and linking any newly-calculated
    grouphashes to the group.

    Returns None when the matched group turns out to be a non-error type
    (a theoretical hash collision).
    """
    # There is a race condition here where two processes could "steal"
    # hashes from each other. In practice this should not be user-visible
    # as group creation is synchronized, meaning the only way hashes could
    # jump between groups is if there were two processes that:
    #
    # 1) have BOTH found an existing group
    # (otherwise at least one of them would be in the group creation
    # codepath which has transaction isolation/acquires row locks)
    # 2) AND are looking at the same set, or an overlapping set of hashes
    # (otherwise they would not operate on the same rows)
    # 3) yet somehow also retrieve different groups here
    # (otherwise the update would not change anything)
    #
    # We think this is a very unlikely situation. A previous version of
    # this function had races around group creation which made this race
    # more user visible. For more context, see 84c6f75a and d0e22787, as
    # well as GH-5085.
    group = Group.objects.get(id=existing_grouphash.group_id)
    # As far as we know this has never happened, but in theory at least, the error event hashing
    # algorithm and other event hashing algorithms could come up with the same hash value in the
    # same project and our hash could have matched to a non-error group. Just to be safe, we make
    # sure that's not the case before proceeding.
    if is_non_error_type_group(group):
        return None
    # There may still be hashes that we did not use to find an existing
    # group. A classic example is when grouping makes changes to the
    # app-hash (changes to in_app logic), but the system hash stays
    # stable and is used to find an existing group. Associate any new
    # hashes with the group such that event saving continues to be
    # resilient against grouping algorithm changes.
    add_group_id_to_grouphashes(group, all_grouphashes)
    is_regression = _process_existing_aggregate(
        group=group,
        event=job["event"],
        incoming_group_values=_get_group_processing_kwargs(job),
        release=job["release"],
    )
    # Ensure the group has a DetectorGroup association for existing groups
    ensure_association_with_detector(group)
    return GroupInfo(group=group, is_new=False, is_regression=is_regression)
def create_group_with_grouphashes(job: Job, grouphashes: list[GroupHash]) -> GroupInfo | None:
    """
    Create a group from the data in `job` and link it to the given grouphashes.
    In very rare circumstances, we can end up in a race condition with another process trying to
    create the same group. If the current process loses the race, this function will update the
    group the other process just created, rather than creating a group itself.
    """
    event = job["event"]
    project = event.project
    # If the load-shed killswitch is enabled, this will raise a `HashDiscarded` error to pop us out
    # of this function all the way back to `save_error_events`, preventing group creation
    check_for_group_creation_load_shed(project, event)
    with (
        sentry_sdk.start_span(op="event_manager.create_group_transaction") as span,
        metrics.timer("event_manager.create_group_transaction") as metrics_timer_tags,
        transaction.atomic(router.db_for_write(GroupHash)),
    ):
        # These values will get overridden with whatever happens inside the lock if we do manage to
        # acquire it, so it should only end up with `wait-for-lock` if we don't
        span.set_tag("outcome", "wait_for_lock")
        metrics_timer_tags["outcome"] = "wait_for_lock"
        # If we're in this branch, we checked our grouphashes and didn't find one with a group
        # attached. We thus want to create a new group, but we need to guard against another
        # event with the same hash coming in before we're done here and also thinking it needs
        # to create a new group. To prevent this, we're using double-checked locking
        # (https://en.wikipedia.org/wiki/Double-checked_locking).
        # First, try to lock the relevant rows in the `GroupHash` table. If another (identically
        # hashed) event is also in the process of creating a group and has grabbed the lock
        # before us, we'll block here until it's done. If not, we've now got the lock and other
        # identically-hashed events will have to wait for us.
        grouphashes = list(
            GroupHash.objects.filter(
                id__in=[h.id for h in grouphashes],
            ).select_for_update()
        )
        # Now check again to see if any of our grouphashes have a group. In the first race
        # condition scenario above, we'll have been blocked long enough for the other event to
        # have created the group and updated our grouphashes with a group id, which means this
        # time, we'll find something.
        existing_grouphash = find_grouphash_with_group(grouphashes)
        # If we still haven't found a matching grouphash, we're now safe to go ahead and create
        # the group.
        if existing_grouphash is None:
            span.set_tag("outcome", "new_group")
            metrics_timer_tags["outcome"] = "new_group"
            record_new_group_metrics(event)
            group = _create_group(project, event, **_get_group_processing_kwargs(job))
            associate_new_group_with_detector(group)
            add_group_id_to_grouphashes(group, grouphashes)
            return GroupInfo(group=group, is_new=True, is_regression=False)
        # On the other hand, if we did in fact end up on the losing end of a race condition, treat
        # this the same way we would if we'd found a grouphash to begin with (and never landed in
        # this function at all)
        else:
            # TODO: should we be setting tags here, too?
            return handle_existing_grouphash(job, existing_grouphash, grouphashes)
def _create_group(
    project: Project,
    event: Event,
    *,
    first_release: Release | None = None,
    **group_creation_kwargs: Any,
) -> Group:
    """Create a new Group for `event`, deriving metadata (SDK info, severity,
    priority, sampling-weighted `times_seen`) and recovering from the rare
    "stuck project counter" short-id collision.

    Raises HashDiscarded (via `_get_next_short_id`) on counter timeout, and
    re-raises any IntegrityError that isn't the stuck-counter case.
    """
    short_id = _get_next_short_id(project)
    # it's possible the release was deleted between
    # when we queried for the release and now, so
    # make sure it still exists
    group_creation_kwargs["first_release_id"] = (
        Release.objects.filter(id=first_release.id).values_list("id", flat=True).first()
        if first_release
        else None
    )
    group_creation_kwargs["substatus"] = GroupSubStatus.NEW
    group_data = group_creation_kwargs.pop("data", {})
    # add sdk tag to metadata
    group_data.setdefault("metadata", {}).update(sdk_metadata_from_event(event))
    # add severity to metadata for alert filtering
    severity: Mapping[str, Any] = {}
    try:
        group_type = group_creation_kwargs.get("type", None)
        severity = _get_severity_metadata_for_group(event, project.id, group_type)
        group_data["metadata"].update(severity)
    except Exception:
        # NOTE: `logger.exception` already attaches the traceback; passing
        # `repr(e)` as a positional arg (as this code previously did) makes
        # logging try to %-format a message with no placeholders and fail.
        logger.exception(
            "Failed to get severity metadata for group",
            extra={"event_id": event.event_id},
        )
    # the kwargs only include priority for non-error issue platform events, which takes precedence.
    priority = group_creation_kwargs.get("priority", None)
    if priority is None:
        priority = _get_priority_for_group(severity, group_creation_kwargs)
    group_creation_kwargs["priority"] = priority
    group_data["metadata"]["initial_priority"] = priority
    group_creation_kwargs["data"] = group_data
    # Set initial times_seen
    group_creation_kwargs["times_seen"] = 1
    # If the project is in the allowlist, use the client sample rate to weight the times_seen
    if project.id in options.get("issues.client_error_sampling.project_allowlist"):
        group_creation_kwargs["times_seen"] = _get_error_weighted_times_seen(event)
    try:
        with transaction.atomic(router.db_for_write(Group)):
            # This is the 99.999% path. The rest of the function is all to handle a very rare and
            # very confounding bug which keeps projects from creating new groups.
            group = Group.objects.create(
                project=project,
                short_id=short_id,
                **group_creation_kwargs,
            )
    # Attempt to handle The Mysterious Case of the Stuck Project Counter
    except IntegrityError as err:
        if not _is_stuck_counter_error(err, project, short_id):
            raise
        # Note: There is a potential race condition here, if two events simultaneously try to fix
        # the counter. Our hunch is that the only effect of that would be to over-increment, which
        # shouldn't cause any problems. Nonetheless, if we run into trouble with this workaround,
        # that's one thing to further investigate.
        new_short_id = _handle_stuck_project_counter(project, short_id)
        # Now that we've theoretically unstuck the counter, try again to create the group
        try:
            with transaction.atomic(router.db_for_write(Group)):
                group = Group.objects.create(
                    project=project,
                    short_id=new_short_id,
                    **group_creation_kwargs,
                )
        except Exception:
            # Maybe the stuck counter was hiding some other error
            logger.exception("Error after unsticking project counter")
            raise
    create_open_period(group=group, start_time=group.first_seen)
    return group
def _get_error_weighted_times_seen(event: BaseEvent) -> int:
if event.get_event_type() in ("error", "default"):
error_sample_rate = event.data.get("sample_rate")
if error_sample_rate is not None and error_sample_rate > 0:
return int(1 / error_sample_rate)
return 1
def _is_stuck_counter_error(err: Exception, project: Project, short_id: int) -> bool:
    """Decide if this is `UniqueViolation` error on the `Group` table's project and short id values."""
    # Match either the detail line with our exact (project_id, short_id) pair or
    # the constraint name, since the message shape varies by postgres version.
    return isinstance(err.__cause__, psycopg2.errors.UniqueViolation) and any(
        s in err.args[0]
        for s in (
            f"Key (project_id, short_id)=({project.id}, {short_id}) already exists.",
            'duplicate key value violates unique constraint "sentry_groupedmessage_project_id_short_id',
        )
    )
def _handle_stuck_project_counter(project: Project, current_short_id: int) -> int:
    """
    Sometimes, for reasons unknown, a project's `Counter` value falls behind its latest group `short_id` value.
    When that happens, that incorrect counter value leads us to try to create groups with `short_id`s which
    are already taken.
    This handles that case by updating the counter's value to the latest group `short_id`, and then returns
    the new value.
    """
    new_short_id = current_short_id
    # Ordinarily running max on this many rows would be prohibitively expensive, but a) this is
    # a very rare case (< 20 ever that we know of), and b) project and short id are indexed
    # together in order to enforce the unique constraint which got us here in the first place,
    # so it's faster than it otherwise might be. We can time it just in case, though.
    with metrics.timer("stuck_project.max_short_id_query"):
        max_short_id_for_project = Group.objects.filter(project_id=project.id).aggregate(
            Max("short_id")
        )["short_id__max"]
    # NOTE(review): if the project had no groups at all, the aggregate above
    # would be None and the addition below would raise — presumably a stuck
    # counter implies at least one group exists; confirm.
    # Add 1 because we're trying to mimic a value which would already have been incremented
    correct_value = max_short_id_for_project + 1
    if current_short_id < correct_value:
        difference = correct_value - current_short_id
        # `_get_next_short_id` corrects the `Counter` value before it returns the new short_id
        new_short_id = _get_next_short_id(project, delta=difference)
        logger.info(
            "Fixed stuck counter value.", extra={"project": project.id, "difference": difference}
        )
        metrics.incr(
            "stuck_project.fixed_counter", tags={"difference": difference}, sample_rate=1.0
        )
    return new_short_id
def _get_next_short_id(project: Project, delta: int = 1) -> int:
    """Advance the project's counter by `delta` and return the new short id,
    converting a database timeout into a HashDiscarded so the event is dropped
    rather than failing ingestion with an opaque error."""
    try:
        short_id = project.next_short_id(delta=delta)
    except OperationalError:
        metrics.incr("next_short_id.timeout")
        sentry_sdk.capture_message("short_id.timeout")
        raise HashDiscarded("Timeout when getting next_short_id", reason="timeout")
    return short_id
def _handle_regression(group: Group, event: BaseEvent, release: Release | None) -> bool | None:
    """Detect and record a regression (a resolved group receiving new events).

    Returns None when the group isn't resolved or the event doesn't qualify as
    a regression, otherwise a bool indicating whether *this* call actually
    transitioned the group back to UNRESOLVED (False means another event raced
    us to the row update).
    """
    if not group.is_resolved():
        return None
    # we only mark it as a regression if the event's release is newer than
    # the release which we originally marked this as resolved
    elif GroupResolution.has_resolution(group, release):
        return None
    elif has_pending_commit_resolution(group):
        return None
    if not plugin_is_regression(group, event):
        return None
    # we now think its a regression, rely on the database to validate that
    # no one beat us to this
    date = max(event.datetime, group.last_seen)
    is_regression = bool(
        Group.objects.filter(
            id=group.id,
            # ensure we can't update things if the status has been set to
            # ignored
            status__in=[GroupStatus.RESOLVED, GroupStatus.UNRESOLVED],
        )
        .exclude(
            # add to the regression window to account for races here
            active_at__gte=date
            - timedelta(seconds=5)
        )
        .update(
            active_at=date,
            # explicitly set last_seen here as ``is_resolved()`` looks
            # at the value
            last_seen=date,
            status=GroupStatus.UNRESOLVED,
            substatus=GroupSubStatus.REGRESSED,
        )
    )
    # Keep the in-memory instance in sync with the row update above.
    group.active_at = date
    group.status = GroupStatus.UNRESOLVED
    group.substatus = GroupSubStatus.REGRESSED
    # groups may have been updated already from a separate event that groups to the same group
    # only fire these signals the first time the row was actually updated
    if is_regression:
        issue_unresolved.send_robust(
            project=group.project,
            user=None,
            group=group,
            transition_type="automatic",
            sender="handle_regression",
        )
        if not options.get("groups.enable-post-update-signal"):
            post_save.send_robust(
                sender=Group,
                instance=group,
                created=False,
                update_fields=["last_seen", "active_at", "status", "substatus"],
            )
    follows_semver = False
    resolved_in_activity = None
    if is_regression and release:
        resolution = None
        # resolutions are only valid if the state of the group is still
        # resolved -- if it were to change the resolution should get removed
        try:
            resolution = GroupResolution.objects.get(group=group)
        except GroupResolution.DoesNotExist:
            affected = False
        else:
            cursor = connection.cursor()
            # delete() API does not return affected rows
            cursor.execute("DELETE FROM sentry_groupresolution WHERE id = %s", [resolution.id])
            affected = cursor.rowcount > 0
        if affected and resolution:
            # if we had to remove the GroupResolution (i.e. we beat the
            # the queue to handling this) then we need to also record
            # the corresponding event
            try:
                resolved_in_activity = Activity.objects.filter(
                    group=group,
                    type=ActivityType.SET_RESOLVED_IN_RELEASE.value,
                    ident=resolution.id,
                ).order_by("-datetime")[0]
            except IndexError:
                # XXX: handle missing data, as its not overly important
                pass
            else:
                try:
                    # We should only update last activity version prior to the regression in the
                    # case where we have "Resolved in upcoming release" i.e. version == ""
                    # We also should not override the `data` attribute here because it might have
                    # a `current_release_version` for semver releases and we wouldn't want to
                    # lose that
                    if resolved_in_activity.data["version"] == "":
                        resolved_in_activity.update(
                            data={**resolved_in_activity.data, "version": release.version}
                        )
                except KeyError:
                    # Safeguard in case there is no "version" key. However, should not happen
                    resolved_in_activity.update(data={"version": release.version})
        # Record how we compared the two releases
        follows_semver = follows_semver_versioning_scheme(
            project_id=group.project.id,
            org_id=group.organization.id,
            release_version=release.version,
        )
    if is_regression:
        activity_data: dict[str, str | bool] = {
            "event_id": event.event_id,
            "version": release.version if release else "",
        }
        if resolved_in_activity and release:
            activity_data.update(
                {
                    "follows_semver": follows_semver,
                    "resolved_in_version": resolved_in_activity.data.get(
                        "version", release.version
                    ),
                }
            )
        activity = Activity.objects.create_group_activity(
            group,
            ActivityType.SET_REGRESSION,
            data=activity_data,
        )
        record_group_history(group, GroupHistoryStatus.REGRESSED, actor=None, release=release)
        kick_off_status_syncs.apply_async(
            kwargs={"project_id": group.project_id, "group_id": group.id}
        )
        create_open_period(group, activity.datetime)
    return is_regression
def _is_placeholder_title(title):
    """True if `title` is one of the known placeholder titles (e.g. `<untitled>`)."""
    return title in PLACEHOLDER_EVENT_TITLES
def _is_real_title(title):
    """True if `title` is non-empty and not a known placeholder title."""
    return bool(title) and title not in PLACEHOLDER_EVENT_TITLES
def _get_updated_group_title(existing_container, incoming_container):
    """
    Given either `group.data` or `group.data["metadata"]`, in both existing and incoming forms,
    choose the title to use when updating the group.

    The incoming title (or `None` if there isn't one) wins, except when a placeholder title
    (`<unlabeled event>`, `<untitled>`, etc.) would replace a non-placeholder one (either `None`
    or a real title) — then the existing value is kept.

    This guard stems from an incident in which error events were interpreted as default-type
    events, overwriting good titles with placeholders and inserting placeholder titles where no
    title should exist at all. (The latter matters because default-type and error-type events
    differ in where they carry a `title` attribute, and we rely on both its presence and its
    absence.) It also cleans up errant placeholder titles by letting them be overwritten with
    `None`.
    """
    existing_title = existing_container.get("title")
    incoming_title = incoming_container.get("title")
    # A real incoming title always wins; a placeholder existing title always
    # loses (even to a missing incoming title, which clears it).
    if _is_real_title(incoming_title) or _is_placeholder_title(existing_title):
        return incoming_title
    return existing_title
def _process_existing_aggregate(
    group: Group,
    event: BaseEvent,
    incoming_group_values: Mapping[str, Any],
    release: Release | None,
) -> bool:
    """
    Update an existing group with values from a new event belonging to it.

    Refreshes `last_seen`/`first_seen`, `message`, `level`, `culprit`, and the
    merged `data`/`data["metadata"]`, runs regression handling, and buffers the
    column updates together with a `times_seen` increment.

    Returns whether the event is a regression of the group.
    """
    last_seen = max(event.datetime, group.last_seen)
    updated_group_values: dict[str, Any] = {"last_seen": last_seen}
    # Unclear why this is necessary, given that it's also in `updated_group_values`, but removing
    # it causes unrelated tests to fail. Hard to say if that's the tests or the removal, though.
    group.last_seen = updated_group_values["last_seen"]
    # Only adopt the event's message when it is meaningful: non-empty, actually
    # different, not a placeholder, and not from a transaction event.
    if (
        event.search_message
        and event.search_message != group.message
        and not _is_placeholder_title(event.search_message)
        and event.get_event_type() != TransactionEvent.key
    ):
        updated_group_values["message"] = event.search_message
    if group.level != incoming_group_values["level"]:
        updated_group_values["level"] = incoming_group_values["level"]
    if group.culprit != incoming_group_values["culprit"]:
        updated_group_values["culprit"] = incoming_group_values["culprit"]
    # If the new event has a timestamp earlier than our current `first_seen` value (which can
    # happen, for example, because of misaligned internal clocks on two different host machines
    # or because of race conditions) then we want to use the current event's time
    if group.first_seen > event.datetime:
        updated_group_values["first_seen"] = event.datetime
    is_regression = _handle_regression(group, event, release)
    existing_data = group.data
    existing_metadata = group.data.get("metadata", {})
    incoming_data = incoming_group_values["data"]
    incoming_metadata = incoming_group_values["data"].get("metadata", {})
    # Merge old and new data/metadata, keeping the existing title if the incoming title is a
    # placeholder (`<unlabeled event>`, `<untitled>`, etc.) and the existing one isn't. See
    # `_get_updated_group_title` docstring.
    updated_group_values["data"] = {
        **existing_data,
        **incoming_data,
        "title": _get_updated_group_title(existing_data, incoming_data),
    }
    updated_group_values["data"]["metadata"] = {
        **existing_metadata,
        **incoming_metadata,
        "title": _get_updated_group_title(existing_metadata, incoming_metadata),
    }
    initial_priority = updated_group_values["data"]["metadata"].get("initial_priority")
    if initial_priority is not None:
        # cast to an int, as we don't want to pickle enums into task args.
        updated_group_values["data"]["metadata"]["initial_priority"] = int(initial_priority)
    # We pass `times_seen` separately from all of the other columns so that `buffer_incr` knows to
    # increment rather than overwrite the existing value
    times_seen = 1
    if group.project_id in options.get("issues.client_error_sampling.project_allowlist"):
        times_seen = _get_error_weighted_times_seen(event)
    buffer_incr(Group, {"times_seen": times_seen}, {"id": group.id}, updated_group_values)
    return bool(is_regression)
# Shared urllib3 connection pool for calls to the Seer severity service.
severity_connection_pool = connection_from_url(
    settings.SEER_GROUPING_URL,
    retries=settings.SEER_SEVERITY_RETRIES,
    timeout=settings.SEER_SEVERITY_TIMEOUT,  # Defaults to 300 milliseconds
)
def _get_severity_metadata_for_group(
    event: Event, project_id: int, group_type: int | None
) -> Mapping[str, Any]:
    """
    Returns severity metadata for an event if all of the following are true
    - the feature flag is enabled
    - the event platform supports severity
    - the event group type is an error

    Returns {} if conditions aren't met or on exception.

    :param event: The event to score.
    :param project_id: Project the event belongs to (used for logging/metrics).
    :param group_type: Occurrence group type id, or `None` (treated as an error).
    """
    from sentry.receivers.rules import PLATFORMS_WITH_PRIORITY_ALERTS

    # Operational killswitch: skip all Seer requests for this project.
    if killswitch_matches_context(
        "issues.severity.skip-seer-requests", {"project_id": event.project_id}
    ):
        logger.warning(
            "get_severity_metadata_for_group.seer_killswitch_enabled",
            extra={"event_id": event.event_id, "project_id": project_id},
        )
        metrics.incr("issues.severity.seer_killswitch_enabled")
        return {}
    seer_based_priority_enabled = features.has(
        "organizations:seer-based-priority", event.project.organization, actor=None
    )
    if not seer_based_priority_enabled:
        return {}
    feature_enabled = features.has("projects:first-event-severity-calculation", event.project)
    if not feature_enabled:
        return {}
    is_supported_platform = (
        any(event.platform.startswith(platform) for platform in PLATFORMS_WITH_PRIORITY_ALERTS)
        if event.platform
        else False
    )
    if not is_supported_platform:
        return {}
    # NOTE(review): a `group_type` of 0 is falsy and therefore treated like
    # `None` (assumed to be an error) — confirm 0 is never a valid type id.
    is_error_group = group_type == ErrorGroupType.type_id if group_type else True
    if not is_error_group:
        return {}
    # Circuit breaker: back off when the Seer service has been erroring.
    passthrough_data = options.get(
        "issues.severity.seer-circuit-breaker-passthrough-limit",
        CircuitBreakerPassthrough(limit=1, window=10),
    )
    if circuit_breaker_activated("sentry.seer.severity", passthrough_data=passthrough_data):
        logger.warning(
            "get_severity_metadata_for_group.circuit_breaker_activated",
            extra={"event_id": event.event_id, "project_id": project_id},
        )
        return {}
    from sentry import ratelimits as ratelimiter

    # Global rate limit across all projects.
    ratelimit = options.get("issues.severity.seer-global-rate-limit")
    # This is temporary until we update the option values to be a dict
    if "limit" not in ratelimit or "window" not in ratelimit:
        return {}
    if ratelimiter.backend.is_limited(
        "seer:severity-calculation:global-limit",
        limit=ratelimit["limit"],
        window=ratelimit["window"],
    ):
        logger.warning(
            "get_severity_metadata_for_group.rate_limited_globally",
            extra={"event_id": event.event_id, "project_id": project_id},
        )
        metrics.incr("issues.severity.rate_limited_globally")
        return {}
    # Per-project rate limit.
    ratelimit = options.get("issues.severity.seer-project-rate-limit")
    # This is temporary until we update the option values to be a dict
    if "limit" not in ratelimit or "window" not in ratelimit:
        return {}
    if ratelimiter.backend.is_limited(
        f"seer:severity-calculation:{project_id}",
        limit=ratelimit["limit"],
        window=ratelimit["window"],
    ):
        logger.warning(
            "get_severity_metadata_for_group.rate_limited_for_project",
            extra={"event_id": event.event_id, "project_id": project_id},
        )
        metrics.incr("issues.severity.rate_limited_for_project", tags={"project_id": project_id})
        return {}
    try:
        severity, reason = _get_severity_score(event)
        return {
            "severity": severity,
            "severity_reason": reason,
        }
    except Exception as e:
        # Fix: the message had no `%s` placeholder, so passing `repr(e)` as a
        # lazy format argument triggered an internal logging formatting error.
        logger.warning("Failed to calculate severity score for group: %s", repr(e))
        update_severity_error_count()
        metrics.incr("issues.severity.error")
        return {}
def _get_priority_for_group(severity: Mapping[str, Any], kwargs: Mapping[str, Any]) -> int:
    """
    Returns priority for an event based on severity score and log level.

    :param severity: Mapping that may contain a float "severity" score.
    :param kwargs: Group creation kwargs; "level" is expected to be a numeric
        `logging` level (e.g. `logging.ERROR`).
    :returns: A `PriorityLevel` value; defaults to MEDIUM for unknown levels
        or on any unexpected error.
    """
    try:
        level = kwargs.get("level", None)
        severity_score = severity.get("severity", None)
        if level in [logging.INFO, logging.DEBUG]:
            return PriorityLevel.LOW
        elif level == logging.FATAL:
            return PriorityLevel.HIGH
        elif level == logging.WARNING:
            if severity_score is None or severity_score < HIGH_SEVERITY_THRESHOLD:
                return PriorityLevel.MEDIUM
            return PriorityLevel.HIGH  # severity_score >= HIGH_SEVERITY_THRESHOLD
        elif level == logging.ERROR:
            if severity_score is None or severity_score >= HIGH_SEVERITY_THRESHOLD:
                return PriorityLevel.HIGH
            return PriorityLevel.MEDIUM  # severity_score < HIGH_SEVERITY_THRESHOLD
        logger.warning("Unknown log level %s or severity score %s", level, severity_score)
        return PriorityLevel.MEDIUM
    except Exception as e:
        # Fix: the message had no `%s` placeholder, so `repr(e)` was passed as
        # an unused lazy format argument, causing an internal logging
        # formatting error ("not all arguments converted").
        logger.exception(
            "Failed to calculate priority for group: %s",
            repr(e),
            extra={
                "severity": severity,
                "kwargs": kwargs,
            },
        )
        return PriorityLevel.MEDIUM
def update_severity_error_count(reset=False) -> None:
    """Increment (or, with `reset=True`, zero out) the cached Seer severity error count."""
    one_hour = 60 * 60
    if reset:
        cache.set(SEER_ERROR_COUNT_KEY, 0, timeout=one_hour)
    else:
        try:
            # `incr` raises ValueError when the key doesn't exist yet.
            cache.incr(SEER_ERROR_COUNT_KEY)
            cache.touch(SEER_ERROR_COUNT_KEY, timeout=one_hour)
        except ValueError:
            cache.set(SEER_ERROR_COUNT_KEY, 1, timeout=one_hour)
def _get_severity_score(event: Event) -> tuple[float, str]:
    """
    Compute a severity score for `event` via the Seer severity microservice.

    Fatal events short-circuit to 1.0 and info/debug events to 0.0 without
    calling the service. On microservice failure the fallback severity of 1.0
    is returned with a reason describing the failure mode.
    """
    # Short circuit the severity value if we know the event is fatal or info/debug
    level = str(event.data.get("level", "error"))
    if LOG_LEVELS_MAP[level] == logging.FATAL:
        return 1.0, "log_level_fatal"
    if LOG_LEVELS_MAP[level] <= logging.INFO:
        return 0.0, "log_level_info"
    op = "event_manager._get_severity_score"
    logger_data = {"event_id": event.data["event_id"], "op": op}
    # Fallback severity if the microservice call fails below.
    severity = 1.0
    reason = None
    event_type = get_event_type(event.data)
    metadata = event_type.get_metadata(event.data)
    exception_type = metadata.get("type")
    exception_value = metadata.get("value")
    if exception_type:
        title = exception_type
        if exception_value:
            title += f": {exception_value}"
        # We truncate the title to 128 characters as any more than that is unlikely to be helpful
        # and would slow down the model.
        title = trim(title, 128)
    else:
        # Fall back to using just the title for events without an exception.
        title = event.title
    # If all we have is `<unlabeled event>` (or one of its equally unhelpful friends), bail
    if title in PLACEHOLDER_EVENT_TITLES:
        logger_data.update({"event_type": event_type.key, "title": title})
        logger.warning(
            "Unable to get severity score because of unusable `message` value '%s'",
            title,
            extra=logger_data,
        )
        return 0.0, "bad_title"
    payload = {
        "message": title,
        "has_stacktrace": int(has_stacktrace(event.data)),
        "handled": is_handled(event.data),
    }
    # Test hooks that make the microservice simulate a timeout or an error.
    if options.get("processing.severity-backlog-test.timeout"):
        payload["trigger_timeout"] = True
    if options.get("processing.severity-backlog-test.error"):
        payload["trigger_error"] = True
    logger_data["payload"] = payload
    with sentry_sdk.start_span(op=op):
        try:
            with metrics.timer(op):
                timeout = options.get(
                    "issues.severity.seer-timeout",
                    settings.SEER_SEVERITY_TIMEOUT,
                )
                response = make_signed_seer_api_request(
                    severity_connection_pool,
                    "/v0/issues/severity-score",
                    body=orjson.dumps(payload),
                    timeout=timeout,
                )
            # NOTE(review): `.get("severity")` may yield None if the service
            # response lacks the key, despite the `tuple[float, str]` annotation
            # — confirm the service contract guarantees the field.
            severity = orjson.loads(response.data).get("severity")
            reason = "ml"
        except MaxRetryError:
            reason = "microservice_max_retry"
            update_severity_error_count()
            metrics.incr("issues.severity.error", tags={"reason": "max_retries"})
            logger.exception("Seer severity microservice max retries exceeded")
        except TimeoutError:
            reason = "microservice_timeout"
            update_severity_error_count()
            metrics.incr("issues.severity.error", tags={"reason": "timeout"})
            logger.exception("Seer severity microservice timeout")
        except Exception:
            reason = "microservice_error"
            update_severity_error_count()
            metrics.incr("issues.severity.error", tags={"reason": "unknown"})
            logger.exception("Seer severity microservice error")
            sentry_sdk.capture_exception()
        else:
            # Success resets the consecutive-error counter.
            update_severity_error_count(reset=True)
    return severity, reason
# Alias used throughout the attachment-handling helpers below.
Attachment = CachedAttachment
@sentry_sdk.tracing.trace
def discard_event(job: Job, attachments: Sequence[Attachment]) -> None:
    """
    Refunds consumed quotas for an event and its attachments.

    For the event and each dropped attachment, an outcome
    FILTERED(discarded-hash) is emitted.

    :param job: The job context container.
    :param attachments: The full list of attachments to filter.
    """
    project = job["event"].project
    # Refund the event itself (quantity 1 in its category)...
    quotas.backend.refund(
        project,
        key=job["project_key"],
        timestamp=job["start_time"],
        category=job["category"],
        quantity=1,
    )
    # ...and emit the corresponding FILTERED outcome.
    track_outcome(
        org_id=project.organization_id,
        project_id=job["project_id"],
        key_id=job["key_id"],
        outcome=Outcome.FILTERED,
        reason=FilterStatKeys.DISCARDED_HASH,
        timestamp=to_datetime(job["start_time"]),
        event_id=job["event"].event_id,
        category=job["category"],
    )
    attachment_quantity = 0
    for attachment in attachments:
        # Quotas are counted with at least ``1`` for attachments.
        attachment_quantity += attachment.size or 1
        # NOTE(review): the outcome below reports `attachment.size` without the
        # `or 1` floor used for the refund — confirm this asymmetry is intended.
        track_outcome(
            org_id=project.organization_id,
            project_id=job["project_id"],
            key_id=job["key_id"],
            outcome=Outcome.FILTERED,
            reason=FilterStatKeys.DISCARDED_HASH,
            timestamp=to_datetime(job["start_time"]),
            event_id=job["event"].event_id,
            category=DataCategory.ATTACHMENT,
            quantity=attachment.size,
        )
    # Refund all attachment bytes in one call.
    if attachment_quantity:
        quotas.backend.refund(
            project,
            key=job["project_key"],
            timestamp=job["start_time"],
            category=DataCategory.ATTACHMENT,
            quantity=attachment_quantity,
        )
    metrics.incr(
        "events.discarded",
        skip_internal=True,
        tags={
            "platform": job["platform"],
            "sdk": normalized_sdk_tag_from_event(job["event"].data),
        },
    )
@sentry_sdk.tracing.trace
def filter_attachments_for_group(attachments: list[Attachment], job: Job) -> list[Attachment]:
    """
    Removes crash reports exceeding the group-limit.

    If the project or organization is configured to limit the amount of crash
    reports per group, the number of stored crashes is limited. This requires
    `event.group` to be set.

    Emits one outcome per removed attachment.

    :param attachments: The full list of attachments to filter.
    :param job: The job context container.
    :returns: The attachments that should be kept.
    """
    event = job["event"]
    project = event.project
    # The setting is both an organization and project setting. The project
    # setting strictly overrides the organization setting, unless set to the
    # default.
    max_crashreports = get_max_crashreports(project, allow_none=True)
    if max_crashreports is None:
        max_crashreports = get_max_crashreports(project.organization)
    # The number of crash reports is cached per group
    crashreports_key = get_crashreport_key(event.group_id)
    # Only fetch the number of stored crash reports if there is a crash report
    # in the list of attachments. Otherwise, we won't require this number.
    if any(attachment.type in CRASH_REPORT_TYPES for attachment in attachments):
        cached_reports = get_stored_crashreports(crashreports_key, event, max_crashreports)
    else:
        cached_reports = 0
    stored_reports = cached_reports
    filtered = []
    refund_quantity = 0
    for attachment in attachments:
        # If the attachment is a crash report (e.g. minidump), we need to honor
        # the store_crash_reports setting. Otherwise, we assume that the client
        # has already verified PII and just store the attachment.
        if attachment.type in CRASH_REPORT_TYPES:
            if crashreports_exceeded(stored_reports, max_crashreports):
                # Indicate that the crash report has been removed due to a limit
                # on the maximum number of crash reports. If this flag is True,
                # it indicates that there are *other* events in the same group
                # that store a crash report. This flag will therefore *not* be
                # set if storage of crash reports is completely disabled.
                if max_crashreports > 0:
                    job["data"]["metadata"]["stripped_crash"] = True
                track_outcome(
                    org_id=event.project.organization_id,
                    project_id=job["project_id"],
                    key_id=job["key_id"],
                    outcome=Outcome.FILTERED,
                    reason=FilterStatKeys.CRASH_REPORT_LIMIT,
                    timestamp=to_datetime(job["start_time"]),
                    event_id=event.event_id,
                    category=DataCategory.ATTACHMENT,
                    quantity=attachment.size,
                )
                # Quotas are counted with at least ``1`` for attachments.
                refund_quantity += attachment.size or 1
                # this instructs the attachment to be removed from storage:
                attachment.rate_limited = True
                continue
            stored_reports += 1
        filtered.append(attachment)
    # Check if we have exceeded the stored crash reports count. If so, we
    # persist the current maximum (not the actual number!) into the cache. Next
    # time when loading from the cache, we will validate that this number has
    # not changed, or otherwise re-fetch from the database.
    if crashreports_exceeded(stored_reports, max_crashreports) and stored_reports > cached_reports:
        cache.set(crashreports_key, max_crashreports, CRASH_REPORT_TIMEOUT)
    # Refund the quota consumed by all dropped attachments in one call.
    if refund_quantity:
        quotas.backend.refund(
            project,
            key=job["project_key"],
            timestamp=job["start_time"],
            category=DataCategory.ATTACHMENT,
            quantity=refund_quantity,
        )
    return filtered
@sentry_sdk.tracing.trace
def save_attachment(
    cache_key: str | None,
    attachment: Attachment,
    project: Project,
    event_id: str,
    key_id: int | None = None,
    group_id: int | None = None,
    start_time: float | None = None,
) -> None:
    """
    Persists a cached event attachments into the file store.

    Emits one outcome: ACCEPTED on success, INVALID(missing_chunks) if
    retrieving the attachment data fails, or RATE_LIMITED if the project
    exceeds the per-second / per-5-minute save limits.

    :param cache_key: The cache key at which the attachment is stored for
                      debugging purposes.
    :param attachment: The ``CachedAttachment`` instance to store.
    :param project: The project model that this attachment belongs to.
    :param event_id: Identifier of the event that this attachment belongs to.
                     The event does not have to be stored yet.
    :param key_id: Optional identifier of the DSN that was used to ingest
                   the attachment.
    :param group_id: Optional group identifier for the event. May be empty if
                     the event has not been stored yet, or if it is not
                     grouped.
    :param start_time: UNIX Timestamp (float) when the attachment was ingested.
                       If missing, the current time is used.
    """
    if start_time is not None:
        timestamp = to_datetime(start_time)
    else:
        timestamp = datetime.now(timezone.utc)
    try:
        # Already-stored attachments need no data load; otherwise pull the
        # chunked payload from the attachment cache (may raise below).
        attachment.stored_id or attachment.load_data(project)
    except MissingAttachmentChunks:
        track_outcome(
            org_id=project.organization_id,
            project_id=project.id,
            key_id=key_id,
            outcome=Outcome.INVALID,
            reason="missing_chunks",
            timestamp=timestamp,
            event_id=event_id,
            category=DataCategory.ATTACHMENT,
        )
        logger.exception("Missing chunks for cache_key=%s", cache_key)
        return
    from sentry import ratelimits as ratelimiter

    # Two-tier rate limit: a short burst limit first, then a sustained
    # 5-minute limit only if the burst limit wasn't already hit.
    is_limited, _, _ = ratelimiter.backend.is_limited_with_value(
        key="event_attachment.save_per_sec",
        limit=options.get("sentry.save-event-attachments.project-per-sec-limit"),
        project=project,
        window=1,
    )
    rate_limit_tag = "per_sec"
    if not is_limited:
        is_limited, _, _ = ratelimiter.backend.is_limited_with_value(
            key="event_attachment.save_5_min",
            limit=options.get("sentry.save-event-attachments.project-per-5-minute-limit"),
            project=project,
            window=5 * 60,
        )
        rate_limit_tag = "per_five_min"
    if is_limited:
        metrics.incr(
            "event_manager.attachments.rate_limited", tags={"rate_limit_type": rate_limit_tag}
        )
        track_outcome(
            org_id=project.organization_id,
            project_id=project.id,
            key_id=key_id,
            outcome=Outcome.RATE_LIMITED,
            reason="rate_limited",
            timestamp=timestamp,
            event_id=event_id,
            category=DataCategory.ATTACHMENT,
            quantity=attachment.size or 1,
        )
        return
    file = EventAttachment.putfile(project.id, attachment)
    EventAttachment.objects.create(
        # lookup:
        project_id=project.id,
        group_id=group_id,
        event_id=event_id,
        # metadata:
        type=attachment.type,
        name=attachment.name,
        content_type=file.content_type,
        size=file.size,
        sha1=file.sha1,
        # storage:
        blob_path=file.blob_path,
    )
    track_outcome(
        org_id=project.organization_id,
        project_id=project.id,
        key_id=key_id,
        outcome=Outcome.ACCEPTED,
        reason=None,
        timestamp=timestamp,
        event_id=event_id,
        category=DataCategory.ATTACHMENT,
        quantity=attachment.size or 1,
    )
def save_attachments(cache_key: str | None, attachments: list[Attachment], job: Job) -> None:
    """
    Persists cached event attachments into the file store.

    Emits one outcome per attachment, either ACCEPTED on success or
    INVALID(missing_chunks) if retrieving the attachment fails.

    :param cache_key: The cache key at which the attachment is stored for
                      debugging purposes.
    :param attachments: A filtered list of attachments to save.
    :param job: The job context container.
    """
    event = job["event"]
    for cached_attachment in attachments:
        save_attachment(
            cache_key,
            cached_attachment,
            event.project,
            event.event_id,
            key_id=job["key_id"],
            group_id=event.group_id,
            start_time=job["start_time"],
        )
@sentry_sdk.tracing.trace
def _materialize_event_metrics(jobs: Sequence[Job]) -> None:
    """Ensure each job's event has a `_metrics` dict, record its stored size, and
    bump counters for any processing-error flags present."""
    for job in jobs:
        # Ensure the _metrics key exists. This is usually created during
        # ingestion and prefilled with ingestion sizes.
        event_metrics = job["event"].data.get("_metrics") or {}
        job["event"].data["_metrics"] = event_metrics
        # Capture the actual size that goes into node store.
        event_metrics["bytes.stored.event"] = len(
            orjson.dumps(dict(job["event"].data.items())).decode()
        )
        for metric_name in ("flag.processing.error", "flag.processing.fatal"):
            if event_metrics.get(metric_name):
                metrics.incr(f"event_manager.save.event_metrics.{metric_name}")
        job["event_metrics"] = event_metrics
@sentry_sdk.tracing.trace
def _calculate_span_grouping(jobs: Sequence[Job], projects: ProjectsMapping) -> None:
    """Compute span groupings for each transaction job, write them onto the
    event data, and record span-count metrics. Failures are captured, never
    raised."""
    for job in jobs:
        # Make sure this snippet doesn't crash ingestion
        # as the feature is under development.
        try:
            event = job["event"]
            groupings = event.get_span_groupings()
            groupings.write_to_event(event.data)
            metrics.distribution("save_event.transaction.span_count", len(groupings.results))
            unique_default_hashes = set(groupings.results.values())
            metrics.incr(
                "save_event.transaction.span_group_count.default",
                amount=len(unique_default_hashes),
                tags={
                    "platform": job["platform"] or "unknown",
                    "sdk": normalized_sdk_tag_from_event(event.data),
                },
            )
        except Exception:
            sentry_sdk.capture_exception()
@sentry_sdk.tracing.trace
def _detect_performance_problems(jobs: Sequence[Job], projects: ProjectsMapping) -> None:
    """Populate `job["performance_problems"]` for each job; detection is skipped
    (empty list) when the event already carries standalone performance-issue spans."""
    for job in jobs:
        data = job["data"]
        if data.get("_performance_issues_spans"):
            problems = []
        else:
            problems = detect_performance_problems(data, projects[job["project_id"]])
        job["performance_problems"] = problems
# Maps each insight module to the project flag set when the first span for
# that module is received (see `_record_transaction_info`).
INSIGHT_MODULE_TO_PROJECT_FLAG_NAME: dict[InsightModules, str] = {
    InsightModules.HTTP: "has_insights_http",
    InsightModules.DB: "has_insights_db",
    InsightModules.ASSETS: "has_insights_assets",
    InsightModules.APP_START: "has_insights_app_start",
    InsightModules.SCREEN_LOAD: "has_insights_screen_load",
    InsightModules.VITAL: "has_insights_vitals",
    InsightModules.CACHE: "has_insights_caches",
    InsightModules.QUEUE: "has_insights_queues",
    InsightModules.AGENTS: "has_insights_agent_monitoring",
    InsightModules.MCP: "has_insights_mcp",
}
@sentry_sdk.tracing.trace
def _record_transaction_info(
    jobs: Sequence[Job], projects: ProjectsMapping, skip_send_first_transaction: bool
) -> None:
    """Record per-transaction bookkeeping: clustering data, first-transaction and
    first-insight-span project flags/signals, and release info. Failures are
    captured, never raised."""
    for job in jobs:
        try:
            event = job["event"]
            project = event.project
            with sentry_sdk.start_span(op="event_manager.record_transaction_name_for_clustering"):
                record_transaction_name_for_clustering(project, event.data)
            record_event_processed(project, event)
            if not skip_send_first_transaction:
                set_project_flag_and_signal(
                    project,
                    "has_transactions",
                    first_transaction_received,
                    event=event,
                )
            spans = [FilterSpan.from_span_v1(span) for span in job["data"]["spans"]]
            for module in insights_modules(spans):
                set_project_flag_and_signal(
                    project,
                    INSIGHT_MODULE_TO_PROJECT_FLAG_NAME[module],
                    first_insight_span_received,
                    module=module,
                )
            if job["release"]:
                environment = job["data"].get("environment") or None  # coerce "" to None
                record_latest_release(project, job["release"], environment)
                record_release_received(project, job["release"].version)
        except Exception:
            sentry_sdk.capture_exception()
| EventManager |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/reduction_ops_test.py | {
"start": 39383,
"end": 41134
} | class ____(test.TestCase):
def _compare(self, x, reduction_axes, keepdims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keepdims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keepdims)
with self.cached_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = math_ops.reduce_all(x, reduction_axes, keepdims)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.session():
v = math_ops.reduce_all([True, True],
constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, True)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
| AllReductionTest |
python | lepture__authlib | tests/flask/test_oauth2/test_code_challenge.py | {
"start": 548,
"end": 824
} | class ____(CodeGrantMixin, grants.AuthorizationCodeGrant):
TOKEN_ENDPOINT_AUTH_METHODS = ["client_secret_basic", "client_secret_post", "none"]
def save_authorization_code(self, code, request):
return save_authorization_code(code, request)
| AuthorizationCodeGrant |
python | openai__openai-python | src/openai/types/model_deleted.py | {
"start": 149,
"end": 228
} | class ____(BaseModel):
id: str
deleted: bool
object: str
| ModelDeleted |
python | tensorflow__tensorflow | tensorflow/python/ops/variables.py | {
"start": 5160,
"end": 6850
} | class ____(enum.Enum):
NONE = 0
SUM = 1
MEAN = 2
ONLY_FIRST_REPLICA = 3
ONLY_FIRST_TOWER = 3 # DEPRECATED
def __hash__(self):
return hash(self.value)
# LINT.ThenChange(//tensorflow/core/framework/variable.proto)
#
# Note that we are currently relying on the integer values of the Python enums
# matching the integer values of the proto enums.
if VariableAggregationV2.__doc__ is not None:
VariableAggregation.__doc__ = (
VariableAggregationV2.__doc__
+ "* `ONLY_FIRST_TOWER`: Deprecated alias for `ONLY_FIRST_REPLICA`.\n "
)
def validate_synchronization_aggregation_trainable(synchronization, aggregation,
trainable, name):
"""Given user-provided variable properties, sets defaults and validates."""
if aggregation is None:
aggregation = VariableAggregation.NONE
else:
if not isinstance(aggregation,
(VariableAggregation, VariableAggregationV2)):
try:
aggregation = VariableAggregationV2(aggregation)
except ValueError:
raise ValueError(
"Invalid variable aggregation mode: {} for variable: {}".format(
aggregation, name))
if synchronization is None:
synchronization = VariableSynchronization.AUTO
else:
try:
synchronization = VariableSynchronization(synchronization)
except ValueError:
raise ValueError(
"Invalid variable synchronization mode: {} for variable: {}".format(
synchronization, name))
if trainable is None:
trainable = synchronization != VariableSynchronization.ON_READ
return synchronization, aggregation, trainable
| VariableAggregation |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/issue_9340.py | {
"start": 434,
"end": 469
} | class ____(DeclarativeBase): ...
| Base |
python | fastai__fastai | fastai/vision/gan.py | {
"start": 5873,
"end": 7133
} | class ____(GANModule):
"Wrapper around `crit_loss_func` and `gen_loss_func`"
def __init__(self,
gen_loss_func:Callable, # Generator loss function
crit_loss_func:Callable, # Critic loss function
gan_model:GANModule # The GAN model
):
super().__init__()
store_attr('gen_loss_func,crit_loss_func,gan_model')
def generator(self,
output, # Generator outputs
target # Real images
):
"Evaluate the `output` with the critic then uses `self.gen_loss_func` to evaluate how well the critic was fooled by `output`"
fake_pred = self.gan_model.critic(output)
self.gen_loss = self.gen_loss_func(fake_pred, output, target)
return self.gen_loss
def critic(self,
real_pred, # Critic predictions for real images
input # Input noise vector to pass into generator
):
"Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.crit_loss_func`."
fake = self.gan_model.generator(input).requires_grad_(False)
fake_pred = self.gan_model.critic(fake)
self.crit_loss = self.crit_loss_func(real_pred, fake_pred)
return self.crit_loss
# %% ../../nbs/24_vision.gan.ipynb 24
| GANLoss |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_subgraph.py | {
"start": 24071,
"end": 29766
} | class ____(torch.nn.Module):
def forward(self, primals_1: "f32[8]"):
partitioned_fw_subgraph_0_0 = self.partitioned_fw_subgraph_0_0
invoke_subgraph_4 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_0_0, 'partitioned_fw_subgraph_0_0', primals_1); partitioned_fw_subgraph_0_0 = None
getitem_7: "b8[8]" = invoke_subgraph_4[2]
getitem_6: "f32[8]" = invoke_subgraph_4[1]
getitem: "f32[8]" = invoke_subgraph_4[0]; invoke_subgraph_4 = None
partitioned_fw_subgraph_1_0 = self.partitioned_fw_subgraph_1_0
invoke_subgraph_6 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_1_0, 'partitioned_fw_subgraph_1_0', primals_1); partitioned_fw_subgraph_1_0 = primals_1 = None
getitem_8: "f32[8]" = invoke_subgraph_6[1]
getitem_1: "f32[8]" = invoke_subgraph_6[0]; invoke_subgraph_6 = None
add: "f32[8]" = torch.ops.aten.add.Tensor(getitem, getitem_1); getitem = getitem_1 = None
return (add, getitem_7, getitem_6, getitem_8)
class partitioned_fw_subgraph_0_0(torch.nn.Module):
def forward(self, primals_0: "f32[8]"):
sin: "f32[8]" = torch.ops.aten.sin.default(primals_0)
inductor_seeds_default: "i64[1]" = torch.ops.prims.inductor_seeds.default(1, device(type='cpu'))
inductor_lookup_seed_default: "i64[]" = torch.ops.prims.inductor_lookup_seed.default(inductor_seeds_default, 0); inductor_seeds_default = None
inductor_random_default: "f32[8]" = torch.ops.prims.inductor_random.default([8], inductor_lookup_seed_default, 'rand'); inductor_lookup_seed_default = None
gt: "b8[8]" = torch.ops.aten.gt.Scalar(inductor_random_default, 0.5); inductor_random_default = None
mul: "f32[8]" = torch.ops.aten.mul.Tensor(gt, sin); sin = None
mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(mul, 2.0); mul = None
return (mul_1, primals_0, gt)
class partitioned_fw_subgraph_1_0(torch.nn.Module):
def forward(self, primals_0: "f32[8]"):
sin: "f32[8]" = torch.ops.aten.sin.default(primals_0)
return (sin, primals_0)
""",
ignore_empty_lines=True,
)
@inductor_config.patch("fx_graph_cache", False)
def test_dropout_checks_joint_graph_inference(self):
# Checks that joint graph results in inductor seeds for just the inference graph
@nested_compile_region
def gn(x):
return torch.nn.functional.dropout(torch.sin(x), p=0.5)
def fn(x):
return gn(x)
backend = InductorAndRecordGraphs()
x = torch.randn(8, requires_grad=False)
torch.compile(fn, backend=backend, fullgraph=True)(x)
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(
backend.inductor_graphs[0].print_readable(print_output=False)
),
"""\
class <lambda>(torch.nn.Module):
def forward(self, arg0_1: "f32[8]"):
repeated_subgraph0 = self.repeated_subgraph0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0, 'subgraph_0', arg0_1); repeated_subgraph0 = arg0_1 = None
getitem: "f32[8]" = invoke_subgraph[0]; invoke_subgraph = None
return (getitem,)
class repeated_subgraph0(torch.nn.Module):
def forward(self, arg0_1: "f32[8]"):
inductor_seeds_default: "i64[1]" = torch.ops.prims.inductor_seeds.default(1, device(type='cpu'))
inductor_lookup_seed_default: "i64[]" = torch.ops.prims.inductor_lookup_seed.default(inductor_seeds_default, 0); inductor_seeds_default = None
inductor_random_default: "f32[8]" = torch.ops.prims.inductor_random.default([8], inductor_lookup_seed_default, 'rand'); inductor_lookup_seed_default = None
gt: "b8[8]" = torch.ops.aten.gt.Scalar(inductor_random_default, 0.5); inductor_random_default = None
sin: "f32[8]" = torch.ops.aten.sin.default(arg0_1); arg0_1 = None
mul: "f32[8]" = torch.ops.aten.mul.Tensor(gt, sin); gt = sin = None
mul_1: "f32[8]" = torch.ops.aten.mul.Tensor(mul, 2.0); mul = None
return (mul_1,)
""",
ignore_empty_lines=True,
)
def test_dedupe(self):
@nested_compile_region
def gn(x, y):
return torch.mul(x, y)
def fn(x, y):
a = gn(x, y)
return gn(a, y)
x = torch.randn(8, requires_grad=True)
y = torch.randn(8, requires_grad=True)
ref = fn(x, y)
x_clone = x.detach().clone().requires_grad_(True)
y_clone = y.detach().clone().requires_grad_(True)
backend = AotEagerAndRecordGraphs()
res = torch.compile(fn, backend=backend, fullgraph=True)(x_clone, y_clone)
# Run backward
ref.sum().backward()
res.sum().backward()
self.assertEqual(ref, res)
self.assertEqual(x.grad, x_clone.grad)
self.assertEqual(y.grad, y_clone.grad)
# Check that the Dynamo and AOT graphs have just one subgraph module
self.assertEqual(len(backend.graphs), 1)
self.assertEqual(len(backend.fw_graphs), 1)
self.assertEqual(len(backend.bw_graphs), 1)
self.count_unique_get_attr_nodes(backend.graphs[0], [], 1)
self.count_unique_get_attr_nodes(backend.fw_graphs[0], [], 1)
self.count_unique_get_attr_nodes(backend.bw_graphs[0], [], 1)
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {
"start": 46391,
"end": 53448
} | class ____(nn.Module):
"""
Convolutional backbone using the modeling_d_fine_resnet.py.
nn.BatchNorm2d layers are replaced by DFineFrozenBatchNorm2d as defined above.
https://github.com/lyuwenyu/RT-DETR/blob/main/DFine_pytorch/src/nn/backbone/presnet.py#L142
"""
def __init__(self, config):
super().__init__()
backbone = load_backbone(config)
if config.freeze_backbone_batch_norms:
# replace batch norm by frozen batch norm
with torch.no_grad():
replace_batch_norm(backbone)
self.model = backbone
self.intermediate_channel_sizes = self.model.channels
def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
# send pixel_values through the model to get list of feature maps
features = self.model(pixel_values).feature_maps
out = []
for feature_map in features:
# downsample pixel_mask to match shape of corresponding feature_map
mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
out.append((feature_map, mask))
return out
def get_contrastive_denoising_training_group(
targets,
num_classes,
num_queries,
class_embed,
num_denoising_queries=100,
label_noise_ratio=0.5,
box_noise_scale=1.0,
):
"""
Creates a contrastive denoising training group using ground-truth samples. It adds noise to labels and boxes.
Args:
targets (`list[dict]`):
The target objects, each containing 'class_labels' and 'boxes' for objects in an image.
num_classes (`int`):
Total number of classes in the dataset.
num_queries (`int`):
Number of query slots in the transformer.
class_embed (`callable`):
A function or a model layer to embed class labels.
num_denoising_queries (`int`, *optional*, defaults to 100):
Number of denoising queries.
label_noise_ratio (`float`, *optional*, defaults to 0.5):
Ratio of noise applied to labels.
box_noise_scale (`float`, *optional*, defaults to 1.0):
Scale of noise applied to bounding boxes.
Returns:
`tuple` comprising various elements:
- **input_query_class** (`torch.FloatTensor`) --
Class queries with applied label noise.
- **input_query_bbox** (`torch.FloatTensor`) --
Bounding box queries with applied box noise.
- **attn_mask** (`torch.FloatTensor`) --
Attention mask for separating denoising and reconstruction queries.
- **denoising_meta_values** (`dict`) --
Metadata including denoising positive indices, number of groups, and split sizes.
"""
if num_denoising_queries <= 0:
return None, None, None, None
num_ground_truths = [len(t["class_labels"]) for t in targets]
device = targets[0]["class_labels"].device
max_gt_num = max(num_ground_truths)
if max_gt_num == 0:
return None, None, None, None
num_groups_denoising_queries = num_denoising_queries // max_gt_num
num_groups_denoising_queries = 1 if num_groups_denoising_queries == 0 else num_groups_denoising_queries
# pad gt to max_num of a batch
batch_size = len(num_ground_truths)
input_query_class = torch.full([batch_size, max_gt_num], num_classes, dtype=torch.int32, device=device)
input_query_bbox = torch.zeros([batch_size, max_gt_num, 4], device=device)
pad_gt_mask = torch.zeros([batch_size, max_gt_num], dtype=torch.bool, device=device)
for i in range(batch_size):
num_gt = num_ground_truths[i]
if num_gt > 0:
input_query_class[i, :num_gt] = targets[i]["class_labels"]
input_query_bbox[i, :num_gt] = targets[i]["boxes"]
pad_gt_mask[i, :num_gt] = 1
# each group has positive and negative queries.
input_query_class = input_query_class.tile([1, 2 * num_groups_denoising_queries])
input_query_bbox = input_query_bbox.tile([1, 2 * num_groups_denoising_queries, 1])
pad_gt_mask = pad_gt_mask.tile([1, 2 * num_groups_denoising_queries])
# positive and negative mask
negative_gt_mask = torch.zeros([batch_size, max_gt_num * 2, 1], device=device)
negative_gt_mask[:, max_gt_num:] = 1
negative_gt_mask = negative_gt_mask.tile([1, num_groups_denoising_queries, 1])
positive_gt_mask = 1 - negative_gt_mask
# contrastive denoising training positive index
positive_gt_mask = positive_gt_mask.squeeze(-1) * pad_gt_mask
denoise_positive_idx = torch.nonzero(positive_gt_mask)[:, 1]
denoise_positive_idx = torch.split(
denoise_positive_idx, [n * num_groups_denoising_queries for n in num_ground_truths]
)
# total denoising queries
num_denoising_queries = torch_int(max_gt_num * 2 * num_groups_denoising_queries)
if label_noise_ratio > 0:
mask = torch.rand_like(input_query_class, dtype=torch.float) < (label_noise_ratio * 0.5)
# randomly put a new one here
new_label = torch.randint_like(mask, 0, num_classes, dtype=input_query_class.dtype)
input_query_class = torch.where(mask & pad_gt_mask, new_label, input_query_class)
if box_noise_scale > 0:
known_bbox = center_to_corners_format(input_query_bbox)
diff = torch.tile(input_query_bbox[..., 2:] * 0.5, [1, 1, 2]) * box_noise_scale
rand_sign = torch.randint_like(input_query_bbox, 0, 2) * 2.0 - 1.0
rand_part = torch.rand_like(input_query_bbox)
rand_part = (rand_part + 1.0) * negative_gt_mask + rand_part * (1 - negative_gt_mask)
rand_part *= rand_sign
known_bbox += rand_part * diff
known_bbox.clip_(min=0.0, max=1.0)
input_query_bbox = corners_to_center_format(known_bbox)
input_query_bbox = inverse_sigmoid(input_query_bbox)
input_query_class = class_embed(input_query_class)
target_size = num_denoising_queries + num_queries
attn_mask = torch.full([target_size, target_size], 0, dtype=torch.float, device=device)
# match query cannot see the reconstruction
attn_mask[num_denoising_queries:, :num_denoising_queries] = -torch.inf
# reconstructions cannot see each other
for i in range(num_groups_denoising_queries):
idx_block_start = max_gt_num * 2 * i
idx_block_end = max_gt_num * 2 * (i + 1)
attn_mask[idx_block_start:idx_block_end, :idx_block_start] = -torch.inf
attn_mask[idx_block_start:idx_block_end, idx_block_end:num_denoising_queries] = -torch.inf
denoising_meta_values = {
"dn_positive_idx": denoise_positive_idx,
"dn_num_group": num_groups_denoising_queries,
"dn_num_split": [num_denoising_queries, num_queries],
}
return input_query_class, input_query_bbox, attn_mask, denoising_meta_values
@auto_docstring(
custom_intro="""
RT-DETR Model (consisting of a backbone and encoder-decoder) outputting raw hidden states without any head on top.
"""
)
| DFineConvEncoder |
python | kubernetes-client__python | kubernetes/client/models/v1_service_cidr_list.py | {
"start": 383,
"end": 6926
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1ServiceCIDR]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1ServiceCIDRList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1ServiceCIDRList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ServiceCIDRList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ServiceCIDRList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ServiceCIDRList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1ServiceCIDRList. # noqa: E501
items is the list of ServiceCIDRs. # noqa: E501
:return: The items of this V1ServiceCIDRList. # noqa: E501
:rtype: list[V1ServiceCIDR]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1ServiceCIDRList.
items is the list of ServiceCIDRs. # noqa: E501
:param items: The items of this V1ServiceCIDRList. # noqa: E501
:type: list[V1ServiceCIDR]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1ServiceCIDRList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ServiceCIDRList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ServiceCIDRList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ServiceCIDRList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ServiceCIDRList. # noqa: E501
:return: The metadata of this V1ServiceCIDRList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ServiceCIDRList.
:param metadata: The metadata of this V1ServiceCIDRList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ServiceCIDRList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ServiceCIDRList):
return True
return self.to_dict() != other.to_dict()
| V1ServiceCIDRList |
python | sympy__sympy | sympy/physics/mechanics/tests/test_actuator.py | {
"start": 650,
"end": 4834
} | class ____:
@pytest.fixture(autouse=True)
def _linear_pathway_fixture(self):
self.force = Symbol('F')
self.pA = Point('pA')
self.pB = Point('pB')
self.pathway = LinearPathway(self.pA, self.pB)
self.q1 = dynamicsymbols('q1')
self.q2 = dynamicsymbols('q2')
self.q3 = dynamicsymbols('q3')
self.q1d = dynamicsymbols('q1', 1)
self.q2d = dynamicsymbols('q2', 1)
self.q3d = dynamicsymbols('q3', 1)
self.N = ReferenceFrame('N')
def test_is_actuator_base_subclass(self):
assert issubclass(ForceActuator, ActuatorBase)
@pytest.mark.parametrize(
'force, expected_force',
[
(1, S.One),
(S.One, S.One),
(Symbol('F'), Symbol('F')),
(dynamicsymbols('F'), dynamicsymbols('F')),
(Symbol('F')**2 + Symbol('F'), Symbol('F')**2 + Symbol('F')),
]
)
def test_valid_constructor_force(self, force, expected_force):
instance = ForceActuator(force, self.pathway)
assert isinstance(instance, ForceActuator)
assert hasattr(instance, 'force')
assert isinstance(instance.force, ExprType)
assert instance.force == expected_force
@pytest.mark.parametrize('force', [None, 'F'])
def test_invalid_constructor_force_not_sympifyable(self, force):
with pytest.raises(SympifyError):
_ = ForceActuator(force, self.pathway)
@pytest.mark.parametrize(
'pathway',
[
LinearPathway(Point('pA'), Point('pB')),
]
)
def test_valid_constructor_pathway(self, pathway):
instance = ForceActuator(self.force, pathway)
assert isinstance(instance, ForceActuator)
assert hasattr(instance, 'pathway')
assert isinstance(instance.pathway, LinearPathway)
assert instance.pathway == pathway
def test_invalid_constructor_pathway_not_pathway_base(self):
with pytest.raises(TypeError):
_ = ForceActuator(self.force, None)
@pytest.mark.parametrize(
'property_name, fixture_attr_name',
[
('force', 'force'),
('pathway', 'pathway'),
]
)
def test_properties_are_immutable(self, property_name, fixture_attr_name):
instance = ForceActuator(self.force, self.pathway)
value = getattr(self, fixture_attr_name)
with pytest.raises(AttributeError):
setattr(instance, property_name, value)
def test_repr(self):
actuator = ForceActuator(self.force, self.pathway)
expected = "ForceActuator(F, LinearPathway(pA, pB))"
assert repr(actuator) == expected
def test_to_loads_static_pathway(self):
self.pB.set_pos(self.pA, 2*self.N.x)
actuator = ForceActuator(self.force, self.pathway)
expected = [
(self.pA, - self.force*self.N.x),
(self.pB, self.force*self.N.x),
]
assert actuator.to_loads() == expected
def test_to_loads_2D_pathway(self):
self.pB.set_pos(self.pA, 2*self.q1*self.N.x)
actuator = ForceActuator(self.force, self.pathway)
expected = [
(self.pA, - self.force*(self.q1/sqrt(self.q1**2))*self.N.x),
(self.pB, self.force*(self.q1/sqrt(self.q1**2))*self.N.x),
]
assert actuator.to_loads() == expected
def test_to_loads_3D_pathway(self):
self.pB.set_pos(
self.pA,
self.q1*self.N.x - self.q2*self.N.y + 2*self.q3*self.N.z,
)
actuator = ForceActuator(self.force, self.pathway)
length = sqrt(self.q1**2 + self.q2**2 + 4*self.q3**2)
pO_force = (
- self.force*self.q1*self.N.x/length
+ self.force*self.q2*self.N.y/length
- 2*self.force*self.q3*self.N.z/length
)
pI_force = (
self.force*self.q1*self.N.x/length
- self.force*self.q2*self.N.y/length
+ 2*self.force*self.q3*self.N.z/length
)
expected = [
(self.pA, pO_force),
(self.pB, pI_force),
]
assert actuator.to_loads() == expected
| TestForceActuator |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_snippets.py | {
"start": 28638,
"end": 29941
} | class ____(util.MdCase):
"""Test snippet URL cases."""
extension = [
'pymdownx.snippets', 'pymdownx.superfences'
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'url_download': True,
'dedent_subsections': True
}
}
@patch('urllib.request.urlopen')
def test_url_sections(self, mock_urlopen):
"""Test specifying a section in a URL."""
with open('tests/test_extensions/_snippets/indented.txt', 'rb') as f:
content = f.read()
length = len(content)
cm = MagicMock()
cm.status = 200
cm.code = 200
cm.read.return_value = content
cm.headers = {'content-length': length}
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
self.check_markdown(
R'''
```
--8<-- "https://test.com/myfile.md:py-section"
```
''',
'''
<div class="highlight"><pre><span></span><code>def some_method(self, param):
"""Docstring."""
return param
</code></pre></div>
''',
True
)
| TestURLDedentSnippets |
python | pytest-dev__pytest-cov | src/pytest_cov/plugin.py | {
"start": 6915,
"end": 16205
} | class ____:
"""Use coverage package to produce code coverage reports.
Delegates all work to a particular implementation based on whether
this test process is centralised, a distributed master or a
distributed worker.
"""
def __init__(self, options: argparse.Namespace, pluginmanager, start=True, no_cov_should_warn=False):
"""Creates a coverage pytest plugin.
We read the rc file that coverage uses to get the data file
name. This is needed since we give coverage through it's API
the data file name.
"""
# Our implementation is unknown at this time.
self.pid = None
self.cov_controller = None
self.cov_report = StringIO()
self.cov_total = None
self.failed = False
self._started = False
self._start_path = None
self._disabled = False
self.options = options
self._wrote_heading = False
is_dist = getattr(options, 'numprocesses', False) or getattr(options, 'distload', False) or getattr(options, 'dist', 'no') != 'no'
if getattr(options, 'no_cov', False):
self._disabled = True
return
if not self.options.cov_report:
self.options.cov_report = ['term']
elif len(self.options.cov_report) == 1 and '' in self.options.cov_report:
self.options.cov_report = {}
self.options.cov_source = _prepare_cov_source(self.options.cov_source)
# import engine lazily here to avoid importing
# it for unit tests that don't need it
from . import engine
if is_dist and start:
self.start(engine.DistMaster)
elif start:
self.start(engine.Central)
# worker is started in pytest hook
def start(self, controller_cls: type['CovController'], config=None, nodeid=None):
if config is None:
# fake config option for engine
class Config:
option = self.options
config = Config()
self.cov_controller = controller_cls(self.options, config, nodeid)
self.cov_controller.start()
self._started = True
self._start_path = Path.cwd()
cov_config = self.cov_controller.cov.config
if self.options.cov_fail_under is None and hasattr(cov_config, 'fail_under'):
self.options.cov_fail_under = cov_config.fail_under
if self.options.cov_precision is None:
self.options.cov_precision = getattr(cov_config, 'precision', 0)
def _is_worker(self, session):
return getattr(session.config, 'workerinput', None) is not None
def pytest_sessionstart(self, session):
"""At session start determine our implementation and delegate to it."""
if self.options.no_cov:
# Coverage can be disabled because it does not cooperate with debuggers well.
self._disabled = True
return
# import engine lazily here to avoid importing
# it for unit tests that don't need it
from . import engine
self.pid = os.getpid()
if self._is_worker(session):
nodeid = session.config.workerinput.get('workerid', session.nodeid)
self.start(engine.DistWorker, session.config, nodeid)
elif not self._started:
self.start(engine.Central)
if self.options.cov_context == 'test':
session.config.pluginmanager.register(TestContextPlugin(self.cov_controller), '_cov_contexts')
@pytest.hookimpl(optionalhook=True)
def pytest_configure_node(self, node):
"""Delegate to our implementation.
Mark this hook as optional in case xdist is not installed.
"""
if not self._disabled:
self.cov_controller.configure_node(node)
@pytest.hookimpl(optionalhook=True)
def pytest_testnodedown(self, node, error):
"""Delegate to our implementation.
Mark this hook as optional in case xdist is not installed.
"""
if not self._disabled:
self.cov_controller.testnodedown(node, error)
def _should_report(self):
needed = self.options.cov_report or self.options.cov_fail_under
return needed and not (self.failed and self.options.no_cov_on_fail)
# we need to wrap pytest_runtestloop. by the time pytest_sessionfinish
# runs, it's too late to set testsfailed
@pytest.hookimpl(wrapper=True)
def pytest_runtestloop(self, session):
if self._disabled:
return (yield)
# we add default warning configuration to prevent certain warnings to bubble up as errors due to rigid filterwarnings configuration
for _, message, category, _, _ in warnings.filters:
if category is ResourceWarning and message in (COVERAGE_SQLITE_WARNING_RE, COVERAGE_SQLITE_WARNING_RE2):
break
else:
warnings.filterwarnings('default', 'unclosed database in <sqlite3.Connection object at', ResourceWarning)
for _, _, category, _, _ in warnings.filters:
if category is PytestCovWarning:
break
else:
warnings.simplefilter('once', PytestCovWarning)
from coverage.exceptions import CoverageWarning
for _, _, category, _, _ in warnings.filters:
if category is CoverageWarning:
break
else:
warnings.simplefilter('once', CoverageWarning)
result = yield
self.failed = bool(session.testsfailed)
if self.cov_controller is not None:
self.cov_controller.finish()
if not self._is_worker(session) and self._should_report():
# import coverage lazily here to avoid importing
# it for unit tests that don't need it
from coverage.misc import CoverageException
from coverage.results import display_covered
from coverage.results import should_fail_under
try:
self.cov_total = self.cov_controller.summary(self.cov_report)
except CoverageException as exc:
message = f'Failed to generate report: {exc}\n'
session.config.pluginmanager.getplugin('terminalreporter').write(f'\nWARNING: {message}\n', red=True, bold=True)
warnings.warn(CovReportWarning(message), stacklevel=1)
self.cov_total = 0
assert self.cov_total is not None, 'Test coverage should never be `None`'
cov_fail_under = self.options.cov_fail_under
cov_precision = self.options.cov_precision
if cov_fail_under is None or self.options.collectonly:
return
if should_fail_under(self.cov_total, cov_fail_under, cov_precision):
message = 'Coverage failure: total of {total} is less than fail-under={fail_under:.{p}f}'.format(
total=display_covered(self.cov_total, cov_precision),
fail_under=cov_fail_under,
p=cov_precision,
)
session.config.pluginmanager.getplugin('terminalreporter').write(f'\nERROR: {message}\n', red=True, bold=True)
# make sure we get the EXIT_TESTSFAILED exit code
session.testsfailed += 1
return result
def write_heading(self, terminalreporter):
if not self._wrote_heading:
terminalreporter.write_sep('=', 'tests coverage')
self._wrote_heading = True
def pytest_terminal_summary(self, terminalreporter):
if self._disabled:
if self.options.no_cov_should_warn:
self.write_heading(terminalreporter)
message = 'Coverage disabled via --no-cov switch!'
terminalreporter.write(f'WARNING: {message}\n', red=True, bold=True)
warnings.warn(CovDisabledWarning(message), stacklevel=1)
return
if self.cov_controller is None:
return
if self.cov_total is None:
# we shouldn't report, or report generation failed (error raised above)
return
report = self.cov_report.getvalue()
if report:
self.write_heading(terminalreporter)
terminalreporter.write(report)
if self.options.cov_fail_under is not None and self.options.cov_fail_under > 0:
self.write_heading(terminalreporter)
failed = self.cov_total < self.options.cov_fail_under
markup = {'red': True, 'bold': True} if failed else {'green': True}
message = '{fail}Required test coverage of {required}% {reached}. Total coverage: {actual:.2f}%\n'.format(
required=self.options.cov_fail_under,
actual=self.cov_total,
fail='FAIL ' if failed else '',
reached='not reached' if failed else 'reached',
)
terminalreporter.write(message, **markup)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
if item.get_closest_marker('no_cover') or 'no_cover' in getattr(item, 'fixturenames', ()):
self.cov_controller.pause()
yield
self.cov_controller.resume()
else:
yield
| CovPlugin |
python | Pylons__pyramid | src/pyramid/predicates.py | {
"start": 306,
"end": 561
} | class ____:
def __init__(self, val, config):
self.val = bool(val)
def text(self):
return 'xhr = %s' % self.val
phash = text
def __call__(self, context, request):
return bool(request.is_xhr) is self.val
| XHRPredicate |
python | django__django | tests/middleware_exceptions/middleware.py | {
"start": 1099,
"end": 1257
} | class ____(BaseMiddleware):
def process_exception(self, request, exception):
raise Exception("from process-exception")
| ProcessExceptionExcMiddleware |
python | zarr-developers__zarr-python | src/zarr/core/indexing.py | {
"start": 31068,
"end": 34863
} | class ____(Indexer):
dim_indexers: list[IntDimIndexer | SliceDimIndexer | IntArrayDimIndexer | BoolArrayDimIndexer]
shape: tuple[int, ...]
chunk_shape: tuple[int, ...]
is_advanced: bool
drop_axes: tuple[int, ...]
def __init__(self, selection: Selection, shape: tuple[int, ...], chunk_grid: ChunkGrid) -> None:
chunk_shape = get_chunk_shape(chunk_grid)
# handle ellipsis
selection = replace_ellipsis(selection, shape)
# normalize list to array
selection = replace_lists(selection)
# setup per-dimension indexers
dim_indexers: list[
IntDimIndexer | SliceDimIndexer | IntArrayDimIndexer | BoolArrayDimIndexer
] = []
for dim_sel, dim_len, dim_chunk_len in zip(selection, shape, chunk_shape, strict=True):
dim_indexer: IntDimIndexer | SliceDimIndexer | IntArrayDimIndexer | BoolArrayDimIndexer
if is_integer(dim_sel):
dim_indexer = IntDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif isinstance(dim_sel, slice):
dim_indexer = SliceDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif is_integer_array(dim_sel):
dim_indexer = IntArrayDimIndexer(dim_sel, dim_len, dim_chunk_len)
elif is_bool_array(dim_sel):
dim_indexer = BoolArrayDimIndexer(dim_sel, dim_len, dim_chunk_len)
else:
raise IndexError(
"unsupported selection item for orthogonal indexing; "
"expected integer, slice, integer array or Boolean "
f"array, got {type(dim_sel)!r}"
)
dim_indexers.append(dim_indexer)
shape = tuple(s.nitems for s in dim_indexers if not isinstance(s, IntDimIndexer))
is_advanced = not is_basic_selection(selection)
if is_advanced:
drop_axes = tuple(
i
for i, dim_indexer in enumerate(dim_indexers)
if isinstance(dim_indexer, IntDimIndexer)
)
else:
drop_axes = ()
object.__setattr__(self, "dim_indexers", dim_indexers)
object.__setattr__(self, "shape", shape)
object.__setattr__(self, "chunk_shape", chunk_shape)
object.__setattr__(self, "is_advanced", is_advanced)
object.__setattr__(self, "drop_axes", drop_axes)
def __iter__(self) -> Iterator[ChunkProjection]:
for dim_projections in itertools.product(*self.dim_indexers):
chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections)
chunk_selection: tuple[Selector, ...] | npt.NDArray[Any] = tuple(
p.dim_chunk_sel for p in dim_projections
)
out_selection: tuple[Selector, ...] | npt.NDArray[Any] = tuple(
p.dim_out_sel for p in dim_projections if p.dim_out_sel is not None
)
# handle advanced indexing arrays orthogonally
if self.is_advanced:
# N.B., numpy doesn't support orthogonal indexing directly as yet,
# so need to work around via np.ix_. Also np.ix_ does not support a
# mixture of arrays and slices or integers, so need to convert slices
# and integers into ranges.
chunk_selection = ix_(chunk_selection, self.chunk_shape)
# special case for non-monotonic indices
if not is_basic_selection(out_selection):
out_selection = ix_(out_selection, self.shape)
is_complete_chunk = all(p.is_complete_chunk for p in dim_projections)
yield ChunkProjection(chunk_coords, chunk_selection, out_selection, is_complete_chunk)
@dataclass(frozen=True)
| OrthogonalIndexer |
python | PyCQA__pylint | tests/message/unittest_message_definition_store.py | {
"start": 7526,
"end": 10805
} | class ____:
@staticmethod
def _compare_messages(
desc: str, msg: MessageDefinition, checkerref: bool = False
) -> None:
assert desc == msg.format_help(checkerref=checkerref)
def test_message_help(self, store: MessageDefinitionStore) -> None:
message_definition = store.get_message_definitions("W1234")[0]
self._compare_messages(
""":msg-symbol (W1234): *message*
msg description. This message belongs to the achecker checker.""",
message_definition,
checkerref=True,
)
self._compare_messages(
""":msg-symbol (W1234): *message*
msg description.""",
message_definition,
checkerref=False,
)
def test_message_help_minmax(self, store: MessageDefinitionStore) -> None:
# build the message manually to be python version independent
message_definition = store.get_message_definitions("E1234")[0]
self._compare_messages(
""":duplicate-keyword-arg (E1234): *Duplicate keyword argument %r in %s call*
Used when a function call passes the same keyword argument multiple times.
This message belongs to the achecker checker. It can't be emitted when using
Python >= 2.6.""",
message_definition,
checkerref=True,
)
self._compare_messages(
""":duplicate-keyword-arg (E1234): *Duplicate keyword argument %r in %s call*
Used when a function call passes the same keyword argument multiple times.
This message can't be emitted when using Python >= 2.6.""",
message_definition,
checkerref=False,
)
def test_list_messages(store: MessageDefinitionStore) -> None:
output = StringIO()
with redirect_stdout(output):
store.list_messages()
# cursory examination of the output: we're mostly testing it completes
assert ":msg-symbol (W1234): *message*" in output.getvalue()
def test_renamed_message_register(store: MessageDefinitionStore) -> None:
assert store.get_message_definitions("W0001")[0].symbol == "msg-symbol"
assert store.get_message_definitions("old-symbol")[0].symbol == "msg-symbol"
def test_multiple_child_of_old_name(store: MessageDefinitionStore) -> None:
"""We can define multiple name with the same old name."""
class FamillyChecker(BaseChecker):
def __init__(self) -> None:
super().__init__(PyLinter())
name = "famillychecker"
msgs = {
"W1235": (
"Child 1",
"child-one",
"Child one description.",
{"old_names": [("C1234", "mother")]},
),
"W1236": (
"Child 2",
"child-two",
"Child two description",
{"old_names": [("C1234", "mother")]},
),
}
store.register_messages_from_checker(FamillyChecker())
mother = store.get_message_definitions("C1234")
child = store.get_message_definitions("W1235")
other_child = store.get_message_definitions("W1236")
assert len(mother) == 2
assert len(child) == 1
assert len(other_child) == 1
assert child[0] in mother
assert other_child[0] in mother
| TestMessageDefinitionStore |
python | apache__thrift | test/py/TestFrozen.py | {
"start": 4557,
"end": 4699
} | class ____(TestFrozenBase):
def protocol(self, trans):
return TBinaryProtocol.TBinaryProtocolFactory().getProtocol(trans)
| TestFrozen |
python | doocs__leetcode | solution/2900-2999/2906.Construct Product Matrix/Solution.py | {
"start": 0,
"end": 550
} | class ____:
def constructProductMatrix(self, grid: List[List[int]]) -> List[List[int]]:
n, m = len(grid), len(grid[0])
p = [[0] * m for _ in range(n)]
mod = 12345
suf = 1
for i in range(n - 1, -1, -1):
for j in range(m - 1, -1, -1):
p[i][j] = suf
suf = suf * grid[i][j] % mod
pre = 1
for i in range(n):
for j in range(m):
p[i][j] = p[i][j] * pre % mod
pre = pre * grid[i][j] % mod
return p
| Solution |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 1520,
"end": 1668
} | class ____(serializers.ModelSerializer):
class Meta:
model = AnotherUniquenessModel
fields = '__all__'
| AnotherUniquenessSerializer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/slots1.py | {
"start": 931,
"end": 1138
} | class ____(Slots1):
__slots__ = ["ddd", "eee"]
def __init__(self):
self.bbb = 1
self.ccc = 1
self.ddd = 1
# This should generate an error
self.fff = 1
| Slots1_1 |
python | sympy__sympy | sympy/assumptions/relation/binrel.py | {
"start": 381,
"end": 3852
} | class ____(Predicate):
"""
Base class for all binary relational predicates.
Explanation
===========
Binary relation takes two arguments and returns ``AppliedBinaryRelation``
instance. To evaluate it to boolean value, use :obj:`~.ask()` or
:obj:`~.refine()` function.
You can add support for new types by registering the handler to dispatcher.
See :obj:`~.Predicate()` for more information about predicate dispatching.
Examples
========
Applying and evaluating to boolean value:
>>> from sympy import Q, ask, sin, cos
>>> from sympy.abc import x
>>> Q.eq(sin(x)**2+cos(x)**2, 1)
Q.eq(sin(x)**2 + cos(x)**2, 1)
>>> ask(_)
True
You can define a new binary relation by subclassing and dispatching.
Here, we define a relation $R$ such that $x R y$ returns true if
$x = y + 1$.
>>> from sympy import ask, Number, Q
>>> from sympy.assumptions import BinaryRelation
>>> class MyRel(BinaryRelation):
... name = "R"
... is_reflexive = False
>>> Q.R = MyRel()
>>> @Q.R.register(Number, Number)
... def _(n1, n2, assumptions):
... return ask(Q.zero(n1 - n2 - 1), assumptions)
>>> Q.R(2, 1)
Q.R(2, 1)
Now, we can use ``ask()`` to evaluate it to boolean value.
>>> ask(Q.R(2, 1))
True
>>> ask(Q.R(1, 2))
False
``Q.R`` returns ``False`` with minimum cost if two arguments have same
structure because it is antireflexive relation [1] by
``is_reflexive = False``.
>>> ask(Q.R(x, x))
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Reflexive_relation
"""
is_reflexive: Optional[bool] = None
is_symmetric: Optional[bool] = None
def __call__(self, *args):
if not len(args) == 2:
raise TypeError(f"Q.{self.name} takes two arguments, but got {len(args)}.")
return AppliedBinaryRelation(self, *args)
@property
def reversed(self):
if self.is_symmetric:
return self
return None
@property
def negated(self):
return None
def _compare_reflexive(self, lhs, rhs):
# quick exit for structurally same arguments
# do not check != here because it cannot catch the
# equivalent arguments with different structures.
# reflexivity does not hold to NaN
if lhs is S.NaN or rhs is S.NaN:
return None
reflexive = self.is_reflexive
if reflexive is None:
pass
elif reflexive and (lhs == rhs):
return True
elif not reflexive and (lhs == rhs):
return False
return None
def eval(self, args, assumptions=True):
# quick exit for structurally same arguments
ret = self._compare_reflexive(*args)
if ret is not None:
return ret
# don't perform simplify on args here. (done by AppliedBinaryRelation._eval_ask)
# evaluate by multipledispatch
lhs, rhs = args
ret = self.handler(lhs, rhs, assumptions=assumptions)
if ret is not None:
return ret
# check reversed order if the relation is reflexive
if self.is_reflexive:
types = (type(lhs), type(rhs))
if self.handler.dispatch(*types) is not self.handler.dispatch(*reversed(types)):
ret = self.handler(rhs, lhs, assumptions=assumptions)
return ret
| BinaryRelation |
python | django__django | tests/forms_tests/tests/test_renderers.py | {
"start": 1245,
"end": 1352
} | class ____(SharedTests, SimpleTestCase):
renderer = Jinja2
expected_widget_dir = "jinja2"
| Jinja2Tests |
python | pypa__pip | src/pip/_internal/utils/misc.py | {
"start": 10755,
"end": 15844
} | class ____(StringIO):
orig_stream: TextIO
@classmethod
def from_stream(cls, orig_stream: TextIO) -> StreamWrapper:
ret = cls()
ret.orig_stream = orig_stream
return ret
# compileall.compile_dir() needs stdout.encoding to print to stdout
# type ignore is because TextIOBase.encoding is writeable
@property
def encoding(self) -> str: # type: ignore
return self.orig_stream.encoding
# Simulates an enum
def enum(*sequential: Any, **named: Any) -> type[Any]:
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = {value: key for key, value in enums.items()}
enums["reverse_mapping"] = reverse
return type("Enum", (), enums)
def build_netloc(host: str, port: int | None) -> str:
"""
Build a netloc from a host-port pair
"""
if port is None:
return host
if ":" in host:
# Only wrap host with square brackets when it is IPv6
host = f"[{host}]"
return f"{host}:{port}"
def build_url_from_netloc(netloc: str, scheme: str = "https") -> str:
"""
Build a full URL from a netloc.
"""
if netloc.count(":") >= 2 and "@" not in netloc and "[" not in netloc:
# It must be a bare IPv6 address, so wrap it with brackets.
netloc = f"[{netloc}]"
return f"{scheme}://{netloc}"
def parse_netloc(netloc: str) -> tuple[str | None, int | None]:
"""
Return the host-port pair from a netloc.
"""
url = build_url_from_netloc(netloc)
parsed = urllib.parse.urlparse(url)
return parsed.hostname, parsed.port
def split_auth_from_netloc(netloc: str) -> NetlocTuple:
"""
Parse out and remove the auth information from a netloc.
Returns: (netloc, (username, password)).
"""
if "@" not in netloc:
return netloc, (None, None)
# Split from the right because that's how urllib.parse.urlsplit()
# behaves if more than one @ is present (which can be checked using
# the password attribute of urlsplit()'s return value).
auth, netloc = netloc.rsplit("@", 1)
pw: str | None = None
if ":" in auth:
# Split from the left because that's how urllib.parse.urlsplit()
# behaves if more than one : is present (which again can be checked
# using the password attribute of the return value)
user, pw = auth.split(":", 1)
else:
user, pw = auth, None
user = urllib.parse.unquote(user)
if pw is not None:
pw = urllib.parse.unquote(pw)
return netloc, (user, pw)
def redact_netloc(netloc: str) -> str:
"""
Replace the sensitive data in a netloc with "****", if it exists.
For example:
- "user:pass@example.com" returns "user:****@example.com"
- "accesstoken@example.com" returns "****@example.com"
"""
netloc, (user, password) = split_auth_from_netloc(netloc)
if user is None:
return netloc
if password is None:
user = "****"
password = ""
else:
user = urllib.parse.quote(user)
password = ":****"
return f"{user}{password}@{netloc}"
def _transform_url(
url: str, transform_netloc: Callable[[str], tuple[Any, ...]]
) -> tuple[str, NetlocTuple]:
"""Transform and replace netloc in a url.
transform_netloc is a function taking the netloc and returning a
tuple. The first element of this tuple is the new netloc. The
entire tuple is returned.
Returns a tuple containing the transformed url as item 0 and the
original tuple returned by transform_netloc as item 1.
"""
purl = urllib.parse.urlsplit(url)
netloc_tuple = transform_netloc(purl.netloc)
# stripped url
url_pieces = (purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment)
surl = urllib.parse.urlunsplit(url_pieces)
return surl, cast("NetlocTuple", netloc_tuple)
def _get_netloc(netloc: str) -> NetlocTuple:
return split_auth_from_netloc(netloc)
def _redact_netloc(netloc: str) -> tuple[str]:
return (redact_netloc(netloc),)
def split_auth_netloc_from_url(
url: str,
) -> tuple[str, str, tuple[str | None, str | None]]:
"""
Parse a url into separate netloc, auth, and url with no auth.
Returns: (url_without_auth, netloc, (username, password))
"""
url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc)
return url_without_auth, netloc, auth
def remove_auth_from_url(url: str) -> str:
"""Return a copy of url with 'username:password@' removed."""
# username/pass params are passed to subversion through flags
# and are not recognized in the url.
return _transform_url(url, _get_netloc)[0]
def redact_auth_from_url(url: str) -> str:
"""Replace the password in a given url with ****."""
return _transform_url(url, _redact_netloc)[0]
def redact_auth_from_requirement(req: Requirement) -> str:
"""Replace the password in a given requirement url with ****."""
if not req.url:
return str(req)
return str(req).replace(req.url, redact_auth_from_url(req.url))
@dataclass(frozen=True)
| StreamWrapper |
python | pydantic__pydantic | tests/typechecking/misc.py | {
"start": 33,
"end": 79
} | class ____(BaseModel):
a: int
b: int
| Sub |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 15805,
"end": 16865
} | class ____:
def __init__(self, cl_name, attrdict, address):
self.cl_name = cl_name
self.attrdict = attrdict
self.address = address
def __repr__(self):
if isinstance(self.attrdict, dict):
kwargs = ', '.join(["%s=%r" % (arg, val)
for arg, val in self.attrdict.iteritems()])
return '<%s(%s) at remote 0x%x>' % (self.cl_name,
kwargs, self.address)
else:
return '<%s at remote 0x%x>' % (self.cl_name,
self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
if _PyObject_VAR_SIZE._type_size_t is None:
_PyObject_VAR_SIZE._type_size_t = gdb.lookup_type('size_t')
return ( ( typeobj.field('tp_basicsize') +
nitems * typeobj.field('tp_itemsize') +
(_sizeof_void_p() - 1)
) & ~(_sizeof_void_p() - 1)
).cast(_PyObject_VAR_SIZE._type_size_t)
_PyObject_VAR_SIZE._type_size_t = None
| InstanceProxy |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py | {
"start": 12824,
"end": 12932
} | class ____(AdsInsights):
breakdowns = ["product_id"]
action_breakdowns = []
| AdsInsightsActionProductID |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 661,
"end": 702
} | class ____(list[T_contra]):
pass
| Class2 |
python | django-haystack__django-haystack | test_haystack/elasticsearch_tests/test_elasticsearch_backend.py | {
"start": 1710,
"end": 1933
} | class ____(ElasticsearchMockSearchIndex):
def prepare_text(self, obj):
if obj.author == "daniel3":
raise SkipDocument
return "Indexed!\n%s" % obj.id
| ElasticsearchMockSearchIndexWithSkipDocument |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.