language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/deletions/test_workflow.py | {
"start": 397,
"end": 2961
} | class ____(HybridCloudTestMixin):
def tasks(self) -> ContextManager[None]:
return TaskRunner()
@pytest.fixture(autouse=True)
def setUp(self) -> None:
self.organization = Factories.create_organization()
self.project = Factories.create_project(organization=self.organization)
self.workflow = Factories.create_workflow()
self.workflow_trigger = Factories.create_data_condition_group(
organization=self.organization
)
self.workflow.when_condition_group = self.workflow_trigger
self.workflow.save()
self.action_filter = Factories.create_data_condition_group(organization=self.organization)
self.action = Factories.create_action()
self.action_and_filter = Factories.create_data_condition_group_action(
condition_group=self.action_filter,
action=self.action,
)
self.second_action = Factories.create_action()
self.second_action_and_filter = Factories.create_data_condition_group_action(
condition_group=self.action_filter,
action=self.second_action,
)
self.workflow_actions = Factories.create_workflow_data_condition_group(
workflow=self.workflow,
condition_group=self.action_filter,
)
self.trigger_condition = Factories.create_data_condition(
condition_group=self.workflow_trigger,
comparison=1,
condition_result=True,
)
self.action_condition = Factories.create_data_condition(
condition_group=self.action_filter,
comparison=1,
condition_result=True,
)
self.workflow.status = ObjectStatus.PENDING_DELETION
self.workflow.save()
@pytest.mark.parametrize(
"instance_attr",
[
"workflow",
"action",
"second_action",
"workflow_trigger",
"action_filter",
"action_and_filter",
"second_action_and_filter",
"workflow_actions",
"trigger_condition",
"action_condition",
],
)
def test_delete_workflow(self, instance_attr: str) -> None:
instance = getattr(self, instance_attr)
instance_id = instance.id
cls = instance.__class__
self.ScheduledDeletion.schedule(instance=self.workflow, days=0)
with self.tasks():
run_scheduled_deletions()
assert not cls.objects.filter(id=instance_id).exists()
| TestDeleteWorkflow |
python | doocs__leetcode | solution/0300-0399/0358.Rearrange String k Distance Apart/Solution.py | {
"start": 0,
"end": 485
} | class ____:
def rearrangeString(self, s: str, k: int) -> str:
cnt = Counter(s)
pq = [(-v, c) for c, v in cnt.items()]
heapify(pq)
q = deque()
ans = []
while pq:
v, c = heappop(pq)
ans.append(c)
q.append((v + 1, c))
if len(q) >= k:
e = q.popleft()
if e[0]:
heappush(pq, e)
return "" if len(ans) < len(s) else "".join(ans)
| Solution |
python | automl__auto-sklearn | test/test_metric/test_metrics.py | {
"start": 295,
"end": 14880
} | class ____(unittest.TestCase):
def test_needs_X(self):
y_true = np.array([0, 0, 1, 1])
y_pred = np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
def dummy_metric(y_true, y_pred, X_data=None, **kwargs):
if not np.array_equal(np.array([45]), X_data):
raise ValueError(f"is {X_data}")
return 1
scorer = autosklearn.metrics._PredictScorer(
"accuracy", dummy_metric, 1, 0, 1, {}, needs_X=True
)
scorer(y_true, y_pred, X_data=np.array([45]))
scorer_nox = autosklearn.metrics._PredictScorer(
"accuracy", dummy_metric, 1, 0, 1, {}, needs_X=False
)
with self.assertRaises(ValueError) as cm:
scorer_nox(y_true, y_pred, X_data=np.array([32]))
the_exception = cm.exception
# X_data is not forwarded
self.assertEqual(the_exception.args[0], "is None")
scorer_nox = autosklearn.metrics._PredictScorer(
"accuracy", sklearn.metrics.accuracy_score, 1, 0, 1, {}, needs_X=False
)
scorer_nox(y_true, y_pred, X_data=np.array([32]))
@pytest.mark.parametrize(
"y_pred, y_true, scorer, expected_score",
[
(
np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]]),
np.array([0, 0, 1, 1]),
autosklearn.metrics.accuracy,
1.0,
),
(
np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
np.array([0, 0, 1, 1]),
autosklearn.metrics.accuracy,
0.5,
),
(
np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]),
np.array([0, 0, 1, 1]),
autosklearn.metrics.balanced_accuracy,
0.5,
),
(
np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
np.array([0, 1, 2]),
autosklearn.metrics.accuracy,
1.0,
),
(
np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
np.array([0, 1, 2]),
autosklearn.metrics.accuracy,
0.333333333,
),
(
np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]),
np.array([0, 1, 2]),
autosklearn.metrics.accuracy,
0.333333333,
),
(
np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]),
np.array([0, 1, 2]),
autosklearn.metrics.balanced_accuracy,
0.333333333,
),
(
np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]),
np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
autosklearn.metrics.accuracy,
1.0,
),
(
np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
autosklearn.metrics.accuracy,
0.25,
),
(
np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]),
np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
autosklearn.metrics.accuracy,
0.25,
),
(
np.arange(0, 1.01, 0.1),
np.arange(0, 1.01, 0.1),
autosklearn.metrics.r2,
1.0,
),
(
np.ones(np.arange(0, 1.01, 0.1).shape) * np.mean(np.arange(0, 1.01, 0.1)),
np.arange(0, 1.01, 0.1),
autosklearn.metrics.r2,
0.0,
),
(
np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]]),
np.array([0, 0, 1, 1]),
autosklearn.metrics.log_loss,
0.0,
),
(
np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
np.array([0, 1, 2]),
autosklearn.metrics.log_loss,
0.0,
),
(
np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]]),
np.array([0, 0, 1, 1]),
autosklearn.metrics.roc_auc,
1.0,
),
(
np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
np.array([0, 0, 1, 1]),
autosklearn.metrics.roc_auc,
0.5,
),
(
np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]),
np.array([0, 0, 1, 1]),
autosklearn.metrics.roc_auc,
0.5,
),
(
np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]),
np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
autosklearn.metrics.roc_auc,
1.0,
),
(
np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),
np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
autosklearn.metrics.roc_auc,
0.5,
),
(
np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]),
np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),
autosklearn.metrics.roc_auc,
0.5,
),
],
)
def test_scorer(
y_pred: np.ndarray,
y_true: np.ndarray,
scorer: autosklearn.metrics.Scorer,
expected_score: float,
) -> None:
"""
Expects
-------
* Expected scores are equal to scores gained from implementing assembled scorers.
"""
result_score = scorer(y_true, y_pred)
assert expected_score == pytest.approx(result_score)
@pytest.mark.parametrize(
"y_pred, y_true, expected_score",
[
(
np.arange(0, 1.01, 0.1) + 1.0,
np.arange(0, 1.01, 0.1),
-9.0,
),
(
np.arange(0, 1.01, 0.1) + 0.5,
np.arange(0, 1.01, 0.1),
-1.5,
),
(
np.arange(0, 1.01, 0.1),
np.arange(0, 1.01, 0.1),
1.0,
),
],
)
def test_sign_flip(
y_pred: np.array,
y_true: np.array,
expected_score: float,
) -> None:
"""
Expects
-------
* Flipping greater_is_better for r2_score result in flipped signs of its output.
"""
greater_true_scorer = autosklearn.metrics.make_scorer(
"r2", sklearn.metrics.r2_score, greater_is_better=True
)
greater_true_score = greater_true_scorer(y_true, y_pred)
assert expected_score == pytest.approx(greater_true_score)
greater_false_scorer = autosklearn.metrics.make_scorer(
"r2", sklearn.metrics.r2_score, greater_is_better=False
)
greater_false_score = greater_false_scorer(y_true, y_pred)
assert (expected_score * -1.0) == pytest.approx(greater_false_score)
def test_regression_metrics():
"""
Expects
-------
* Test metrics do not change output for autosklearn.metrics.REGRESSION_METRICS.
"""
for metric, scorer in autosklearn.metrics.REGRESSION_METRICS.items():
y_true = np.random.random(100).reshape((-1, 1))
y_pred = y_true.copy() + np.random.randn(100, 1) * 0.1
if metric == "mean_squared_log_error":
y_true = np.abs(y_true)
y_pred = np.abs(y_pred)
y_true_2 = y_true.copy()
y_pred_2 = y_pred.copy()
assert np.isfinite(scorer(y_true_2, y_pred_2))
np.testing.assert_array_almost_equal(y_true, y_true_2, err_msg=metric)
np.testing.assert_array_almost_equal(y_pred, y_pred_2, err_msg=metric)
def test_classification_metrics():
"""
Expects
-------
* Test metrics do not change output for autosklearn.metrics.CLASSIFICATION_METRICS.
"""
for metric, scorer in autosklearn.metrics.CLASSIFICATION_METRICS.items():
y_true = np.random.randint(0, 2, size=(100, 1))
y_pred = np.random.random(200).reshape((-1, 2))
y_pred = np.array([y_pred[i] / np.sum(y_pred[i]) for i in range(100)])
y_true_2 = y_true.copy()
y_pred_2 = y_pred.copy()
try:
assert np.isfinite(scorer(y_true_2, y_pred_2))
np.testing.assert_array_almost_equal(y_true, y_true_2, err_msg=metric)
np.testing.assert_array_almost_equal(y_pred, y_pred_2, err_msg=metric)
except ValueError as e:
if (
e.args[0] == "Samplewise metrics are not available outside"
" of multilabel classification."
):
pass
else:
raise e
def test_regression_all():
"""
Expects
-------
* Correct scores from REGRESSION_METRICS.
"""
for metric, scorer in autosklearn.metrics.REGRESSION_METRICS.items():
if scorer.name == "mean_squared_log_error":
continue
y_true = np.array([1, 2, 3, 4])
y_pred_list = [
np.array([1, 2, 3, 4]),
np.array([3, 4, 5, 6]),
np.array([-1, 0, -1, 0]),
np.array([-5, 10, 7, -3]),
]
score_list = [scorer(y_true, y_pred) for y_pred in y_pred_list]
assert scorer._optimum == pytest.approx(score_list[0])
assert score_list == sorted(score_list, reverse=True)
def test_classification_binary():
"""
Expects
-------
* Correct scores from CLASSIFICATION_METRICS for binary classification.
"""
for metric, scorer in autosklearn.metrics.CLASSIFICATION_METRICS.items():
# Skip functions not applicable for binary classification.
# TODO: Average precision should work for binary classification,
# TODO: but its behavior is not right. When y_pred is completely
# TODO: wrong, it does return 0.5, but when it is not completely
# TODO: wrong, it returns value smaller than 0.5.
if metric in [
"average_precision",
"precision_samples",
"recall_samples",
"f1_samples",
]:
continue
y_true = np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
y_pred_list = [
np.array(
[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
),
np.array(
[[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0]]
),
np.array(
[[0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]]
),
np.array(
[[1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]]
),
]
score_list = [scorer(y_true, y_pred) for y_pred in y_pred_list]
assert scorer._optimum == pytest.approx(score_list[0])
assert score_list == sorted(score_list, reverse=True)
def test_classification_multiclass():
"""
Expects
-------
* Correct scores from CLASSIFICATION_METRICS for multiclass classification.
"""
# The last check in this test has a mismatch between the number of
# labels predicted in y_pred and the number of labels in y_true.
# This triggers several warnings but we are aware.
#
# TODO convert to pytest with fixture
#
# This test should be parameterized so we can identify which metrics
# cause which warning specifically and rectify if needed.
ignored_warnings = [(UserWarning, "y_pred contains classes not in y_true")]
for metric, scorer in autosklearn.metrics.CLASSIFICATION_METRICS.items():
# Skip functions not applicable for multiclass classification.
if metric in [
"roc_auc",
"average_precision",
"precision",
"recall",
"f1",
"precision_samples",
"recall_samples",
"f1_samples",
]:
continue
y_true = np.array([0.0, 0.0, 1.0, 1.0, 2.0])
y_pred_list = [
np.array(
[
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
]
),
np.array(
[
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
]
),
np.array(
[
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
]
),
np.array(
[
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
]
),
]
score_list = [scorer(y_true, y_pred) for y_pred in y_pred_list]
assert scorer._optimum == pytest.approx(score_list[0])
assert score_list == sorted(score_list, reverse=True)
# less labels in the targets than in the predictions
y_true = np.array([0.0, 0.0, 1.0, 1.0])
y_pred = np.array(
[[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
)
with warnings.catch_warnings():
for category, message in ignored_warnings:
warnings.filterwarnings("ignore", category=category, message=message)
score = scorer(y_true, y_pred)
assert np.isfinite(score)
def test_classification_multilabel():
"""
Expects
-------
* Correct scores from CLASSIFICATION_METRICS for multi-label classification.
"""
for metric, scorer in autosklearn.metrics.CLASSIFICATION_METRICS.items():
# Skip functions not applicable for multi-label classification.
if metric in [
"roc_auc",
"log_loss",
"precision",
"recall",
"f1",
"balanced_accuracy",
]:
continue
y_true = np.array([[1, 0, 0], [1, 1, 0], [0, 1, 1], [1, 1, 1]])
y_pred_list = [
np.array([[1, 0, 0], [1, 1, 0], [0, 1, 1], [1, 1, 1]]),
np.array([[1, 0, 0], [0, 0, 1], [0, 1, 1], [1, 1, 1]]),
np.array([[1, 0, 0], [0, 0, 1], [1, 0, 1], [1, 1, 0]]),
np.array([[0, 1, 1], [0, 0, 1], [1, 0, 0], [0, 0, 0]]),
]
score_list = [scorer(y_true, y_pred) for y_pred in y_pred_list]
assert scorer._optimum == pytest.approx(score_list[0])
assert score_list == sorted(score_list, reverse=True)
| TestScorer |
python | kubernetes-client__python | kubernetes/client/models/v1_pod_failure_policy_rule.py | {
"start": 383,
"end": 7330
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'action': 'str',
'on_exit_codes': 'V1PodFailurePolicyOnExitCodesRequirement',
'on_pod_conditions': 'list[V1PodFailurePolicyOnPodConditionsPattern]'
}
attribute_map = {
'action': 'action',
'on_exit_codes': 'onExitCodes',
'on_pod_conditions': 'onPodConditions'
}
def __init__(self, action=None, on_exit_codes=None, on_pod_conditions=None, local_vars_configuration=None): # noqa: E501
"""V1PodFailurePolicyRule - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._action = None
self._on_exit_codes = None
self._on_pod_conditions = None
self.discriminator = None
self.action = action
if on_exit_codes is not None:
self.on_exit_codes = on_exit_codes
if on_pod_conditions is not None:
self.on_pod_conditions = on_pod_conditions
@property
def action(self):
"""Gets the action of this V1PodFailurePolicyRule. # noqa: E501
Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: - FailJob: indicates that the pod's job is marked as Failed and all running pods are terminated. - FailIndex: indicates that the pod's index is marked as Failed and will not be restarted. - Ignore: indicates that the counter towards the .backoffLimit is not incremented and a replacement pod is created. - Count: indicates that the pod is handled in the default way - the counter towards the .backoffLimit is incremented. Additional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule. # noqa: E501
:return: The action of this V1PodFailurePolicyRule. # noqa: E501
:rtype: str
"""
return self._action
@action.setter
def action(self, action):
"""Sets the action of this V1PodFailurePolicyRule.
Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are: - FailJob: indicates that the pod's job is marked as Failed and all running pods are terminated. - FailIndex: indicates that the pod's index is marked as Failed and will not be restarted. - Ignore: indicates that the counter towards the .backoffLimit is not incremented and a replacement pod is created. - Count: indicates that the pod is handled in the default way - the counter towards the .backoffLimit is incremented. Additional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule. # noqa: E501
:param action: The action of this V1PodFailurePolicyRule. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and action is None: # noqa: E501
raise ValueError("Invalid value for `action`, must not be `None`") # noqa: E501
self._action = action
@property
def on_exit_codes(self):
"""Gets the on_exit_codes of this V1PodFailurePolicyRule. # noqa: E501
:return: The on_exit_codes of this V1PodFailurePolicyRule. # noqa: E501
:rtype: V1PodFailurePolicyOnExitCodesRequirement
"""
return self._on_exit_codes
@on_exit_codes.setter
def on_exit_codes(self, on_exit_codes):
"""Sets the on_exit_codes of this V1PodFailurePolicyRule.
:param on_exit_codes: The on_exit_codes of this V1PodFailurePolicyRule. # noqa: E501
:type: V1PodFailurePolicyOnExitCodesRequirement
"""
self._on_exit_codes = on_exit_codes
@property
def on_pod_conditions(self):
"""Gets the on_pod_conditions of this V1PodFailurePolicyRule. # noqa: E501
Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed. # noqa: E501
:return: The on_pod_conditions of this V1PodFailurePolicyRule. # noqa: E501
:rtype: list[V1PodFailurePolicyOnPodConditionsPattern]
"""
return self._on_pod_conditions
@on_pod_conditions.setter
def on_pod_conditions(self, on_pod_conditions):
"""Sets the on_pod_conditions of this V1PodFailurePolicyRule.
Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed. # noqa: E501
:param on_pod_conditions: The on_pod_conditions of this V1PodFailurePolicyRule. # noqa: E501
:type: list[V1PodFailurePolicyOnPodConditionsPattern]
"""
self._on_pod_conditions = on_pod_conditions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PodFailurePolicyRule):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PodFailurePolicyRule):
return True
return self.to_dict() != other.to_dict()
| V1PodFailurePolicyRule |
python | doocs__leetcode | solution/1900-1999/1901.Find a Peak Element II/Solution.py | {
"start": 0,
"end": 370
} | class ____:
def findPeakGrid(self, mat: List[List[int]]) -> List[int]:
l, r = 0, len(mat) - 1
while l < r:
mid = (l + r) >> 1
j = mat[mid].index(max(mat[mid]))
if mat[mid][j] > mat[mid + 1][j]:
r = mid
else:
l = mid + 1
return [l, mat[l].index(max(mat[l]))]
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/datacatalog.py | {
"start": 89262,
"end": 94344
} | class ____(GoogleCloudBaseOperator):
"""
Updates a tag template.
This method cannot be used to update the fields of a template. The tag
template fields are represented as separate resources and should be updated using their own
create/update/delete methods.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogUpdateTagTemplateOperator`
:param tag_template: Required. The template to update. The "name" field must be set.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.TagTemplate`
:param update_mask: The field mask specifies the parts of the template to overwrite.
If absent or empty, all of the allowed fields above will be updated.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param location: Required. The location of the tag template to rename.
:param tag_template_id: Optional. The tag template ID for the entry that is being updated.
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"tag_template",
"update_mask",
"location",
"tag_template_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogTagTemplateLink(),)
def __init__(
self,
*,
tag_template: dict | TagTemplate,
update_mask: dict | FieldMask,
location: str | None = None,
tag_template_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.tag_template = tag_template
self.update_mask = update_mask
self.location = location
self.tag_template_id = tag_template_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.update_tag_template(
tag_template=self.tag_template,
update_mask=self.update_mask,
location=self.location,
tag_template_id=self.tag_template_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
location_id, tag_template_id = result.name.split("/")[3::2]
DataCatalogTagTemplateLink.persist(
context=context,
tag_template_id=self.tag_template_id or tag_template_id,
location_id=self.location or location_id,
project_id=self.project_id or hook.project_id,
)
@deprecated(
planned_removal_date="January 30, 2026",
use_instead="airflow.providers.google.cloud.operators.dataplex.DataplexCatalogUpdateAspectTypeOperator",
reason="The Data Catalog will be discontinued on January 30, 2026 "
"in favor of Dataplex Universal Catalog.",
category=AirflowProviderDeprecationWarning,
)
| CloudDataCatalogUpdateTagTemplateOperator |
python | getsentry__sentry | src/sentry/api/permissions.py | {
"start": 2984,
"end": 4628
} | class ____(BasePermission):
"""
Permissions work depending on the type of authentication:
- A user inherits permissions based on their membership role. These are
still dictated as common scopes, but they can't be checked until the
has_object_permission hook is called.
- ProjectKeys (legacy) are granted only project based scopes. This
- APIKeys specify their scope, and work as expected.
"""
scope_map: dict[str, Sequence[str]] = {
"HEAD": (),
"GET": (),
"POST": (),
"PUT": (),
"PATCH": (),
"DELETE": (),
}
def has_permission(self, request: Request, view: APIView) -> bool:
# session-based auth has all scopes for a logged in user
if not request.auth:
return request.user.is_authenticated
if is_org_auth_token_auth(request.auth):
# Ensure we always update the last used date for the org auth token.
# At this point, we don't have the projects yet, so we only update the org auth token's
# last used date, clearing the project_last_used_id. We call this method again in endpoints
# where a project is available to update the project_last_used_id.
update_org_auth_token_last_used(request.auth, [])
assert request.method is not None
allowed_scopes = set(self.scope_map.get(request.method, []))
current_scopes = request.auth.get_scopes()
return any(s in allowed_scopes for s in current_scopes)
def has_object_permission(self, request: Request, view: APIView, obj: Any) -> bool:
return False
| ScopedPermission |
python | numba__numba | numba/tests/test_svml.py | {
"start": 11118,
"end": 15726
} | class ____(TestCase):
""" Tests SVML behaves as expected """
# env mutating, must not run in parallel
_numba_parallel_test_ = False
def compile(self, func, *args, **kwargs):
assert not kwargs
sig = tuple([numba.typeof(x) for x in args])
std = njit(sig)(func)
fast = njit(sig, fastmath=True)(func)
return std.overloads[sig], fast.overloads[sig]
def copy_args(self, *args):
if not args:
return tuple()
new_args = []
for x in args:
if isinstance(x, np.ndarray):
new_args.append(x.copy('k'))
elif isinstance(x, np.number):
new_args.append(x.copy())
elif isinstance(x, numbers.Number):
new_args.append(x)
else:
raise ValueError('Unsupported argument type encountered')
return tuple(new_args)
def check_result(self, pyfunc, *args, **kwargs):
jitstd, jitfast = self.compile(pyfunc, *args)
# python result
py_expected = pyfunc(*self.copy_args(*args))
# jit result
jitstd_result = jitstd.entry_point(*self.copy_args(*args))
# fastmath result
jitfast_result = jitfast.entry_point(*self.copy_args(*args))
# assert numerical equality
np.testing.assert_almost_equal(jitstd_result, py_expected, **kwargs)
np.testing.assert_almost_equal(jitfast_result, py_expected, **kwargs)
def check_asm(self, pyfunc, *args, **kwargs):
std_pattern = kwargs.pop('std_pattern', None)
fast_pattern = kwargs.pop('fast_pattern', None)
# look for specific patterns in the asm for a given target
# recompile for overridden CPU
jitstd, jitfast = self.compile(pyfunc, *args)
if std_pattern:
self.check_svml_presence(jitstd, std_pattern)
if fast_pattern:
self.check_svml_presence(jitfast, fast_pattern)
def check(self, pyfunc, *args, what="both", **kwargs):
assert what in ("both", "result", "asm")
if what == "both" or what == "result":
self.check_result(pyfunc, *args, **kwargs)
if what == "both" or what == "asm":
self.check_asm(pyfunc, *args, **kwargs)
def check_svml_presence(self, func, pattern):
asm = func.library.get_asm_str()
self.assertIn(pattern, asm)
@TestCase.run_test_in_subprocess(envvars=_skylake_axv512_envvars)
def test_scalar_context_asm(self):
# SVML will not be used.
pat = '$_sin' if config.IS_OSX else '$sin'
self.check(math_sin_scalar, 7., what="asm", std_pattern=pat)
self.check(math_sin_scalar, 7., what="asm", fast_pattern=pat)
def test_scalar_context_result(self):
# checks result for test_scalar_context_asm
self.check(math_sin_scalar, 7., what="result")
@TestCase.run_test_in_subprocess(envvars=_skylake_axv512_envvars)
def test_svml_asm(self):
# loops both with and without fastmath should use SVML.
# The high accuracy routines are dropped if `fastmath` is set
std = "__svml_sin8_ha,"
fast = "__svml_sin8," # No `_ha`!
self.check(math_sin_loop, 10, what="asm", std_pattern=std,
fast_pattern=fast)
def test_svml_result(self):
# checks result for test_svml_asm
self.check(math_sin_loop, 10, what="result")
@TestCase.run_test_in_subprocess(envvars={'NUMBA_DISABLE_INTEL_SVML': "1",
**_skylake_axv512_envvars})
def test_svml_disabled(self):
def math_sin_loop(n):
ret = np.empty(n, dtype=np.float64)
for x in range(n):
ret[x] = math.sin(np.float64(x))
return ret
sig = (numba.int32,)
std = njit(sig)(math_sin_loop)
fast = njit(sig, fastmath=True)(math_sin_loop)
fns = std.overloads[sig], fast.overloads[sig]
# assert no SVML call is present in the asm
for fn in fns:
asm = fn.library.get_asm_str()
self.assertNotIn('__svml_sin', asm)
def test_svml_working_in_non_isolated_context(self):
@njit(fastmath={'fast'}, error_model="numpy")
def impl(n):
x = np.empty(n * 8, dtype=np.float64)
ret = np.empty_like(x)
for i in range(ret.size):
ret[i] += math.cosh(x[i])
return ret
impl(1)
self.assertTrue('intel_svmlcc' in impl.inspect_llvm(impl.signatures[0]))
if __name__ == '__main__':
unittest.main()
| TestSVML |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 113198,
"end": 116699
} | class ____(Response):
"""
Response of models.set_ready endpoint.
:param updated: Number of models updated (0 or 1)
:type updated: int
:param published_task: Result of publishing of the model's associated task (if
exists). Returned only if the task was published successfully as part of the
model publishing.
:type published_task: dict
"""
_service = "models"
_action = "set_ready"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"published_task": {
"description": "Result of publishing of the model's associated task (if exists). Returned only if the task was published successfully as part of the model publishing.",
"properties": {
"data": {
"description": "Data returned from the task publishing operation.",
"properties": {
"committed_versions_results": {
"description": "Committed versions results",
"items": {
"additionalProperties": True,
"type": "object",
},
"type": "array",
},
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": "object",
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": "integer",
},
},
"type": "object",
},
"id": {"description": "Task id", "type": "string"},
},
"type": ["object", "null"],
},
"updated": {
"description": "Number of models updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, published_task: Optional[dict] = None, **kwargs: Any) -> None:
super(SetReadyResponse, self).__init__(**kwargs)
self.updated = updated
self.published_task = published_task
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("published_task")
def published_task(self) -> Optional[dict]:
return self._property_published_task
@published_task.setter
def published_task(self, value: Optional[dict]) -> None:
if value is None:
self._property_published_task = None
return
self.assert_isinstance(value, "published_task", (dict,))
self._property_published_task = value
| SetReadyResponse |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 65544,
"end": 65870
} | class ____(themeable):
def __init__(self):
msg = (
"Themeable 'legend_title_align' is deprecated. Use the "
"horizontal and vertical alignment parameters ha & va "
"of 'element_text' with 'lenged_title'."
)
warn(msg, FutureWarning, stacklevel=1)
| legend_title_align |
python | sympy__sympy | sympy/core/numbers.py | {
"start": 40279,
"end": 58510
} | class ____(Number):
"""Represents rational numbers (p/q) of any size.
Examples
========
>>> from sympy import Rational, nsimplify, S, pi
>>> Rational(1, 2)
1/2
Rational is unprejudiced in accepting input. If a float is passed, the
underlying value of the binary representation will be returned:
>>> Rational(.5)
1/2
>>> Rational(.2)
3602879701896397/18014398509481984
If the simpler representation of the float is desired then consider
limiting the denominator to the desired value or convert the float to
a string (which is roughly equivalent to limiting the denominator to
10**12):
>>> Rational(str(.2))
1/5
>>> Rational(.2).limit_denominator(10**12)
1/5
An arbitrarily precise Rational is obtained when a string literal is
passed:
>>> Rational("1.23")
123/100
>>> Rational('1e-2')
1/100
>>> Rational(".1")
1/10
>>> Rational('1e-2/3.2')
1/320
The conversion of other types of strings can be handled by
the sympify() function, and conversion of floats to expressions
or simple fractions can be handled with nsimplify:
>>> S('.[3]') # repeating digits in brackets
1/3
>>> S('3**2/10') # general expressions
9/10
>>> nsimplify(.3) # numbers that have a simple form
3/10
But if the input does not reduce to a literal Rational, an error will
be raised:
>>> Rational(pi)
Traceback (most recent call last):
...
TypeError: invalid input: pi
Low-level
---------
Access numerator and denominator as .p and .q:
>>> r = Rational(3, 4)
>>> r
3/4
>>> r.p
3
>>> r.q
4
Note that p and q return integers (not SymPy Integers) so some care
is needed when using them in expressions:
>>> r.p/r.q
0.75
See Also
========
sympy.core.sympify.sympify, sympy.simplify.simplify.nsimplify
"""
is_real = True
is_integer = False
is_rational = True
is_number = True
__slots__ = ('p', 'q')
p: int
q: int
is_Rational = True
@cacheit
def __new__(cls, p, q=None, gcd=None):
if q is None:
if isinstance(p, Rational):
return p
if isinstance(p, SYMPY_INTS):
pass
else:
if isinstance(p, (float, Float)):
return Rational(*_as_integer_ratio(p))
if not isinstance(p, str):
try:
p = sympify(p)
except (SympifyError, SyntaxError):
pass # error will raise below
else:
if p.count('/') > 1:
raise TypeError('invalid input: %s' % p)
p = p.replace(' ', '')
pq = p.rsplit('/', 1)
if len(pq) == 2:
p, q = pq
fp = fractions.Fraction(p)
fq = fractions.Fraction(q)
p = fp/fq
try:
p = fractions.Fraction(p)
except ValueError:
pass # error will raise below
else:
return cls._new(p.numerator, p.denominator, 1)
if not isinstance(p, Rational):
raise TypeError('invalid input: %s' % p)
q = 1
Q = 1
if not isinstance(p, SYMPY_INTS):
p = Rational(p)
Q *= p.q
p = p.p
else:
p = int(p)
if not isinstance(q, SYMPY_INTS):
q = Rational(q)
p *= q.q
Q *= q.p
else:
Q *= int(q)
q = Q
if gcd is not None:
sympy_deprecation_warning(
"gcd is deprecated in Rational, use nsimplify instead",
deprecated_since_version="1.11",
active_deprecations_target="deprecated-rational-gcd",
stacklevel=4,
)
return cls._new(p, q, gcd)
# p and q are now ints
return cls._new(p, q)
@classmethod
def _new(cls, p, q, gcd=None):
if q == 0:
if p == 0:
if _errdict["divide"]:
raise ValueError("Indeterminate 0/0")
else:
return S.NaN
return S.ComplexInfinity
if q < 0:
q = -q
p = -p
if gcd is None:
gcd = igcd(abs(p), q)
if gcd > 1:
p //= gcd
q //= gcd
return cls.from_coprime_ints(p, q)
@classmethod
def from_coprime_ints(cls, p: int, q: int) -> Rational:
"""Create a Rational from a pair of coprime integers.
Both ``p`` and ``q`` should be strictly of type ``int``.
The caller should ensure that ``gcd(p,q) == 1`` and ``q > 0``.
This may be more efficient than ``Rational(p, q)``. The validity of the
arguments may or may not be checked so it should not be relied upon to
pass unvalidated or invalid arguments to this function.
"""
if q == 1:
return Integer(p)
if p == 1 and q == 2:
return S.Half
obj = Expr.__new__(cls)
obj.p = p
obj.q = q
return obj
def limit_denominator(self, max_denominator=1000000):
"""Closest Rational to self with denominator at most max_denominator.
Examples
========
>>> from sympy import Rational
>>> Rational('3.141592653589793').limit_denominator(10)
22/7
>>> Rational('3.141592653589793').limit_denominator(100)
311/99
"""
f = fractions.Fraction(self.p, self.q)
return Rational(f.limit_denominator(fractions.Fraction(int(max_denominator))))
def __getnewargs__(self):
return (self.p, self.q)
def _hashable_content(self):
return (self.p, self.q)
def _eval_is_positive(self):
return self.p > 0
def _eval_is_zero(self):
return self.p == 0
def __neg__(self):
return Rational(-self.p, self.q)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
return Rational._new(self.p + self.q*other.p, self.q, 1)
elif isinstance(other, Rational):
#TODO: this can probably be optimized more
return Rational(self.p*other.q + self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return other + self
else:
return Number.__add__(self, other)
return Number.__add__(self, other)
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
return Rational._new(self.p - self.q*other.p, self.q, 1)
elif isinstance(other, Rational):
return Rational(self.p*other.q - self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return -other + self
else:
return Number.__sub__(self, other)
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __rsub__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
return Rational._new(self.q*other.p - self.p, self.q, 1)
elif isinstance(other, Rational):
return Rational(self.q*other.p - self.p*other.q, self.q*other.q)
elif isinstance(other, Float):
return -self + other
else:
return Number.__rsub__(self, other)
return Number.__rsub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
return Rational._new(self.p*other.p, self.q, igcd(other.p, self.q))
elif isinstance(other, Rational):
return Rational._new(self.p*other.p, self.q*other.q, igcd(self.p, other.q)*igcd(self.q, other.p))
elif isinstance(other, Float):
return other*self
else:
return Number.__mul__(self, other)
return Number.__mul__(self, other)
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __truediv__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
if self.p and other.p == S.Zero:
return S.ComplexInfinity
else:
return Rational._new(self.p, self.q*other.p, igcd(self.p, other.p))
elif isinstance(other, Rational):
return Rational._new(self.p*other.q, self.q*other.p, igcd(self.p, other.p)*igcd(self.q, other.q))
elif isinstance(other, Float):
return self*(1/other)
else:
return Number.__truediv__(self, other)
return Number.__truediv__(self, other)
@_sympifyit('other', NotImplemented)
def __rtruediv__(self, other):
if global_parameters.evaluate:
if isinstance(other, Integer):
return Rational._new(other.p*self.q, self.p, igcd(self.p, other.p))
elif isinstance(other, Rational):
return Rational._new(other.p*self.q, other.q*self.p, igcd(self.p, other.p)*igcd(self.q, other.q))
elif isinstance(other, Float):
return other*(1/self)
else:
return Number.__rtruediv__(self, other)
return Number.__rtruediv__(self, other)
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if global_parameters.evaluate:
if isinstance(other, Rational):
n = (self.p*other.q) // (other.p*self.q)
return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q)
if isinstance(other, Float):
# calculate mod with Rationals, *then* round the answer
return Float(self.__mod__(Rational(other)),
precision=other._prec)
return Number.__mod__(self, other)
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Rational):
return Rational.__mod__(other, self)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
if isinstance(expt, Number):
if isinstance(expt, Float):
return self._eval_evalf(expt._prec)**expt
if expt.is_extended_negative:
# (3/4)**-2 -> (4/3)**2
ne = -expt
if (ne is S.One):
return Rational(self.q, self.p)
if self.is_negative:
return S.NegativeOne**expt*Rational(self.q, -self.p)**ne
else:
return Rational(self.q, self.p)**ne
if expt is S.Infinity: # -oo already caught by test for negative
if self.p > self.q:
# (3/2)**oo -> oo
return S.Infinity
if self.p < -self.q:
# (-3/2)**oo -> oo + I*oo
return S.Infinity + S.Infinity*S.ImaginaryUnit
return S.Zero
if isinstance(expt, Integer):
# (4/3)**2 -> 4**2 / 3**2
return Rational._new(self.p**expt.p, self.q**expt.p, 1)
if isinstance(expt, Rational):
intpart = expt.p // expt.q
if intpart:
intpart += 1
remfracpart = intpart*expt.q - expt.p
ratfracpart = Rational(remfracpart, expt.q)
if self.p != 1:
return Integer(self.p)**expt*Integer(self.q)**ratfracpart*Rational._new(1, self.q**intpart, 1)
return Integer(self.q)**ratfracpart*Rational._new(1, self.q**intpart, 1)
else:
remfracpart = expt.q - expt.p
ratfracpart = Rational(remfracpart, expt.q)
if self.p != 1:
return Integer(self.p)**expt*Integer(self.q)**ratfracpart*Rational._new(1, self.q, 1)
return Integer(self.q)**ratfracpart*Rational._new(1, self.q, 1)
if self.is_extended_negative and expt.is_even:
return (-self)**expt
return
def _as_mpf_val(self, prec):
return mlib.from_rational(self.p, self.q, prec, rnd)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(mlib.from_rational(self.p, self.q, prec, rnd))
def __abs__(self):
return Rational(abs(self.p), self.q)
def __int__(self):
p, q = self.p, self.q
if p < 0:
return -int(-p//q)
return int(p//q)
def floor(self):
return Integer(self.p // self.q)
def ceiling(self):
return -Integer(-self.p // self.q)
def __floor__(self):
return self.floor()
def __ceil__(self):
return self.ceiling()
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if not isinstance(other, Number):
# S(0) == S.false is False
# S(0) == False is True
return False
if other.is_NumberSymbol:
if other.is_irrational:
return False
return other.__eq__(self)
if other.is_Rational:
# a Rational is always in reduced form so will never be 2/4
# so we can just check equivalence of args
return self.p == other.p and self.q == other.q
return False
def __ne__(self, other):
return not self == other
def _Rrel(self, other, attr):
# if you want self < other, pass self, other, __gt__
try:
other = _sympify(other)
except SympifyError:
return NotImplemented
if other.is_Number:
op = None
s, o = self, other
if other.is_NumberSymbol or other.is_Float:
op = getattr(o, attr)
elif other.is_Rational:
s, o = Integer(s.p*o.q), Integer(s.q*o.p)
op = getattr(o, attr)
if op:
return op(s)
if o.is_number and o.is_extended_real:
return Integer(s.p), s.q*o
def __gt__(self, other):
rv = self._Rrel(other, '__lt__')
if rv is None:
rv = self, other
elif not isinstance(rv, tuple):
return rv
return Expr.__gt__(*rv)
def __ge__(self, other):
rv = self._Rrel(other, '__le__')
if rv is None:
rv = self, other
elif not isinstance(rv, tuple):
return rv
return Expr.__ge__(*rv)
def __lt__(self, other):
rv = self._Rrel(other, '__gt__')
if rv is None:
rv = self, other
elif not isinstance(rv, tuple):
return rv
return Expr.__lt__(*rv)
def __le__(self, other):
rv = self._Rrel(other, '__ge__')
if rv is None:
rv = self, other
elif not isinstance(rv, tuple):
return rv
return Expr.__le__(*rv)
def __hash__(self):
return super().__hash__()
def __format__(self, format_spec):
return format(fractions.Fraction(self.p, self.q), format_spec)
def factors(self, limit=None, use_trial=True, use_rho=False,
use_pm1=False, verbose=False, visual=False):
"""A wrapper to factorint which return factors of self that are
smaller than limit (or cheap to compute). Special methods of
factoring are disabled by default so that only trial division is used.
"""
from sympy.ntheory.factor_ import factorrat
return factorrat(self, limit=limit, use_trial=use_trial,
use_rho=use_rho, use_pm1=use_pm1,
verbose=verbose).copy()
@property
def numerator(self):
return self.p
@property
def denominator(self):
return self.q
@_sympifyit('other', NotImplemented)
def gcd(self, other):
if isinstance(other, Rational):
if other == S.Zero:
return other
return Rational(
igcd(self.p, other.p),
ilcm(self.q, other.q))
return Number.gcd(self, other)
@_sympifyit('other', NotImplemented)
def lcm(self, other):
if isinstance(other, Rational):
return Rational(
self.p // igcd(self.p, other.p) * other.p,
igcd(self.q, other.q))
return Number.lcm(self, other)
def as_numer_denom(self):
return Integer(self.p), Integer(self.q)
def as_content_primitive(self, radical=False, clear=True):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import S
>>> (S(-3)/2).as_content_primitive()
(3/2, -1)
See docstring of Expr.as_content_primitive for more examples.
"""
if self:
if self.is_positive:
return self, S.One
return -self, S.NegativeOne
return S.One, self
@overload
def as_coeff_Mul(self, rational: Literal[True]) -> tuple[Rational, Expr]: ...
@overload
def as_coeff_Mul(self, rational: bool = False) -> tuple["Number", Expr]: ...
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product."""
return self, S.One
def as_coeff_Add(self, rational=False):
"""Efficiently extract the coefficient of a summation."""
return self, S.Zero
| Rational |
python | ray-project__ray | rllib/utils/replay_buffers/tests/test_multi_agent_episode_buffer.py | {
"start": 257,
"end": 18367
} | class ____(unittest.TestCase):
@staticmethod
def _get_episode(episode_len=None, id_=None):
initial_observation = [{"agent_1": 0.0, "agent_2": 0.0}]
initial_infos = [{"agent_1": {}, "agent_2": {}}]
eps = MultiAgentEpisode(
id_=id_,
observations=initial_observation,
infos=initial_infos,
agent_module_ids={"agent_1": "module_1", "agent_2": "module_2"},
)
ts = np.random.randint(1, 200) if episode_len is None else episode_len
for t in range(ts):
eps.add_env_step(
observations={"agent_1": float(t + 1), "agent_2": float(t + 1)},
actions={"agent_1": int(t), "agent_2": int(t)},
rewards={"agent_1": 0.1 * (t + 1), "agent_2": 0.1 * (t + 1)},
infos={"agent_1": {}, "agent_2": {}},
)
eps.is_terminated = np.random.random() > 0.5
eps.is_truncated = False if eps.is_terminated else np.random.random() > 0.8
return eps
def test_add_and_eviction_logic(self):
"""Tests episodes getting properly added to buffer and cause proper
eviction."""
# Fill a buffer till capacity (100 ts).
buffer = MultiAgentEpisodeReplayBuffer(capacity=100)
episode = self._get_episode(id_="A", episode_len=50)
buffer.add(episode)
self.assertTrue(buffer.get_num_episodes() == 1)
self.assertTrue(buffer.get_num_timesteps() == 50)
self.assertTrue(buffer.get_num_agent_timesteps() == 2 * 50)
self.assertTrue(buffer.get_added_agent_timesteps() == 2 * 50)
for module_id in buffer.get_module_ids():
self.assertTrue(buffer.get_num_episodes(module_id) == 1)
self.assertTrue(buffer.get_num_timesteps(module_id) == 50)
self.assertTrue(buffer.get_added_timesteps(module_id) == 50)
episode = self._get_episode(id_="B", episode_len=25)
buffer.add(episode)
self.assertTrue(buffer.get_num_episodes() == 2)
self.assertTrue(buffer.get_num_timesteps() == 75)
self.assertTrue(buffer.get_num_agent_timesteps() == 2 * 75)
self.assertTrue(buffer.get_added_agent_timesteps() == 2 * 75)
for module_id in buffer.get_module_ids():
self.assertTrue(buffer.get_num_episodes(module_id) == 2)
self.assertTrue(buffer.get_num_timesteps(module_id) == 75)
self.assertTrue(buffer.get_added_timesteps(module_id) == 75)
# No eviction yet (but we are full).
episode = self._get_episode(id_="C", episode_len=25)
buffer.add(episode)
self.assertTrue(buffer.get_num_episodes() == 3)
self.assertTrue(buffer.get_num_timesteps() == 100)
self.assertTrue(buffer.get_num_agent_timesteps() == 2 * 100)
self.assertTrue(buffer.get_added_agent_timesteps() == 2 * 100)
for module_id in buffer.get_module_ids():
self.assertTrue(buffer.get_num_episodes(module_id) == 3)
self.assertTrue(buffer.get_num_timesteps(module_id) == 100)
self.assertTrue(buffer.get_added_timesteps(module_id) == 100)
# Trigger eviction of first episode by adding a single timestep episode.
episode = self._get_episode(id_="D", episode_len=1)
buffer.add(episode)
self.assertTrue(buffer.get_num_episodes() == 3)
self.assertTrue(buffer.get_num_timesteps() == 51)
self.assertTrue(buffer.get_num_agent_timesteps() == 2 * 51)
self.assertTrue(buffer.get_added_agent_timesteps() == 2 * 101)
self.assertTrue({eps.id_ for eps in buffer.episodes} == {"B", "C", "D"})
for module_id in buffer.get_module_ids():
self.assertTrue(buffer.get_num_episodes(module_id) == 3)
self.assertTrue(buffer.get_num_timesteps(module_id) == 51)
self.assertTrue(buffer.get_added_timesteps(module_id) == 101)
# Add another big episode and trigger another eviction.
episode = self._get_episode(id_="E", episode_len=200)
buffer.add(episode)
self.assertTrue(buffer.get_num_episodes() == 1)
self.assertTrue(buffer.get_num_timesteps() == 200)
self.assertTrue(buffer.get_num_agent_timesteps() == 2 * 200)
self.assertTrue(buffer.get_added_agent_timesteps() == 2 * 301)
self.assertTrue({eps.id_ for eps in buffer.episodes} == {"E"})
for module_id in buffer.get_module_ids():
self.assertTrue(buffer.get_num_episodes(module_id) == 1)
self.assertTrue(buffer.get_num_timesteps(module_id) == 200)
self.assertTrue(buffer.get_added_timesteps(module_id) == 301)
# Add another small episode and trigger another eviction.
episode = self._get_episode(id_="F", episode_len=2)
buffer.add(episode)
self.assertTrue(buffer.get_num_episodes() == 1)
self.assertTrue(buffer.get_num_timesteps() == 2)
self.assertTrue(buffer.get_num_agent_timesteps() == 2 * 2)
self.assertTrue(buffer.get_added_agent_timesteps() == 2 * 303)
self.assertTrue({eps.id_ for eps in buffer.episodes} == {"F"})
for module_id in buffer.get_module_ids():
self.assertTrue(buffer.get_num_episodes(module_id) == 1)
self.assertTrue(buffer.get_num_timesteps(module_id) == 2)
self.assertTrue(buffer.get_added_timesteps(module_id) == 303)
# Add N small episodes.
for i in range(10):
episode = self._get_episode(id_=str(i), episode_len=10)
buffer.add(episode)
self.assertTrue(buffer.get_num_episodes() == 10)
self.assertTrue(buffer.get_num_timesteps() == 100)
self.assertTrue(buffer.get_num_agent_timesteps() == 2 * 100)
self.assertTrue(buffer.get_added_agent_timesteps() == 2 * 403)
for module_id in buffer.get_module_ids():
self.assertTrue(buffer.get_num_episodes(module_id) == 10)
self.assertTrue(buffer.get_num_timesteps(module_id) == 100)
self.assertTrue(buffer.get_added_timesteps(module_id) == 403)
# Add a 20-ts episode and expect to have evicted 3 episodes.
episode = self._get_episode(id_="G", episode_len=21)
buffer.add(episode)
self.assertTrue(buffer.get_num_episodes() == 8)
self.assertTrue(buffer.get_num_timesteps() == 91)
self.assertTrue(buffer.get_num_agent_timesteps() == 2 * 91)
self.assertTrue(buffer.get_added_agent_timesteps() == 2 * 424)
self.assertTrue(
{eps.id_ for eps in buffer.episodes}
== {"3", "4", "5", "6", "7", "8", "9", "G"}
)
for module_id in buffer.get_module_ids():
self.assertTrue(buffer.get_num_episodes(module_id) == 8)
self.assertTrue(buffer.get_num_timesteps(module_id) == 91)
self.assertTrue(buffer.get_added_timesteps(module_id) == 424)
def test_buffer_independent_sample_logic(self):
"""Samples independently from the multi-agent buffer."""
buffer = MultiAgentEpisodeReplayBuffer(capacity=10000)
for _ in range(200):
episode = self._get_episode()
buffer.add(episode)
for i in range(1000):
sample = buffer.sample(batch_size_B=16, n_step=1)
self.assertTrue(buffer.get_sampled_timesteps() == 16 * (i + 1))
module_ids = {eps.module_id for eps in sample}
self.assertTrue("module_1" in module_ids)
self.assertTrue("module_2" in module_ids)
# For both modules, we should have 16 x (i + 1) timesteps sampled.
# Note, this must be the same here as the number of timesteps sampled
# altogether, b/c we sample both modules.
check(buffer.get_sampled_timesteps("module_1"), 16 * (i + 1))
check(buffer.get_sampled_timesteps("module_2"), 16 * (i + 1))
for eps in sample:
(
obs,
action,
reward,
next_obs,
is_terminated,
is_truncated,
weight,
n_step,
) = (
eps.get_observations(0),
eps.get_actions(-1),
eps.get_rewards(-1),
eps.get_observations(-1),
eps.is_terminated,
eps.is_truncated,
eps.get_extra_model_outputs("weights", -1),
eps.get_extra_model_outputs("n_step", -1),
)
# Make sure terminated and truncated are never both True.
assert not (is_truncated and is_terminated)
# Note, floating point numbers cannot be compared directly.
tolerance = 1e-8
# Assert that actions correspond to the observations.
check(obs, action, atol=tolerance)
# Assert that next observations are correctly one step after
# observations.
check(next_obs, obs + 1, atol=tolerance)
# Assert that the reward comes from the next observation.
check(reward * 10, next_obs, atol=tolerance)
# Furthermore, assert that the importance sampling weights are
# one for `beta=0.0`.
check(weight, 1.0, atol=tolerance)
# Assert that all n-steps are 1.0 as passed into `sample`.
check(n_step, 1.0, atol=tolerance)
# def test_buffer_synchronized_sample_logic(self):
# """Samples synchronized from the multi-agent buffer."""
# buffer = MultiAgentEpisodeReplayBuffer(capacity=10000)
# for _ in range(200):
# episode = self._get_episode()
# buffer.add(episode)
# for i in range(1000):
# sample = buffer.sample(
# batch_size_B=16, n_step=1, replay_mode="synchronized"
# )
# self.assertTrue(buffer.get_sampled_timesteps() == 16 * (i + 1))
# self.assertTrue("module_1" in sample)
# self.assertTrue("module_2" in sample)
# for module_id in sample:
# self.assertTrue(buffer.get_sampled_timesteps(module_id) == 16 *
# (i + 1))
# (
# obs,
# actions,
# rewards,
# next_obs,
# is_terminated,
# is_truncated,
# weights,
# n_steps,
# ) = (
# sample[module_id]["obs"],
# sample[module_id]["actions"],
# sample[module_id]["rewards"],
# sample[module_id]["new_obs"],
# sample[module_id]["terminateds"],
# sample[module_id]["truncateds"],
# sample[module_id]["weights"],
# sample[module_id]["n_step"],
# )
# # Make sure terminated and truncated are never both True.
# assert not np.any(np.logical_and(is_truncated, is_terminated))
# # All fields have same shape.
# assert (
# obs.shape[:2]
# == rewards.shape
# == actions.shape
# == next_obs.shape
# == is_truncated.shape
# == is_terminated.shape
# )
# # Note, floating point numbers cannot be compared directly.
# tolerance = 1e-8
# # Assert that actions correspond to the observations.
# self.assertTrue(np.all(actions - obs < tolerance))
# # Assert that next observations are correctly one step after
# # observations.
# self.assertTrue(np.all(next_obs - obs - 1 < tolerance))
# # Assert that the reward comes from the next observation.
# self.assertTrue(np.all(rewards * 10 - next_obs < tolerance))
# # Furthermore, assert that the importance sampling weights are
# # one for `beta=0.0`.
# self.assertTrue(np.all(weights - 1.0 < tolerance))
# # Assert that all n-steps are 1.0 as passed into `sample`.
# self.assertTrue(np.all(n_steps - 1.0 < tolerance))
# def test_sample_with_modules_to_sample(self):
# """Samples synchronized from the multi-agent buffer."""
# buffer = MultiAgentEpisodeReplayBuffer(capacity=10000)
# for _ in range(200):
# episode = self._get_episode()
# buffer.add(episode)
# for i in range(1000):
# sample = buffer.sample(
# batch_size_B=16,
# n_step=1,
# replay_mode="synchronized",
# modules_to_sample=["module_1"],
# )
# self.assertTrue(buffer.get_sampled_timesteps() == 16 * (i + 1))
# self.assertTrue(buffer.get_sampled_timesteps("module_2") == 0)
# self.assertTrue("module_1" in sample)
# self.assertTrue("module_2" not in sample)
# for module_id in sample:
# self.assertTrue(buffer.get_sampled_timesteps(module_id) == 16 *
# (i + 1))
# (
# obs,
# actions,
# rewards,
# next_obs,
# is_terminated,
# is_truncated,
# weights,
# n_steps,
# ) = (
# sample[module_id]["obs"],
# sample[module_id]["actions"],
# sample[module_id]["rewards"],
# sample[module_id]["new_obs"],
# sample[module_id]["terminateds"],
# sample[module_id]["truncateds"],
# sample[module_id]["weights"],
# sample[module_id]["n_step"],
# )
# # Make sure terminated and truncated are never both True.
# assert not np.any(np.logical_and(is_truncated, is_terminated))
# # All fields have same shape.
# assert (
# obs.shape[:2]
# == rewards.shape
# == actions.shape
# == next_obs.shape
# == is_truncated.shape
# == is_terminated.shape
# )
# # Note, floating point numbers cannot be compared directly.
# tolerance = 1e-8
# # Assert that actions correspond to the observations.
# self.assertTrue(np.all(actions - obs < tolerance))
# # Assert that next observations are correctly one step after
# # observations.
# self.assertTrue(np.all(next_obs - obs - 1 < tolerance))
# # Assert that the reward comes from the next observation.
# self.assertTrue(np.all(rewards * 10 - next_obs < tolerance))
# # Furthermore, assert that the importance sampling weights are
# # one for `beta=0.0`.
# self.assertTrue(np.all(weights - 1.0 < tolerance))
# # Assert that all n-steps are 1.0 as passed into `sample`.
# self.assertTrue(np.all(n_steps - 1.0 < tolerance))
def test_get_state_and_set_state(self):
"""Tests getting and setting the state of the buffer.
This test creates a buffer, fills it with episodes, gets the state of the
buffer, creates a new buffer and sets the state of the new buffer to the
state of the old buffer,and then checks that the two buffers are the same.
Checks include the properties of the buffer and any internal data structures,.
"""
# Create a buffer.
buffer = MultiAgentEpisodeReplayBuffer(capacity=10000)
# Fill it with episodes.
for _ in range(200):
episode = self._get_episode()
buffer.add(episode)
# Now get the state of the buffer.
state = buffer.get_state()
# Create a new buffer and set the state.
buffer2 = MultiAgentEpisodeReplayBuffer(capacity=10000)
buffer2.set_state(state)
# Ensure that the main properties are the same.
check(buffer.get_num_episodes(), buffer2.get_num_episodes())
check(buffer.get_num_episodes_evicted(), buffer2.get_num_episodes_evicted())
check(buffer.get_num_timesteps(), buffer2.get_num_timesteps())
check(buffer.get_added_timesteps(), buffer2.get_added_timesteps())
check(buffer.get_sampled_timesteps(), buffer2.get_sampled_timesteps())
check(buffer.get_num_agent_timesteps(), buffer2.get_num_agent_timesteps())
check(buffer.get_added_agent_timesteps(), buffer2.get_added_agent_timesteps())
check(buffer.get_module_ids(), buffer2.get_module_ids())
# Test any data structures on equality.
for module_id in buffer.get_module_ids():
check(
buffer.get_num_timesteps(module_id),
buffer2.get_num_timesteps(module_id),
)
check(
buffer.get_added_timesteps(module_id),
buffer2.get_added_timesteps(module_id),
)
check(
buffer.get_num_episodes(module_id), buffer2.get_num_episodes(module_id)
)
check(
buffer.get_num_episodes_evicted(module_id),
buffer2.get_num_episodes_evicted(module_id),
)
check(
buffer._module_to_indices[module_id],
buffer2._module_to_indices[module_id],
)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestMultiAgentEpisodeReplayBuffer |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/transformer_lm1b.py | {
"start": 1112,
"end": 3535
} | class ____:
"""Global hyperparameters used to minimize obnoxious kwarg plumbing."""
vocab_size: int
output_vocab_size: int
share_embeddings: bool = False
logits_via_embedding: bool = False
dtype: Any = jnp.float32
emb_dim: int = 512
num_heads: int = 8
num_layers: int = 6
qkv_dim: int = 512
mlp_dim: int = 2048
max_len: int = 2048
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
deterministic: bool = False
decode: bool = False
kernel_init: Callable = nn.initializers.xavier_uniform()
bias_init: Callable = nn.initializers.normal(stddev=1e-6)
posemb_init: Callable | None = None
def shift_right(x, axis=1):
"""Shift the input to the right by padding and slicing on axis."""
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[axis] = (1, 0)
padded = jnp.pad(
x, pad_widths, mode='constant', constant_values=x.dtype.type(0))
return lax.dynamic_slice_in_dim(padded, 0, padded.shape[axis] - 1, axis)
def shift_inputs(x, segment_ids=None, axis=1):
"""Shift inputs and replace EOS by 0 for packed inputs."""
shifted = shift_right(x, axis=axis)
# For packed targets, the first shifted token of a new sequence is made
# 0, rather than being the EOS token for the last sequence.
if segment_ids is not None:
shifted *= (segment_ids == shift_right(segment_ids, axis=axis))
return shifted
def sinusoidal_init(max_len=2048,
min_scale=1.0,
max_scale=10000.0):
"""1D Sinusoidal Position Embedding Initializer.
Args:
max_len: maximum possible length for the input.
min_scale: float: minimum frequency-scale in sine grating.
max_scale: float: maximum frequency-scale in sine grating.
Returns:
output: init function returning `(1, max_len, d_feature)`
"""
def init(key, shape, dtype=np.float32):
"""Sinusoidal init."""
del key, dtype
d_feature = shape[-1]
pe = np.zeros((max_len, d_feature), dtype=np.float32)
position = np.arange(0, max_len)[:, np.newaxis]
scale_factor = -np.log(max_scale / min_scale) / (d_feature // 2 - 1)
div_term = min_scale * np.exp(np.arange(0, d_feature // 2) * scale_factor)
pe[:, :d_feature // 2] = np.sin(position * div_term)
pe[:, d_feature // 2: 2 * (d_feature // 2)] = np.cos(position * div_term)
pe = pe[np.newaxis, :, :] # [1, max_len, d_feature]
return jnp.array(pe)
return init
| TransformerConfig |
python | mlflow__mlflow | mlflow/store/artifact/databricks_sdk_artifact_repo.py | {
"start": 899,
"end": 5319
class ____(ArtifactRepository):
    """Artifact repository that reads/writes artifacts via the Databricks SDK Files API.

    Uses the SDK's experimental files-API client (multipart uploads) when the
    installed databricks-sdk version supports it.
    """

    def __init__(
        self, artifact_uri: str, tracking_uri: str | None = None, registry_uri: str | None = None
    ) -> None:
        from databricks.sdk import WorkspaceClient
        from databricks.sdk.config import Config

        super().__init__(artifact_uri, tracking_uri, registry_uri)
        supports_large_file_uploads = _sdk_supports_large_file_uploads()
        wc = WorkspaceClient(
            config=(
                Config(enable_experimental_files_api_client=True)
                if supports_large_file_uploads
                else None
            )
        )
        if supports_large_file_uploads:
            # `Config` has a `multipart_upload_min_stream_size` parameter but the constructor
            # doesn't set it. This is a bug in databricks-sdk.
            # >>> from databricks.sdk.config import Config
            # >>> config = Config(multipart_upload_chunk_size=123)
            # >>> assert config.multipart_upload_chunk_size != 123
            try:
                wc.files._config.multipart_upload_chunk_size = (
                    MLFLOW_MULTIPART_UPLOAD_CHUNK_SIZE.get()
                )
            except AttributeError:
                # private attribute layout may change across SDK versions; fall back silently
                _logger.debug("Failed to set multipart_upload_chunk_size in Config", exc_info=True)
        self.wc = wc

    @property
    def files_api(self) -> "FilesAPI":
        """The SDK Files API client used for all artifact operations."""
        return self.wc.files

    def _is_dir(self, path: str) -> bool:
        """Return True if `path` exists as a directory in the Files API."""
        from databricks.sdk.errors.platform import NotFound

        try:
            self.files_api.get_directory_metadata(path)
        except NotFound:
            return False
        return True

    def full_path(self, artifact_path: str | None) -> str:
        """Join `artifact_path` onto this repository's base artifact URI."""
        return f"{self.artifact_uri}/{artifact_path}" if artifact_path else self.artifact_uri

    def log_artifact(self, local_file: str, artifact_path: str | None = None) -> None:
        """Upload a single local file, optionally under `artifact_path`.

        Raises:
            MlflowException: if the file exceeds 5GB and the installed SDK
                cannot perform multipart uploads.
        """
        # BUG FIX: the original tested the truthiness of the function object
        # (`not _sdk_supports_large_file_uploads`), which is always False, so
        # this guard could never fire. The function must be called.
        if (
            Path(local_file).stat().st_size > 5 * (1024**3)
            and not _sdk_supports_large_file_uploads()
        ):
            raise MlflowException.invalid_parameter_value(
                "Databricks SDK version < 0.41.0 does not support uploading files larger than 5GB. "
                "Please upgrade the databricks-sdk package to version >= 0.41.0."
            )
        with open(local_file, "rb") as f:
            name = Path(local_file).name
            self.files_api.upload(
                self.full_path(posixpath.join(artifact_path, name) if artifact_path else name),
                f,
                overwrite=True,
            )

    def log_artifacts(self, local_dir: str, artifact_path: str | None = None) -> None:
        """Recursively upload every file under `local_dir`, preserving structure."""
        local_dir = Path(local_dir).resolve()
        futures: list[Future[None]] = []
        with self._create_thread_pool() as executor:
            for f in local_dir.rglob("*"):
                if not f.is_file():
                    continue
                # rebuild the remote sub-path: optional prefix + path relative to local_dir
                paths: list[str] = []
                if artifact_path:
                    paths.append(artifact_path)
                if f.parent != local_dir:
                    paths.append(str(f.parent.relative_to(local_dir)))
                fut = executor.submit(
                    self.log_artifact,
                    local_file=f,
                    artifact_path=posixpath.join(*paths) if paths else None,
                )
                futures.append(fut)
        # surface the first upload error, if any
        for fut in futures:
            fut.result()

    def list_artifacts(self, path: str | None = None) -> list[FileInfo]:
        """List artifacts directly under `path`, sorted by relative path."""
        dest_path = self.full_path(path)
        if not self._is_dir(dest_path):
            # listing a file (or nonexistent path) yields an empty result
            return []
        file_infos: list[FileInfo] = []
        for directory_entry in self.files_api.list_directory_contents(dest_path):
            relative_path = posixpath.relpath(directory_entry.path, self.artifact_uri)
            file_infos.append(
                FileInfo(
                    path=relative_path,
                    is_dir=directory_entry.is_directory,
                    file_size=directory_entry.file_size,
                )
            )
        return sorted(file_infos, key=lambda f: f.path)

    def _download_file(self, remote_file_path: str, local_path: str) -> None:
        """Stream a remote file to `local_path` in 10MB chunks."""
        download_resp = self.files_api.download(self.full_path(remote_file_path))
        with open(local_path, "wb") as f:
            while chunk := download_resp.contents.read(10 * 1024 * 1024):
                f.write(chunk)
| DatabricksSdkArtifactRepository |
python | python-poetry__poetry | src/poetry/mixology/term.py | {
"start": 288,
"end": 7417
class ____:
    """
    A statement about a package which is true or false for a given selection of
    package versions.
    See https://github.com/dart-lang/pub/tree/master/doc/solver.md#term.
    """
    def __init__(self, dependency: Dependency, is_positive: bool) -> None:
        self._dependency = dependency
        self._positive = is_positive
        # relation() and intersect() are hot paths during resolution; memoize them
        # per instance (a per-instance cache, unlike a decorated method, does not
        # keep every Term alive for the lifetime of a global lru_cache).
        self.relation = functools.lru_cache(maxsize=None)(self._relation)
        self.intersect = functools.lru_cache(maxsize=None)(self._intersect)
    @property
    def inverse(self) -> Term:
        """Return the negation of this term (same dependency, flipped polarity)."""
        return Term(self._dependency, not self.is_positive())
    @property
    def dependency(self) -> Dependency:
        """The dependency this term makes a statement about."""
        return self._dependency
    @property
    def constraint(self) -> VersionConstraint:
        """The version constraint of the underlying dependency."""
        return self._dependency.constraint
    def is_positive(self) -> bool:
        """True if the term asserts the dependency, False if it negates it."""
        return self._positive
    def satisfies(self, other: Term) -> bool:
        """
        Returns whether this term satisfies another.
        """
        return (
            self.dependency.complete_name == other.dependency.complete_name
            and self.relation(other) == SetRelation.SUBSET
        )
    def _relation(self, other: Term) -> str:
        """
        Returns the relationship between the package versions
        allowed by this term and another.

        :raises ValueError: if ``other`` refers to a different package.
        """
        if self.dependency.complete_name != other.dependency.complete_name:
            raise ValueError(f"{other} should refer to {self.dependency.complete_name}")
        other_constraint = other.constraint
        # Four cases, by the polarity of each term. The inline examples on each
        # branch illustrate the returned SetRelation.
        if other.is_positive():
            if self.is_positive():
                if not self._compatible_dependency(other.dependency):
                    return SetRelation.DISJOINT
                # foo ^1.5.0 is a subset of foo ^1.0.0
                if other_constraint.allows_all(self.constraint):
                    return SetRelation.SUBSET
                # foo ^2.0.0 is disjoint with foo ^1.0.0
                if not self.constraint.allows_any(other_constraint):
                    return SetRelation.DISJOINT
                return SetRelation.OVERLAPPING
            else:
                if not self._compatible_dependency(other.dependency):
                    return SetRelation.OVERLAPPING
                # not foo ^1.0.0 is disjoint with foo ^1.5.0
                if self.constraint.allows_all(other_constraint):
                    return SetRelation.DISJOINT
                # not foo ^1.5.0 overlaps foo ^1.0.0
                # not foo ^2.0.0 is a superset of foo ^1.5.0
                return SetRelation.OVERLAPPING
        else:
            if self.is_positive():
                if not self._compatible_dependency(other.dependency):
                    return SetRelation.SUBSET
                # foo ^2.0.0 is a subset of not foo ^1.0.0
                if not other_constraint.allows_any(self.constraint):
                    return SetRelation.SUBSET
                # foo ^1.5.0 is disjoint with not foo ^1.0.0
                if (
                    other_constraint.allows_all(self.constraint)
                    # if transitive markers are not equal we have to handle it
                    # as overlapping so that markers are merged later
                    and self.dependency.transitive_marker
                    == other.dependency.transitive_marker
                ):
                    return SetRelation.DISJOINT
                # foo ^1.0.0 overlaps not foo ^1.5.0
                return SetRelation.OVERLAPPING
            else:
                if not self._compatible_dependency(other.dependency):
                    return SetRelation.OVERLAPPING
                # not foo ^1.0.0 is a subset of not foo ^1.5.0
                if self.constraint.allows_all(other_constraint):
                    return SetRelation.SUBSET
                # not foo ^2.0.0 overlaps not foo ^1.0.0
                # not foo ^1.5.0 is a superset of not foo ^1.0.0
                return SetRelation.OVERLAPPING
    def _intersect(self, other: Term) -> Term | None:
        """
        Returns a Term that represents the packages
        allowed by both this term and another

        Returns None when the intersection is empty.

        :raises ValueError: if ``other`` refers to a different package.
        """
        if self.dependency.complete_name != other.dependency.complete_name:
            raise ValueError(f"{other} should refer to {self.dependency.complete_name}")
        if self._compatible_dependency(other.dependency):
            if self.is_positive() != other.is_positive():
                # foo ^1.0.0 ∩ not foo ^1.5.0 → foo >=1.0.0 <1.5.0
                positive = self if self.is_positive() else other
                negative = other if self.is_positive() else self
                return self._non_empty_term(
                    positive.constraint.difference(negative.constraint), True, other
                )
            elif self.is_positive():
                # foo ^1.0.0 ∩ foo >=1.5.0 <3.0.0 → foo ^1.5.0
                return self._non_empty_term(
                    self.constraint.intersect(other.constraint), True, other
                )
            else:
                # not foo ^1.0.0 ∩ not foo >=1.5.0 <3.0.0 → not foo >=1.0.0 <3.0.0
                return self._non_empty_term(
                    self.constraint.union(other.constraint), False, other
                )
        elif self.is_positive() != other.is_positive():
            # incompatible dependencies: the positive one wins the intersection
            return self if self.is_positive() else other
        else:
            return None
    def difference(self, other: Term) -> Term | None:
        """
        Returns a Term that represents packages
        allowed by this term and not by the other
        """
        return self.intersect(other.inverse)
    def _compatible_dependency(self, other: Dependency) -> bool:
        """Return True if both dependencies can refer to the same package."""
        return (
            self.dependency.is_root
            or other.is_root
            or other.is_same_package_as(self.dependency)
            or (
                # we do this here to indicate direct origin dependencies are
                # compatible with NVR dependencies
                self.dependency.complete_name == other.complete_name
                and self.dependency.is_direct_origin() != other.is_direct_origin()
            )
        )
    def _non_empty_term(
        self, constraint: VersionConstraint, is_positive: bool, other: Term
    ) -> Term | None:
        """Build a Term for `constraint`, or None if the constraint is empty."""
        if constraint.is_empty():
            return None
        # when creating a new term prefer direct-reference dependencies
        dependency = (
            other.dependency
            if not self.dependency.is_direct_origin()
            and other.dependency.is_direct_origin()
            else self.dependency
        )
        new_dep = dependency.with_constraint(constraint)
        if is_positive and other.is_positive():
            new_dep.transitive_marker = self.dependency.transitive_marker.union(
                other.dependency.transitive_marker
            )
        return Term(new_dep, is_positive)
    def __str__(self) -> str:
        prefix = "not " if not self.is_positive() else ""
        return f"{prefix}{self._dependency}"
    def __repr__(self) -> str:
        return f"<Term {self!s}>"
| Term |
python | joke2k__faker | faker/providers/address/zu_ZA/__init__.py | {
"start": 45,
"end": 5185
class ____(AddressProvider):
    """
    Address Provider for the zu_ZA locale (Zulu, South Africa).
    Data sourced from:
    - South African cities and towns: https://en.wikipedia.org/wiki/List_of_cities_and_towns_in_South_Africa
    - South African postal codes: https://en.wikipedia.org/wiki/List_of_postal_codes_in_South_Africa
    - Languages of South Africa: https://en.wikipedia.org/wiki/Languages_of_South_Africa
    """
    city_formats = ("{{city_name}}",)
    building_number_formats = ("%#", "%##", "%###")
    postcode_formats = ("%###",)  # 4-digit postal code format
    section_formats = ("",)
    street_address_formats = ("{{building_number}} {{street_name}} {{street_suffix}}",)
    address_formats = ("{{street_address}}, {{city}}, {{postcode}}",)
    secondary_address_formats = ("Flat #%#", "Unit #%#", "Suite #%#")
    street_names = (
        "Main",
        "Church",
        "President",
        "Voortrekker",
        "Nelson Mandela",
        "Albertina Sisulu",
        "Rivonia",
        "Jan Smuts",
        "Commissioner",
        "Long",
        "High",
        "Short",
        "Victoria",
        "Queen",
        "King",
        "Oxford",
        "George",
        "William",
        "York",
        "Smith",
        "Adelaide",
        "Charles",
        "Churchill",
        "Cecil",
        "Clarence",
        "Edward",
        "Elizabeth",
        "Frere",
        "Gandhi",
        "Grey",
        "James",
        "Joseph",
        "Milner",
        "Napier",
        "Paul Kruger",
        "Prince",
        "Somerset",
        "Stanley",
        "Thomas",
        "Walter Sisulu",
        "West",
    )
    # Zulu words for road/street types (Umgwaqo = road, Isitaladi = street, ...)
    street_suffixes = ("Umgwaqo", "Indlela", "Isitaladi", "Ithafa", "Indawo")
    cities = (
        "eGoli",
        "eThekwini",
        "iBhayi",
        "iKapa",
        "uMgungundlovu",
        "Polokwane",
        "Mbombela",
        "Mahikeng",
        "Kimberley",
        "Bloemfontein",
        "Rustenburg",
        "Soweto",
        "Benoni",
        "Tembisa",
        "Welkom",
        "Vereeniging",
        "Chatsworth",
        "Uitenhage",
        "Middelburg",
        "Springs",
        "Randfontein",
        "Boksburg",
        "Witbank",
        "Klerksdorp",
        "Bethlehem",
        "George",
        "Upington",
        "Musina",
        "Vanderbijlpark",
        "Stellenbosch",
        "Krugersdorp",
        "Sasolburg",
        "Centurion",
        "Newcastle",
        "Thohoyandou",
        "Potchefstroom",
        "Kathu",
        "Paarl",
    )
    city_suffixes = ("",)
    countries = (
        "iNingizimu Afrika",
        "Botswana",
        "Lesotho",
        "Namibia",
        "Eswatini",
        "Zimbabwe",
        "Mozambique",
        "Angola",
        "Zambia",
        "Malawi",
        "Madagascar",
        "Tanzania",
        "Kenya",
        "Nigeria",
        "Ghana",
        "Egypt",
        "Morocco",
        "Tunisia",
        "Algeria",
        "Ethiopia",
        "Sudan",
        "Somalia",
        "Uganda",
        "Cameroon",
        "DR Congo",
        "Rwanda",
        "Burundi",
        "Senegal",
        "Mali",
        "Ivory Coast",
        "Niger",
        "Chad",
        "Mauritania",
        "Eritrea",
        "Djibouti",
        "Cape Verde",
        "Seychelles",
        "Mauritius",
        "Comoros",
        "Gambia",
        "Liberia",
        "Sierra Leone",
        "Benin",
        "Togo",
        "Equatorial Guinea",
        "Gabon",
        "Congo",
        "Central African Republic",
        "Sao Tome and Principe",
        "Guinea",
        "Guinea-Bissau",
        "Burkina Faso",
    )
    # The nine South African provinces, using Zulu names where they exist
    provinces = (
        "iMpuma-Kapa",
        "Freistata",
        "eGoli",
        "iKwaZulu-Natali",
        "Limpopo",
        "iMpumalanga",
        "Bokone Bophirima",
        "Noord-Kaap",
        "Wes-Kaap",
    )
    def secondary_address(self) -> str:
        """
        Return a random secondary address, e.g. ``Flat #3``.

        :sample:
        """
        return self.numerify(self.random_element(self.secondary_address_formats))
    def building_number(self) -> str:
        """
        Return a random 2-4 digit building number.

        :sample:
        """
        return self.numerify(self.random_element(self.building_number_formats))
    def street_name(self) -> str:
        """
        Return a random street name.

        :sample:
        """
        return self.random_element(self.street_names)
    def street_suffix(self) -> str:
        """
        Return a random Zulu street suffix.

        :sample:
        """
        return self.random_element(self.street_suffixes)
    def city_name(self) -> str:
        """
        Return a random South African city name.

        :sample:
        """
        return self.random_element(self.cities)
    def city_name_suffix(self) -> str:
        """
        Return a city name suffix (always empty for this locale).

        :sample:
        """
        return self.random_element(self.city_suffixes)
    def section_number(self) -> str:
        """
        Return a section number (always empty for this locale).

        :sample:
        """
        return self.numerify(self.random_element(self.section_formats))
    def province(self) -> str:
        """
        Return a random South African province.

        :sample:
        """
        return self.random_element(self.provinces)
    def administrative_unit(self) -> str:
        """
        Return a random administrative unit (alias for :meth:`province`).

        :sample:
        """
        return self.random_element(self.provinces)
| Provider |
python | eth-brownie__brownie | brownie/network/transaction.py | {
"start": 2996,
"end": 60338
} | class ____:
"""Attributes and methods relating to a broadcasted transaction.
* All ether values are given as integers denominated in wei.
* Before the tx has confirmed, most attributes are set to None
* Accessing methods / attributes that query debug_traceTransaction
may be very slow if the transaction involved many steps
Attributes:
contract_name: Name of the contract called in the transaction
fn_name: Name of the method called in the transaction
txid: Transaction ID
sender: Address of the sender
receiver: Address of the receiver
value: Amount transferred
gas_price: Gas price
gas_limit: Gas limit
gas_used: Gas used
input: Hexstring input data
confirmations: The number of blocks since the transaction was confirmed
nonce: Transaction nonce
block_number: Block number this transaction was included in
timestamp: Timestamp of the block this transaction was included in
txindex: Index of the transaction within the mined block
contract_address: Address of contract deployed by the transaction
logs: Raw transaction logs
status: Transaction status: -1 pending, 0 reverted, 1 successful
Additional attributes:
(only available if debug_traceTransaction is enabled in the RPC)
events: Decoded transaction log events
trace: Expanded stack trace from debug_traceTransaction
return_value: Return value(s) from contract call
revert_msg: Error string from reverted contract all
modified_state: Boolean, did this contract write to storage?"""
# these are defined as class attributes to expose them in console completion hints
block_number: Optional[BlockNumber] = None
contract_address: Optional[ChecksumAddress] = None
contract_name: Optional[ContractName] = None
fn_name: Optional[str] = None
gas_used: Optional[int] = None
logs: Optional[List] = None
nonce: Optional[int] = None
sender = None
txid: str
txindex = None
type: int
    def __init__(
        self,
        txid: Union[str, bytes],
        sender: Any = None,
        silent: bool = True,
        required_confs: int = 1,
        is_blocking: bool = True,
        name: str = "",
        revert_data: Optional[Tuple[str, int, str]] = None,
    ) -> None:
        """Instantiates a new TransactionReceipt object.
        Args:
            txid: hexstring transaction ID
            sender: sender as a hex string or Account object
            required_confs: the number of required confirmations before processing the receipt
            is_blocking: if True, creating the object is a blocking action until the required
                confirmations are received
            silent: toggles console verbosity (default True)
            name: contract function being called
            revert_data: (revert string, program counter, revert type)
        """
        self._silent = silent
        if isinstance(txid, bytes):
            txid = bytes_to_hexstring(txid)
        # this event is set once the transaction is confirmed or dropped
        # it is used to waiting during blocking transaction actions
        self._confirmed = threading.Event()
        # internal attributes
        self._call_cost = 0
        self._trace_exc: Optional[Exception] = None
        self._trace_origin: Optional[str] = None
        self._raw_trace: Optional[List] = None
        self._trace: Optional[List] = None
        self._events: Optional[EventDict] = None
        self._return_value: Any = None
        self._revert_msg: Optional[str] = None
        self._dev_revert_msg: Optional[str] = None
        self._modified_state: Optional[bool] = None
        self._new_contracts: Optional[List[EthAddress]] = None
        self._internal_transfers: Optional[List[Dict[str, Any]]] = None
        self._subcalls: Optional[List[Dict[str, Any]]] = None
        # attributes that can be set immediately
        self.sender = sender
        self.status = Status(-1)
        self.txid = str(txid)
        self.contract_name = None
        self.fn_name = name
        # a name of the form "Contract.function" is split into its two parts
        if name and "." in name:
            self.contract_name, self.fn_name = name.split(".", maxsplit=1)
        # avoid querying the trace to get the revert string if possible
        self._revert_msg, self._revert_pc, revert_type = revert_data or (None, None, None)
        if self._revert_msg is None and revert_type not in ("revert", "invalid_opcode"):
            self._revert_msg = revert_type
        if self._revert_pc is not None:
            self._dev_revert_msg = build._get_dev_revert(self._revert_pc) or None
        tx: Dict = web3.eth.get_transaction(HexBytes(self.txid))
        self._set_from_tx(tx)
        if not self._silent:
            # print gas information: EIP-1559 (type 2) txs show max/priority fee,
            # legacy txs show the flat gas price
            output_str = ""
            if self.type == 2:
                max_gas = tx["maxFeePerGas"] / 10**9
                priority_gas = tx["maxPriorityFeePerGas"] / 10**9
                output_str = (
                    f"  Max fee: {bright_blue}{max_gas}{color} gwei"
                    f"   Priority fee: {bright_blue}{priority_gas}{color} gwei"
                )
            elif self.gas_price is not None:
                gas_price = self.gas_price / 10**9
                output_str = f"  Gas price: {bright_blue}{gas_price}{color} gwei"
            print(
                f"{output_str}   Gas limit: {bright_blue}{self.gas_limit}{color}"
                f"   Nonce: {bright_blue}{self.nonce}{color}"
            )
        # await confirmation of tx in a separate thread which is blocking if
        # required_confs > 0 or tx has already confirmed (`blockNumber` != None)
        confirm_thread = threading.Thread(
            target=self._await_confirmation,
            args=(tx.get("blockNumber"), required_confs),
            daemon=True,
        )
        confirm_thread.start()
        if is_blocking and (required_confs > 0 or tx.get("blockNumber")):
            confirm_thread.join()
def __repr__(self) -> str:
color_str = {-2: "dark white", -1: "bright yellow", 0: "bright red", 1: ""}[self.status]
return f"<Transaction '{color(color_str)}{self.txid}{color}'>"
    def __hash__(self) -> int:
        # hash by txid so receipts can be used in sets / as dict keys
        return hash(self.txid)
@trace_property
def events(self) -> EventDict:
if self._events is None:
if self.status:
# relay contract map so we can decode ds-note logs
addrs = {log.address for log in self.logs} if self.logs else set()
contracts = {addr: state._find_contract(addr) for addr in addrs}
self._events = _decode_logs(self.logs, contracts=contracts)
else:
self._get_trace()
# get events from the trace - handled lazily so that other
# trace operations are not blocked in case of a decoding error
initial_address = str(self.receiver or self.contract_address)
self._events = _decode_trace(self._raw_trace, initial_address)
return self._events
    @trace_property
    def internal_transfers(self) -> List[Dict[str, Any]]:
        # internal transfers only exist for successful transactions
        if not self.status:
            return []
        if self._internal_transfers is None:
            self._expand_trace()
        return self._internal_transfers
    @trace_property
    def modified_state(self) -> Optional[bool]:
        # a reverted transaction never persists state changes
        if not self.status:
            self._modified_state = False
        elif self._modified_state is None:
            self._get_trace()
        return self._modified_state
    @trace_property
    def new_contracts(self) -> List[EthAddress]:
        # contracts deployed within the tx are discovered by expanding the trace
        if not self.status:
            return []
        if self._new_contracts is None:
            self._expand_trace()
        return self._new_contracts
    @trace_property
    def return_value(self) -> Optional[str]:
        # reverted transactions have no return value
        if not self.status:
            return None
        if self._return_value is None:
            self._get_trace()
        return self._return_value
    @trace_property
    def revert_msg(self) -> Optional[str]:
        # successful transactions have no revert message
        if self.status:
            return None
        if self._revert_msg is None:
            # nothing cached from broadcast time - derive the message from the trace
            self._get_trace()
        elif self.contract_address and self._revert_msg == "out of gas":
            # a failed deployment reported as "out of gas" may actually have hit
            # the EIP-170 size limit - the trace can distinguish the two
            self._get_trace()
        return self._revert_msg
    @trace_property
    def dev_revert_msg(self) -> Optional[str]:
        # dev revert strings only exist for failed transactions
        if self.status:
            return None
        if self._dev_revert_msg is None:
            self._get_trace()
        # an empty string means "checked, none found" internally - report as None
        return self._dev_revert_msg or None
    @trace_property
    def subcalls(self) -> Optional[List]:
        if self._subcalls is None:
            self._expand_trace()
        # exclude calls into precompiled contracts from the reported subcalls
        subcalls = filter(lambda s: not _is_call_to_precompile(s), self._subcalls)
        return list(subcalls)
    @trace_property
    def trace(self) -> Optional[List]:
        # the expanded trace is built lazily on first access
        if self._trace is None:
            self._expand_trace()
        return self._trace
    @property
    def timestamp(self) -> Optional[int]:
        # pending / dropped transactions are not included in any block yet
        if self.status < 0:
            return None
        return web3.eth.get_block(self.block_number)["timestamp"]
    @property
    def confirmations(self) -> int:
        if not self.block_number:
            return 0
        # the including block itself counts as the first confirmation
        return web3.eth.block_number - self.block_number + 1
    def replace(
        self,
        increment: Optional[float] = None,
        gas_price: Optional[Wei] = None,
        silent: Optional[bool] = None,
    ) -> "TransactionReceipt":
        """
        Rebroadcast this transaction with a higher gas price.
        Exactly one of `increment` and `gas_price` must be given.
        Arguments
        ---------
        increment : float, optional
            Multiplier applied to the gas price of this transaction in order
            to determine the new gas price. For EIP1559 transactions the multiplier
            is applied to the max_fee, the priority_fee is incremented by 1.1.
        gas_price : Wei, optional
            Absolute gas price to use in the replacement transaction. For EIP1559
            transactions this is the new max_fee, the priority_fee is incremented
            by 1.1.
        silent : bool, optional
            Toggle console verbosity (default is same setting as this transaction)
        Returns
        -------
        TransactionReceipt
            New transaction object
        """
        if increment is None and gas_price is None:
            raise ValueError("Must give one of `increment` or `gas_price`")
        if gas_price is not None and increment is not None:
            raise ValueError("Cannot set `increment` and `gas_price` together")
        if self.status > -1:
            raise ValueError("Transaction has already confirmed")
        # NOTE(review): if `self.gas_price` is None (possible for a pending type-2
        # tx) and only `increment` was given, `gas_price` stays None and `max_fee`
        # below is set to None as well - confirm this fallthrough is intended.
        if self.gas_price is not None:
            if increment is not None:
                gas_price = Wei(self.gas_price * increment)
            else:
                gas_price = Wei(gas_price)
        max_fee, priority_fee = None, None
        if self.max_fee is not None and self.priority_fee is not None:
            # EIP-1559: the computed price becomes the new max_fee and the
            # priority fee is bumped by 10%
            max_fee = gas_price
            priority_fee = Wei(self.priority_fee * 1.1)
            gas_price = None
        if silent is None:
            silent = self._silent
        sender = self.sender
        if isinstance(sender, EthAddress):
            # if the transaction wasn't broadcast during this brownie session,
            # check if the sender is unlocked - we might be able to replace anyway
            from brownie import accounts
            if sender in accounts:
                sender = accounts.at(sender)
            else:
                raise ValueError("Sender address not in `accounts`")
        # re-send the same payload with the same nonce so it replaces this tx
        return sender.transfer(
            self.receiver,
            self.value,
            gas_limit=self.gas_limit,
            gas_price=gas_price,
            max_fee=max_fee,
            priority_fee=priority_fee,
            data=self.input,
            nonce=self.nonce,
            required_confs=0,
            silent=silent,
        )
    def wait(self, required_confs: int) -> None:
        """Block until this transaction has at least `required_confs` confirmations.

        Returns immediately if `required_confs` < 1 or enough confirmations
        already exist. Sets status to -2 (dropped) if the tx disappears from
        the mempool after its nonce has been consumed.
        """
        if required_confs < 1:
            return
        if self.confirmations > required_confs:
            print(f"This transaction already has {self.confirmations} confirmations.")
            return
        if self.nonce is not None:
            # if we know the transaction nonce, it's more efficient to watch the tx count
            # this (i hope) also fixes a longstanding bug that sometimes gave an incorrect
            # "tx dropped without known replacement" error due to a race condition
            while web3.eth.get_transaction_count(str(self.sender)) <= self.nonce:
                time.sleep(1)
        while True:
            try:
                tx: Dict = web3.eth.get_transaction(self.txid)
                break
            except TransactionNotFound:
                if self.nonce is not None:
                    # nonce consumed but tx gone - it was dropped/replaced
                    self.status = Status(-2)
                    self._confirmed.set()
                    return
                time.sleep(1)
        self._await_confirmation(tx["blockNumber"], required_confs)
    def _raise_if_reverted(self, exc: Any) -> None:
        """Raise `exc` (augmented with source/revert info) if this tx reverted.

        No-op for successful transactions and in console mode.
        """
        if self.status or CONFIG.mode == "console":
            return
        if not web3.supports_traces:
            # if traces are not available, do not attempt to determine the revert reason
            raise exc or ValueError("Execution reverted")
        if self._dev_revert_msg is None:
            # no revert message and unable to check dev string - have to get trace
            self._expand_trace()
        if self.contract_address:
            # failed deployment - no meaningful source to display
            source = ""
        elif CONFIG.argv["revert"]:
            source = self._traceback_string()
        else:
            source = self._error_string(1)
        contract = state._find_contract(self.receiver)
        if contract:
            # look for an inline "dev:" comment on the reverting source line
            marker = "//" if contract._build["language"] == "Solidity" else "#"
            line = self._traceback_string().split("\n")[-1]
            if f"{marker} dev: " in line:
                self._dev_revert_msg = line[line.index(marker) + len(marker) : -5].strip()
        raise exc._with_attr(
            source=source, revert_msg=self._revert_msg, dev_revert_msg=self._dev_revert_msg
        )
    def _await_confirmation(self, block_number: int = None, required_confs: int = 1) -> None:
        """Poll the node until this tx has `required_confs` confirmations.

        Detects dropped/replaced transactions via the sender's nonce, updates
        receipt attributes once mined, and finally sets `self._confirmed`.
        Runs in a background thread when called from `__init__`.
        """
        # await first confirmation
        block_number = block_number or self.block_number
        nonce_time = 0.0
        sender_nonce = 0
        while True:
            # every 15 seconds, check if the nonce increased without a confirmation of
            # this specific transaction. if this happens, the tx has likely dropped
            # and we should stop waiting.
            if time.time() - nonce_time > 15:
                sender_nonce = web3.eth.get_transaction_count(str(self.sender))
                nonce_time = time.time()
            try:
                receipt = web3.eth.get_transaction_receipt(HexBytes(self.txid))
            except TransactionNotFound:
                receipt = None
            # the null blockHash check is required for older versions of Parity
            # taken from `web3._utils.transactions.wait_for_transaction_receipt`
            if receipt is not None and receipt["blockHash"] is not None:
                break
            # continuation of the nonce logic 2 sections prior. we must check the receipt
            # after querying the nonce, because in the other order there is a chance that
            # the tx would confirm after checking the receipt but before checking the nonce
            if sender_nonce > self.nonce:
                self.status = Status(-2)
                self._confirmed.set()
                return
            if not block_number and not self._silent and required_confs > 0:
                if required_confs == 1:
                    sys.stdout.write(f"  Waiting for confirmation... {_marker[0]}\r")
                else:
                    sys.stdout.write(
                        f"  Required confirmations: {bright_yellow}0/"
                        f"{required_confs}{color}  {_marker[0]}\r"
                    )
                _marker.rotate(1)
                sys.stdout.flush()
            time.sleep(1)
        # silence other dropped tx's immediately after confirmation to avoid output weirdness
        for dropped_tx in state.TxHistory().filter(
            sender=self.sender, nonce=self.nonce, key=lambda k: k != self
        ):
            dropped_tx._silent = True
        self.block_number = receipt["blockNumber"]
        # wait for more confirmations if required and handle uncle blocks
        remaining_confs = required_confs
        while remaining_confs > 0 and required_confs > 1:
            try:
                receipt = web3.eth.get_transaction_receipt(self.txid)
                self.block_number = receipt["blockNumber"]
            except TransactionNotFound:
                # the block containing the tx was uncled - go back to waiting
                if not self._silent:
                    sys.stdout.write(f"\r{red}Transaction was lost...{color}{' ' * 8}")
                    sys.stdout.flush()
                # check if tx is still in mempool, this will raise otherwise
                tx = web3.eth.get_transaction(self.txid)
                self.block_number = None
                return self._await_confirmation(tx.get("blockNumber"), required_confs)
            if required_confs - self.confirmations != remaining_confs:
                remaining_confs = required_confs - self.confirmations
                if not self._silent:
                    sys.stdout.write(
                        f"\rRequired confirmations: {bright_yellow}{self.confirmations}/"
                        f"{required_confs}{color}  "
                    )
                    if remaining_confs == 0:
                        sys.stdout.write("\n")
                    sys.stdout.flush()
            if remaining_confs > 0:
                time.sleep(1)
        self._set_from_receipt(receipt)
        # if coverage evaluation is active, evaluate the trace
        if (
            CONFIG.argv["coverage"]
            and not coverage._check_cached(self.coverage_hash)
            and self.trace
        ):
            self._expand_trace()
        if not self._silent and required_confs > 0:
            print(self._confirm_output())
        # set the confirmation event and mark other tx's with the same nonce as dropped
        self._confirmed.set()
        for dropped_tx in state.TxHistory().filter(
            sender=self.sender, nonce=self.nonce, key=lambda k: k != self
        ):
            dropped_tx.status = Status(-2)
            dropped_tx._confirmed.set()
    def _set_from_tx(self, tx: Dict) -> None:
        """Populate attributes available from the (possibly pending) tx data."""
        if not self.sender:
            self.sender = EthAddress(tx["from"])
        # `to` is absent for contract deployments
        self.receiver = EthAddress(tx["to"]) if tx.get("to") else None
        self.value = Wei(tx["value"])
        self.gas_price = tx.get("gasPrice")
        self.max_fee = tx.get("maxFeePerGas")
        self.priority_fee = tx.get("maxPriorityFeePerGas")
        self.gas_limit = tx["gas"]
        self.input = hexbytes_to_hexstring(tx["input"])
        self.nonce = tx["nonce"]
        # missing `type` implies a legacy (type 0) transaction
        self.type = int(HexBytes(tx.get("type", 0)).hex(), 16)
        # if receiver is a known contract, set function name
        if self.fn_name:
            return
        try:
            contract = state._find_contract(tx.get("to"))
            if contract is not None:
                self.contract_name = contract._name
                calldata = hexbytes_to_hexstring(tx["input"])
                self.fn_name = contract.get_method(calldata)
        except ContractNotFound:
            # required in case the contract has self destructed
            # other aspects of functionality will be broken, but this way we
            # can at least return a receipt
            pass
    def _set_from_receipt(self, receipt: TxReceipt) -> None:
        """Sets object attributes based on the transaction receipt."""
        self.block_number = receipt["blockNumber"]
        self.txindex = receipt["transactionIndex"]
        self.gas_used = receipt["gasUsed"]
        self.logs = receipt["logs"]
        self.status = Status(receipt["status"])
        # EIP-1559 receipts report the price actually paid
        if "effectiveGasPrice" in receipt:
            self.gas_price = receipt["effectiveGasPrice"]
        self.contract_address = receipt["contractAddress"]
        if self.contract_address and not self.contract_name:
            self.contract_name = "UnknownContract"
        # stable fingerprint of this tx, used as a coverage-cache key
        base = (
            f"{self.nonce}{self.block_number}{self.sender}{self.receiver}"
            f"{self.value}{self.input}{int(self.status)}{self.gas_used}{self.txindex}"
        )
        self.coverage_hash = sha1(base.encode()).hexdigest()
        if self.fn_name:
            state.TxHistory()._gas(self._full_name(), receipt["gasUsed"], self.status == Status(1))
    def _confirm_output(self) -> str:
        """Build the console summary string printed once the tx confirms."""
        status = ""
        if not self.status:
            # only show a revert reason when traces are available to find one
            revert_msg = self.revert_msg if web3.supports_traces else None
            status = f"({bright_red}{revert_msg or 'reverted'}{color}) "
        result = (
            f"\r  {self._full_name()} confirmed {status}  "
            f"Block: {bright_blue}{self.block_number}{color}   "
            f"Gas used: {bright_blue}{self.gas_used}{color} "
            f"({bright_blue}{self.gas_used / self.gas_limit:.2%}{color})"
        )
        if self.type == 2 and self.gas_price is not None:
            result += f"   Gas price: {bright_blue}{self.gas_price / 10 ** 9}{color} gwei"
        if self.status and self.contract_address:
            result += (
                f"\n  {self.contract_name} deployed at: "
                f"{bright_blue}{self.contract_address}{color}"
            )
        return result + "\n"
    def _get_trace(self) -> None:
        """Retrieves the stack trace via debug_traceTransaction and finds the
        return value, revert message and event logs in the trace.

        Raises:
            RPCRequestError: if the node does not support tracing, the RPC
                request fails, or the node returns an error.
        """
        # check if trace has already been retrieved, or the tx warrants it
        if self._raw_trace is not None:
            return
        self._raw_trace = []
        # a plain ether transfer (empty calldata, exactly 21000 gas) has no trace
        if self.input == "0x" and self.gas_used == 21000:
            self._modified_state = False
            self._trace = []
            return
        if not web3.supports_traces:
            raise RPCRequestError("Node client does not support `debug_traceTransaction`")
        try:
            trace = web3.provider.make_request(
                # Set enableMemory to all RPC as anvil return the memory key
                "debug_traceTransaction",
                (self.txid, {"disableStorage": CONFIG.mode != "console", "enableMemory": True}),
            )
        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
            msg = f"Encountered a {type(e).__name__} while requesting "
            msg += "`debug_traceTransaction`. The local RPC client has likely crashed."
            if CONFIG.argv["coverage"]:
                msg += " If the error persists, add the `skip_coverage` marker to this test."
            raise RPCRequestError(msg) from None
        if "error" in trace:
            self._modified_state = None
            self._trace_exc = RPCRequestError(trace["error"]["message"])
            raise self._trace_exc
        self._raw_trace = trace = trace["result"]["structLogs"]
        if not trace:
            self._modified_state = False
            return
        # different nodes return slightly different formats. its really fun to handle
        # geth/nethermind returns unprefixed and with 0-padding for stack and memory
        # erigon returns 0x-prefixed and without padding (but their memory values are like geth)
        fix_stack = False
        for step in trace:
            if not step["stack"]:
                continue
            check = step["stack"][0]
            if not isinstance(check, str):
                break
            if check.startswith("0x"):
                fix_stack = True
            break
        fix_gas = isinstance(trace[0]["gas"], str)
        if fix_stack or fix_gas:
            # normalize stack/gas representations in place before further processing
            for step in trace:
                if fix_stack:
                    # for stack values, we need 32 bytes (64 chars) without the 0x prefix
                    # NOTE removeprefix is used for compatibility with both hexbytes<1 and >=1
                    step["stack"] = [
                        HexBytes(s).hex().removeprefix("0x").zfill(64) for s in step["stack"]
                    ]
                if fix_gas:
                    # handle traces where numeric values are returned as hex (Nethermind)
                    step["gas"] = int(step["gas"], 16)
                    # Check if gasCost is hex before converting.
                    if isinstance(step["gasCost"], str):
                        step["gasCost"] = int.from_bytes(
                            HexBytes(step["gasCost"]), "big", signed=True
                        )
                    if isinstance(step["pc"], str):  # Check if pc is hex before converting.
                        step["pc"] = int(step["pc"], 16)
        if self.status:
            self._confirmed_trace(trace)
        else:
            self._reverted_trace(trace)
def _confirmed_trace(self, trace: Sequence) -> None:
self._modified_state = next((True for i in trace if i["op"] == "SSTORE"), False)
if trace[-1]["op"] != "RETURN" or self.contract_address:
return
contract = state._find_contract(self.receiver)
if contract:
data = _get_memory(trace[-1], -1)
fn = contract.get_method_object(self.input)
if not fn:
warn(f"Unable to find function on {contract} for input {self.input}")
return
self._return_value = fn.decode_output(data)
    def _reverted_trace(self, trace: Sequence) -> None:
        """Evaluate the trace of a reverted transaction.

        Walks the trace backwards from REVERT/INVALID instructions and tries
        to populate `_revert_msg` (the user-facing revert reason) and
        `_dev_revert_msg` (the `dev:` comment string from the source, if any).
        """
        # a reverted transaction cannot have modified chain state
        self._modified_state = False
        if self.contract_address:
            # failed deployment - check for the EIP-170 code size limit (24577 bytes)
            step = next((i for i in trace if i["op"] == "CODECOPY"), None)
            if step is not None and int(step["stack"][-3], 16) > 24577:
                self._revert_msg = "exceeds EIP-170 size limit"
                self._dev_revert_msg = ""

        if self._dev_revert_msg is not None:
            # dev revert message already determined, nothing more to do
            return

        # iterate over revert instructions in reverse to find revert message
        for step in (i for i in trace[::-1] if i["op"] in ("REVERT", "INVALID")):
            if step["op"] == "REVERT" and int(step["stack"][-2], 16):
                # get returned error string from stack
                data = _get_memory(step, -1)
                self._revert_msg = decode_typed_error(hexbytes_to_hexstring(data))
            elif self.contract_address:
                # deployment failure with no revert data - use a generic message
                self._revert_msg = "invalid opcode" if step["op"] == "INVALID" else ""
                self._dev_revert_msg = ""
                return
            # check for dev revert string using program counter
            dev_revert = build._get_dev_revert(step["pc"]) or None
            if dev_revert is not None:
                self._dev_revert_msg = dev_revert
                if self._revert_msg is None:
                    self._revert_msg = dev_revert
            else:
                # if none is found, expand the trace and get it from the pcMap
                self._expand_trace()
                try:
                    contract = state._find_contract(step["address"])
                    pc_map = contract._build["pcMap"]
                    # if this is the function selector revert, check for a jump
                    if "first_revert" in pc_map[step["pc"]]:
                        idx = trace.index(step) - 4
                        if trace[idx]["pc"] != step["pc"] - 4:
                            step = trace[idx]
                    # if this is the optimizer revert, find the actual source
                    if "optimizer_revert" in pc_map[step["pc"]]:
                        idx = trace.index(step) - 1
                        # look for the most recent jump
                        while trace[idx + 1]["op"] != "JUMPDEST":
                            if trace[idx]["source"] != step["source"]:
                                # if we find another line with a differing source offset prior
                                # to a JUMPDEST, the optimizer revert is also the actual revert
                                idx = trace.index(step)
                                break
                            idx -= 1
                        while not trace[idx]["source"]:
                            # now we're in a yul optimization, keep stepping back
                            # until we find a source offset
                            idx -= 1
                        # at last we have the real location of the revert
                        step["source"] = trace[idx]["source"]
                        step = trace[idx]
                    if "dev" in pc_map[step["pc"]]:
                        self._dev_revert_msg = pc_map[step["pc"]]["dev"]
                    else:
                        # extract the dev revert string from the source code
                        # TODO this technique appears superior to `_get_dev_revert`, and
                        # changes in solc 0.8.0 have necessitated it. the old approach
                        # of building a dev revert map should be refactored out in favor
                        # of this one.
                        source = contract._sources.get(step["source"]["filename"])
                        offset = step["source"]["offset"][1]
                        line = source[offset:].split("\n")[0]
                        marker = "//" if contract._build["language"] == "Solidity" else "#"
                        revert_str = line[line.index(marker) + len(marker) :].strip()
                        if revert_str.startswith("dev:"):
                            self._dev_revert_msg = revert_str
                    if self._revert_msg is None:
                        self._revert_msg = self._dev_revert_msg or ""
                    return
                except (KeyError, AttributeError, TypeError, ValueError):
                    # not enough build/source information - fall through and keep looking
                    pass
            if self._revert_msg is not None:
                if self._dev_revert_msg is None:
                    self._dev_revert_msg = ""
                return
        # no revert message could be recovered - fall back to a generic one
        op = next((i["op"] for i in trace[::-1] if i["op"] in ("REVERT", "INVALID")), None)
        self._revert_msg = "invalid opcode" if op == "INVALID" else ""
    def _expand_trace(self) -> None:
        """Adds the following attributes to each step of the stack trace:

        address: The address executing this contract.
        contractName: The name of the contract.
        fn: The name of the function.
        jumpDepth: Number of jumps made since entering this contract. The
            initial value is 0.
        source: {
            filename: path to the source file for this step
            offset: Start and end offset associated source code
        }

        Also populates `_new_contracts`, `_internal_transfers` and `_subcalls`,
        and records coverage data for contracts that belong to a `Project`.
        """
        if self._raw_trace is None:
            self._get_trace()
        if self._trace is not None:
            # in case `_get_trace` also expanded the trace, do not repeat
            return
        self._trace = trace = self._raw_trace
        self._new_contracts = []
        self._internal_transfers = []
        self._subcalls = []
        if self.contract_address or not trace:
            # deployments and empty traces produce no coverage data
            coverage._add_transaction(self.coverage_hash, {})
            return
        if trace[0]["depth"] == 1:
            # geth-style traces start at depth 1; normalize to 0-based depth
            self._trace_origin = "geth"
            self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]
            for t in trace:
                t["depth"] = t["depth"] - 1
        else:
            self._trace_origin = "ganache"
            if trace[0]["gasCost"] >= 21000:
                # in ganache <6.10.0, gas costs are shifted by one step - we can
                # identify this when the first step has a gas cost >= 21000
                self._call_cost = trace[0]["gasCost"]
                for i in range(len(trace) - 1):
                    trace[i]["gasCost"] = trace[i + 1]["gasCost"]
                trace[-1]["gasCost"] = 0
            else:
                self._call_cost = self.gas_used - trace[0]["gas"] + trace[-1]["gas"]
        # last_map gives a quick reference of previous values at each depth
        last_map = {0: _get_last_map(self.receiver, self.input[:10])}
        coverage_eval: Dict = {last_map[0]["name"]: {}}
        precompile_contract = regex_compile(r"0x0{38}(?:0[1-9]|1[0-8])")
        call_opcodes = ("CALL", "STATICCALL", "DELEGATECALL")
        for i in range(len(trace)):
            # if depth has increased, tx has called into a different contract
            is_depth_increase = trace[i]["depth"] > trace[i - 1]["depth"]
            is_subcall = trace[i - 1]["op"] in call_opcodes
            if is_depth_increase or is_subcall:
                step = trace[i - 1]
                if step["op"] in ("CREATE", "CREATE2"):
                    # creating a new contract
                    out = next(x for x in trace[i:] if x["depth"] == step["depth"])
                    address = out["stack"][-1][-40:]
                    sig = f"<{step['op']}>"
                    calldata = None
                    self._new_contracts.append(EthAddress(address))
                    if int(step["stack"][-1], 16):
                        self._add_internal_xfer(step["address"], address, step["stack"][-1])
                else:
                    # calling an existing contract; CALL/CALLCODE carry a value
                    # argument so the calldata offset sits one slot deeper
                    stack_idx = -4 if step["op"] in ("CALL", "CALLCODE") else -3
                    offset = int(step["stack"][stack_idx], 16)
                    length = int(step["stack"][stack_idx - 1], 16)
                    calldata = HexBytes("".join(step["memory"]))[offset : offset + length]
                    sig = hexbytes_to_hexstring(calldata[:4])
                    address = step["stack"][-2][-40:]
                if is_depth_increase:
                    last_map[trace[i]["depth"]] = _get_last_map(address, sig)
                    coverage_eval.setdefault(last_map[trace[i]["depth"]]["name"], {})
                self._subcalls.append(
                    {"from": step["address"], "to": EthAddress(address), "op": step["op"]}
                )
                if step["op"] in ("CALL", "CALLCODE"):
                    self._subcalls[-1]["value"] = int(step["stack"][-3], 16)
                if is_depth_increase and calldata and last_map[trace[i]["depth"]].get("function"):
                    # decode call inputs when the target function's ABI is known
                    fn = last_map[trace[i]["depth"]]["function"]
                    self._subcalls[-1]["function"] = fn._input_sig
                    try:
                        zip_ = zip(fn.abi["inputs"], fn.decode_input(calldata))
                        inputs = {i[0]["name"]: i[1] for i in zip_}
                        self._subcalls[-1]["inputs"] = inputs
                    except Exception:
                        self._subcalls[-1]["calldata"] = hexbytes_to_hexstring(calldata)
                elif calldata or is_subcall:
                    self._subcalls[-1]["calldata"] = hexbytes_to_hexstring(calldata)
                # calls into precompiles are collapsed into their caller's frame
                if precompile_contract.search(str(self._subcalls[-1]["from"])) is not None:
                    caller = self._subcalls.pop(-2)["from"]
                    self._subcalls[-1]["from"] = caller
            # update trace from last_map
            last = last_map[trace[i]["depth"]]
            trace[i].update(
                address=last["address"],
                contractName=last["name"],
                fn=last["internal_calls"][-1],
                jumpDepth=last["jumpDepth"],
                source=False,
            )
            opcode = trace[i]["op"]
            if opcode == "CALL" and int(trace[i]["stack"][-3], 16):
                self._add_internal_xfer(
                    last["address"], trace[i]["stack"][-2][-40:], trace[i]["stack"][-3]
                )
            # If the function signature is not available for decoding return data attach
            # the encoded data.
            # If the function signature is available this will be overridden by setting
            # `return_value` a few lines below.
            # NOTE: the generator variable `i` below shadows the loop index only
            # inside the genexp scope - it does not affect the outer loop.
            if trace[i]["depth"] and opcode == "RETURN":
                subcall: dict = next(i for i in self._subcalls[::-1] if i["to"] == last["address"])
                if opcode == "RETURN":
                    returndata = _get_memory(trace[i], -1)
                    if returndata.hex().removeprefix("0x"):
                        subcall["returndata"] = hexbytes_to_hexstring(returndata)
            try:
                pc = last["pc_map"][trace[i]["pc"]]
            except (KeyError, TypeError):
                # we don't have enough information about this contract
                continue
            if trace[i]["depth"] and opcode in ("RETURN", "REVERT", "INVALID", "SELFDESTRUCT"):
                # frame is exiting - attach return/revert info to the matching subcall
                subcall: dict = next(i for i in self._subcalls[::-1] if i["to"] == last["address"])
                if opcode == "RETURN":
                    returndata = _get_memory(trace[i], -1)
                    if returndata:
                        fn = last["function"]
                        try:
                            return_values = fn.decode_output(returndata)
                            if len(fn.abi["outputs"]) == 1:
                                return_values = (return_values,)
                            subcall["return_value"] = return_values
                        except Exception:
                            subcall["returndata"] = hexbytes_to_hexstring(returndata)
                    else:
                        subcall["return_value"] = None
                elif opcode == "SELFDESTRUCT":
                    subcall["selfdestruct"] = True
                else:
                    if opcode == "REVERT":
                        data = _get_memory(trace[i], -1)
                        if len(data) > 4:
                            # skip the 4-byte error selector before decoding the string
                            try:
                                subcall["revert_msg"] = decode(["string"], data[4:])[0]
                            except Exception:
                                subcall["revert_msg"] = hexbytes_to_hexstring(data)
                    if "revert_msg" not in subcall and "dev" in pc:
                        subcall["revert_msg"] = pc["dev"]
            if "path" not in pc:
                continue
            trace[i]["source"] = {"filename": last["path_map"][pc["path"]], "offset": pc["offset"]}
            if "fn" not in pc:
                continue
            # calculate coverage
            if last["coverage"]:
                if pc["path"] not in coverage_eval[last["name"]]:
                    coverage_eval[last["name"]][pc["path"]] = [set(), set(), set()]
                if "statement" in pc:
                    coverage_eval[last["name"]][pc["path"]][0].add(pc["statement"])
                if "branch" in pc:
                    if pc["op"] != "JUMPI":
                        last["active_branches"].add(pc["branch"])
                    elif "active_branches" not in last or pc["branch"] in last["active_branches"]:
                        # false, true
                        key = 1 if trace[i + 1]["pc"] == trace[i]["pc"] + 1 else 2
                        coverage_eval[last["name"]][pc["path"]][key].add(pc["branch"])
                        if "active_branches" in last:
                            last["active_branches"].remove(pc["branch"])
            # ignore jumps with no function - they are compiler optimizations
            if "jump" in pc:
                # jump 'i' is calling into an internal function
                if pc["jump"] == "i":
                    try:
                        fn = last["pc_map"][trace[i + 1]["pc"]]["fn"]
                    except (KeyError, IndexError):
                        continue
                    if fn != last["internal_calls"][-1]:
                        last["internal_calls"].append(fn)
                        last["jumpDepth"] += 1
                # jump 'o' is returning from an internal function
                elif last["jumpDepth"] > 0:
                    del last["internal_calls"][-1]
                    last["jumpDepth"] -= 1
        coverage._add_transaction(
            self.coverage_hash, dict((k, v) for k, v in coverage_eval.items() if v)
        )
def _add_internal_xfer(self, from_: str, to: str, value: str) -> None:
if not value.startswith("0x"):
value = f"0x{value}"
self._internal_transfers.append(
{"from": EthAddress(from_), "to": EthAddress(to), "value": Wei(value)}
)
def _full_name(self) -> str:
if self.contract_name and self.fn_name:
return f"{self.contract_name}.{self.fn_name}"
return self.fn_name or "Transaction"
    def info(self) -> None:
        """Displays verbose information about the transaction, including decoded event logs."""
        result = f"Tx Hash: {self.txid}\nFrom: {self.sender}\n"
        if self.contract_address and self.status:
            # successful deployment - show the new contract address
            result += f"New {self.contract_name} address: {self.contract_address}\n"
        else:
            result += f"To: {self.receiver}\n" f"Value: {self.value}\n"
            # only show a function name when calldata carries a real payload
            if self.input != "0x" and int(self.input, 16):
                result += f"Function: {self._full_name()}\n"
        result += (
            f"Block: {self.block_number}\nGas Used: "
            f"{self.gas_used} / {self.gas_limit} "
            f"({self.gas_used / self.gas_limit:.1%})\n"
        )
        if self.events:
            events = list(self.events)
            call_tree: List = ["--------------------------"]
            # group consecutive events by emitting address, one sub-tree per group
            while events:
                idx = next(
                    (events.index(i) for i in events if i.address != events[0].address), len(events)
                )
                contract = state._find_contract(events[0].address)
                if contract:
                    try:
                        name = contract.name()
                    except Exception:
                        # fall back to the build-time name if the call fails
                        name = contract._name
                    sub_tree: List = [f"{name} ({events[0].address})"]
                else:
                    sub_tree = [f"{events[0].address}"]
                for event in events[:idx]:
                    sub_tree.append([event.name, *(f"{k}: {v}" for k, v in event.items())])
                call_tree.append(sub_tree)
                events = events[idx:]
            event_tree = build_tree([call_tree], multiline_pad=0, pad_depth=[0, 1])
            result = f"{result}\nEvents In This Transaction\n{event_tree}"
        result = color.highlight(result)
        status = ""
        if not self.status:
            status = f"({bright_red}{self.revert_msg or 'reverted'}{color})"
        print(f"Transaction was Mined {status}\n---------------------\n{result}")
    def _get_trace_gas(self, start: int, stop: int) -> Tuple[int, int]:
        """Return (internal_gas, total_gas) used by trace steps [start, stop).

        `total_gas` sums all gas costs within the range; `internal_gas`
        excludes gas consumed by deeper frames (subcalls and internal jumps).
        Gas refunds for SSTORE-to-zero and SELFDESTRUCT are approximated
        manually, since the raw trace does not report them.
        """
        total_gas = 0
        internal_gas = 0
        is_internal = True
        trace = self.trace
        for i in range(start, stop):
            # Check if we are in a subfunction or not
            if is_internal and not _step_compare(trace[i], trace[start]):
                is_internal = False
                # For the internal gas tracking we ignore the gas passed to an external call
                if trace[i]["depth"] > trace[start]["depth"]:
                    internal_gas -= trace[i - 1]["gasCost"]
            elif not is_internal and _step_compare(trace[i], trace[start]):
                is_internal = True
            total_gas += trace[i]["gasCost"]
            if is_internal:
                internal_gas += trace[i]["gasCost"]
            # manually add gas refunds where they occur
            if trace[i]["op"] == "SSTORE" and int(trace[i]["stack"][-2], 16) == 0:
                # 15000 gas is refunded if a word is set to 0x0
                # Note: There is currently no way to check if the value was 0x0 before.
                # This will give an incorrect refund if 0x0 is assigned to 0x0.
                total_gas -= 15000
                if is_internal:
                    internal_gas -= 15000
            if trace[i]["op"] == "SELFDESTRUCT":
                # 24000 gas is refunded on selfdestruct
                total_gas -= 24000
                if is_internal:
                    internal_gas -= 24000
        # For external calls, add the remaining gas returned back
        if start > 0 and trace[start]["depth"] > trace[start - 1]["depth"]:
            total_gas += trace[start - 1]["gasCost"]
            internal_gas += trace[start - 1]["gasCost"]
        return internal_gas, total_gas
    @trace_inspection
    def call_trace(self, expand: bool = False) -> None:
        """
        Display the complete sequence of contracts and methods called during
        the transaction. The format:

        Contract.functionName [instruction] start:stop [gas used]

        * start:stop are index values for the `trace` member of this object,
          showing the points where the call begins and ends
        * for calls that include subcalls, gas use is displayed as
          [gas used in this frame / gas used in this frame + subcalls]
        * Calls displayed in red ended with a `REVERT` or `INVALID` instruction.

        Arguments
        ---------
        expand : bool
            If `True`, show an expanded call trace including inputs and return values
        """
        trace = self.trace
        # root node covers the entire trace
        key = _step_internal(
            trace[0], trace[-1], 0, len(trace), self._get_trace_gas(0, len(self.trace))
        )
        call_tree: List = [[key]]
        # active_tree tracks the chain of nodes currently being appended to
        active_tree: List = [call_tree[0]]
        # (index, depth, jumpDepth) for relevant steps in the trace
        trace_index = [(0, 0, 0)] + [
            (i, trace[i]["depth"], trace[i]["jumpDepth"])
            for i in range(1, len(trace))
            if not _step_compare(trace[i], trace[i - 1])
        ]
        # reversed so `pop()` yields subcalls in trace order
        subcalls = self.subcalls[::-1]
        for i, (idx, depth, jump_depth) in enumerate(trace_index[1:], start=1):
            last = trace_index[i - 1]
            if depth == last[1] and jump_depth < last[2]:
                # returning from an internal function, reduce tree by one
                active_tree.pop()
                continue
            elif depth < last[1]:
                # returning from an external call, return tree by jumpDepth of the previous depth
                active_tree = active_tree[: -(last[2] + 1)]
                continue
            if depth > last[1]:
                # called to a new contract
                end = next((x[0] for x in trace_index[i + 1 :] if x[1] < depth), len(trace))
                total_gas, internal_gas = self._get_trace_gas(idx, end)
                key = _step_external(
                    trace[idx],
                    trace[end - 1],
                    idx,
                    end,
                    (total_gas, internal_gas),
                    subcalls.pop(),
                    expand,
                )
            elif depth == last[1] and jump_depth > last[2]:
                # jumped into an internal function
                end = next(
                    (
                        x[0]
                        for x in trace_index[i + 1 :]
                        if x[1] < depth or (x[1] == depth and x[2] < jump_depth)
                    ),
                    len(trace),
                )
                total_gas, internal_gas = self._get_trace_gas(idx, end)
                key = _step_internal(
                    trace[idx], trace[end - 1], idx, end, (total_gas, internal_gas)
                )
            active_tree[-1].append([key])
            active_tree.append(active_tree[-1][-1])
        print(
            f"Call trace for '{bright_blue}{self.txid}{color}':\n"
            f"Initial call cost [{bright_yellow}{self._call_cost} gas{color}]"
        )
        print(build_tree(call_tree).rstrip())
def traceback(self) -> None:
print(self._traceback_string() or "")
    @trace_inspection
    def _traceback_string(self) -> str:
        """Returns an error traceback for the transaction.

        Walks backwards from the REVERT/INVALID step, collecting the entry
        point of each enclosing call frame, then renders each frame's source
        from outermost to innermost. Returns "" for successful transactions
        or when no source information is available.
        """
        if self.status == 1:
            return ""
        trace = self.trace
        try:
            idx = next(i for i in range(len(trace)) if trace[i]["op"] in ("REVERT", "INVALID"))
            trace_range = range(idx, -1, -1)
        except StopIteration:
            return ""
        try:
            # innermost frame: last step at or before the revert that has source info
            result = [next(i for i in trace_range if trace[i]["source"])]
        except StopIteration:
            return ""
        depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
        while True:
            try:
                # step outwards: find the most recent step in a shallower frame
                idx = next(
                    i
                    for i in trace_range
                    if trace[i]["depth"] < depth
                    or (trace[i]["depth"] == depth and trace[i]["jumpDepth"] < jump_depth)
                )
                result.append(idx)
                depth, jump_depth = trace[idx]["depth"], trace[idx]["jumpDepth"]
            except StopIteration:
                break
        return f"{color}Traceback for '{bright_blue}{self.txid}{color}':\n" + "\n".join(
            self._source_string(i, 0) for i in result[::-1]
        )
def error(self, pad: int = 3) -> None:
print(self._error_string(pad) or "")
    @trace_inspection
    def _error_string(self, pad: int = 3) -> str:
        """Returns the source code that caused the transaction to revert.

        Args:
            pad: Number of unrelated lines of code to include before and after

        Returns: source code string, or "" if the tx succeeded or no source
            information could be located
        """
        if self.status == 1:
            return ""
        # if RPC returned a program counter, try to find source without querying trace
        if self._revert_pc:
            highlight, linenos, path, fn_name = build._get_error_source_from_pc(self._revert_pc)
            if highlight:
                return _format_source(highlight, linenos, path, self._revert_pc, -1, fn_name)
            self._revert_pc = None
        # iterate backward through the trace until a step has a source offset
        trace = self.trace
        trace_range = range(len(trace) - 1, -1, -1)
        try:
            # the first `next` acts purely as a guard: it raises StopIteration
            # (returning "") when no REVERT/INVALID step exists; its value is
            # intentionally discarded by the second lookup
            idx = next(i for i in trace_range if trace[i]["op"] in {"REVERT", "INVALID"})
            idx = next(i for i in trace_range if trace[i]["source"])
            return self._source_string(idx, pad)
        except StopIteration:
            return ""
def source(self, idx: int, pad: int = 3) -> None:
print(self._source_string(idx, pad) or "")
@trace_inspection
def _source_string(self, idx: int, pad: int) -> str:
"""Displays the associated source code for a given stack trace step.
Args:
idx: Stack trace step index
pad: Number of unrelated lines of code to include before and after
Returns: source code string
"""
trace = self.trace[idx]
if not trace.get("source", None):
return ""
contract = state._find_contract(self.trace[idx]["address"])
source, linenos = highlight_source(
contract._sources.get(trace["source"]["filename"]), trace["source"]["offset"], pad
)
if not source:
return ""
return _format_source(
source,
linenos,
trace["source"]["filename"],
trace["pc"],
self.trace.index(trace),
trace["fn"],
)
def _format_source(source: str, linenos: Tuple, path: Path, pc: int, idx: int, fn_name: str) -> str:
    """Return a colorized header plus source snippet for one trace step."""
    # single line renders as "line N", a span as "lines N-M"
    if linenos[1] > linenos[0]:
        ln = f"s {bright_blue}{linenos[0]}{dark_white}-{bright_blue}{linenos[1]}"
    else:
        ln = f" {bright_blue}{linenos[0]}"
    return (
        f"{dark_white}Trace step {bright_blue}{idx}{dark_white}, "
        f"program counter {bright_blue}{pc}{dark_white}:\n {dark_white}"
        f'File {bright_magenta}"{path}"{dark_white}, line{ln}'
        f"{dark_white}, in {bright_cyan}{fn_name}{dark_white}:{source}"
    )
def _step_compare(a: Dict, b: Dict) -> bool:
return a["depth"] == b["depth"] and a["jumpDepth"] == b["jumpDepth"]
def _step_internal(
    step: Dict,
    last_step: Dict,
    start: Union[str, int],
    stop: Union[str, int],
    gas: Tuple[int, int],
    subcall: Dict = None,  # NOTE(review): default is None, annotation should be Optional[Dict]
) -> str:
    """Format one call-trace line: `fn [op] start:stop [gas]`.

    `step`/`last_step` are the first and last trace steps of the frame;
    `start`/`stop` are their trace indices. `gas` is the
    (internal, total) pair from `_get_trace_gas`. When `subcall` is given,
    its opcode is included in the line.
    """
    # red when the frame itself ended in REVERT/INVALID
    if last_step["op"] in {"REVERT", "INVALID"} and _step_compare(step, last_step):
        contract_color = color("bright red")
    else:
        contract_color = color() if step["jumpDepth"] else color("bright cyan")
    key = f"{dark_white}{contract_color}{step['fn']} {dark_white}"
    left_bracket = f"{dark_white}["
    right_bracket = f"{dark_white}]"
    if subcall:
        key = f"{key}[{color}{subcall['op']}{right_bracket} "
    key = f"{key}{start}:{stop}{color}"
    if gas:
        # identical values mean no gas was spent in deeper frames
        if gas[0] == gas[1]:
            gas_str = f"{bright_yellow}{gas[0]} gas"
        else:
            gas_str = f"{bright_yellow}{gas[0]} / {gas[1]} gas"
        key = f"{key} {left_bracket}{gas_str}{right_bracket}{color}"
    if last_step["op"] == "SELFDESTRUCT":
        key = f"{key} {left_bracket}{bright_red}SELFDESTRUCT{right_bracket}{color}"
    return key
def _convert_0x_to_empty_bytes(value: Any) -> Any:
# black cannot parse `0x` without any trailing zeros, so we temporarily
# replace it with an empty bytestring
final = []
for item in value:
if isinstance(item, (list, tuple)):
final.append(_convert_0x_to_empty_bytes(item))
elif str(item) == "0x":
final.append(b"")
else:
final.append(item)
return type(value)(final)
def _format(value: Any) -> str:
if isinstance(value, (list, tuple)):
try:
import black
except ImportError:
raise ImportError("You must `pip install black>=20.8b1` to use this feature")
value = _convert_0x_to_empty_bytes(value)
mode = black.FileMode(line_length=60)
value = black.format_str(str(value), mode=mode).replace('b""', "0x")
return str(value)
def _step_external(
    step: Dict,
    last_step: Dict,
    start: Union[str, int],
    stop: Union[str, int],
    gas: Tuple[int, int],
    subcall: Dict,
    expand: bool,
) -> str:
    """Format a call-trace entry for an external call frame.

    Delegates to `_step_internal` for the summary line; when `expand` is
    True, returns a sub-tree including the target address, call value,
    inputs, return data and revert reason where available.
    """
    key = _step_internal(step, last_step, start, stop, gas, subcall)
    if not expand:
        return key
    result: List = [key, f"address: {step['address']}"]
    if "value" in subcall:
        result.append(f"value: {subcall['value']}")
    # "inputs" is only present when the target ABI allowed decoding
    if "inputs" not in subcall:
        result.append(f"calldata: {subcall.get('calldata')}")
    elif subcall["inputs"]:
        result.append(
            ["input arguments:", *(f"{k}: {_format(v)}" for k, v in subcall["inputs"].items())]
        )
    else:
        result.append("input arguments: None")
    if "return_value" in subcall:
        value = subcall["return_value"]
        if isinstance(value, tuple) and len(value) > 1:
            result.append(["return values:", *(_format(i) for i in value)])
        else:
            # unwrap single-element tuples for cleaner display
            if isinstance(value, tuple):
                value = value[0]
            result.append(f"return value: {_format(value)}")
    elif "returndata" in subcall:
        result.append(f"returndata: {subcall['returndata']}")
    if "revert_msg" in subcall:
        result.append(f"revert reason: {bright_red}{subcall['revert_msg']}{color}")
    return build_tree([result], multiline_pad=0).rstrip()
def _get_memory(step: Dict, idx: int) -> HexBytes:
    """Return the memory region referenced by (offset, length) stack entries.

    `step["stack"][idx]` holds the byte offset and `step["stack"][idx - 1]`
    the length, both as hex strings. The result is right-padded with zero
    bytes when allocated memory ends before `length` bytes.
    """
    offset = int(step["stack"][idx], 16)
    length = int(step["stack"][idx - 1], 16)
    memory = HexBytes("".join(step["memory"]))
    data = memory[offset : offset + length]
    padding = b"\x00" * (length - len(data))
    return HexBytes(data + padding)
def _get_last_map(address: EthAddress, sig: str) -> Dict:
    """Build the per-depth reference dict used while expanding a trace.

    Looks up the contract at `address` and returns a dict holding the
    contract object, the function matching 4-byte selector `sig`, its
    pcMap/source-path map, and bookkeeping slots (`jumpDepth`,
    `internal_calls`, coverage flags) used by `_expand_trace`.
    """
    contract = state._find_contract(address)
    last_map = {"address": EthAddress(address), "jumpDepth": 0, "name": None, "coverage": False}
    if contract:
        if contract.get_method(sig):
            full_fn_name = f"{contract._name}.{contract.get_method(sig)}"
        else:
            # selector not in the ABI - fall back to the bare contract name
            full_fn_name = contract._name
        last_map.update(
            contract=contract,
            function=contract.get_method_object(sig),
            name=contract._name,
            internal_calls=[full_fn_name],
            path_map=contract._build.get("allSourcePaths"),
            pc_map=contract._build.get("pcMap"),
        )
        if isinstance(contract._project, project_main.Project):
            # only evaluate coverage for contracts that are part of a `Project`
            last_map["coverage"] = True
            if contract._build.get("language") == "Solidity":
                last_map["active_branches"] = set()
    else:
        last_map.update(contract=None, internal_calls=[f"<UnknownContract>.{sig}"], pc_map=None)
    return last_map
def _is_call_to_precompile(subcall: dict) -> bool:
precompile_contract = regex_compile(r"0x0{38}(?:0[1-9]|1[0-8])")
return True if precompile_contract.search(str(subcall["to"])) is not None else False
| TransactionReceipt |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 207811,
"end": 208800
} | class ____(Operation):
def call(self, x1, x2):
return backend.numpy.vdot(x1, x2)
def compute_output_spec(self, x1, x2):
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
return KerasTensor([], dtype=dtype)
@keras_export(["keras.ops.vdot", "keras.ops.numpy.vdot"])
def vdot(x1, x2):
"""Return the dot product of two vectors.
If the first argument is complex, the complex conjugate of the first
argument is used for the calculation of the dot product.
Multidimensional tensors are flattened before the dot product is taken.
Args:
x1: First input tensor. If complex, its complex conjugate is taken
before calculation of the dot product.
x2: Second input tensor.
Returns:
Output tensor.
"""
if any_symbolic_tensors((x1, x2)):
return Vdot().symbolic_call(x1, x2)
return backend.numpy.vdot(x1, x2)
| Vdot |
python | celery__celery | t/unit/app/test_amqp.py | {
"start": 1755,
"end": 4331
} | class ____:
def test_queues_format(self):
self.app.amqp.queues._consume_from = {}
assert self.app.amqp.queues.format() == ''
def test_with_defaults(self):
assert Queues(None) == {}
def test_add(self):
q = Queues()
q.add('foo', exchange='ex', routing_key='rk')
assert 'foo' in q
assert isinstance(q['foo'], Queue)
assert q['foo'].routing_key == 'rk'
def test_setitem_adds_default_exchange(self):
q = Queues(default_exchange=Exchange('bar'))
assert q.default_exchange
queue = Queue('foo', exchange=None)
queue.exchange = None
q['foo'] = queue
assert q['foo'].exchange == q.default_exchange
def test_select_add(self):
q = Queues()
q.select(['foo', 'bar'])
q.select_add('baz')
assert sorted(q._consume_from.keys()) == ['bar', 'baz', 'foo']
def test_deselect(self):
q = Queues()
q.select(['foo', 'bar'])
q.deselect('bar')
assert sorted(q._consume_from.keys()) == ['foo']
def test_add_default_exchange(self):
ex = Exchange('fff', 'fanout')
q = Queues(default_exchange=ex)
q.add(Queue('foo'))
assert q['foo'].exchange.name == 'fff'
def test_alias(self):
q = Queues()
q.add(Queue('foo', alias='barfoo'))
assert q['barfoo'] is q['foo']
@pytest.mark.parametrize('queues_kwargs,qname,q,expected', [
({'max_priority': 10},
'foo', 'foo', {'x-max-priority': 10}),
({'max_priority': 10},
'xyz', Queue('xyz', queue_arguments={'x-max-priority': 3}),
{'x-max-priority': 3}),
({'max_priority': 10},
'moo', Queue('moo', queue_arguments=None),
{'x-max-priority': 10}),
({'max_priority': None},
'foo2', 'foo2',
None),
({'max_priority': None},
'xyx3', Queue('xyx3', queue_arguments={'x-max-priority': 7}),
{'x-max-priority': 7}),
])
def test_with_max_priority(self, queues_kwargs, qname, q, expected):
queues = Queues(**queues_kwargs)
queues.add(q)
assert queues[qname].queue_arguments == expected
def test_missing_queue_quorum(self):
queues = Queues(create_missing_queue_type="quorum",
create_missing_queue_exchange_type="topic")
q = queues.new_missing("spontaneous")
assert q.name == "spontaneous"
assert q.queue_arguments == {"x-queue-type": "quorum"}
assert q.exchange.type == "topic"
| test_Queues |
python | mlflow__mlflow | mlflow/sagemaker/__init__.py | {
"start": 84486,
"end": 129527
} | class ____(BaseDeploymentClient):
"""
Initialize a deployment client for SageMaker. The default region and assumed role ARN will
be set according to the value of the `target_uri`.
This class is meant to supersede the other ``mlflow.sagemaker`` real-time serving API's.
It is also designed to be used through the :py:mod:`mlflow.deployments` module.
This means that you can deploy to SageMaker using the
`mlflow deployments CLI <https://www.mlflow.org/docs/latest/cli.html#mlflow-deployments>`_ and
get a client through the :py:mod:`mlflow.deployments.get_deploy_client` function.
Args:
target_uri: A URI that follows one of the following formats:
- ``sagemaker``: This will set the default region to `us-west-2` and
the default assumed role ARN to `None`.
- ``sagemaker:/region_name``: This will set the default region to
`region_name` and the default assumed role ARN to `None`.
- ``sagemaker:/region_name/assumed_role_arn``: This will set the default
region to `region_name` and the default assumed role ARN to
`assumed_role_arn`.
When an `assumed_role_arn` is provided without a `region_name`,
an MlflowException will be raised.
"""
def __init__(self, target_uri):
super().__init__(target_uri=target_uri)
# Default region_name and assumed_role_arn when
# the target_uri is `sagemaker` or `sagemaker:/`
self.region_name = DEFAULT_REGION_NAME
self.assumed_role_arn = None
self._get_values_from_target_uri()
def _get_values_from_target_uri(self):
parsed = urllib.parse.urlparse(self.target_uri)
values_str = parsed.path.strip("/")
if not parsed.scheme or not values_str:
return
separator_index = values_str.find("/")
if separator_index == -1:
# values_str would look like us-east-1
self.region_name = values_str
else:
# values_str could look like us-east-1/arn:aws:1234:role/assumed_role
self.region_name = values_str[:separator_index]
self.assumed_role_arn = values_str[separator_index + 1 :]
# if values_str contains multiple interior slashes such as
# us-east-1/////arn:aws:1234:role/assumed_role, remove
# the extra slashes that come before "arn"
self.assumed_role_arn = self.assumed_role_arn.strip("/")
if self.region_name.startswith("arn"):
raise MlflowException(
message=(
"It looks like the target_uri contains an IAM role ARN without a region name.\n"
"A region name must be provided when the target_uri contains a role ARN.\n"
"In this case, the target_uri must follow the format: "
"sagemaker:/region_name/assumed_role_arn.\n"
f"The provided target_uri is: {self.target_uri}\n"
),
error_code=INVALID_PARAMETER_VALUE,
)
def _default_deployment_config(self, create_mode=True):
config = {
"assume_role_arn": self.assumed_role_arn,
"execution_role_arn": None,
"bucket": None,
"image_url": None,
"region_name": self.region_name,
"archive": False,
"instance_type": DEFAULT_SAGEMAKER_INSTANCE_TYPE,
"instance_count": DEFAULT_SAGEMAKER_INSTANCE_COUNT,
"vpc_config": None,
"data_capture_config": None,
"synchronous": True,
"timeout_seconds": 1200,
"variant_name": None,
"env": None,
"tags": None,
"async_inference_config": {},
"serverless_config": {},
}
if create_mode:
config["mode"] = DEPLOYMENT_MODE_CREATE
else:
config["mode"] = DEPLOYMENT_MODE_REPLACE
return config
def _apply_custom_config(self, config, custom_config):
int_fields = {"instance_count", "timeout_seconds"}
bool_fields = {"synchronous", "archive"}
dict_fields = {
"vpc_config",
"data_capture_config",
"tags",
"env",
"async_inference_config",
"serverless_config",
}
for key, value in custom_config.items():
if key not in config:
continue
if key in int_fields and not isinstance(value, int):
value = int(value)
elif key in bool_fields and not isinstance(value, bool):
value = value == "True"
elif key in dict_fields and not isinstance(value, dict):
value = json.loads(value)
config[key] = value
def create_deployment(self, name, model_uri, flavor=None, config=None, endpoint=None):
"""
Deploy an MLflow model on AWS SageMaker.
The currently active AWS account must have correct permissions set up.
This function creates a SageMaker endpoint. For more information about the input data
formats accepted by this endpoint, see the
`MLflow deployment tools documentation <../../deployment/deploy-model-to-sagemaker.html>`_.
Args:
name: Name of the deployed application.
model_uri: The location, in URI format, of the MLflow model to deploy to SageMaker.
For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
flavor: The name of the flavor of the model to use for deployment. Must be either
``None`` or one of mlflow.sagemaker.SUPPORTED_DEPLOYMENT_FLAVORS.
If ``None``, a flavor is automatically selected from the model's available
flavors. If the specified flavor is not present or not supported for
deployment, an exception will be thrown.
config: Configuration parameters. The supported parameters are:
- ``assume_role_arn``: The name of an IAM cross-account role to be assumed
to deploy SageMaker to another AWS account. If this parameter is not
specified, the role given in the ``target_uri`` will be used. If the
role is not given in the ``target_uri``, defaults to ``us-west-2``.
- ``execution_role_arn``: The name of an IAM role granting the SageMaker
service permissions to access the specified Docker image and S3 bucket
containing MLflow model artifacts. If unspecified, the currently-assumed
role will be used. This execution role is passed to the SageMaker service
when creating a SageMaker model from the specified MLflow model. It is
passed as the ``ExecutionRoleArn`` parameter of the `SageMaker
CreateModel API call <https://docs.aws.amazon.com/sagemaker/latest/
dg/API_CreateModel.html>`_. This role is *not* assumed for any other
call. For more information about SageMaker execution roles for model
creation, see
https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html.
- ``bucket``: S3 bucket where model artifacts will be stored. Defaults to a
SageMaker-compatible bucket name.
- ``image_url``: URL of the ECR-hosted Docker image the model should be
deployed into, produced by ``mlflow sagemaker build-and-push-container``.
This parameter can also be specified by the environment variable
``MLFLOW_SAGEMAKER_DEPLOY_IMG_URL``.
- ``region_name``: Name of the AWS region to which to deploy the application.
If unspecified, use the region name given in the ``target_uri``.
If it is also not specified in the ``target_uri``,
defaults to ``us-west-2``.
- ``archive``: If ``True``, any pre-existing SageMaker application resources
that become inactive (i.e. as a result of deploying in
``mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE`` mode) are preserved.
These resources may include unused SageMaker models and endpoint
configurations that were associated with a prior version of the
application endpoint. If ``False``, these resources are deleted.
In order to use ``archive=False``, ``create_deployment()`` must be executed
synchronously with ``synchronous=True``. Defaults to ``False``.
- ``instance_type``: The type of SageMaker ML instance on which to deploy the
model. For a list of supported instance types, see
https://aws.amazon.com/sagemaker/pricing/instance-types/.
Defaults to ``ml.m4.xlarge``.
- ``instance_count``: The number of SageMaker ML instances on which to deploy
the model. Defaults to ``1``.
- ``synchronous``: If ``True``, this function will block until the deployment
process succeeds or encounters an irrecoverable failure. If ``False``,
this function will return immediately after starting the deployment
process. It will not wait for the deployment process to complete;
in this case, the caller is responsible for monitoring the health and
status of the pending deployment via native SageMaker APIs or the AWS
console. Defaults to ``True``.
- ``timeout_seconds``: If ``synchronous`` is ``True``, the deployment process
will return after the specified number of seconds if no definitive result
(success or failure) is achieved. Once the function returns, the caller is
responsible for monitoring the health and status of the pending
deployment using native SageMaker APIs or the AWS console. If
``synchronous`` is ``False``, this parameter is ignored.
Defaults to ``300``.
- ``vpc_config``: A dictionary specifying the VPC configuration to use when
creating the new SageMaker model associated with this application.
The acceptable values for this parameter are identical to those of the
``VpcConfig`` parameter in the `SageMaker boto3 client's create_model
method <https://boto3.readthedocs.io/en/latest/reference/services/sagemaker.html
#SageMaker.Client.create_model>`_. For more information, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_VpcConfig.html.
Defaults to ``None``.
- ``data_capture_config``: A dictionary specifying the data capture
configuration to use when creating the new SageMaker model associated with
this application.
For more information, see
https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DataCaptureConfig.html.
Defaults to ``None``.
- ``variant_name``: A string specifying the desired name when creating a production
variant. Defaults to ``None``.
- ``async_inference_config``: A dictionary specifying the
async_inference_configuration
- ``serverless_config``: A dictionary specifying the serverless_configuration
- ``env``: A dictionary specifying environment variables as key-value
pairs to be set for the deployed model. Defaults to ``None``.
- ``tags``: A dictionary of key-value pairs representing additional
tags to be set for the deployed model. Defaults to ``None``.
endpoint: (optional) Endpoint to create the deployment under. Currently unsupported
.. code-block:: python
:caption: Python example
from mlflow.deployments import get_deploy_client
vpc_config = {
"SecurityGroupIds": [
"sg-123456abc",
],
"Subnets": [
"subnet-123456abc",
],
}
config = dict(
assume_role_arn="arn:aws:123:role/assumed_role",
execution_role_arn="arn:aws:456:role/execution_role",
bucket_name="my-s3-bucket",
image_url="1234.dkr.ecr.us-east-1.amazonaws.com/mlflow-test:1.23.1",
region_name="us-east-1",
archive=False,
instance_type="ml.m5.4xlarge",
instance_count=1,
synchronous=True,
timeout_seconds=300,
vpc_config=vpc_config,
variant_name="prod-variant-1",
env={"DISABLE_NGINX": "true", "GUNICORN_CMD_ARGS": '"--timeout 60"'},
tags={"training_timestamp": "2022-11-01T05:12:26"},
)
client = get_deploy_client("sagemaker")
client.create_deployment(
"my-deployment",
model_uri="/mlruns/0/abc/model",
flavor="python_function",
config=config,
)
.. code-block:: bash
:caption: Command-line example
mlflow deployments create --target sagemaker:/us-east-1/arn:aws:123:role/assumed_role \\
--name my-deployment \\
--model-uri /mlruns/0/abc/model \\
--flavor python_function\\
-C execution_role_arn=arn:aws:456:role/execution_role \\
-C bucket_name=my-s3-bucket \\
-C image_url=1234.dkr.ecr.us-east-1.amazonaws.com/mlflow-test:1.23.1 \\
-C region_name=us-east-1 \\
-C archive=False \\
-C instance_type=ml.m5.4xlarge \\
-C instance_count=1 \\
-C synchronous=True \\
-C timeout_seconds=300 \\
-C variant_name=prod-variant-1 \\
-C vpc_config='{"SecurityGroupIds": ["sg-123456abc"], \\
"Subnets": ["subnet-123456abc"]}' \\
-C data_capture_config='{"EnableCapture": True, \\
'InitialSamplingPercentage': 100, 'DestinationS3Uri": 's3://my-bucket/path', \\
'CaptureOptions': [{'CaptureMode': 'Output'}]}'
-C env='{"DISABLE_NGINX": "true", "GUNICORN_CMD_ARGS": "\"--timeout 60\""}' \\
-C tags='{"training_timestamp": "2022-11-01T05:12:26"}' \\
"""
final_config = self._default_deployment_config()
if config:
self._apply_custom_config(final_config, config)
app_name, flavor = _deploy(
app_name=name,
model_uri=model_uri,
flavor=flavor,
execution_role_arn=final_config["execution_role_arn"],
assume_role_arn=final_config["assume_role_arn"],
bucket=final_config["bucket"],
image_url=final_config["image_url"],
region_name=final_config["region_name"],
mode=mlflow.sagemaker.DEPLOYMENT_MODE_CREATE,
archive=final_config["archive"],
instance_type=final_config["instance_type"],
instance_count=final_config["instance_count"],
vpc_config=final_config["vpc_config"],
data_capture_config=final_config["data_capture_config"],
synchronous=final_config["synchronous"],
timeout_seconds=final_config["timeout_seconds"],
variant_name=final_config["variant_name"],
async_inference_config=final_config["async_inference_config"],
serverless_config=final_config["serverless_config"],
env=final_config["env"],
tags=final_config["tags"],
)
return {"name": app_name, "flavor": flavor}
def update_deployment(self, name, model_uri, flavor=None, config=None, endpoint=None):
"""
Update a deployment on AWS SageMaker. This function can replace or add a new model to
an existing SageMaker endpoint. By default, this function replaces the existing model
with the new one. The currently active AWS account must have correct permissions set up.
Args:
name: Name of the deployed application.
model_uri: The location, in URI format, of the MLflow model to deploy to SageMaker.
For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
flavor: The name of the flavor of the model to use for deployment. Must be either
``None`` or one of mlflow.sagemaker.SUPPORTED_DEPLOYMENT_FLAVORS.
If ``None``, a flavor is automatically selected from the model's available
flavors. If the specified flavor is not present or not supported for
deployment, an exception will be thrown.
config: Configuration parameters. The supported parameters are:
- ``assume_role_arn``: The name of an IAM cross-account role to be assumed
to deploy SageMaker to another AWS account. If this parameter is not
specified, the role given in the ``target_uri`` will be used. If the
role is not given in the ``target_uri``, defaults to ``us-west-2``.
- ``execution_role_arn``: The name of an IAM role granting the SageMaker
service permissions to access the specified Docker image and S3 bucket
containing MLflow model artifacts. If unspecified, the currently-assumed
role will be used. This execution role is passed to the SageMaker service
when creating a SageMaker model from the specified MLflow model. It is
passed as the ``ExecutionRoleArn`` parameter of the `SageMaker
CreateModel API call <https://docs.aws.amazon.com/sagemaker/latest/
dg/API_CreateModel.html>`_. This role is *not* assumed for any other
call. For more information about SageMaker execution roles for model
creation, see
https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html.
- ``bucket``: S3 bucket where model artifacts will be stored. Defaults to a
SageMaker-compatible bucket name.
- ``image_url``: URL of the ECR-hosted Docker image the model should be
deployed into, produced by ``mlflow sagemaker build-and-push-container``.
This parameter can also be specified by the environment variable
``MLFLOW_SAGEMAKER_DEPLOY_IMG_URL``.
- ``region_name``: Name of the AWS region to which to deploy the application.
If unspecified, use the region name given in the ``target_uri``.
If it is also not specified in the ``target_uri``,
defaults to ``us-west-2``.
- ``mode``: The mode in which to deploy the application.
Must be one of the following:
``mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE``
If an application of the specified name exists, its model(s) is
replaced with the specified model. If no such application exists,
it is created with the specified name and model.
This is the default mode.
``mlflow.sagemaker.DEPLOYMENT_MODE_ADD``
Add the specified model to a pre-existing application with the
specified name, if one exists. If the application does not exist,
a new application is created with the specified name and model.
NOTE: If the application **already exists**, the specified model is
added to the application's corresponding SageMaker endpoint with an
initial weight of zero (0). To route traffic to the model,
update the application's associated endpoint configuration using
either the AWS console or the ``UpdateEndpointWeightsAndCapacities``
function defined in https://docs.aws.amazon.com/sagemaker/latest/dg/API_UpdateEndpointWeightsAndCapacities.html.
- ``archive``: If ``True``, any pre-existing SageMaker application resources
that become inactive (i.e. as a result of deploying in
``mlflow.sagemaker.DEPLOYMENT_MODE_REPLACE`` mode) are preserved.
These resources may include unused SageMaker models and endpoint
configurations that were associated with a prior version of the
application endpoint. If ``False``, these resources are deleted.
In order to use ``archive=False``, ``update_deployment()`` must be executed
synchronously with ``synchronous=True``. Defaults to ``False``.
- ``instance_type``: The type of SageMaker ML instance on which to deploy the
model. For a list of supported instance types, see
https://aws.amazon.com/sagemaker/pricing/instance-types/.
Defaults to ``ml.m4.xlarge``.
- ``instance_count``: The number of SageMaker ML instances on which to deploy
the model. Defaults to ``1``.
- ``synchronous``: If ``True``, this function will block until the deployment
process succeeds or encounters an irrecoverable failure. If ``False``,
this function will return immediately after starting the deployment
process. It will not wait for the deployment process to complete;
in this case, the caller is responsible for monitoring the health and
status of the pending deployment via native SageMaker APIs or the AWS
console. Defaults to ``True``.
- ``timeout_seconds``: If ``synchronous`` is ``True``, the deployment process
will return after the specified number of seconds if no definitive result
(success or failure) is achieved. Once the function returns, the caller is
responsible for monitoring the health and status of the pending
deployment using native SageMaker APIs or the AWS console. If
``synchronous`` is ``False``, this parameter is ignored.
Defaults to ``300``.
- ``variant_name``: A string specifying the desired name when creating a
production variant. Defaults to ``None``.
- ``vpc_config``: A dictionary specifying the VPC configuration to use when
creating the new SageMaker model associated with this application.
The acceptable values for this parameter are identical to those of the
``VpcConfig`` parameter in the `SageMaker boto3 client's create_model
method <https://boto3.readthedocs.io/en/latest/reference/services/sagemaker.html
#SageMaker.Client.create_model>`_. For more information, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_VpcConfig.html.
Defaults to ``None``.
- ``data_capture_config``: A dictionary specifying the data capture
configuration to use when creating the new SageMaker model associated with
this application.
For more information, see
https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DataCaptureConfig.html.
Defaults to ``None``.
- ``async_inference_config``: A dictionary specifying the async config
configuration. Defaults to ``None``.
- ``env``: A dictionary specifying environment variables as key-value pairs
to be set for the deployed model. Defaults to ``None``.
- ``tags``: A dictionary of key-value pairs representing additional tags
to be set for the deployed model. Defaults to ``None``.
endpoint: (optional) Endpoint containing the deployment to update. Currently unsupported
.. code-block:: python
:caption: Python example
from mlflow.deployments import get_deploy_client
vpc_config = {
"SecurityGroupIds": [
"sg-123456abc",
],
"Subnets": [
"subnet-123456abc",
],
}
data_capture_config = {
"EnableCapture": True,
"InitialSamplingPercentage": 100,
"DestinationS3Uri": "s3://my-bucket/path",
"CaptureOptions": [{"CaptureMode": "Output"}],
}
config = dict(
assume_role_arn="arn:aws:123:role/assumed_role",
execution_role_arn="arn:aws:456:role/execution_role",
bucket_name="my-s3-bucket",
image_url="1234.dkr.ecr.us-east-1.amazonaws.com/mlflow-test:1.23.1",
region_name="us-east-1",
mode="replace",
archive=False,
instance_type="ml.m5.4xlarge",
instance_count=1,
synchronous=True,
timeout_seconds=300,
variant_name="prod-variant-1",
vpc_config=vpc_config,
data_capture_config=data_capture_config,
env={"DISABLE_NGINX": "true", "GUNICORN_CMD_ARGS": '"--timeout 60"'},
tags={"training_timestamp": "2022-11-01T05:12:26"},
)
client = get_deploy_client("sagemaker")
client.update_deployment(
"my-deployment",
model_uri="/mlruns/0/abc/model",
flavor="python_function",
config=config,
)
.. code-block:: bash
:caption: Command-line example
mlflow deployments update --target sagemaker:/us-east-1/arn:aws:123:role/assumed_role \\
--name my-deployment \\
--model-uri /mlruns/0/abc/model \\
--flavor python_function\\
-C execution_role_arn=arn:aws:456:role/execution_role \\
-C bucket_name=my-s3-bucket \\
-C image_url=1234.dkr.ecr.us-east-1.amazonaws.com/mlflow-test:1.23.1 \\
-C region_name=us-east-1 \\
-C mode=replace \\
-C archive=False \\
-C instance_type=ml.m5.4xlarge \\
-C instance_count=1 \\
-C synchronous=True \\
-C timeout_seconds=300 \\
-C variant_name=prod-variant-1 \\
-C vpc_config='{"SecurityGroupIds": ["sg-123456abc"], \\
"Subnets": ["subnet-123456abc"]}' \\
-C data_capture_config='{"EnableCapture": True, \\
"InitialSamplingPercentage": 100, "DestinationS3Uri": "s3://my-bucket/path", \\
"CaptureOptions": [{"CaptureMode": "Output"}]}'
-C env='{"DISABLE_NGINX": "true", "GUNICORN_CMD_ARGS": "\"--timeout 60\""}' \\
-C tags='{"training_timestamp": "2022-11-01T05:12:26"}' \\
"""
final_config = self._default_deployment_config(create_mode=False)
if config:
self._apply_custom_config(final_config, config)
if model_uri is None:
raise MlflowException(
message="A model_uri must be provided when updating a SageMaker deployment",
error_code=INVALID_PARAMETER_VALUE,
)
if final_config["mode"] not in [DEPLOYMENT_MODE_ADD, DEPLOYMENT_MODE_REPLACE]:
raise MlflowException(
message=(
f"Invalid mode `{final_config['mode']}` for deployment"
" to a pre-existing application"
),
error_code=INVALID_PARAMETER_VALUE,
)
app_name, flavor = _deploy(
app_name=name,
model_uri=model_uri,
flavor=flavor,
execution_role_arn=final_config["execution_role_arn"],
assume_role_arn=final_config["assume_role_arn"],
bucket=final_config["bucket"],
image_url=final_config["image_url"],
region_name=final_config["region_name"],
mode=final_config["mode"],
archive=final_config["archive"],
instance_type=final_config["instance_type"],
instance_count=final_config["instance_count"],
vpc_config=final_config["vpc_config"],
data_capture_config=final_config["data_capture_config"],
synchronous=final_config["synchronous"],
timeout_seconds=final_config["timeout_seconds"],
variant_name=final_config["variant_name"],
async_inference_config=final_config["async_inference_config"],
serverless_config=final_config["serverless_config"],
env=final_config["env"],
tags=final_config["tags"],
)
return {"name": app_name, "flavor": flavor}
def delete_deployment(self, name, config=None, endpoint=None):
"""
Delete a SageMaker application.
Args:
name: Name of the deployed application.
config: Configuration parameters. The supported parameters are:
- ``assume_role_arn``: The name of an IAM role to be assumed to delete
the SageMaker deployment.
- ``region_name``: Name of the AWS region in which the application
is deployed. Defaults to ``us-west-2`` or the region provided in
the `target_uri`.
- ``archive``: If `True`, resources associated with the specified
application, such as its associated models and endpoint configuration,
are preserved. If `False`, these resources are deleted. In order to use
``archive=False``, ``delete()`` must be executed synchronously with
``synchronous=True``. Defaults to ``False``.
- ``synchronous``: If `True`, this function blocks until the deletion process
succeeds or encounters an irrecoverable failure. If `False`, this function
returns immediately after starting the deletion process. It will not wait
for the deletion process to complete; in this case, the caller is
responsible for monitoring the status of the deletion process via native
SageMaker APIs or the AWS console. Defaults to ``True``.
- ``timeout_seconds``: If `synchronous` is `True`, the deletion process
returns after the specified number of seconds if no definitive result
(success or failure) is achieved. Once the function returns, the caller
is responsible for monitoring the status of the deletion process via native
SageMaker APIs or the AWS console. If `synchronous` is False, this
parameter is ignored. Defaults to ``300``.
endpoint: (optional) Endpoint containing the deployment to delete. Currently unsupported
.. code-block:: python
:caption: Python example
from mlflow.deployments import get_deploy_client
config = dict(
assume_role_arn="arn:aws:123:role/assumed_role",
region_name="us-east-1",
archive=False,
synchronous=True,
timeout_seconds=300,
)
client = get_deploy_client("sagemaker")
client.delete_deployment("my-deployment", config=config)
.. code-block:: bash
:caption: Command-line example
mlflow deployments delete --target sagemaker \\
--name my-deployment \\
-C assume_role_arn=arn:aws:123:role/assumed_role \\
-C region_name=us-east-1 \\
-C archive=False \\
-C synchronous=True \\
-C timeout_seconds=300
"""
final_config = {
"region_name": self.region_name,
"archive": False,
"synchronous": True,
"timeout_seconds": 300,
"assume_role_arn": self.assumed_role_arn,
}
if config:
self._apply_custom_config(final_config, config)
_delete(
name,
region_name=final_config["region_name"],
assume_role_arn=final_config["assume_role_arn"],
archive=final_config["archive"],
synchronous=final_config["synchronous"],
timeout_seconds=final_config["timeout_seconds"],
)
def list_deployments(self, endpoint=None):
"""
List deployments. This method returns a list of dictionaries that describes each deployment.
If a region name needs to be specified, the plugin must be initialized
with the AWS region in the ``target_uri`` such as ``sagemaker:/us-east-1``.
To assume an IAM role, the plugin must be initialized
with the AWS region and the role ARN in the ``target_uri`` such as
``sagemaker:/us-east-1/arn:aws:1234:role/assumed_role``.
Args:
endpoint: (optional) List deployments in the specified endpoint. Currently unsupported
Returns:
A list of dictionaries corresponding to deployments.
.. code-block:: python
:caption: Python example
from mlflow.deployments import get_deploy_client
client = get_deploy_client("sagemaker:/us-east-1/arn:aws:123:role/assumed_role")
client.list_deployments()
.. code-block:: bash
:caption: Command-line example
mlflow deployments list --target sagemaker:/us-east-1/arn:aws:1234:role/assumed_role
"""
import boto3
assume_role_credentials = _assume_role_and_get_credentials(
assume_role_arn=self.assumed_role_arn
)
sage_client = boto3.client(
"sagemaker", region_name=self.region_name, **assume_role_credentials
)
return sage_client.list_endpoints()["Endpoints"]
def get_deployment(self, name, endpoint=None):
"""
Returns a dictionary describing the specified deployment.
If a region name needs to be specified, the plugin must be initialized
with the AWS region in the ``target_uri`` such as ``sagemaker:/us-east-1``.
To assume an IAM role, the plugin must be initialized
with the AWS region and the role ARN in the ``target_uri`` such as
``sagemaker:/us-east-1/arn:aws:1234:role/assumed_role``.
A :py:class:`mlflow.exceptions.MlflowException` will also be thrown when an error occurs
while retrieving the deployment.
Args:
name: Name of deployment to retrieve
endpoint: (optional) Endpoint containing the deployment to get. Currently unsupported
Returns:
A dictionary that describes the specified deployment
.. code-block:: python
:caption: Python example
from mlflow.deployments import get_deploy_client
client = get_deploy_client("sagemaker:/us-east-1/arn:aws:123:role/assumed_role")
client.get_deployment("my-deployment")
.. code-block:: bash
:caption: Command-line example
mlflow deployments get --target sagemaker:/us-east-1/arn:aws:1234:role/assumed_role \\
--name my-deployment
"""
import boto3
assume_role_credentials = _assume_role_and_get_credentials(
assume_role_arn=self.assumed_role_arn
)
try:
sage_client = boto3.client(
"sagemaker", region_name=self.region_name, **assume_role_credentials
)
return sage_client.describe_endpoint(EndpointName=name)
except Exception as exc:
raise MlflowException(
message=f"There was an error while retrieving the deployment: {exc}\n"
)
def predict(
self,
deployment_name=None,
inputs=None,
endpoint=None,
params: dict[str, Any] | None = None,
):
"""
Compute predictions from the specified deployment using the provided PyFunc input.
The input/output types of this method match the :ref:`MLflow PyFunc prediction
interface <pyfunc-inference-api>`.
If a region name needs to be specified, the plugin must be initialized
with the AWS region in the ``target_uri`` such as ``sagemaker:/us-east-1``.
To assume an IAM role, the plugin must be initialized
with the AWS region and the role ARN in the ``target_uri`` such as
``sagemaker:/us-east-1/arn:aws:1234:role/assumed_role``.
Args:
deployment_name: Name of the deployment to predict against.
inputs: Input data (or arguments) to pass to the deployment or model endpoint for
inference. For a complete list of supported input types, see
:ref:`pyfunc-inference-api`.
endpoint: Endpoint to predict against. Currently unsupported
params: Optional parameters to invoke the endpoint with.
Returns:
A PyFunc output, such as a Pandas DataFrame, Pandas Series, or NumPy array.
For a complete list of supported output types, see :ref:`pyfunc-inference-api`.
.. code-block:: python
:caption: Python example
import pandas as pd
from mlflow.deployments import get_deploy_client
df = pd.DataFrame(data=[[1, 2, 3]], columns=["feat1", "feat2", "feat3"])
client = get_deploy_client("sagemaker:/us-east-1/arn:aws:123:role/assumed_role")
client.predict("my-deployment", df)
.. code-block:: bash
:caption: Command-line example
cat > ./input.json <<- input
{"feat1": {"0": 1}, "feat2": {"0": 2}, "feat3": {"0": 3}}
input
mlflow deployments predict \\
--target sagemaker:/us-east-1/arn:aws:1234:role/assumed_role \\
--name my-deployment \\
--input-path ./input.json
"""
import boto3
assume_role_credentials = _assume_role_and_get_credentials(
assume_role_arn=self.assumed_role_arn
)
try:
sage_client = boto3.client(
"sagemaker-runtime", region_name=self.region_name, **assume_role_credentials
)
response = sage_client.invoke_endpoint(
EndpointName=deployment_name,
Body=dump_input_data(inputs, inputs_key="instances", params=params),
ContentType="application/json",
)
response_body = response["Body"].read().decode("utf-8")
return PredictionsResponse.from_json(response_body)
except Exception as exc:
raise MlflowException(
message=f"There was an error while getting model prediction: {exc}\n"
)
def explain(self, deployment_name=None, df=None, endpoint=None):
"""
*This function has not been implemented and will be coming in the future.*
"""
raise NotImplementedError("This function is not implemented yet.")
def create_endpoint(self, name, config=None):
"""
Create an endpoint with the specified target. By default, this method should block until
creation completes (i.e. until it's possible to create a deployment within the endpoint).
In the case of conflicts (e.g. if it's not possible to create the specified endpoint
due to conflict with an existing endpoint), raises a
:py:class:`mlflow.exceptions.MlflowException`. See target-specific plugin documentation
for additional detail on support for asynchronous creation and other configuration.
Args:
name: Unique name to use for endpoint. If another endpoint exists with the same
name, raises a :py:class:`mlflow.exceptions.MlflowException`.
config: (optional) Dict containing target-specific configuration for the endpoint.
Returns:
Dict corresponding to created endpoint, which must contain the 'name' key.
"""
raise NotImplementedError("This function is not implemented yet.")
def update_endpoint(self, endpoint, config=None):
"""
Update the endpoint with the specified name. You can update any target-specific attributes
of the endpoint (via `config`). By default, this method should block until the update
completes (i.e. until it's possible to create a deployment within the endpoint). See
target-specific plugin documentation for additional detail on support for asynchronous
update and other configuration.
Args:
endpoint: Unique name of endpoint to update
config: (optional) dict containing target-specific configuration for the endpoint
"""
raise NotImplementedError("This function is not implemented yet.")
def delete_endpoint(self, endpoint):
"""
Delete the endpoint from the specified target. Deletion should be idempotent (i.e. deletion
should not fail if retried on a non-existent deployment).
Args:
endpoint: Name of endpoint to delete
"""
raise NotImplementedError("This function is not implemented yet.")
def list_endpoints(self):
"""
List endpoints in the specified target. This method is expected to return an
unpaginated list of all endpoints (an alternative would be to return a dict with
an 'endpoints' field containing the actual endpoints, with plugins able to specify
other fields, e.g. a next_page_token field, in the returned dictionary for pagination,
and to accept a `pagination_args` argument to this method for passing
pagination-related args).
Returns:
A list of dicts corresponding to endpoints. Each dict is guaranteed to
contain a 'name' key containing the endpoint name. The other fields of
the returned dictionary and their types may vary across targets.
"""
raise NotImplementedError("This function is not implemented yet.")
def get_endpoint(self, endpoint):
"""
Returns a dictionary describing the specified endpoint, throwing a
py:class:`mlflow.exception.MlflowException` if no endpoint exists with the provided
name.
The dict is guaranteed to contain an 'name' key containing the endpoint name.
The other fields of the returned dictionary and their types may vary across targets.
Args:
endpoint: Name of endpoint to fetch
"""
raise NotImplementedError("This function is not implemented yet.")
| SageMakerDeploymentClient |
python | scikit-learn__scikit-learn | sklearn/mixture/_bayesian_mixture.py | {
"start": 2166,
"end": 33678
} | class ____(BaseMixture):
"""Variational Bayesian estimation of a Gaussian mixture.
This class allows to infer an approximate posterior distribution over the
parameters of a Gaussian mixture distribution. The effective number of
components can be inferred from the data.
This class implements two types of prior for the weights distribution: a
finite mixture model with Dirichlet distribution and an infinite mixture
model with the Dirichlet Process. In practice Dirichlet Process inference
algorithm is approximated and uses a truncated distribution with a fixed
maximum number of components (called the Stick-breaking representation).
The number of components actually used almost always depends on the data.
.. versionadded:: 0.18
Read more in the :ref:`User Guide <bgmm>`.
Parameters
----------
n_components : int, default=1
The number of mixture components. Depending on the data and the value
of the `weight_concentration_prior` the model can decide to not use
all the components by setting some component `weights_` to values very
close to zero. The number of effective components is therefore smaller
than n_components.
covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full'
String describing the type of covariance parameters to use.
Must be one of:
- 'full' (each component has its own general covariance matrix),
- 'tied' (all components share the same general covariance matrix),
- 'diag' (each component has its own diagonal covariance matrix),
- 'spherical' (each component has its own single variance).
tol : float, default=1e-3
The convergence threshold. EM iterations will stop when the
lower bound average gain on the likelihood (of the training data with
respect to the model) is below this threshold.
reg_covar : float, default=1e-6
Non-negative regularization added to the diagonal of covariance.
Allows to assure that the covariance matrices are all positive.
max_iter : int, default=100
The number of EM iterations to perform.
n_init : int, default=1
The number of initializations to perform. The result with the highest
lower bound value on the likelihood is kept.
init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \
default='kmeans'
The method used to initialize the weights, the means and the
covariances. String must be one of:
- 'kmeans': responsibilities are initialized using kmeans.
- 'k-means++': use the k-means++ method to initialize.
- 'random': responsibilities are initialized randomly.
- 'random_from_data': initial means are randomly selected data points.
.. versionchanged:: v1.1
`init_params` now accepts 'random_from_data' and 'k-means++' as
initialization methods.
weight_concentration_prior_type : {'dirichlet_process', 'dirichlet_distribution'}, \
default='dirichlet_process'
String describing the type of the weight concentration prior.
weight_concentration_prior : float or None, default=None
The dirichlet concentration of each component on the weight
distribution (Dirichlet). This is commonly called gamma in the
literature. The higher concentration puts more mass in
the center and will lead to more components being active, while a lower
concentration parameter will lead to more mass at the edge of the
mixture weights simplex. The value of the parameter must be greater
than 0. If it is None, it's set to ``1. / n_components``.
mean_precision_prior : float or None, default=None
The precision prior on the mean distribution (Gaussian).
Controls the extent of where means can be placed. Larger
values concentrate the cluster means around `mean_prior`.
The value of the parameter must be greater than 0.
If it is None, it is set to 1.
mean_prior : array-like, shape (n_features,), default=None
The prior on the mean distribution (Gaussian).
If it is None, it is set to the mean of X.
degrees_of_freedom_prior : float or None, default=None
The prior of the number of degrees of freedom on the covariance
distributions (Wishart). If it is None, it's set to `n_features`.
covariance_prior : float or array-like, default=None
The prior on the covariance distribution (Wishart).
If it is None, the emiprical covariance prior is initialized using the
covariance of X. The shape depends on `covariance_type`::
(n_features, n_features) if 'full',
(n_features, n_features) if 'tied',
(n_features) if 'diag',
float if 'spherical'
random_state : int, RandomState instance or None, default=None
Controls the random seed given to the method chosen to initialize the
parameters (see `init_params`).
In addition, it controls the generation of random samples from the
fitted distribution (see the method `sample`).
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
warm_start : bool, default=False
If 'warm_start' is True, the solution of the last fitting is used as
initialization for the next call of fit(). This can speed up
convergence when fit is called several times on similar problems.
See :term:`the Glossary <warm_start>`.
verbose : int, default=0
Enable verbose output. If 1 then it prints the current
initialization and each iteration step. If greater than 1 then
it prints also the log probability and the time needed
for each step.
verbose_interval : int, default=10
Number of iteration done before the next print.
Attributes
----------
weights_ : array-like of shape (n_components,)
The weights of each mixture components.
means_ : array-like of shape (n_components, n_features)
The mean of each mixture component.
covariances_ : array-like
The covariance of each mixture component.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_ : array-like
The precision matrices for each component in the mixture. A precision
matrix is the inverse of a covariance matrix. A covariance matrix is
symmetric positive definite so the mixture of Gaussian can be
equivalently parameterized by the precision matrices. Storing the
precision matrices instead of the covariance matrices makes it more
efficient to compute the log-likelihood of new samples at test time.
The shape depends on ``covariance_type``::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_cholesky_ : array-like
The Cholesky decomposition of the precision matrices of each mixture
component. A precision matrix is the inverse of a covariance matrix.
A covariance matrix is symmetric positive definite so the mixture of
Gaussian can be equivalently parameterized by the precision matrices.
Storing the precision matrices instead of the covariance matrices makes
it more efficient to compute the log-likelihood of new samples at test
time. The shape depends on ``covariance_type``::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence of the best fit of inference was reached, False otherwise.
n_iter_ : int
Number of step used by the best fit of inference to reach the
convergence.
lower_bound_ : float
Lower bound value on the model evidence (of the training data) of the
best fit of inference.
lower_bounds_ : array-like of shape (`n_iter_`,)
The list of lower bound values on the model evidence from each iteration
of the best fit of inference.
weight_concentration_prior_ : tuple or float
The dirichlet concentration of each component on the weight
distribution (Dirichlet). The type depends on
``weight_concentration_prior_type``::
(float, float) if 'dirichlet_process' (Beta parameters),
float if 'dirichlet_distribution' (Dirichlet parameters).
The higher concentration puts more mass in
the center and will lead to more components being active, while a lower
concentration parameter will lead to more mass at the edge of the
simplex.
weight_concentration_ : array-like of shape (n_components,)
The dirichlet concentration of each component on the weight
distribution (Dirichlet).
mean_precision_prior_ : float
The precision prior on the mean distribution (Gaussian).
Controls the extent of where means can be placed.
Larger values concentrate the cluster means around `mean_prior`.
If mean_precision_prior is set to None, `mean_precision_prior_` is set
to 1.
mean_precision_ : array-like of shape (n_components,)
The precision of each components on the mean distribution (Gaussian).
mean_prior_ : array-like of shape (n_features,)
The prior on the mean distribution (Gaussian).
degrees_of_freedom_prior_ : float
The prior of the number of degrees of freedom on the covariance
distributions (Wishart).
degrees_of_freedom_ : array-like of shape (n_components,)
The number of degrees of freedom of each components in the model.
covariance_prior_ : float or array-like
The prior on the covariance distribution (Wishart).
The shape depends on `covariance_type`::
(n_features, n_features) if 'full',
(n_features, n_features) if 'tied',
(n_features) if 'diag',
float if 'spherical'
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
GaussianMixture : Finite Gaussian mixture fit with EM.
References
----------
.. [1] `Bishop, Christopher M. (2006). "Pattern recognition and machine
learning". Vol. 4 No. 4. New York: Springer.
<https://www.springer.com/kr/book/9780387310732>`_
.. [2] `Hagai Attias. (2000). "A Variational Bayesian Framework for
Graphical Models". In Advances in Neural Information Processing
Systems 12.
<https://proceedings.neurips.cc/paper_files/paper/1999/file/74563ba21a90da13dacf2a73e3ddefa7-Paper.pdf>`_
.. [3] `Blei, David M. and Michael I. Jordan. (2006). "Variational
inference for Dirichlet process mixtures". Bayesian analysis 1.1
<https://www.cs.princeton.edu/courses/archive/fall11/cos597C/reading/BleiJordan2005.pdf>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.mixture import BayesianGaussianMixture
>>> X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [12, 4], [10, 7]])
>>> bgm = BayesianGaussianMixture(n_components=2, random_state=42).fit(X)
>>> bgm.means_
array([[2.49 , 2.29],
[8.45, 4.52 ]])
>>> bgm.predict([[0, 0], [9, 3]])
array([0, 1])
"""
_parameter_constraints: dict = {
**BaseMixture._parameter_constraints,
"covariance_type": [StrOptions({"spherical", "tied", "diag", "full"})],
"weight_concentration_prior_type": [
StrOptions({"dirichlet_process", "dirichlet_distribution"})
],
"weight_concentration_prior": [
None,
Interval(Real, 0.0, None, closed="neither"),
],
"mean_precision_prior": [None, Interval(Real, 0.0, None, closed="neither")],
"mean_prior": [None, "array-like"],
"degrees_of_freedom_prior": [None, Interval(Real, 0.0, None, closed="neither")],
"covariance_prior": [
None,
"array-like",
Interval(Real, 0.0, None, closed="neither"),
],
}
def __init__(
self,
*,
n_components=1,
covariance_type="full",
tol=1e-3,
reg_covar=1e-6,
max_iter=100,
n_init=1,
init_params="kmeans",
weight_concentration_prior_type="dirichlet_process",
weight_concentration_prior=None,
mean_precision_prior=None,
mean_prior=None,
degrees_of_freedom_prior=None,
covariance_prior=None,
random_state=None,
warm_start=False,
verbose=0,
verbose_interval=10,
):
super().__init__(
n_components=n_components,
tol=tol,
reg_covar=reg_covar,
max_iter=max_iter,
n_init=n_init,
init_params=init_params,
random_state=random_state,
warm_start=warm_start,
verbose=verbose,
verbose_interval=verbose_interval,
)
self.covariance_type = covariance_type
self.weight_concentration_prior_type = weight_concentration_prior_type
self.weight_concentration_prior = weight_concentration_prior
self.mean_precision_prior = mean_precision_prior
self.mean_prior = mean_prior
self.degrees_of_freedom_prior = degrees_of_freedom_prior
self.covariance_prior = covariance_prior
def _check_parameters(self, X, xp=None):
"""Check that the parameters are well defined.
Parameters
----------
X : array-like of shape (n_samples, n_features)
"""
self._check_weights_parameters()
self._check_means_parameters(X)
self._check_precision_parameters(X)
self._checkcovariance_prior_parameter(X)
def _check_weights_parameters(self):
"""Check the parameter of the Dirichlet distribution."""
if self.weight_concentration_prior is None:
self.weight_concentration_prior_ = 1.0 / self.n_components
else:
self.weight_concentration_prior_ = self.weight_concentration_prior
def _check_means_parameters(self, X):
"""Check the parameters of the Gaussian distribution.
Parameters
----------
X : array-like of shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.mean_precision_prior is None:
self.mean_precision_prior_ = 1.0
else:
self.mean_precision_prior_ = self.mean_precision_prior
if self.mean_prior is None:
self.mean_prior_ = X.mean(axis=0)
else:
self.mean_prior_ = check_array(
self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False
)
_check_shape(self.mean_prior_, (n_features,), "means")
def _check_precision_parameters(self, X):
"""Check the prior parameters of the precision distribution.
Parameters
----------
X : array-like of shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.degrees_of_freedom_prior is None:
self.degrees_of_freedom_prior_ = n_features
elif self.degrees_of_freedom_prior > n_features - 1.0:
self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior
else:
raise ValueError(
"The parameter 'degrees_of_freedom_prior' "
"should be greater than %d, but got %.3f."
% (n_features - 1, self.degrees_of_freedom_prior)
)
def _checkcovariance_prior_parameter(self, X):
"""Check the `covariance_prior_`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.covariance_prior is None:
self.covariance_prior_ = {
"full": np.atleast_2d(np.cov(X.T)),
"tied": np.atleast_2d(np.cov(X.T)),
"diag": np.var(X, axis=0, ddof=1),
"spherical": np.var(X, axis=0, ddof=1).mean(),
}[self.covariance_type]
elif self.covariance_type in ["full", "tied"]:
self.covariance_prior_ = check_array(
self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False
)
_check_shape(
self.covariance_prior_,
(n_features, n_features),
"%s covariance_prior" % self.covariance_type,
)
_check_precision_matrix(self.covariance_prior_, self.covariance_type)
elif self.covariance_type == "diag":
self.covariance_prior_ = check_array(
self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False
)
_check_shape(
self.covariance_prior_,
(n_features,),
"%s covariance_prior" % self.covariance_type,
)
_check_precision_positivity(self.covariance_prior_, self.covariance_type)
# spherical case
else:
self.covariance_prior_ = self.covariance_prior
def _initialize(self, X, resp):
"""Initialization of the mixture parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
resp : array-like of shape (n_samples, n_components)
"""
nk, xk, sk = _estimate_gaussian_parameters(
X, resp, self.reg_covar, self.covariance_type
)
self._estimate_weights(nk)
self._estimate_means(nk, xk)
self._estimate_precisions(nk, xk, sk)
def _estimate_weights(self, nk):
"""Estimate the parameters of the Dirichlet distribution.
Parameters
----------
nk : array-like of shape (n_components,)
"""
if self.weight_concentration_prior_type == "dirichlet_process":
# For dirichlet process weight_concentration will be a tuple
# containing the two parameters of the beta distribution
self.weight_concentration_ = (
1.0 + nk,
(
self.weight_concentration_prior_
+ np.hstack((np.cumsum(nk[::-1])[-2::-1], 0))
),
)
else:
# case Variational Gaussian mixture with dirichlet distribution
self.weight_concentration_ = self.weight_concentration_prior_ + nk
def _estimate_means(self, nk, xk):
"""Estimate the parameters of the Gaussian distribution.
Parameters
----------
nk : array-like of shape (n_components,)
xk : array-like of shape (n_components, n_features)
"""
self.mean_precision_ = self.mean_precision_prior_ + nk
self.means_ = (
self.mean_precision_prior_ * self.mean_prior_ + nk[:, np.newaxis] * xk
) / self.mean_precision_[:, np.newaxis]
def _estimate_precisions(self, nk, xk, sk):
"""Estimate the precisions parameters of the precision distribution.
Parameters
----------
nk : array-like of shape (n_components,)
xk : array-like of shape (n_components, n_features)
sk : array-like
The shape depends of `covariance_type`:
'full' : (n_components, n_features, n_features)
'tied' : (n_features, n_features)
'diag' : (n_components, n_features)
'spherical' : (n_components,)
"""
{
"full": self._estimate_wishart_full,
"tied": self._estimate_wishart_tied,
"diag": self._estimate_wishart_diag,
"spherical": self._estimate_wishart_spherical,
}[self.covariance_type](nk, xk, sk)
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type
)
def _estimate_wishart_full(self, nk, xk, sk):
"""Estimate the full Wishart distribution parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
xk : array-like of shape (n_components, n_features)
sk : array-like of shape (n_components, n_features, n_features)
"""
_, n_features = xk.shape
# Warning : in some Bishop book, there is a typo on the formula 10.63
# `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` is
# the correct formula
self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
self.covariances_ = np.empty((self.n_components, n_features, n_features))
for k in range(self.n_components):
diff = xk[k] - self.mean_prior_
self.covariances_[k] = (
self.covariance_prior_
+ nk[k] * sk[k]
+ nk[k]
* self.mean_precision_prior_
/ self.mean_precision_[k]
* np.outer(diff, diff)
)
# Contrary to the original bishop book, we normalize the covariances
self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis, np.newaxis]
def _estimate_wishart_tied(self, nk, xk, sk):
"""Estimate the tied Wishart distribution parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
xk : array-like of shape (n_components, n_features)
sk : array-like of shape (n_features, n_features)
"""
_, n_features = xk.shape
# Warning : in some Bishop book, there is a typo on the formula 10.63
# `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
# is the correct formula
self.degrees_of_freedom_ = (
self.degrees_of_freedom_prior_ + nk.sum() / self.n_components
)
diff = xk - self.mean_prior_
self.covariances_ = (
self.covariance_prior_
+ sk * nk.sum() / self.n_components
+ self.mean_precision_prior_
/ self.n_components
* np.dot((nk / self.mean_precision_) * diff.T, diff)
)
# Contrary to the original bishop book, we normalize the covariances
self.covariances_ /= self.degrees_of_freedom_
def _estimate_wishart_diag(self, nk, xk, sk):
"""Estimate the diag Wishart distribution parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
xk : array-like of shape (n_components, n_features)
sk : array-like of shape (n_components, n_features)
"""
_, n_features = xk.shape
# Warning : in some Bishop book, there is a typo on the formula 10.63
# `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
# is the correct formula
self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
diff = xk - self.mean_prior_
self.covariances_ = self.covariance_prior_ + nk[:, np.newaxis] * (
sk
+ (self.mean_precision_prior_ / self.mean_precision_)[:, np.newaxis]
* np.square(diff)
)
# Contrary to the original bishop book, we normalize the covariances
self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis]
def _estimate_wishart_spherical(self, nk, xk, sk):
"""Estimate the spherical Wishart distribution parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
nk : array-like of shape (n_components,)
xk : array-like of shape (n_components, n_features)
sk : array-like of shape (n_components,)
"""
_, n_features = xk.shape
# Warning : in some Bishop book, there is a typo on the formula 10.63
# `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
# is the correct formula
self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
diff = xk - self.mean_prior_
self.covariances_ = self.covariance_prior_ + nk * (
sk
+ self.mean_precision_prior_
/ self.mean_precision_
* np.mean(np.square(diff), 1)
)
# Contrary to the original bishop book, we normalize the covariances
self.covariances_ /= self.degrees_of_freedom_
def _m_step(self, X, log_resp, xp=None):
"""M step.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array-like of shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
n_samples, _ = X.shape
nk, xk, sk = _estimate_gaussian_parameters(
X, np.exp(log_resp), self.reg_covar, self.covariance_type
)
self._estimate_weights(nk)
self._estimate_means(nk, xk)
self._estimate_precisions(nk, xk, sk)
def _estimate_log_weights(self, xp=None):
if self.weight_concentration_prior_type == "dirichlet_process":
digamma_sum = digamma(
self.weight_concentration_[0] + self.weight_concentration_[1]
)
digamma_a = digamma(self.weight_concentration_[0])
digamma_b = digamma(self.weight_concentration_[1])
return (
digamma_a
- digamma_sum
+ np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1]))
)
else:
# case Variational Gaussian mixture with dirichlet distribution
return digamma(self.weight_concentration_) - digamma(
np.sum(self.weight_concentration_)
)
def _estimate_log_prob(self, X, xp=None):
_, n_features = X.shape
# We remove `n_features * np.log(self.degrees_of_freedom_)` because
# the precision matrix is normalized
log_gauss = _estimate_log_gaussian_prob(
X, self.means_, self.precisions_cholesky_, self.covariance_type
) - 0.5 * n_features * np.log(self.degrees_of_freedom_)
log_lambda = n_features * np.log(2.0) + np.sum(
digamma(
0.5
* (self.degrees_of_freedom_ - np.arange(0, n_features)[:, np.newaxis])
),
0,
)
return log_gauss + 0.5 * (log_lambda - n_features / self.mean_precision_)
def _compute_lower_bound(self, log_resp, log_prob_norm):
"""Estimate the lower bound of the model.
The lower bound on the likelihood (of the training data with respect to
the model) is used to detect the convergence and has to increase at
each iteration.
Parameters
----------
X : array-like of shape (n_samples, n_features)
log_resp : array, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
log_prob_norm : float
Logarithm of the probability of each sample in X.
Returns
-------
lower_bound : float
"""
# Contrary to the original formula, we have done some simplification
# and removed all the constant terms.
(n_features,) = self.mean_prior_.shape
# We removed `.5 * n_features * np.log(self.degrees_of_freedom_)`
# because the precision matrix is normalized.
log_det_precisions_chol = _compute_log_det_cholesky(
self.precisions_cholesky_, self.covariance_type, n_features
) - 0.5 * n_features * np.log(self.degrees_of_freedom_)
if self.covariance_type == "tied":
log_wishart = self.n_components * np.float64(
_log_wishart_norm(
self.degrees_of_freedom_, log_det_precisions_chol, n_features
)
)
else:
log_wishart = np.sum(
_log_wishart_norm(
self.degrees_of_freedom_, log_det_precisions_chol, n_features
)
)
if self.weight_concentration_prior_type == "dirichlet_process":
log_norm_weight = -np.sum(
betaln(self.weight_concentration_[0], self.weight_concentration_[1])
)
else:
log_norm_weight = _log_dirichlet_norm(self.weight_concentration_)
return (
-np.sum(np.exp(log_resp) * log_resp)
- log_wishart
- log_norm_weight
- 0.5 * n_features * np.sum(np.log(self.mean_precision_))
)
def _get_parameters(self):
return (
self.weight_concentration_,
self.mean_precision_,
self.means_,
self.degrees_of_freedom_,
self.covariances_,
self.precisions_cholesky_,
)
def _set_parameters(self, params, xp=None):
(
self.weight_concentration_,
self.mean_precision_,
self.means_,
self.degrees_of_freedom_,
self.covariances_,
self.precisions_cholesky_,
) = params
# Weights computation
if self.weight_concentration_prior_type == "dirichlet_process":
weight_dirichlet_sum = (
self.weight_concentration_[0] + self.weight_concentration_[1]
)
tmp = self.weight_concentration_[1] / weight_dirichlet_sum
self.weights_ = (
self.weight_concentration_[0]
/ weight_dirichlet_sum
* np.hstack((1, np.cumprod(tmp[:-1])))
)
self.weights_ /= np.sum(self.weights_)
else:
self.weights_ = self.weight_concentration_ / np.sum(
self.weight_concentration_
)
# Precisions matrices computation
if self.covariance_type == "full":
self.precisions_ = np.array(
[
np.dot(prec_chol, prec_chol.T)
for prec_chol in self.precisions_cholesky_
]
)
elif self.covariance_type == "tied":
self.precisions_ = np.dot(
self.precisions_cholesky_, self.precisions_cholesky_.T
)
else:
self.precisions_ = self.precisions_cholesky_**2
| BayesianGaussianMixture |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 73433,
"end": 75000
} | class ____(TMAWorkspaceMixin, BaseScaledMMConfigMixin):
"""
Scaled TMA-specific mixin that extends BaseScaledMMConfigMixin with TMA functionality.
This is for scaled MM templates that use device TMA.
This inherits from BaseScaledMMConfigMixin and adds TMA-specific options.
"""
# pyrefly: ignore [bad-override]
def _filter_configs(self, configs: list[BaseConfig]) -> list[BaseConfig]:
"""
TMA specific filtering:
- num_warps=2 not safe for TMA
- block_k >= 32 required for TMA (requires inner-most dimension >= 32)
"""
configs = [c for c in configs if c.num_warps != 2 and c.block_k >= 32]
return super()._filter_configs(configs)
def _get_template_configs_impl(
self,
kernel_inputs: KernelInputs,
op_name: str,
) -> Generator[dict[str, Any], None, None]:
"""
Generate scaled TMA template configs with both scaled MM and TMA-specific options.
"""
# Get base scaled MM template configs from superclass
for template_kwargs in super()._get_template_configs_impl(
kernel_inputs,
op_name,
):
# Add TMA-specific options for device TMA scaled MM
template_kwargs["TMA_SIZE"] = TMA_DESCRIPTOR_SIZE
template_kwargs["NUM_SMS"] = get_num_sms()
template_kwargs["TMA_EXPERIMENTAL_API"] = not has_triton_stable_tma_api()
yield template_kwargs
# Scaled Blackwell TMA-specific mixin for scaled MM templates with TMA
| ScaledTMAConfigMixin |
python | walkccc__LeetCode | solutions/86. Partition List/86.py | {
"start": 0,
"end": 433
} | class ____:
def partition(self, head: ListNode, x: int) -> ListNode:
beforeHead = ListNode(0)
afterHead = ListNode(0)
before = beforeHead
after = afterHead
while head:
if head.val < x:
before.next = head
before = head
else:
after.next = head
after = head
head = head.next
after.next = None
before.next = afterHead.next
return beforeHead.next
| Solution |
python | sphinx-doc__sphinx | sphinx/pygments_styles.py | {
"start": 380,
"end": 709
} | class ____(Style):
"""Like friendly, but a bit darker to enhance contrast on the green
background.
"""
background_color = '#eeffcc'
default_style = ''
styles = {
**FriendlyStyle.styles,
Generic.Output: '#333',
Comment: 'italic #408090',
Number: '#208050',
}
| SphinxStyle |
python | doocs__leetcode | solution/1800-1899/1866.Number of Ways to Rearrange Sticks With K Sticks Visible/Solution.py | {
"start": 0,
"end": 333
} | class ____:
def rearrangeSticks(self, n: int, k: int) -> int:
mod = 10**9 + 7
f = [[0] * (k + 1) for _ in range(n + 1)]
f[0][0] = 1
for i in range(1, n + 1):
for j in range(1, k + 1):
f[i][j] = (f[i - 1][j - 1] + f[i - 1][j] * (i - 1)) % mod
return f[n][k]
| Solution |
python | oauthlib__oauthlib | oauthlib/openid/connect/core/grant_types/dispatchers.py | {
"start": 52,
"end": 119
} | class ____:
default_grant = None
oidc_grant = None
| Dispatcher |
python | django-haystack__django-haystack | test_haystack/elasticsearch7_tests/test_backend.py | {
"start": 25096,
"end": 27486
} | class ____(TestCase):
def setUp(self):
self.sample_objs = []
for i in range(1, 4):
mock = MockModel()
mock.id = i
mock.author = "daniel%s" % i
mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)
self.sample_objs.append(mock)
# Stow.
# Point the backend at a URL that doesn't exist so we can watch the
# sparks fly.
self.old_es_url = settings.HAYSTACK_CONNECTIONS["elasticsearch"]["URL"]
settings.HAYSTACK_CONNECTIONS["elasticsearch"]["URL"] = (
"%s/foo/" % self.old_es_url
)
self.cap = CaptureHandler()
logging.getLogger("haystack").addHandler(self.cap)
config = apps.get_app_config("haystack")
logging.getLogger("haystack").removeHandler(config.stream)
# Setup the rest of the bits.
self.old_ui = connections["elasticsearch"].get_unified_index()
ui = UnifiedIndex()
self.smmi = Elasticsearch7MockSearchIndex()
ui.build(indexes=[self.smmi])
connections["elasticsearch"]._index = ui
self.sb = connections["elasticsearch"].get_backend()
def tearDown(self):
# Restore.
settings.HAYSTACK_CONNECTIONS["elasticsearch"]["URL"] = self.old_es_url
connections["elasticsearch"]._index = self.old_ui
config = apps.get_app_config("haystack")
logging.getLogger("haystack").removeHandler(self.cap)
logging.getLogger("haystack").addHandler(config.stream)
@unittest.expectedFailure
def test_all_cases(self):
# Prior to the addition of the try/except bits, these would all fail miserably.
self.assertEqual(len(CaptureHandler.logs_seen), 0)
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(len(CaptureHandler.logs_seen), 1)
self.sb.remove(self.sample_objs[0])
self.assertEqual(len(CaptureHandler.logs_seen), 2)
self.sb.search("search")
self.assertEqual(len(CaptureHandler.logs_seen), 3)
self.sb.more_like_this(self.sample_objs[0])
self.assertEqual(len(CaptureHandler.logs_seen), 4)
self.sb.clear([MockModel])
self.assertEqual(len(CaptureHandler.logs_seen), 5)
self.sb.clear()
self.assertEqual(len(CaptureHandler.logs_seen), 6)
| FailedElasticsearch7SearchBackendTestCase |
python | django__django | tests/transactions/tests.py | {
"start": 397,
"end": 10436
class AtomicTests(TransactionTestCase):
    """
    Tests for the atomic decorator and context manager.

    The tests make assertions on internal attributes because there isn't a
    robust way to ask the database for its current transaction state.

    Since the decorator syntax is converted into a context manager (see the
    implementation), there are only a few basic tests with the decorator
    syntax and the bulk of the tests use the context manager syntax.
    """

    available_apps = ["transactions"]

    def test_decorator_syntax_commit(self):
        @transaction.atomic
        def make_reporter():
            return Reporter.objects.create(first_name="Tintin")

        reporter = make_reporter()
        self.assertSequenceEqual(Reporter.objects.all(), [reporter])

    def test_decorator_syntax_rollback(self):
        @transaction.atomic
        def make_reporter():
            Reporter.objects.create(first_name="Haddock")
            raise Exception("Oops, that's his last name")

        with self.assertRaisesMessage(Exception, "Oops"):
            make_reporter()
        self.assertSequenceEqual(Reporter.objects.all(), [])

    def test_alternate_decorator_syntax_commit(self):
        # Same as above, but with the parenthesized @transaction.atomic() form.
        @transaction.atomic()
        def make_reporter():
            return Reporter.objects.create(first_name="Tintin")

        reporter = make_reporter()
        self.assertSequenceEqual(Reporter.objects.all(), [reporter])

    def test_alternate_decorator_syntax_rollback(self):
        @transaction.atomic()
        def make_reporter():
            Reporter.objects.create(first_name="Haddock")
            raise Exception("Oops, that's his last name")

        with self.assertRaisesMessage(Exception, "Oops"):
            make_reporter()
        self.assertSequenceEqual(Reporter.objects.all(), [])

    def test_commit(self):
        with transaction.atomic():
            reporter = Reporter.objects.create(first_name="Tintin")
        self.assertSequenceEqual(Reporter.objects.all(), [reporter])

    def test_rollback(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(first_name="Haddock")
                raise Exception("Oops, that's his last name")
        self.assertSequenceEqual(Reporter.objects.all(), [])

    # "Nested" tests use a savepoint for the inner atomic block, so an inner
    # failure only rolls back the inner block's writes.
    def test_nested_commit_commit(self):
        with transaction.atomic():
            reporter1 = Reporter.objects.create(first_name="Tintin")
            with transaction.atomic():
                reporter2 = Reporter.objects.create(
                    first_name="Archibald", last_name="Haddock"
                )
        self.assertSequenceEqual(Reporter.objects.all(), [reporter2, reporter1])

    def test_nested_commit_rollback(self):
        with transaction.atomic():
            reporter = Reporter.objects.create(first_name="Tintin")
            with self.assertRaisesMessage(Exception, "Oops"):
                with transaction.atomic():
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        self.assertSequenceEqual(Reporter.objects.all(), [reporter])

    def test_nested_rollback_commit(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with transaction.atomic():
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertSequenceEqual(Reporter.objects.all(), [])

    def test_nested_rollback_rollback(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with transaction.atomic():
                        Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertSequenceEqual(Reporter.objects.all(), [])

    # "Merged" tests use savepoint=False, so the inner block shares the outer
    # transaction and an inner failure poisons the outer block too.
    def test_merged_commit_commit(self):
        with transaction.atomic():
            reporter1 = Reporter.objects.create(first_name="Tintin")
            with transaction.atomic(savepoint=False):
                reporter2 = Reporter.objects.create(
                    first_name="Archibald", last_name="Haddock"
                )
        self.assertSequenceEqual(Reporter.objects.all(), [reporter2, reporter1])

    def test_merged_commit_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with self.assertRaisesMessage(Exception, "Oops"):
                with transaction.atomic(savepoint=False):
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        # Writes in the outer block are rolled back too.
        self.assertSequenceEqual(Reporter.objects.all(), [])

    def test_merged_rollback_commit(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with transaction.atomic(savepoint=False):
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertSequenceEqual(Reporter.objects.all(), [])

    def test_merged_rollback_rollback(self):
        with self.assertRaisesMessage(Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertSequenceEqual(Reporter.objects.all(), [])

    # "Reuse" tests enter the same atomic() instance twice (nested reuse).
    def test_reuse_commit_commit(self):
        atomic = transaction.atomic()
        with atomic:
            reporter1 = Reporter.objects.create(first_name="Tintin")
            with atomic:
                reporter2 = Reporter.objects.create(
                    first_name="Archibald", last_name="Haddock"
                )
        self.assertSequenceEqual(Reporter.objects.all(), [reporter2, reporter1])

    def test_reuse_commit_rollback(self):
        atomic = transaction.atomic()
        with atomic:
            reporter = Reporter.objects.create(first_name="Tintin")
            with self.assertRaisesMessage(Exception, "Oops"):
                with atomic:
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        self.assertSequenceEqual(Reporter.objects.all(), [reporter])

    def test_reuse_rollback_commit(self):
        atomic = transaction.atomic()
        with self.assertRaisesMessage(Exception, "Oops"):
            with atomic:
                Reporter.objects.create(last_name="Tintin")
                with atomic:
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertSequenceEqual(Reporter.objects.all(), [])

    def test_reuse_rollback_rollback(self):
        atomic = transaction.atomic()
        with self.assertRaisesMessage(Exception, "Oops"):
            with atomic:
                Reporter.objects.create(last_name="Tintin")
                with self.assertRaisesMessage(Exception, "Oops"):
                    with atomic:
                        Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertSequenceEqual(Reporter.objects.all(), [])

    def test_force_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            # atomic block shouldn't rollback, but force it.
            self.assertFalse(transaction.get_rollback())
            transaction.set_rollback(True)
        self.assertSequenceEqual(Reporter.objects.all(), [])

    def test_prevent_rollback(self):
        with transaction.atomic():
            reporter = Reporter.objects.create(first_name="Tintin")
            sid = transaction.savepoint()
            # trigger a database error inside an inner atomic without savepoint
            with self.assertRaises(DatabaseError):
                with transaction.atomic(savepoint=False):
                    with connection.cursor() as cursor:
                        cursor.execute("SELECT no_such_col FROM transactions_reporter")
            # prevent atomic from rolling back since we're recovering manually
            self.assertTrue(transaction.get_rollback())
            transaction.set_rollback(False)
            transaction.savepoint_rollback(sid)
        self.assertSequenceEqual(Reporter.objects.all(), [reporter])

    @skipUnlessDBFeature("can_release_savepoints")
    def test_failure_on_exit_transaction(self):
        with transaction.atomic():
            with self.assertRaises(DatabaseError):
                with transaction.atomic():
                    Reporter.objects.create(last_name="Tintin")
                    self.assertEqual(len(Reporter.objects.all()), 1)
                    # Incorrect savepoint id to provoke a database error.
                    connection.savepoint_ids.append("12")
            with self.assertRaises(transaction.TransactionManagementError):
                len(Reporter.objects.all())
            self.assertIs(connection.needs_rollback, True)
            if connection.savepoint_ids:
                connection.savepoint_ids.pop()
        self.assertSequenceEqual(Reporter.objects.all(), [])
| AtomicTests |
python | python-pillow__Pillow | src/PIL/DdsImagePlugin.py | {
"start": 7830,
"end": 14086
class DdsImageFile(ImageFile.ImageFile):
    """Image plugin for DirectDraw Surface (.dds) files.

    Parses the 124-byte DDS header, determines the pixel format (uncompressed
    RGB(A)/luminance/palette, or a FourCC-identified block-compressed format),
    and sets up the appropriate decoder tile.
    """

    format = "DDS"
    format_description = "DirectDraw Surface"

    def _open(self) -> None:
        assert self.fp is not None
        if not _accept(self.fp.read(4)):
            msg = "not a DDS file"
            raise SyntaxError(msg)
        (header_size,) = struct.unpack("<I", self.fp.read(4))
        # The DDS_HEADER structure is always 124 bytes (excluding the magic).
        if header_size != 124:
            msg = f"Unsupported header size {repr(header_size)}"
            raise OSError(msg)
        header = self.fp.read(header_size - 4)
        if len(header) != 120:
            msg = f"Incomplete header: {len(header)} bytes"
            raise OSError(msg)

        # All DDS fields are little-endian 32-bit unsigned integers.
        flags, height, width = struct.unpack("<3I", header[:12])
        self._size = (width, height)
        extents = (0, 0) + self.size

        pitch, depth, mipmaps = struct.unpack("<3I", header[12:24])
        struct.unpack("<11I", header[24:68])  # reserved

        # pixel format
        pfsize, pfflags, fourcc, bitcount = struct.unpack("<4I", header[68:84])

        # n selects the BCn decoder variant; 0 means "raw" (uncompressed).
        n = 0
        rawmode = None
        if pfflags & DDPF.RGB:
            # Texture contains uncompressed RGB data
            if pfflags & DDPF.ALPHAPIXELS:
                self._mode = "RGBA"
                mask_count = 4
            else:
                self._mode = "RGB"
                mask_count = 3

            # Per-channel bit masks follow the pixel-format block.
            masks = struct.unpack(f"<{mask_count}I", header[84 : 84 + mask_count * 4])
            self.tile = [ImageFile._Tile("dds_rgb", extents, 0, (bitcount, masks))]
            return
        elif pfflags & DDPF.LUMINANCE:
            if bitcount == 8:
                self._mode = "L"
            elif bitcount == 16 and pfflags & DDPF.ALPHAPIXELS:
                self._mode = "LA"
            else:
                msg = f"Unsupported bitcount {bitcount} for {pfflags}"
                raise OSError(msg)
        elif pfflags & DDPF.PALETTEINDEXED8:
            # 8-bit palette indices; the 256-entry RGBA palette precedes the data.
            self._mode = "P"
            self.palette = ImagePalette.raw("RGBA", self.fp.read(1024))
            self.palette.mode = "RGBA"
        elif pfflags & DDPF.FOURCC:
            # Block-compressed data starts right after the header (and after
            # the 20-byte DX10 extension header, when present).
            offset = header_size + 4
            if fourcc == D3DFMT.DXT1:
                self._mode = "RGBA"
                self.pixel_format = "DXT1"
                n = 1
            elif fourcc == D3DFMT.DXT3:
                self._mode = "RGBA"
                self.pixel_format = "DXT3"
                n = 2
            elif fourcc == D3DFMT.DXT5:
                self._mode = "RGBA"
                self.pixel_format = "DXT5"
                n = 3
            elif fourcc in (D3DFMT.BC4U, D3DFMT.ATI1):
                self._mode = "L"
                self.pixel_format = "BC4"
                n = 4
            elif fourcc == D3DFMT.BC5S:
                self._mode = "RGB"
                self.pixel_format = "BC5S"
                n = 5
            elif fourcc in (D3DFMT.BC5U, D3DFMT.ATI2):
                self._mode = "RGB"
                self.pixel_format = "BC5"
                n = 5
            elif fourcc == D3DFMT.DX10:
                offset += 20
                # ignoring flags which pertain to volume textures and cubemaps
                (dxgi_format,) = struct.unpack("<I", self.fp.read(4))
                self.fp.read(16)
                if dxgi_format in (
                    DXGI_FORMAT.BC1_UNORM,
                    DXGI_FORMAT.BC1_TYPELESS,
                ):
                    self._mode = "RGBA"
                    self.pixel_format = "BC1"
                    n = 1
                elif dxgi_format in (DXGI_FORMAT.BC2_TYPELESS, DXGI_FORMAT.BC2_UNORM):
                    self._mode = "RGBA"
                    self.pixel_format = "BC2"
                    n = 2
                elif dxgi_format in (DXGI_FORMAT.BC3_TYPELESS, DXGI_FORMAT.BC3_UNORM):
                    self._mode = "RGBA"
                    self.pixel_format = "BC3"
                    n = 3
                elif dxgi_format in (DXGI_FORMAT.BC4_TYPELESS, DXGI_FORMAT.BC4_UNORM):
                    self._mode = "L"
                    self.pixel_format = "BC4"
                    n = 4
                elif dxgi_format in (DXGI_FORMAT.BC5_TYPELESS, DXGI_FORMAT.BC5_UNORM):
                    self._mode = "RGB"
                    self.pixel_format = "BC5"
                    n = 5
                elif dxgi_format == DXGI_FORMAT.BC5_SNORM:
                    self._mode = "RGB"
                    self.pixel_format = "BC5S"
                    n = 5
                elif dxgi_format == DXGI_FORMAT.BC6H_UF16:
                    self._mode = "RGB"
                    self.pixel_format = "BC6H"
                    n = 6
                elif dxgi_format == DXGI_FORMAT.BC6H_SF16:
                    self._mode = "RGB"
                    self.pixel_format = "BC6HS"
                    n = 6
                elif dxgi_format in (
                    DXGI_FORMAT.BC7_TYPELESS,
                    DXGI_FORMAT.BC7_UNORM,
                    DXGI_FORMAT.BC7_UNORM_SRGB,
                ):
                    self._mode = "RGBA"
                    self.pixel_format = "BC7"
                    n = 7
                    if dxgi_format == DXGI_FORMAT.BC7_UNORM_SRGB:
                        self.info["gamma"] = 1 / 2.2
                elif dxgi_format in (
                    DXGI_FORMAT.R8G8B8A8_TYPELESS,
                    DXGI_FORMAT.R8G8B8A8_UNORM,
                    DXGI_FORMAT.R8G8B8A8_UNORM_SRGB,
                ):
                    self._mode = "RGBA"
                    if dxgi_format == DXGI_FORMAT.R8G8B8A8_UNORM_SRGB:
                        self.info["gamma"] = 1 / 2.2
                else:
                    msg = f"Unimplemented DXGI format {dxgi_format}"
                    raise NotImplementedError(msg)
            else:
                msg = f"Unimplemented pixel format {repr(fourcc)}"
                raise NotImplementedError(msg)
        else:
            msg = f"Unknown pixel format flags {pfflags}"
            raise NotImplementedError(msg)

        if n:
            self.tile = [
                ImageFile._Tile("bcn", extents, offset, (n, self.pixel_format))
            ]
        else:
            self.tile = [ImageFile._Tile("raw", extents, 0, rawmode or self.mode)]

    def load_seek(self, pos: int) -> None:
        # Frames are not supported; seeking is a no-op.
        pass
| DdsImageFile |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 32094,
"end": 40998
class Qwen3OmniMoeAudioEncoder(Qwen3OmniMoePreTrainedModel):
    """Audio encoder: mel-spectrogram features -> hidden states.

    Features are chunked into fixed windows, downsampled 8x in time via three
    strided 2-D convolutions, augmented with sinusoidal positions, passed
    through the transformer layers with block-wise attention, and projected
    to the output dimension.
    """

    config: Qwen3OmniMoeAudioEncoderConfig
    main_input_name = "input_features"
    input_modalities = "audio"
    _no_split_modules = ["Qwen3OmniMoeAudioEncoderLayer"]
    _supports_sdpa = True

    def __init__(self, config: Qwen3OmniMoeAudioEncoderConfig):
        super().__init__(config)
        self.dropout = config.dropout
        embed_dim = config.d_model
        self.num_mel_bins = config.num_mel_bins
        self.max_source_positions = config.max_source_positions
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        self.n_window = config.n_window
        self.positional_embedding = SinusoidsPositionEmbedding(self.max_source_positions, embed_dim)
        self.layers = nn.ModuleList([Qwen3OmniMoeAudioEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.ln_post = nn.LayerNorm(config.d_model)
        self.gradient_checkpointing = False
        # Three stride-2 convolutions: 8x temporal/frequency downsampling.
        self.conv2d1 = nn.Conv2d(1, config.downsample_hidden_size, 3, 2, padding=1)
        self.conv2d2 = nn.Conv2d(config.downsample_hidden_size, config.downsample_hidden_size, 3, 2, padding=1)
        self.conv2d3 = nn.Conv2d(config.downsample_hidden_size, config.downsample_hidden_size, 3, 2, padding=1)
        # Input width is the mel-bin axis after three ceil-div-by-2 reductions.
        self.conv_out = nn.Linear(
            config.downsample_hidden_size * ((((config.num_mel_bins + 1) // 2 + 1) // 2 + 1) // 2),
            config.d_model,
            bias=False,
        )
        self.proj1 = nn.Linear(config.d_model, config.d_model)
        self.act = ACT2FN[config.activation_function]
        self.proj2 = nn.Linear(config.d_model, config.output_dim)
        self.n_window_infer = self.config.n_window_infer
        self.conv_chunksize = self.config.conv_chunksize
        # Initialize weights and apply final processing
        self.post_init()

    def _freeze_parameters(self):
        # Disable gradients for every parameter (e.g. for frozen-encoder training).
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    def get_input_embeddings(self) -> nn.Module:
        # NOTE(review): `self.conv1` is never assigned in __init__ (only
        # conv2d1..conv2d3 exist), so this raises AttributeError unless
        # set_input_embeddings() was called first -- confirm whether
        # `conv2d1` was intended here.
        return self.conv1

    def set_input_embeddings(self, value: nn.Module):
        self.conv1 = value

    def _prepare_attention_mask(self, inputs_tensor: torch.Tensor, cu_seqlens: torch.Tensor) -> torch.Tensor:
        # Flash Attention 2 doesn't need a 4D mask and relies on `cu_seqlens/max_seqlen`
        # NOTE: the created attention masl only approximates the ragged FA2 attention by
        # allowing bidirectional attention within `cu_seqlens` blocks, and not attending between
        # blocks. Though it will not be a 100% match for FA2's `varlen` path
        if self.config._attn_implementation == "flash_attention_2":
            return None

        seq_length = inputs_tensor.shape[0]
        attention_mask = torch.full(
            [1, 1, seq_length, seq_length],
            torch.finfo(inputs_tensor.dtype).min,
            device=inputs_tensor.device,
            dtype=inputs_tensor.dtype,
        )
        # Zero out (i.e. allow) attention inside each [cu_seqlens[i-1], cu_seqlens[i]) block.
        for i in range(1, len(cu_seqlens)):
            attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0
        return attention_mask

    @auto_docstring
    def forward(
        self,
        input_features,
        feature_lens=None,
        aftercnn_lens=None,
    ):
        r"""
        feature_lens (`torch.LongTensor` of shape `(batch_size,)`):
            mel length
        aftercnn_lens (`torch.LongTensor` of shape `(batch_size,)`):
            mel length after cnn
        """
        # Recomputed from feature_lens; the passed-in aftercnn_lens is ignored.
        aftercnn_lens = _get_feat_extract_output_lengths(feature_lens)
        # Split each sample into windows of n_window * 2 mel frames; the last
        # window of a sample may be shorter (remainder), never zero-length.
        chunk_num = torch.ceil(feature_lens / (self.n_window * 2)).long()

        chunk_lengths = torch.tensor(
            [self.n_window * 2] * chunk_num.sum(),
            dtype=torch.long,
            device=feature_lens.device,
        )
        tail_chunk_index = F.pad(chunk_num, (1, 0), value=-1).cumsum(0)[1:]
        chunk_lengths[tail_chunk_index] = feature_lens % (self.n_window * 2)
        chunk_lengths[chunk_lengths == 0] = self.n_window * 2

        chunk_list = input_features.T.split(chunk_lengths.tolist(), dim=0)
        padded_feature = nn.utils.rnn.pad_sequence(chunk_list, batch_first=True).transpose(1, 2)
        feature_lens_after_cnn = _get_feat_extract_output_lengths(chunk_lengths)
        padded_mask_after_cnn = nn.utils.rnn.pad_sequence(
            [torch.ones(length, dtype=torch.bool, device=padded_feature.device) for length in feature_lens_after_cnn],
            batch_first=True,
        )
        padded_feature = padded_feature.unsqueeze(1)
        # Split to chunk to avoid OOM during convolution
        padded_embeds = []
        for chunk in padded_feature.split(self.conv_chunksize, dim=0):
            padded_embed = F.gelu(self.conv2d1(chunk))
            padded_embed = F.gelu(self.conv2d2(padded_embed))
            padded_embed = F.gelu(self.conv2d3(padded_embed))
            padded_embeds.append(padded_embed)
        padded_embed = torch.cat(padded_embeds, dim=0)
        # Flatten (channels, mel) into one feature axis before projecting.
        b, c, f, t = padded_embed.size()
        padded_embed = self.conv_out(padded_embed.permute(0, 3, 1, 2).contiguous().view(b, t, c * f))

        positional_embedding = (
            self.positional_embedding.positional_embedding[: padded_embed.shape[1], :]
            .unsqueeze(0)
            .to(padded_embed.dtype)
        )
        padded_embed = padded_embed + positional_embedding
        # Drop the padding positions; hidden_states is the ragged concatenation.
        hidden_states = padded_embed[padded_mask_after_cnn]
        cu_chunk_lens = [0]
        # Attention block size for inference, in post-CNN frames.
        window_aftercnn = padded_mask_after_cnn.shape[-1] * (self.n_window_infer // (self.n_window * 2))
        for cnn_len in aftercnn_lens:
            cu_chunk_lens += [window_aftercnn] * (cnn_len // window_aftercnn)
            remainder = cnn_len % window_aftercnn
            if remainder != 0:
                cu_chunk_lens += [remainder]
        cu_seqlens = torch.tensor(cu_chunk_lens, device=aftercnn_lens.device).cumsum(-1, dtype=torch.int32)

        for encoder_layer in self.layers:
            layer_outputs = encoder_layer(
                hidden_states,
                cu_seqlens,
            )
            hidden_states = layer_outputs[0]

        hidden_states = self.ln_post(hidden_states)
        hidden_states = self.proj1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.proj2(hidden_states)
        return BaseModelOutput(last_hidden_state=hidden_states)

    def padded_and_mask_function(self, tensor_list, tensor_len, padding_value=0, padding_side="right"):
        """
        Pads a sequence of tensors to their maximum length on indicated `padding_side`.
        Then prepares a mask so that pad tokens are not attended to.
        """
        max_len = tensor_len.max()
        dim = tensor_list[0].shape[0]
        padded_tensor = torch.full(
            size=(len(tensor_list), dim, max_len),
            fill_value=padding_value,
            dtype=self.dtype,
            device=tensor_list[0].device,
        )

        batch_mask = torch.zeros(
            (len(tensor_len), max_len),
            dtype=torch.long,
            device=padded_tensor.device,
        )
        for i, length in enumerate(tensor_len):
            batch_mask[i, :length] = 1
            padded_tensor[i, :, :length] = tensor_list[i]

        feature_lens_after_cnn = (tensor_len - 1) // 2 + 1
        max_len_after_cnn = feature_lens_after_cnn.max()
        batch_mask_after_cnn = torch.zeros(
            (len(tensor_len), max_len_after_cnn),
            dtype=torch.long,
            device=padded_tensor.device,
        )
        for i, length in enumerate(feature_lens_after_cnn):
            batch_mask_after_cnn[i, :length] = 1
        return (
            padded_tensor,
            batch_mask.unsqueeze(1),
            batch_mask_after_cnn.bool(),
        )

    # Ignore copy
    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers and the output length of the audio encoder
        """
        input_lengths = (input_lengths - 1) // 2 + 1
        output_lengths = (input_lengths - 2) // 2 + 1
        return input_lengths, output_lengths
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    lower, upper = x[..., :half], x[..., half:]
    # (x1, x2) -> (-x2, x1)
    return torch.cat((-upper, lower), dim=-1)
def apply_rotary_pos_emb_vision(
    q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
    """Apply rotary position embeddings to vision query/key tensors.

    cos/sin are broadcast over the head dimension (unsqueezed at dim -2);
    the rotation is computed in float32 and cast back to the input dtypes.
    """
    q_dtype, k_dtype = q.dtype, k.dtype
    q32 = q.float()
    k32 = k.float()
    cos32 = cos.unsqueeze(-2).float()
    sin32 = sin.unsqueeze(-2).float()
    q_out = q32 * cos32 + rotate_half(q32) * sin32
    k_out = k32 * cos32 + rotate_half(k32) * sin32
    return q_out.to(q_dtype), k_out.to(k_dtype)
| Qwen3OmniMoeAudioEncoder |
python | lepture__authlib | authlib/oidc/core/claims.py | {
"start": 614,
"end": 6849
class IDToken(JWTClaims):
    """OpenID Connect ID Token claims with OIDC Core validation rules."""

    # Claims that must always be present in an ID Token.
    ESSENTIAL_CLAIMS = ["iss", "sub", "aud", "exp", "iat"]

    def validate(self, now=None, leeway=0):
        """Validate all registered and OIDC-specific claims.

        Raises MissingClaimError for absent essential claims and
        InvalidClaimError for claims that fail their checks. `leeway`
        (seconds) relaxes the time-based checks.
        """
        for k in self.ESSENTIAL_CLAIMS:
            if k not in self:
                raise MissingClaimError(k)

        self._validate_essential_claims()
        if now is None:
            now = int(time.time())

        self.validate_iss()
        self.validate_sub()
        self.validate_aud()
        self.validate_exp(now, leeway)
        self.validate_nbf(now, leeway)
        self.validate_iat(now, leeway)
        self.validate_auth_time()
        self.validate_nonce()
        self.validate_acr()
        self.validate_amr()
        self.validate_azp()
        self.validate_at_hash()

    def validate_auth_time(self):
        """Time when the End-User authentication occurred. Its value is a JSON
        number representing the number of seconds from 1970-01-01T0:0:0Z as
        measured in UTC until the date/time. When a max_age request is made or
        when auth_time is requested as an Essential Claim, then this Claim is
        REQUIRED; otherwise, its inclusion is OPTIONAL.
        """
        auth_time = self.get("auth_time")
        # Required when the request used max_age; otherwise optional.
        if self.params.get("max_age") and not auth_time:
            raise MissingClaimError("auth_time")

        if auth_time and not isinstance(auth_time, (int, float)):
            raise InvalidClaimError("auth_time")

    def validate_nonce(self):
        """String value used to associate a Client session with an ID Token,
        and to mitigate replay attacks. The value is passed through unmodified
        from the Authentication Request to the ID Token. If present in the ID
        Token, Clients MUST verify that the nonce Claim Value is equal to the
        value of the nonce parameter sent in the Authentication Request. If
        present in the Authentication Request, Authorization Servers MUST
        include a nonce Claim in the ID Token with the Claim Value being the
        nonce value sent in the Authentication Request. Authorization Servers
        SHOULD perform no other processing on nonce values used. The nonce
        value is a case sensitive string.
        """
        nonce_value = self.params.get("nonce")
        if nonce_value:
            if "nonce" not in self:
                raise MissingClaimError("nonce")
            if nonce_value != self["nonce"]:
                raise InvalidClaimError("nonce")

    def validate_acr(self):
        """OPTIONAL. Authentication Context Class Reference. String specifying
        an Authentication Context Class Reference value that identifies the
        Authentication Context Class that the authentication performed
        satisfied. The value "0" indicates the End-User authentication did not
        meet the requirements of `ISO/IEC 29115`_ level 1. Authentication
        using a long-lived browser cookie, for instance, is one example where
        the use of "level 0" is appropriate. Authentications with level 0
        SHOULD NOT be used to authorize access to any resource of any monetary
        value. An absolute URI or an `RFC 6711`_ registered name SHOULD be
        used as the acr value; registered names MUST NOT be used with a
        different meaning than that which is registered. Parties using this
        claim will need to agree upon the meanings of the values used, which
        may be context-specific. The acr value is a case sensitive string.

        .. _`ISO/IEC 29115`: https://www.iso.org/standard/45138.html
        .. _`RFC 6711`: https://tools.ietf.org/html/rfc6711
        """
        return self._validate_claim_value("acr")

    def validate_amr(self):
        """OPTIONAL. Authentication Methods References. JSON array of strings
        that are identifiers for authentication methods used in the
        authentication. For instance, values might indicate that both password
        and OTP authentication methods were used. The definition of particular
        values to be used in the amr Claim is beyond the scope of this
        specification. Parties using this claim will need to agree upon the
        meanings of the values used, which may be context-specific. The amr
        value is an array of case sensitive strings.
        """
        amr = self.get("amr")
        if amr and not isinstance(self["amr"], list):
            raise InvalidClaimError("amr")

    def validate_azp(self):
        """OPTIONAL. Authorized party - the party to which the ID Token was
        issued. If present, it MUST contain the OAuth 2.0 Client ID of this
        party. This Claim is only needed when the ID Token has a single
        audience value and that audience is different than the authorized
        party. It MAY be included even when the authorized party is the same
        as the sole audience. The azp value is a case sensitive string
        containing a StringOrURI value.
        """
        aud = self.get("aud")
        client_id = self.params.get("client_id")
        required = False
        if aud and client_id:
            # A single-element audience list counts as a single audience.
            if isinstance(aud, list) and len(aud) == 1:
                aud = aud[0]
            if aud != client_id:
                required = True

        azp = self.get("azp")
        if required and not azp:
            raise MissingClaimError("azp")

        if azp and client_id and azp != client_id:
            raise InvalidClaimError("azp")

    def validate_at_hash(self):
        """OPTIONAL. Access Token hash value. Its value is the base64url
        encoding of the left-most half of the hash of the octets of the ASCII
        representation of the access_token value, where the hash algorithm
        used is the hash algorithm used in the alg Header Parameter of the
        ID Token's JOSE Header. For instance, if the alg is RS256, hash the
        access_token value with SHA-256, then take the left-most 128 bits and
        base64url encode them. The at_hash value is a case sensitive string.
        """
        access_token = self.params.get("access_token")
        at_hash = self.get("at_hash")
        # Only verifiable when both the claim and the token are available.
        if at_hash and access_token:
            if not _verify_hash(at_hash, access_token, self.header["alg"]):
                raise InvalidClaimError("at_hash")
| IDToken |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor24.py | {
"start": 581,
"end": 993
class Container(Generic[T]):
    # Sample for pyright's strictParameterNoneValue option: `value: T = None`
    # is only permitted when that option is disabled.
    def __init__(self, value: T = None):
        self.value = value

    @classmethod
    def create(cls) -> "Container[T]":
        # This should generate an error if strictParameterNoneValue
        # is true because Container[T] being constructed is different
        # from the current Container[T].
        return Container[T]()

    def on_next(self, value: T):
        pass
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_index.py | {
"start": 15273,
"end": 17164
class OrganizationIndex2faTest(TwoFactorAPITestCase):
    """Tests that an org with 2FA enforcement redirects non-enrolled members
    to 2FA enrollment and admits them once enrolled."""

    endpoint = "sentry-organization-home"

    def setUp(self) -> None:
        # Org with 2FA required, plus a member who has not enrolled yet.
        self.org_2fa = self.create_organization(owner=self.create_user())
        self.enable_org_2fa(self.org_2fa)
        self.no_2fa_user = self.create_user()
        self.create_member(organization=self.org_2fa, user=self.no_2fa_user, role="member")

    def assert_redirected_to_2fa(self):
        # Org home responds with a 302 pointing at the 2FA enrollment page.
        response = self.get_success_response(self.org_2fa.slug, status_code=302)
        assert self.path_2fa in response.url

    def test_preexisting_members_must_enable_2fa(self) -> None:
        self.login_as(self.no_2fa_user)
        self.assert_redirected_to_2fa()

        # Authenticators live in the control silo.
        with assume_test_silo_mode(SiloMode.CONTROL):
            TotpInterface().enroll(self.no_2fa_user)

        self.get_success_response(self.org_2fa.slug)

    def test_new_member_must_enable_2fa(self) -> None:
        new_user = self.create_user()
        self.create_member(organization=self.org_2fa, user=new_user, role="member")
        self.login_as(new_user)

        self.assert_redirected_to_2fa()

        with assume_test_silo_mode(SiloMode.CONTROL):
            TotpInterface().enroll(new_user)

        self.get_success_response(self.org_2fa.slug)

    def test_member_disable_all_2fa_blocked(self) -> None:
        # Removing the user's last authenticator re-triggers the redirect.
        with assume_test_silo_mode(SiloMode.CONTROL):
            TotpInterface().enroll(self.no_2fa_user)
        self.login_as(self.no_2fa_user)

        self.get_success_response(self.org_2fa.slug)

        with assume_test_silo_mode(SiloMode.CONTROL):
            Authenticator.objects.get(user=self.no_2fa_user).delete()
        self.assert_redirected_to_2fa()

    def test_superuser_can_access_org_home(self) -> None:
        user = self.create_user(is_superuser=True)
        self.login_as(user, superuser=True)
        self.get_success_response(self.org_2fa.slug)
| OrganizationIndex2faTest |
python | doocs__leetcode | solution/1500-1599/1514.Path with Maximum Probability/Solution.py | {
"start": 0,
"end": 756
class Solution:
    def maxProbability(
        self,
        n: int,
        edges: List[List[int]],
        succProb: List[float],
        start_node: int,
        end_node: int,
    ) -> float:
        """Return the maximum success probability of any path from
        start_node to end_node (0 if unreachable), via a Dijkstra-style
        search that maximizes the product of edge probabilities."""
        adjacency: List[List[Tuple[int, float]]] = [[] for _ in range(n)]
        for (u, v), edge_prob in zip(edges, succProb):
            adjacency[u].append((v, edge_prob))
            adjacency[v].append((u, edge_prob))

        best = [0.0] * n
        best[start_node] = 1.0
        # Max-heap simulated with negated probabilities.
        heap = [(-1.0, start_node)]
        while heap:
            neg_prob, node = heappop(heap)
            prob_here = -neg_prob
            # Skip stale heap entries (a better probability was already found).
            if best[node] > prob_here:
                continue
            for neighbor, edge_prob in adjacency[node]:
                candidate = prob_here * edge_prob
                if candidate > best[neighbor]:
                    best[neighbor] = candidate
                    heappush(heap, (-candidate, neighbor))
        return best[end_node]
| Solution |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 24336,
"end": 25693
class GrapheneShutdownRepositoryLocationMutation(graphene.Mutation):
    """Shuts down a code location server."""

    Output = graphene.NonNull(GrapheneShutdownRepositoryLocationMutationResult)

    class Arguments:
        repositoryLocationName = graphene.NonNull(graphene.String)

    class Meta:
        name = "ShutdownRepositoryLocationMutation"

    @capture_error
    @require_permission_check(Permissions.RELOAD_REPOSITORY_LOCATION)
    def mutate(
        self, graphene_info: ResolveInfo, repositoryLocationName: str
    ) -> Union[GrapheneRepositoryLocationNotFound, GrapheneShutdownRepositoryLocationSuccess]:
        # Enforce the permission for this specific location.
        assert_permission_for_location(
            graphene_info, Permissions.RELOAD_REPOSITORY_LOCATION, repositoryLocationName
        )
        # Unknown locations are reported as a typed GraphQL result, not an error.
        if not graphene_info.context.has_code_location_name(repositoryLocationName):
            return GrapheneRepositoryLocationNotFound(repositoryLocationName)

        if not graphene_info.context.is_shutdown_supported(repositoryLocationName):
            raise Exception(
                f"Location {repositoryLocationName} does not support shutting down via GraphQL"
            )

        graphene_info.context.shutdown_code_location(repositoryLocationName)
        return GrapheneShutdownRepositoryLocationSuccess(
            repositoryLocationName=repositoryLocationName
        )
| GrapheneShutdownRepositoryLocationMutation |
python | walkccc__LeetCode | solutions/886. Possible Bipartition/886.py | {
"start": 79,
"end": 902
class Solution:
    def possibleBipartition(self, n: int, dislikes: list[list[int]]) -> bool:
        """Return True if the n people (1-indexed) can be split into two
        groups such that no disliking pair shares a group, i.e. the dislike
        graph is bipartite.

        Uses an iterative 2-coloring (the original recursive version could
        exceed Python's recursion limit on long dislike chains).
        """
        graph = [[] for _ in range(n + 1)]
        colors = [Color.WHITE] * (n + 1)

        for u, v in dislikes:
            graph[u].append(v)
            graph[v].append(u)

        # Reduce to 785. Is Graph Bipartite?
        for start in range(1, n + 1):
            if colors[start] != Color.WHITE:
                continue
            # Always paint red for a white component root.
            colors[start] = Color.RED
            stack = [start]
            while stack:
                u = stack.pop()
                childrenColor = Color.RED if colors[u] == Color.GREEN else Color.GREEN
                for v in graph[u]:
                    if colors[v] == Color.WHITE:
                        colors[v] = childrenColor
                        stack.append(v)
                    elif colors[v] != childrenColor:
                        # Two disliking people forced into the same group.
                        return False
        return True
| Solution |
python | kamyu104__LeetCode-Solutions | Python/increment-submatrices-by-one.py | {
"start": 89,
"end": 923
class Solution(object):
    def rangeAddQueries(self, n, queries):
        """
        :type n: int
        :type queries: List[List[int]]
        :rtype: List[List[int]]

        2-D difference array: record each query as four corner updates, then
        integrate with a row-wise and a column-wise prefix-sum pass, giving
        O(q + n^2) instead of the naive O(q * n^2).

        Note: `xrange` (Python-2-only) was replaced with `range`, which is
        behaviorally identical here and works on both Python 2 and 3.
        """
        result = [[0] * n for _ in range(n)]
        for r1, c1, r2, c2 in queries:
            result[r1][c1] += 1
            if c2 + 1 < len(result[0]):
                result[r1][c2 + 1] -= 1
            if r2 + 1 < len(result):
                result[r2 + 1][c1] -= 1
            if r2 + 1 < len(result) and c2 + 1 < len(result[0]):
                result[r2 + 1][c2 + 1] += 1
        # Row-wise prefix sums.
        for r in range(len(result)):
            for c in range(len(result[0]) - 1):
                result[r][c + 1] += result[r][c]
        # Column-wise prefix sums.
        for r in range(len(result) - 1):
            for c in range(len(result[0])):
                result[r + 1][c] += result[r][c]
        return result
| Solution |
python | palantir__python-language-server | versioneer.py | {
"start": 12658,
"end": 15934
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances are populated attribute-by-attribute by get_config_from_root()
    (VCS, style, versionfile_source, versionfile_build, tag_prefix,
    parentdir_prefix, verbose); the class has no behavior of its own.
    """
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .

    Raises VersioneerBadRootError when neither setup.py nor versioneer.py
    can be located from the current directory or sys.argv[0].
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(me)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        # __file__ is undefined when running via exec(); nothing to compare.
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    Returns a populated VersioneerConfig instance.
    """
    # This might raise EnvironmentError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    setup_cfg = os.path.join(root, "setup.cfg")
    # SafeConfigParser and readfp were deprecated aliases and were removed in
    # Python 3.12; ConfigParser/read_file are the identical-behavior
    # replacements (in Python 3, SafeConfigParser was just an alias).
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as f:
        parser.read_file(f)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def get(parser, name):
        # Return the option's value, or None when it is absent.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    # A quoted empty string in setup.cfg means "no tag prefix".
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg
| VersioneerConfig |
python | PyCQA__pylint | tests/functional/r/return_in_init.py | {
"start": 75,
"end": 152
} | class ____:
def __init__(self): # [return-in-init]
return 1
| MyClass |
python | huggingface__transformers | src/transformers/models/dab_detr/modeling_dab_detr.py | {
"start": 23474,
"end": 25413
} | class ____(nn.Module):
def __init__(self, config: DabDetrConfig):
super().__init__()
self.dropout = config.dropout
self.self_attn_query_content_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.self_attn_query_pos_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.self_attn_key_content_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.self_attn_key_pos_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.self_attn_value_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.self_attn = DabDetrAttention(config)
self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size)
def forward(
self,
hidden_states: torch.Tensor,
query_position_embeddings: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
):
residual = hidden_states
query_content = self.self_attn_query_content_proj(hidden_states)
query_pos = self.self_attn_query_pos_proj(query_position_embeddings)
key_content = self.self_attn_key_content_proj(hidden_states)
key_pos = self.self_attn_key_pos_proj(query_position_embeddings)
value = self.self_attn_value_proj(hidden_states)
query = query_content + query_pos
key = key_content + key_pos
hidden_states, attn_weights = self.self_attn(
hidden_states=query,
attention_mask=attention_mask,
key_states=key,
value_states=value,
output_attentions=True,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
return hidden_states, attn_weights
| DabDetrDecoderLayerSelfAttention |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-operations-to-sort-a-binary-tree-by-level.py | {
"start": 33,
"end": 138
} | class ____(object):
def __init__(self, val=0, left=None, right=None):
pass
# bfs, sort
| TreeNode |
python | getsentry__sentry | tests/sentry/runner/commands/test_backup.py | {
"start": 3829,
"end": 5508
} | class ____(TestCase):
"""
Test success cases of the `sentry backup compare` CLI command.
"""
def test_compare_equal(self) -> None:
rv = CliRunner().invoke(backup, ["compare", GOOD_FILE_PATH, GOOD_FILE_PATH])
assert rv.exit_code == 0, rv.output
assert "found 0" in rv.output
def test_compare_equal_findings_file(self) -> None:
with TemporaryDirectory() as tmp_dir:
tmp_findings = Path(tmp_dir).joinpath(f"{self._testMethodName}.findings.json")
rv = CliRunner().invoke(
backup,
["compare", GOOD_FILE_PATH, GOOD_FILE_PATH, "--findings-file", str(tmp_findings)],
)
assert rv.exit_code == 0, rv.output
with open(tmp_findings) as findings_file:
findings = json.load(findings_file)
assert len(findings) == 0
def test_compare_unequal(self) -> None:
rv = CliRunner().invoke(backup, ["compare", MAX_USER_PATH, MIN_USER_PATH])
assert rv.exit_code == 0, rv.output
assert "found 0" not in rv.output
def test_compare_unequal_findings_file(self) -> None:
with TemporaryDirectory() as tmp_dir:
tmp_findings = Path(tmp_dir).joinpath(f"{self._testMethodName}.findings.json")
rv = CliRunner().invoke(
backup,
["compare", MAX_USER_PATH, MIN_USER_PATH, "--findings-file", str(tmp_findings)],
)
assert rv.exit_code == 0, rv.output
with open(tmp_findings) as findings_file:
findings = json.load(findings_file)
assert len(findings) > 0
| GoodCompareCommandTests |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/execution/base.py | {
"start": 4091,
"end": 9290
} | class ____(object):
"""The result of execution. `data` is the result of executing the
query, `errors` is null if no errors occurred, and is a
non-empty array if an error occurred."""
__slots__ = 'data', 'errors', 'invalid'
def __init__(self, data=None, errors=None, invalid=False):
self.data = data
self.errors = errors
if invalid:
assert data is None
self.invalid = invalid
def __eq__(self, other):
return (
self is other or (
isinstance(other, ExecutionResult) and
self.data == other.data and
self.errors == other.errors and
self.invalid == other.invalid
)
)
def get_operation_root_type(schema, operation):
op = operation.operation
if op == 'query':
return schema.get_query_type()
elif op == 'mutation':
mutation_type = schema.get_mutation_type()
if not mutation_type:
raise GraphQLError(
'Schema is not configured for mutations',
[operation]
)
return mutation_type
elif op == 'subscription':
subscription_type = schema.get_subscription_type()
if not subscription_type:
raise GraphQLError(
'Schema is not configured for subscriptions',
[operation]
)
return subscription_type
raise GraphQLError(
'Can only execute queries, mutations and subscriptions',
[operation]
)
def collect_fields(ctx, runtime_type, selection_set, fields, prev_fragment_names):
"""
Given a selectionSet, adds all of the fields in that selection to
the passed in map of fields, and returns it at the end.
collect_fields requires the "runtime type" of an object. For a field which
returns and Interface or Union type, the "runtime type" will be the actual
Object type returned by that field.
"""
for selection in selection_set.selections:
directives = selection.directives
if isinstance(selection, ast.Field):
if not should_include_node(ctx, directives):
continue
name = get_field_entry_key(selection)
fields[name].append(selection)
elif isinstance(selection, ast.InlineFragment):
if not should_include_node(
ctx, directives) or not does_fragment_condition_match(
ctx, selection, runtime_type):
continue
collect_fields(ctx, runtime_type, selection.selection_set, fields, prev_fragment_names)
elif isinstance(selection, ast.FragmentSpread):
frag_name = selection.name.value
if frag_name in prev_fragment_names or not should_include_node(ctx, directives):
continue
prev_fragment_names.add(frag_name)
fragment = ctx.fragments.get(frag_name)
frag_directives = fragment.directives
if not fragment or not \
should_include_node(ctx, frag_directives) or not \
does_fragment_condition_match(ctx, fragment, runtime_type):
continue
collect_fields(ctx, runtime_type, fragment.selection_set, fields, prev_fragment_names)
return fields
def should_include_node(ctx, directives):
"""Determines if a field should be included based on the @include and
@skip directives, where @skip has higher precidence than @include."""
# TODO: Refactor based on latest code
if directives:
skip_ast = None
for directive in directives:
if directive.name.value == GraphQLSkipDirective.name:
skip_ast = directive
break
if skip_ast:
args = get_argument_values(
GraphQLSkipDirective.args,
skip_ast.arguments,
ctx.variable_values,
)
if args.get('if') is True:
return False
include_ast = None
for directive in directives:
if directive.name.value == GraphQLIncludeDirective.name:
include_ast = directive
break
if include_ast:
args = get_argument_values(
GraphQLIncludeDirective.args,
include_ast.arguments,
ctx.variable_values,
)
if args.get('if') is False:
return False
return True
def does_fragment_condition_match(ctx, fragment, type_):
type_condition_ast = fragment.type_condition
if not type_condition_ast:
return True
conditional_type = type_from_ast(ctx.schema, type_condition_ast)
if conditional_type.is_same_type(type_):
return True
if isinstance(conditional_type, (GraphQLInterfaceType, GraphQLUnionType)):
return ctx.schema.is_possible_type(conditional_type, type_)
return False
def get_field_entry_key(node):
"""Implements the logic to compute the key of a given field's entry"""
if node.alias:
return node.alias.value
return node.name.value
| ExecutionResult |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 83853,
"end": 86917
} | class ____(Function):
def cache_config(self, prefer_equal=False, prefer_cache=False,
prefer_shared=False):
prefer_equal = prefer_equal or (prefer_cache and prefer_shared)
attr = binding.CUfunction_attribute
if prefer_equal:
flag = attr.CU_FUNC_CACHE_PREFER_EQUAL
elif prefer_cache:
flag = attr.CU_FUNC_CACHE_PREFER_L1
elif prefer_shared:
flag = attr.CU_FUNC_CACHE_PREFER_SHARED
else:
flag = attr.CU_FUNC_CACHE_PREFER_NONE
driver.cuFuncSetCacheConfig(self.handle, flag)
def read_func_attr(self, attrid):
return driver.cuFuncGetAttribute(attrid, self.handle)
def read_func_attr_all(self):
attr = binding.CUfunction_attribute
nregs = self.read_func_attr(attr.CU_FUNC_ATTRIBUTE_NUM_REGS)
cmem = self.read_func_attr(attr.CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES)
lmem = self.read_func_attr(attr.CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES)
smem = self.read_func_attr(attr.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES)
maxtpb = self.read_func_attr(
attr.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK)
return FuncAttr(regs=nregs, const=cmem, local=lmem, shared=smem,
maxthreads=maxtpb)
def launch_kernel(cufunc_handle,
gx, gy, gz,
bx, by, bz,
sharedmem,
hstream,
args,
cooperative=False):
param_ptrs = [addressof(arg) for arg in args]
params = (c_void_p * len(param_ptrs))(*param_ptrs)
if USE_NV_BINDING:
params_for_launch = addressof(params)
extra = 0
else:
params_for_launch = params
extra = None
if cooperative:
driver.cuLaunchCooperativeKernel(cufunc_handle,
gx, gy, gz,
bx, by, bz,
sharedmem,
hstream,
params_for_launch)
else:
driver.cuLaunchKernel(cufunc_handle,
gx, gy, gz,
bx, by, bz,
sharedmem,
hstream,
params_for_launch,
extra)
if USE_NV_BINDING:
jitty = binding.CUjitInputType
FILE_EXTENSION_MAP = {
'o': jitty.CU_JIT_INPUT_OBJECT,
'ptx': jitty.CU_JIT_INPUT_PTX,
'a': jitty.CU_JIT_INPUT_LIBRARY,
'lib': jitty.CU_JIT_INPUT_LIBRARY,
'cubin': jitty.CU_JIT_INPUT_CUBIN,
'fatbin': jitty.CU_JIT_INPUT_FATBINARY,
}
else:
FILE_EXTENSION_MAP = {
'o': enums.CU_JIT_INPUT_OBJECT,
'ptx': enums.CU_JIT_INPUT_PTX,
'a': enums.CU_JIT_INPUT_LIBRARY,
'lib': enums.CU_JIT_INPUT_LIBRARY,
'cubin': enums.CU_JIT_INPUT_CUBIN,
'fatbin': enums.CU_JIT_INPUT_FATBINARY,
}
| CudaPythonFunction |
python | getsentry__sentry | src/sentry/core/endpoints/team_unresolved_issue_age.py | {
"start": 984,
"end": 2788
} | class ____(TeamEndpoint):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, team: Team) -> Response:
"""
Return a time bucketed list of how old unresolved issues are.
"""
if not features.has("organizations:team-insights", team.organization, actor=request.user):
return Response({"detail": "You do not have the insights feature enabled"}, status=400)
environments = [e.id for e in get_environments(request, team.organization)]
group_environment_filter = (
Q(groupenvironment__environment_id=environments[0]) if environments else Q()
)
current_time = timezone.now()
unresolved_ages = list(
Group.objects.filter_to_team(team)
.filter(
group_environment_filter,
status=GroupStatus.UNRESOLVED,
last_seen__gt=datetime.now(UTC) - timedelta(days=90),
)
.annotate(
bucket=Case(
*[
When(first_seen__gt=current_time - delta, then=Value(label))
for (label, delta) in buckets
],
default=Value(OLDEST_LABEL),
output_field=TextField(),
)
)
.values("bucket")
.annotate(count=Count("id"))
)
unresolved_ages_dict = {
unresolved["bucket"]: unresolved["count"] for unresolved in unresolved_ages
}
for label, _ in buckets:
unresolved_ages_dict.setdefault(label, 0)
unresolved_ages_dict.setdefault(OLDEST_LABEL, 0)
return Response(unresolved_ages_dict)
| TeamUnresolvedIssueAgeEndpoint |
python | joke2k__faker | tests/providers/test_lorem.py | {
"start": 33409,
"end": 36230
} | class ____:
"""Test it_IT lorem provider"""
word_list = [word.lower() for word in ItItLoremProvider.word_list]
def test_paragraph(self, faker, num_samples):
num_sentences = 10
for _ in range(num_samples):
paragraph = faker.paragraph(nb_sentences=num_sentences)
assert isinstance(paragraph, str)
words = paragraph.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_paragraphs(self, faker, num_samples):
num_paragraphs = 5
for _ in range(num_samples):
paragraphs = faker.paragraphs(nb=num_paragraphs)
for paragraph in paragraphs:
assert isinstance(paragraph, str)
words = paragraph.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_sentence(self, faker, num_samples):
num_words = 10
for _ in range(num_samples):
sentence = faker.sentence(nb_words=num_words)
assert isinstance(sentence, str)
words = sentence.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_sentences(self, faker, num_samples):
num_sentences = 5
for _ in range(num_samples):
sentences = faker.sentences(nb=num_sentences)
for sentence in sentences:
assert isinstance(sentence, str)
words = sentence.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_text(self, faker, num_samples):
num_chars = 25
for _ in range(num_samples):
text = faker.text(max_nb_chars=num_chars)
assert isinstance(text, str)
words = re.sub(r"[.\n]+", " ", text).split()
assert all(word.lower() in self.word_list for word in words)
def test_texts(self, faker, num_samples):
num_texts = 5
num_chars = 25
for _ in range(num_samples):
texts = faker.texts(max_nb_chars=num_chars, nb_texts=num_texts)
for text in texts:
assert isinstance(text, str)
words = re.sub(r"[.\n]+", " ", text).split()
assert all(word.lower() in self.word_list for word in words)
def test_word(self, faker, num_samples):
for _ in range(num_samples):
word = faker.word()
assert isinstance(word, str) and word in ItItLoremProvider.word_list
def test_words(self, faker, num_samples):
num_words = 5
for _ in range(num_samples):
words = faker.words(num_words)
assert all(isinstance(word, str) and word in ItItLoremProvider.word_list for word in words)
| TestItIt |
python | readthedocs__readthedocs.org | readthedocs/api/v2/serializers.py | {
"start": 12006,
"end": 12152
} | class ____(serializers.Serializer):
id = serializers.CharField(max_length=20)
name = serializers.CharField(max_length=20)
| ProviderSerializer |
python | facebookresearch__faiss | benchs/bench_fw/benchmark.py | {
"start": 9511,
"end": 11714
} | class ____(IndexOperator):
index_descs: List[IndexDescriptor] = field(default_factory=lambda: [])
serialize_index: bool = False
def get_desc(self, name: str) -> Optional[IndexDescriptor]:
for desc in self.index_descs:
if desc.get_name() == name:
return desc
return None
def get_flat_desc(self, name=None) -> Optional[IndexDescriptor]:
for desc in self.index_descs:
desc_name = desc.get_name()
if desc_name == name:
return desc
if desc_name.startswith("Flat"):
return desc
return None
def build_index_wrapper(self, index_desc: IndexDescriptor):
if hasattr(index_desc, "index"):
return
if hasattr(index_desc.codec_desc, "index"):
index_desc.index = index_desc.codec_desc.index
index_desc.index.database_vectors = index_desc.database_desc
index_desc.index.index_name = index_desc.get_name()
return
if index_desc.codec_desc is not None:
index = IndexFromCodec(
num_threads=self.num_threads,
d=index_desc.d,
metric=self.distance_metric,
database_vectors=index_desc.database_desc,
bucket=index_desc.codec_desc.bucket,
path=index_desc.codec_desc.path,
index_name=index_desc.get_name(),
codec_name=index_desc.codec_desc.get_name(),
serialize_full_index=self.serialize_index,
)
index.set_io(self.io)
index_desc.index = index
else:
assert index_desc.is_built()
def build_one(self, index_desc: IndexDescriptor, results: Dict[str, Any]):
faiss.omp_set_num_threads(index_desc.num_threads)
self.build_index_wrapper(index_desc)
if index_desc.is_built():
return
index_desc.index.get_index()
def build(self, results: Dict[str, Any]):
# TODO: add support for dry_run
for index_desc in self.index_descs:
self.build_one(index_desc, results)
return results, None
@dataclass
| BuildOperator |
python | encode__httpx | httpx/_config.py | {
"start": 5406,
"end": 6906
} | class ____:
"""
Configuration for limits to various client behaviors.
**Parameters:**
* **max_connections** - The maximum number of concurrent connections that may be
established.
* **max_keepalive_connections** - Allow the connection pool to maintain
keep-alive connections below this point. Should be less than or equal
to `max_connections`.
* **keepalive_expiry** - Time limit on idle keep-alive connections in seconds.
"""
def __init__(
self,
*,
max_connections: int | None = None,
max_keepalive_connections: int | None = None,
keepalive_expiry: float | None = 5.0,
) -> None:
self.max_connections = max_connections
self.max_keepalive_connections = max_keepalive_connections
self.keepalive_expiry = keepalive_expiry
def __eq__(self, other: typing.Any) -> bool:
return (
isinstance(other, self.__class__)
and self.max_connections == other.max_connections
and self.max_keepalive_connections == other.max_keepalive_connections
and self.keepalive_expiry == other.keepalive_expiry
)
def __repr__(self) -> str:
class_name = self.__class__.__name__
return (
f"{class_name}(max_connections={self.max_connections}, "
f"max_keepalive_connections={self.max_keepalive_connections}, "
f"keepalive_expiry={self.keepalive_expiry})"
)
| Limits |
python | getsentry__sentry | src/sentry/organizations/services/organization/model.py | {
"start": 5853,
"end": 6493
} | class ____(RpcOrganizationMappingFlags):
def as_int(self) -> int:
# Must maintain the same order as the ORM's `Organization.flags` fields
return flags_to_bits(
self.allow_joinleave,
self.enhanced_privacy,
self.disable_shared_issues,
self.early_adopter,
self.require_2fa,
self.disable_new_visibility_features,
self.require_email_verification,
self.codecov_access,
self.disable_member_project_creation,
self.prevent_superuser_access,
self.disable_member_invite,
)
| RpcOrganizationFlags |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_worksheet05.py | {
"start": 400,
"end": 2282
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with strings in cells."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.str_table = SharedStringTable()
worksheet.select()
# Write some strings.
worksheet.write_string(0, 0, "Foo")
worksheet.write_string(2, 0, "Bar")
worksheet.write_string(2, 3, "Baz")
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:D3"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:4">
<c r="A1" t="s">
<v>0</v>
</c>
</row>
<row r="3" spans="1:4">
<c r="A3" t="s">
<v>1</v>
</c>
<c r="D3" t="s">
<v>2</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | django__django | tests/queries/test_db_returning.py | {
"start": 291,
"end": 2660
} | class ____(TestCase):
def test_insert_returning(self):
with CaptureQueriesContext(connection) as captured_queries:
DumbCategory.objects.create()
self.assertIn(
"RETURNING %s.%s"
% (
connection.ops.quote_name(DumbCategory._meta.db_table),
connection.ops.quote_name(DumbCategory._meta.get_field("id").column),
),
captured_queries[-1]["sql"],
)
def test_insert_returning_non_integer(self):
obj = NonIntegerPKReturningModel.objects.create()
self.assertTrue(obj.created)
self.assertIsInstance(obj.created, datetime.datetime)
def test_insert_returning_non_integer_from_literal_value(self):
obj = NonIntegerPKReturningModel.objects.create(pk="2025-01-01")
self.assertTrue(obj.created)
self.assertIsInstance(obj.created, datetime.datetime)
def test_insert_returning_multiple(self):
with CaptureQueriesContext(connection) as captured_queries:
obj = ReturningModel.objects.create()
table_name = connection.ops.quote_name(ReturningModel._meta.db_table)
self.assertIn(
"RETURNING %s.%s, %s.%s"
% (
table_name,
connection.ops.quote_name(ReturningModel._meta.get_field("id").column),
table_name,
connection.ops.quote_name(
ReturningModel._meta.get_field("created").column
),
),
captured_queries[-1]["sql"],
)
self.assertEqual(
captured_queries[-1]["sql"]
.split("RETURNING ")[1]
.count(
connection.ops.quote_name(
ReturningModel._meta.get_field("created").column
),
),
1,
)
self.assertTrue(obj.pk)
self.assertIsInstance(obj.created, datetime.datetime)
@skipUnlessDBFeature("can_return_rows_from_bulk_insert")
def test_bulk_insert(self):
objs = [ReturningModel(), ReturningModel(pk=2**11), ReturningModel()]
ReturningModel.objects.bulk_create(objs)
for obj in objs:
with self.subTest(obj=obj):
self.assertTrue(obj.pk)
self.assertIsInstance(obj.created, datetime.datetime)
| ReturningValuesTests |
python | viewflow__viewflow | viewflow/views/list.py | {
"start": 15728,
"end": 15924
} | class ____(
BulkActionsMixin,
FilterableViewMixin,
OrderableListViewMixin,
SearchableViewMixin,
BaseListModelView,
):
"""
Render some list of objects.
"""
| ListModelView |
python | huggingface__transformers | src/transformers/models/mvp/modeling_mvp.py | {
"start": 54297,
"end": 61689
} | class ____(MvpPreTrainedModel):
def __init__(self, config: MvpConfig, **kwargs):
super().__init__(config, **kwargs)
self.model = MvpModel(config)
self.classification_head = MvpClassificationHead(
config.d_model,
config.d_model,
config.num_labels,
config.classifier_dropout,
)
# Initialize weights and apply final processing
self.post_init()
def set_lightweight_tuning(self):
self.model.set_lightweight_tuning()
self.classification_head.requires_grad_(False)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, Seq2SeqSequenceClassifierOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more
information on the default strategy.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Example of single-label classification:
Fine-tuning a model on `num_labels` classes
```python
>>> import torch
>>> from transformers import AutoTokenizer, MvpForSequenceClassification
>>> num_labels = 2 # for example, this is a binary classification task
>>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
>>> model = MvpForSequenceClassification.from_pretrained("RUCAIBox/mvp", num_labels=num_labels)
>>> inputs = tokenizer("Classify: Hello, my dog is cute", return_tensors="pt")
>>> labels = torch.tensor(1) # the real label for inputs
>>> loss = model(**inputs, labels=labels).loss
>>> loss.backward()
```
Inference after the model fine-tuned
```python
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_id = logits.argmax()
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0] # last hidden state
eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device)
if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
:, -1, :
]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.config.num_labels == 1:
self.config.problem_type = "regression"
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.config.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqSequenceClassifierOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@auto_docstring
| MvpForSequenceClassification |
python | kamyu104__LeetCode-Solutions | Python/find-smallest-letter-greater-than-target.py | {
"start": 48,
"end": 339
} | class ____(object):
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
i = bisect.bisect_right(letters, target)
return letters[0] if i == len(letters) else letters[i]
| Solution |
python | numba__numba | numba/tests/test_caching.py | {
"start": 38813,
"end": 40784
} | class ____(BaseCacheTest):
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cfunc_cache_usecases.py")
modname = "cfunc_caching_test_fodder"
def run_in_separate_process(self):
# Cached functions can be run from a distinct process.
code = """if 1:
import sys
sys.path.insert(0, %(tempdir)r)
mod = __import__(%(modname)r)
mod.self_test()
f = mod.add_usecase
assert f.cache_hits == 1
f = mod.outer
assert f.cache_hits == 1
f = mod.div_usecase
assert f.cache_hits == 1
""" % dict(tempdir=self.tempdir, modname=self.modname)
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError(f"process failed with code {popen.returncode}:"
f"stderr follows\n{err.decode()}\n")
def check_module(self, mod):
mod.self_test()
def test_caching(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(6) # 3 index, 3 data
self.assertEqual(mod.add_usecase.cache_hits, 0)
self.assertEqual(mod.outer.cache_hits, 0)
self.assertEqual(mod.add_nocache_usecase.cache_hits, 0)
self.assertEqual(mod.div_usecase.cache_hits, 0)
self.check_module(mod)
# Reload module to hit the cache
mod = self.import_module()
self.check_pycache(6) # 3 index, 3 data
self.assertEqual(mod.add_usecase.cache_hits, 1)
self.assertEqual(mod.outer.cache_hits, 1)
self.assertEqual(mod.add_nocache_usecase.cache_hits, 0)
self.assertEqual(mod.div_usecase.cache_hits, 1)
self.check_module(mod)
self.run_in_separate_process()
| TestCFuncCache |
python | getsentry__sentry | src/sentry/incidents/logic.py | {
"start": 11234,
"end": 16962
} | class ____(BaseMetricIssueQueryParams):
project_ids: list[int]
start_arg: datetime | None = None
end_arg: datetime | None = None
def _build_metric_query_builder(
params: BuildMetricQueryBuilderParams,
) -> BaseQueryBuilder:
start, end = _calculate_open_period_time_range(
CalculateOpenPeriodTimeRangeParams(
snuba_query=params.snuba_query,
date_started=params.date_started,
current_end_date=params.current_end_date,
organization=params.organization,
start_arg=params.start_arg,
end_arg=params.end_arg,
)
)
query_builder = params.entity_subscription.build_query_builder(
query=params.snuba_query.query,
project_ids=params.project_ids,
environment=params.snuba_query.environment,
params={
"organization_id": params.organization.id,
"project_id": params.project_ids,
"start": start,
"end": end,
},
)
for i, column in enumerate(query_builder.columns):
if column.alias == CRASH_RATE_ALERT_AGGREGATE_ALIAS:
query_builder.columns[i] = replace(column, alias="count")
entity_key = get_entity_key_from_query_builder(query_builder)
time_col = ENTITY_TIME_COLUMNS[entity_key]
entity = get_entity_from_query_builder(query_builder)
query_builder.add_conditions(
[
Condition(Column(time_col, entity=entity), Op.GTE, start),
Condition(Column(time_col, entity=entity), Op.LT, end),
]
)
query_builder.limit = Limit(10000)
return query_builder
def _calculate_open_period_time_range(
params: CalculateOpenPeriodTimeRangeParams,
) -> tuple[datetime, datetime]:
time_window = params.snuba_query.time_window
time_window_delta = timedelta(seconds=time_window)
start = (
(params.date_started - time_window_delta) if params.start_arg is None else params.start_arg
)
end = (
(params.current_end_date + time_window_delta) if params.end_arg is None else params.end_arg
)
retention = quotas.backend.get_event_retention(organization=params.organization) or 90
start = max(
start.replace(tzinfo=timezone.utc),
datetime.now(timezone.utc) - timedelta(days=retention),
)
end = max(start, end.replace(tzinfo=timezone.utc))
return start, end
def get_metric_issue_aggregates(
params: GetMetricIssueAggregatesParams,
) -> dict[str, float | int]:
"""
Calculates aggregate stats across the life of an incident, or the provided range.
"""
entity_subscription = get_entity_subscription_from_snuba_query(
params.snuba_query,
params.organization.id,
)
if entity_subscription.dataset == Dataset.EventsAnalyticsPlatform:
start, end = _calculate_open_period_time_range(
CalculateOpenPeriodTimeRangeParams(
snuba_query=params.snuba_query,
date_started=params.date_started,
current_end_date=params.current_end_date,
organization=params.organization,
start_arg=params.start_arg,
end_arg=params.end_arg,
)
)
snuba_params = SnubaParams(
environments=[params.snuba_query.environment],
projects=[
Project.objects.get_from_cache(id=project_id) for project_id in params.project_ids
],
organization=params.organization,
start=start,
end=end,
)
try:
results = Spans.run_table_query(
params=snuba_params,
query_string=params.snuba_query.query,
selected_columns=[entity_subscription.aggregate],
orderby=None,
offset=0,
limit=1,
referrer=Referrer.API_ALERTS_ALERT_RULE_CHART.value,
sampling_mode=None,
config=SearchResolverConfig(
auto_fields=True,
),
)
except Exception:
entity_key = EntityKey.EAPItems
metrics.incr(
"incidents.get_incident_aggregates.snql.query.error",
tags={
"dataset": params.snuba_query.dataset,
"entity": entity_key.value,
},
)
raise
else:
query_builder = _build_metric_query_builder(
BuildMetricQueryBuilderParams(
snuba_query=params.snuba_query,
organization=params.organization,
project_ids=params.project_ids,
entity_subscription=entity_subscription,
date_started=params.date_started,
current_end_date=params.current_end_date,
start_arg=params.start_arg,
end_arg=params.end_arg,
)
)
try:
results = query_builder.run_query(referrer="incidents.get_incident_aggregates")
except Exception:
metrics.incr(
"incidents.get_incident_aggregates.snql.query.error",
tags={
"dataset": params.snuba_query.dataset,
"entity": get_entity_key_from_query_builder(query_builder).value,
},
)
raise
aggregated_result = entity_subscription.aggregate_query_results(results["data"], alias="count")
return aggregated_result[0]
def get_incident_activity(incident: Incident) -> Iterable[IncidentActivity]:
return IncidentActivity.objects.filter(incident=incident).select_related("incident")
| GetMetricIssueAggregatesParams |
python | bokeh__bokeh | src/bokeh/models/widgets/pickers.py | {
"start": 10618,
"end": 11593
} | class ____(BaseDatetimePicker):
""" Calendar-based picker of dates and times. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
value = List(Datetime, default=[], help="""
The initial or picked dates and times.
""")
separator = String(default=", ", help="""
The separator between displayed dates and times.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| MultipleDatetimePicker |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass4.py | {
"start": 242,
"end": 475
} | class ____(ParentA[P, R]):
pass
def func(x: ParentA[[int], int]):
if isinstance(x, ChildA):
reveal_type(x, expected_text="ChildA[(int), int]")
else:
reveal_type(x, expected_text="ParentA[(int), int]")
| ChildA |
python | astropy__astropy | astropy/units/tests/test_logarithmic.py | {
"start": 6020,
"end": 7638
} | class ____:
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == "mag(Jy)"
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string("generic") == "mag(Jy)"
with pytest.raises(ValueError):
lu1.to_string("fits")
with pytest.raises(ValueError):
lu1.to_string(format="cds")
lu2 = u.dex()
assert str(lu2) == "dex"
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == "dex(1)"
lu3 = u.MagUnit(u.Jy, function_unit=2 * u.mag)
assert str(lu3) == "2 mag(Jy)"
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == "2 mag(Jy)"
lu4 = u.mag(u.ct)
assert lu4.to_string("generic") == "mag(ct)"
latex_str = r"$\mathrm{mag\left(ct\right)}$"
assert lu4.to_string("latex") == latex_str
assert lu4.to_string("latex_inline") == latex_str
assert lu4._repr_latex_() == latex_str
lu5 = u.mag(u.ct / u.s)
latex_str = r"$\mathrm{mag\left(\frac{ct}{s}\right)}$"
assert lu5.to_string("latex") == latex_str
latex_str = r"$\mathrm{mag\left(ct\,s^{-1}\right)}$"
assert lu5.to_string("latex_inline") == latex_str
def test_dex_latex_str(self):
# Regression test for gh-18618.
lu = u.dex(u.cm / u.s**2)
latex_str = r"$\mathrm{dex\left(\frac{cm}{s^{2}}\right)}$"
assert lu.to_string(format="latex") == latex_str
assert lu._repr_latex_() == latex_str
| TestLogUnitStrings |
python | doocs__leetcode | solution/2400-2499/2498.Frog Jump II/Solution.py | {
"start": 0,
"end": 214
} | class ____:
def maxJump(self, stones: List[int]) -> int:
ans = stones[1] - stones[0]
for i in range(2, len(stones)):
ans = max(ans, stones[i] - stones[i - 2])
return ans
| Solution |
python | bokeh__bokeh | src/bokeh/plotting/_figure.py | {
"start": 31982,
"end": 32890
} | class ____(BaseFigureOptions):
x_range = RangeLike(default=InstanceDefault(DataRange1d), help="""
Customize the x-range of the plot.
""")
y_range = RangeLike(default=InstanceDefault(DataRange1d), help="""
Customize the y-range of the plot.
""")
x_axis_type = AxisType(default="auto", help="""
The type of the x-axis.
""")
y_axis_type = AxisType(default="auto", help="""
The type of the y-axis.
""")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_color_fields = {"color", "fill_color", "line_color"}
_alpha_fields = {"alpha", "fill_alpha", "line_alpha"}
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| FigureOptions |
python | viewflow__viewflow | viewflow/utils.py | {
"start": 769,
"end": 3700
} | class ____(object):
def __init__(self, marker: str):
self.marker = marker
def __iter__(self):
return iter(())
def __repr__(self):
return self.marker
def __lt__(self, other) -> bool:
return self.marker < str(other)
DEFAULT = MARKER("DEFAULT")
IS_DEV = settings.DEBUG or not hasattr(mail, "outbox") # DEBUG or test mode
def first_not_default(*args):
"""
Return the first argument that is not the `DEFAULT` marker. If all arguments
are `DEFAULT`, return the last one.
"""
if not args:
return None
for arg in args:
if arg is not DEFAULT:
return arg
return arg
def camel_case_to_underscore(name):
"""
Convert a camel-cased string to an underscore-separated string.
For example, 'SomeString' becomes 'some_string'.
"""
return re.sub(
"([a-z0-9])([A-Z])", r"\1_\2", re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
).lower()
def camel_case_to_title(name):
"""
Convert a camel-cased string to a title-cased string.
For example, 'SomeString' becomes 'Some String'.
"""
return re.sub(
"([a-z0-9])([A-Z])", r"\1 \2", re.sub("(.)([A-Z][a-z]+)", r"\1 \2", name)
).capitalize()
def has_object_perm(user, short_perm_name, model, obj=None):
"""
Check if the user has the specified permission for the given model. If an
object is provided, and user has no model-wide permission, check if the user
has the permission for that specific object instance.
"""
perm_name = f"{model._meta.app_label}.{auth.get_permission_codename(short_perm_name, model._meta)}"
has_perm = user.has_perm(perm_name)
if not has_perm and obj is not None:
has_perm = user.has_perm(perm_name, obj=obj)
return has_perm
def strip_suffixes(word, suffixes):
"""
Strip the specified suffixes from the given word.
Never strip the whole word to an empty string.
"""
for suffix in suffixes:
if word != suffix and word.endswith(suffix):
word = word[: -len(suffix)]
return word
def get_app_package(app_label):
"""
Returns the name of the package that contains the specified app or None if
the app is not found.
"""
app_config = apps.get_app_config(app_label)
if not app_config:
return None
return app_config.module.__name__
def get_containing_app_data(module):
"""
Returns the app label and package string for the specified module.
"""
app_config = apps.get_containing_app_config(module)
if not app_config:
return None, None
return app_config.label, app_config.module.__name__
def is_owner(owner: models.Model, user: models.Model) -> bool:
"""
Checks whether the specified user instance or subclass is equal to the
specified owner instance or subclass.
"""
return isinstance(user, type(owner)) and owner.pk == user.pk
| MARKER |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 4348,
"end": 5284
} | class ____(PrefectFilterBaseModel):
"""Filter by `Flow.name`."""
any_: Optional[list[str]] = Field(
default=None,
description="A list of flow names to include",
examples=[["my-flow-1", "my-flow-2"]],
)
like_: Optional[str] = Field(
default=None,
description=(
"A case-insensitive partial match. For example, "
" passing 'marvin' will match "
"'marvin', 'sad-Marvin', and 'marvin-robot'."
),
examples=["marvin"],
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.Flow.name.in_(self.any_))
if self.like_ is not None:
filters.append(db.Flow.name.ilike(f"%{self.like_}%"))
return filters
| FlowFilterName |
python | huggingface__transformers | tests/models/megatron_bert/test_modeling_megatron_bert.py | {
"start": 15005,
"end": 16097
} | class ____(unittest.TestCase):
@slow
@unittest.skip(reason="Model is not available.")
def test_inference_no_head(self):
directory = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
directory = os.path.join(os.environ["MYDIR"], directory)
model = MegatronBertModel.from_pretrained(directory)
model.to(torch_device)
model.half()
input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
with torch.no_grad():
output = model(input_ids)[0]
expected_shape = torch.Size((1, 9, 1024))
self.assertEqual(output.shape, expected_shape)
expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3):
for jj in range(3):
a = output[0, ii, jj]
b = expected[3 * ii + jj]
msg = f"ii={ii} jj={jj} a={a} b={b}"
self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| MegatronBertModelIntegrationTests |
python | ansible__ansible | test/units/module_utils/basic/test_run_command.py | {
"start": 1855,
"end": 3508
} | class ____:
def __init__(self, fileobj):
self.fileobj = fileobj
@pytest.fixture
def mock_subprocess(mocker):
class MockSelector(selectors.BaseSelector):
def __init__(self):
super(MockSelector, self).__init__()
self._file_objs = []
def register(self, fileobj, events, data=None):
self._file_objs.append(fileobj)
def unregister(self, fileobj):
self._file_objs.remove(fileobj)
def select(self, timeout=None):
ready = []
for file_obj in self._file_objs:
ready.append((DummyKey(subprocess._output[file_obj.fileno()]), selectors.EVENT_READ))
return ready
def get_map(self):
return self._file_objs
def close(self):
super(MockSelector, self).close()
self._file_objs = []
selectors.PollSelector = MockSelector
subprocess = mocker.patch('ansible.module_utils.basic.subprocess')
subprocess._output = {mocker.sentinel.stdout: SpecialBytesIO(b'', fh=mocker.sentinel.stdout),
mocker.sentinel.stderr: SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
cmd = mocker.MagicMock()
cmd.returncode = 0
cmd.stdin = OpenBytesIO()
cmd.stdout = subprocess._output[mocker.sentinel.stdout]
cmd.stderr = subprocess._output[mocker.sentinel.stderr]
subprocess.Popen.return_value = cmd
yield subprocess
@pytest.fixture()
def rc_am(mocker, am, mock_os, mock_subprocess):
am.fail_json = mocker.MagicMock(side_effect=SystemExit)
am._os = mock_os
am._subprocess = mock_subprocess
yield am
| DummyKey |
python | huggingface__transformers | src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py | {
"start": 30767,
"end": 31681
} | class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@auto_docstring(
custom_intro="""
XLM-RoBERTa-XL Model with a `language modeling` head on top for CLM fine-tuning.
"""
)
| XLMRobertaXLClassificationHead |
python | kubernetes-client__python | kubernetes/client/models/v1_cluster_role.py | {
"start": 383,
"end": 7592
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'aggregation_rule': 'V1AggregationRule',
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'rules': 'list[V1PolicyRule]'
}
attribute_map = {
'aggregation_rule': 'aggregationRule',
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'rules': 'rules'
}
def __init__(self, aggregation_rule=None, api_version=None, kind=None, metadata=None, rules=None, local_vars_configuration=None): # noqa: E501
"""V1ClusterRole - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._aggregation_rule = None
self._api_version = None
self._kind = None
self._metadata = None
self._rules = None
self.discriminator = None
if aggregation_rule is not None:
self.aggregation_rule = aggregation_rule
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if rules is not None:
self.rules = rules
@property
def aggregation_rule(self):
"""Gets the aggregation_rule of this V1ClusterRole. # noqa: E501
:return: The aggregation_rule of this V1ClusterRole. # noqa: E501
:rtype: V1AggregationRule
"""
return self._aggregation_rule
@aggregation_rule.setter
def aggregation_rule(self, aggregation_rule):
"""Sets the aggregation_rule of this V1ClusterRole.
:param aggregation_rule: The aggregation_rule of this V1ClusterRole. # noqa: E501
:type: V1AggregationRule
"""
self._aggregation_rule = aggregation_rule
@property
def api_version(self):
"""Gets the api_version of this V1ClusterRole. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ClusterRole. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ClusterRole.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ClusterRole. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1ClusterRole. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ClusterRole. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ClusterRole.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ClusterRole. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ClusterRole. # noqa: E501
:return: The metadata of this V1ClusterRole. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ClusterRole.
:param metadata: The metadata of this V1ClusterRole. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def rules(self):
"""Gets the rules of this V1ClusterRole. # noqa: E501
Rules holds all the PolicyRules for this ClusterRole # noqa: E501
:return: The rules of this V1ClusterRole. # noqa: E501
:rtype: list[V1PolicyRule]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""Sets the rules of this V1ClusterRole.
Rules holds all the PolicyRules for this ClusterRole # noqa: E501
:param rules: The rules of this V1ClusterRole. # noqa: E501
:type: list[V1PolicyRule]
"""
self._rules = rules
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ClusterRole):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ClusterRole):
return True
return self.to_dict() != other.to_dict()
| V1ClusterRole |
python | walkccc__LeetCode | solutions/1585. Check If String Is Transformable With Substring Sort Operations/1585.py | {
"start": 0,
"end": 1021
} | class ____:
def isTransformable(self, s: str, t: str) -> bool:
if collections.Counter(s) != collections.Counter(t):
return False
positions = [collections.deque() for _ in range(10)]
for i, c in enumerate(s):
positions[int(c)].append(i)
# For each digit in `t`, check if we can put this digit in `s` at the same
# position as `t`. Ensure that all the left digits are equal to or greater
# than it. This is because the only operation we can perform is sorting in
# ascending order. If there is a digit to the left that is smaller than it,
# we can never move it to the same position as in `t`. However, if all the
# digits to its left are equal to or greater than it, we can move it one
# position to the left until it reaches the same position as in `t`.
for c in t:
d = int(c)
front = positions[d].popleft()
for smaller in range(d):
if positions[smaller] and positions[smaller][0] < front:
return False
return True
| Solution |
python | PrefectHQ__prefect | tests/_internal/compatibility/test_async_dispatch.py | {
"start": 5017,
"end": 6391
} | class ____:
async def test_is_in_async_context_from_coroutine(self):
"""Verify detection inside a coroutine"""
assert is_in_async_context() is True
def test_is_in_async_context_from_sync(self):
"""Verify detection in pure sync context"""
assert is_in_async_context() is False
async def test_is_in_async_context_with_nested_sync_in_worker_thread(self):
def sync_func() -> bool:
return is_in_async_context()
assert await run_sync_in_worker_thread(sync_func) is False
def test_is_in_async_context_with_running_loop(self):
"""Verify detection with just a running event loop"""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result: Optional[bool] = None
def check_context() -> None:
nonlocal result
result = is_in_async_context()
loop.stop()
try:
loop.call_soon(check_context)
loop.run_forever()
assert result is True, (
"the result we captured while loop was running should be True"
)
finally:
loop.close()
asyncio.set_event_loop(None)
assert is_in_async_context() is False, (
"the loop should be closed and not considered an async context"
)
| TestIsInAsyncContext |
python | wandb__wandb | wandb/vendor/pygments/lexers/jvm.py | {
"start": 43656,
"end": 46105
} | class ____(RegexLexer):
"""
For `Kotlin <http://kotlinlang.org/>`_
source code.
.. versionadded:: 1.5
"""
name = 'Kotlin'
aliases = ['kotlin']
filenames = ['*.kt']
mimetypes = ['text/x-kotlin']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
kt_name = ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc', 'Cf',
'Mn', 'Mc') + ']*')
kt_id = '(' + kt_name + '|`' + kt_name + '`)'
tokens = {
'root': [
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'::|!!|\?[:.]', Operator),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFL]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'(class)(\s+)(object)', bygroups(Keyword, Text, Keyword)),
(r'(class|interface|object)(\s+)', bygroups(Keyword, Text), 'class'),
(r'(package|import)(\s+)', bygroups(Keyword, Text), 'package'),
(r'(val|var)(\s+)', bygroups(Keyword, Text), 'property'),
(r'(fun)(\s+)', bygroups(Keyword, Text), 'function'),
(r'(abstract|annotation|as|break|by|catch|class|companion|const|'
r'constructor|continue|crossinline|data|do|dynamic|else|enum|'
r'external|false|final|finally|for|fun|get|if|import|in|infix|'
r'inline|inner|interface|internal|is|lateinit|noinline|null|'
r'object|open|operator|out|override|package|private|protected|'
r'public|reified|return|sealed|set|super|tailrec|this|throw|'
r'true|try|val|var|vararg|when|where|while)\b', Keyword),
(kt_id, Name),
],
'package': [
(r'\S+', Name.Namespace, '#pop')
],
'class': [
(kt_id, Name.Class, '#pop')
],
'property': [
(kt_id, Name.Property, '#pop')
],
'function': [
(kt_id, Name.Function, '#pop')
],
}
| KotlinLexer |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/update/tutorial002_py310.py | {
"start": 258,
"end": 388
} | class ____(HeroBase, table=True):
id: int | None = Field(default=None, primary_key=True)
hashed_password: str = Field()
| Hero |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 188572,
"end": 189182
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"repository_id",
"web_commit_signoff_required",
"client_mutation_id",
)
repository_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryId"
)
web_commit_signoff_required = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="webCommitSignoffRequired"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| UpdateRepositoryWebCommitSignoffSettingInput |
python | langchain-ai__langchain | libs/partners/openai/tests/unit_tests/chat_models/test_base.py | {
"start": 40311,
"end": 85779
} | class ____(BaseModel):
bar: int
# class FooV1(BaseModelV1):
# bar: int
@pytest.mark.parametrize(
"schema",
[
Foo
# FooV1
],
)
def test_schema_from_with_structured_output(schema: type) -> None:
"""Test schema from with_structured_output."""
llm = ChatOpenAI(model="gpt-4o")
structured_llm = llm.with_structured_output(
schema, method="json_schema", strict=True
)
expected = {
"properties": {"bar": {"title": "Bar", "type": "integer"}},
"required": ["bar"],
"title": schema.__name__,
"type": "object",
}
actual = structured_llm.get_output_schema().model_json_schema()
assert actual == expected
def test__create_usage_metadata() -> None:
usage_metadata = {
"completion_tokens": 15,
"prompt_tokens_details": None,
"completion_tokens_details": None,
"prompt_tokens": 11,
"total_tokens": 26,
}
result = _create_usage_metadata(usage_metadata)
assert result == UsageMetadata(
output_tokens=15,
input_tokens=11,
total_tokens=26,
input_token_details={},
output_token_details={},
)
def test__create_usage_metadata_responses() -> None:
response_usage_metadata = {
"input_tokens": 100,
"input_tokens_details": {"cached_tokens": 50},
"output_tokens": 50,
"output_tokens_details": {"reasoning_tokens": 10},
"total_tokens": 150,
}
result = _create_usage_metadata_responses(response_usage_metadata)
assert result == UsageMetadata(
output_tokens=50,
input_tokens=100,
total_tokens=150,
input_token_details={"cache_read": 50},
output_token_details={"reasoning": 10},
)
def test__convert_to_openai_response_format() -> None:
# Test response formats that aren't tool-like.
response_format: dict = {
"type": "json_schema",
"json_schema": {
"name": "math_reasoning",
"schema": {
"type": "object",
"properties": {
"steps": {
"type": "array",
"items": {
"type": "object",
"properties": {
"explanation": {"type": "string"},
"output": {"type": "string"},
},
"required": ["explanation", "output"],
"additionalProperties": False,
},
},
"final_answer": {"type": "string"},
},
"required": ["steps", "final_answer"],
"additionalProperties": False,
},
"strict": True,
},
}
actual = _convert_to_openai_response_format(response_format)
assert actual == response_format
actual = _convert_to_openai_response_format(response_format["json_schema"])
assert actual == response_format
actual = _convert_to_openai_response_format(response_format, strict=True)
assert actual == response_format
with pytest.raises(ValueError):
_convert_to_openai_response_format(response_format, strict=False)
@pytest.mark.parametrize("method", ["function_calling", "json_schema"])
@pytest.mark.parametrize("strict", [True, None])
def test_structured_output_strict(
method: Literal["function_calling", "json_schema"], strict: bool | None
) -> None:
"""Test to verify structured output with strict=True."""
llm = ChatOpenAI(model="gpt-4o-2024-08-06")
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
llm.with_structured_output(Joke, method=method, strict=strict)
# Schema
llm.with_structured_output(Joke.model_json_schema(), method=method, strict=strict)
def test_nested_structured_output_strict() -> None:
"""Test to verify structured output with strict=True for nested object."""
llm = ChatOpenAI(model="gpt-4o-2024-08-06")
class SelfEvaluation(TypedDict):
score: int
text: str
class JokeWithEvaluation(TypedDict):
"""Joke to tell user."""
setup: str
punchline: str
_evaluation: SelfEvaluation
llm.with_structured_output(JokeWithEvaluation, method="json_schema")
def test__get_request_payload() -> None:
llm = ChatOpenAI(model="gpt-4o-2024-08-06")
messages: list = [
SystemMessage("hello"),
SystemMessage("bye", additional_kwargs={"__openai_role__": "developer"}),
SystemMessage(content=[{"type": "text", "text": "hello!"}]),
{"role": "human", "content": "how are you"},
{"role": "user", "content": [{"type": "text", "text": "feeling today"}]},
]
expected = {
"messages": [
{"role": "system", "content": "hello"},
{"role": "developer", "content": "bye"},
{"role": "system", "content": [{"type": "text", "text": "hello!"}]},
{"role": "user", "content": "how are you"},
{"role": "user", "content": [{"type": "text", "text": "feeling today"}]},
],
"model": "gpt-4o-2024-08-06",
"stream": False,
}
payload = llm._get_request_payload(messages)
assert payload == expected
# Test we coerce to developer role for o-series models
llm = ChatOpenAI(model="o3-mini")
payload = llm._get_request_payload(messages)
expected = {
"messages": [
{"role": "developer", "content": "hello"},
{"role": "developer", "content": "bye"},
{"role": "developer", "content": [{"type": "text", "text": "hello!"}]},
{"role": "user", "content": "how are you"},
{"role": "user", "content": [{"type": "text", "text": "feeling today"}]},
],
"model": "o3-mini",
"stream": False,
}
assert payload == expected
# Test we ignore reasoning blocks from other providers
reasoning_messages: list = [
{
"role": "user",
"content": [
{"type": "reasoning_content", "reasoning_content": "reasoning..."},
{"type": "text", "text": "reasoned response"},
],
},
{
"role": "user",
"content": [
{"type": "thinking", "thinking": "thinking..."},
{"type": "text", "text": "thoughtful response"},
],
},
]
expected = {
"messages": [
{
"role": "user",
"content": [{"type": "text", "text": "reasoned response"}],
},
{
"role": "user",
"content": [{"type": "text", "text": "thoughtful response"}],
},
],
"model": "o3-mini",
"stream": False,
}
payload = llm._get_request_payload(reasoning_messages)
assert payload == expected
def test_init_o1() -> None:
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter("error") # Treat warnings as errors
ChatOpenAI(model="o1", reasoning_effort="medium")
assert len(record) == 0
def test_init_minimal_reasoning_effort() -> None:
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter("error")
ChatOpenAI(model="gpt-5", reasoning_effort="minimal")
assert len(record) == 0
@pytest.mark.parametrize("use_responses_api", [False, True])
@pytest.mark.parametrize("use_max_completion_tokens", [True, False])
def test_minimal_reasoning_effort_payload(
use_max_completion_tokens: bool, use_responses_api: bool
) -> None:
"""Test that minimal reasoning effort is included in request payload."""
if use_max_completion_tokens:
kwargs = {"max_completion_tokens": 100}
else:
kwargs = {"max_tokens": 100}
init_kwargs: dict[str, Any] = {
"model": "gpt-5",
"reasoning_effort": "minimal",
"use_responses_api": use_responses_api,
**kwargs,
}
llm = ChatOpenAI(**init_kwargs)
messages = [
{"role": "developer", "content": "respond with just 'test'"},
{"role": "user", "content": "hello"},
]
payload = llm._get_request_payload(messages, stop=None)
# When using responses API, reasoning_effort becomes reasoning.effort
if use_responses_api:
assert "reasoning" in payload
assert payload["reasoning"]["effort"] == "minimal"
# For responses API, tokens param becomes max_output_tokens
assert payload["max_output_tokens"] == 100
else:
# For non-responses API, reasoning_effort remains as is
assert payload["reasoning_effort"] == "minimal"
if use_max_completion_tokens:
assert payload["max_completion_tokens"] == 100
else:
# max_tokens gets converted to max_completion_tokens in non-responses API
assert payload["max_completion_tokens"] == 100
def test_output_version_compat() -> None:
llm = ChatOpenAI(model="gpt-5", output_version="responses/v1")
assert llm._use_responses_api({}) is True
def test_verbosity_parameter_payload() -> None:
"""Test verbosity parameter is included in request payload for Responses API."""
llm = ChatOpenAI(model="gpt-5", verbosity="high", use_responses_api=True)
messages = [{"role": "user", "content": "hello"}]
payload = llm._get_request_payload(messages, stop=None)
assert payload["text"]["verbosity"] == "high"
def test_structured_output_old_model() -> None:
class Output(TypedDict):
"""output."""
foo: str
with pytest.warns(match="Cannot use method='json_schema'"):
llm = ChatOpenAI(model="gpt-4").with_structured_output(Output)
# assert tool calling was used instead of json_schema
assert "tools" in llm.steps[0].kwargs # type: ignore
assert "response_format" not in llm.steps[0].kwargs # type: ignore
def test_structured_outputs_parser() -> None:
parsed_response = GenerateUsername(name="alice", hair_color="black")
llm_output = ChatGeneration(
message=AIMessage(
content='{"name": "alice", "hair_color": "black"}',
additional_kwargs={"parsed": parsed_response},
)
)
output_parser = RunnableLambda(
partial(_oai_structured_outputs_parser, schema=GenerateUsername)
)
serialized = dumps(llm_output)
deserialized = loads(serialized)
assert isinstance(deserialized, ChatGeneration)
result = output_parser.invoke(cast(AIMessage, deserialized.message))
assert result == parsed_response
def test__construct_lc_result_from_responses_api_error_handling() -> None:
"""Test that errors in the response are properly raised."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
error=ResponseError(message="Test error", code="server_error"),
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[],
)
with pytest.raises(ValueError) as excinfo:
_construct_lc_result_from_responses_api(response)
assert "Test error" in str(excinfo.value)
def test__construct_lc_result_from_responses_api_basic_text_response() -> None:
"""Test a basic text response with no tools or special features."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseOutputMessage(
type="message",
id="msg_123",
content=[
ResponseOutputText(
type="output_text", text="Hello, world!", annotations=[]
)
],
role="assistant",
status="completed",
)
],
usage=ResponseUsage(
input_tokens=10,
output_tokens=3,
total_tokens=13,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
),
)
# v0
result = _construct_lc_result_from_responses_api(response, output_version="v0")
assert isinstance(result, ChatResult)
assert len(result.generations) == 1
assert isinstance(result.generations[0], ChatGeneration)
assert isinstance(result.generations[0].message, AIMessage)
assert result.generations[0].message.content == [
{"type": "text", "text": "Hello, world!", "annotations": []}
]
assert result.generations[0].message.id == "msg_123"
assert result.generations[0].message.usage_metadata
assert result.generations[0].message.usage_metadata["input_tokens"] == 10
assert result.generations[0].message.usage_metadata["output_tokens"] == 3
assert result.generations[0].message.usage_metadata["total_tokens"] == 13
assert result.generations[0].message.response_metadata["id"] == "resp_123"
assert result.generations[0].message.response_metadata["model_name"] == "gpt-4o"
# responses/v1
result = _construct_lc_result_from_responses_api(response)
assert result.generations[0].message.content == [
{"type": "text", "text": "Hello, world!", "annotations": [], "id": "msg_123"}
]
assert result.generations[0].message.id == "resp_123"
assert result.generations[0].message.response_metadata["id"] == "resp_123"
def test__construct_lc_result_from_responses_api_multiple_text_blocks() -> None:
"""Test a response with multiple text blocks."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseOutputMessage(
type="message",
id="msg_123",
content=[
ResponseOutputText(
type="output_text", text="First part", annotations=[]
),
ResponseOutputText(
type="output_text", text="Second part", annotations=[]
),
],
role="assistant",
status="completed",
)
],
)
result = _construct_lc_result_from_responses_api(response, output_version="v0")
assert len(result.generations[0].message.content) == 2
assert result.generations[0].message.content == [
{"type": "text", "text": "First part", "annotations": []},
{"type": "text", "text": "Second part", "annotations": []},
]
def test__construct_lc_result_from_responses_api_multiple_messages() -> None:
"""Test a response with multiple text blocks."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseOutputMessage(
type="message",
id="msg_123",
content=[
ResponseOutputText(type="output_text", text="foo", annotations=[])
],
role="assistant",
status="completed",
),
ResponseReasoningItem(
type="reasoning",
id="rs_123",
summary=[Summary(type="summary_text", text="reasoning foo")],
),
ResponseOutputMessage(
type="message",
id="msg_234",
content=[
ResponseOutputText(type="output_text", text="bar", annotations=[])
],
role="assistant",
status="completed",
),
],
)
# v0
result = _construct_lc_result_from_responses_api(response, output_version="v0")
assert result.generations[0].message.content == [
{"type": "text", "text": "foo", "annotations": []},
{"type": "text", "text": "bar", "annotations": []},
]
assert result.generations[0].message.additional_kwargs == {
"reasoning": {
"type": "reasoning",
"summary": [{"type": "summary_text", "text": "reasoning foo"}],
"id": "rs_123",
}
}
assert result.generations[0].message.id == "msg_234"
# responses/v1
result = _construct_lc_result_from_responses_api(response)
assert result.generations[0].message.content == [
{"type": "text", "text": "foo", "annotations": [], "id": "msg_123"},
{
"type": "reasoning",
"summary": [{"type": "summary_text", "text": "reasoning foo"}],
"id": "rs_123",
},
{"type": "text", "text": "bar", "annotations": [], "id": "msg_234"},
]
assert result.generations[0].message.id == "resp_123"
def test__construct_lc_result_from_responses_api_refusal_response() -> None:
"""Test a response with a refusal."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseOutputMessage(
type="message",
id="msg_123",
content=[
ResponseOutputRefusal(
type="refusal", refusal="I cannot assist with that request."
)
],
role="assistant",
status="completed",
)
],
)
# v0
result = _construct_lc_result_from_responses_api(response, output_version="v0")
assert result.generations[0].message.additional_kwargs["refusal"] == (
"I cannot assist with that request."
)
# responses/v1
result = _construct_lc_result_from_responses_api(response)
assert result.generations[0].message.content == [
{
"type": "refusal",
"refusal": "I cannot assist with that request.",
"id": "msg_123",
}
]
def test__construct_lc_result_from_responses_api_function_call_valid_json() -> None:
"""Test a response with a valid function call."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseFunctionToolCall(
type="function_call",
id="func_123",
call_id="call_123",
name="get_weather",
arguments='{"location": "New York", "unit": "celsius"}',
)
],
)
# v0
result = _construct_lc_result_from_responses_api(response, output_version="v0")
msg: AIMessage = cast(AIMessage, result.generations[0].message)
assert len(msg.tool_calls) == 1
assert msg.tool_calls[0]["type"] == "tool_call"
assert msg.tool_calls[0]["name"] == "get_weather"
assert msg.tool_calls[0]["id"] == "call_123"
assert msg.tool_calls[0]["args"] == {"location": "New York", "unit": "celsius"}
assert _FUNCTION_CALL_IDS_MAP_KEY in result.generations[0].message.additional_kwargs
assert (
result.generations[0].message.additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][
"call_123"
]
== "func_123"
)
# responses/v1
result = _construct_lc_result_from_responses_api(response)
msg = cast(AIMessage, result.generations[0].message)
assert msg.tool_calls
assert msg.content == [
{
"type": "function_call",
"id": "func_123",
"name": "get_weather",
"arguments": '{"location": "New York", "unit": "celsius"}',
"call_id": "call_123",
}
]
def test__construct_lc_result_from_responses_api_function_call_invalid_json() -> None:
"""Test a response with an invalid JSON function call."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseFunctionToolCall(
type="function_call",
id="func_123",
call_id="call_123",
name="get_weather",
arguments='{"location": "New York", "unit": "celsius"',
# Missing closing brace
)
],
)
result = _construct_lc_result_from_responses_api(response, output_version="v0")
msg: AIMessage = cast(AIMessage, result.generations[0].message)
assert len(msg.invalid_tool_calls) == 1
assert msg.invalid_tool_calls[0]["type"] == "invalid_tool_call"
assert msg.invalid_tool_calls[0]["name"] == "get_weather"
assert msg.invalid_tool_calls[0]["id"] == "call_123"
assert (
msg.invalid_tool_calls[0]["args"]
== '{"location": "New York", "unit": "celsius"'
)
assert "error" in msg.invalid_tool_calls[0]
assert _FUNCTION_CALL_IDS_MAP_KEY in result.generations[0].message.additional_kwargs
def test__construct_lc_result_from_responses_api_complex_response() -> None:
"""Test a complex response with multiple output types."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseOutputMessage(
type="message",
id="msg_123",
content=[
ResponseOutputText(
type="output_text",
text="Here's the information you requested:",
annotations=[],
)
],
role="assistant",
status="completed",
),
ResponseFunctionToolCall(
type="function_call",
id="func_123",
call_id="call_123",
name="get_weather",
arguments='{"location": "New York"}',
),
],
metadata={"key1": "value1", "key2": "value2"},
incomplete_details=IncompleteDetails(reason="max_output_tokens"),
status="completed",
user="user_123",
)
# v0
result = _construct_lc_result_from_responses_api(response, output_version="v0")
# Check message content
assert result.generations[0].message.content == [
{
"type": "text",
"text": "Here's the information you requested:",
"annotations": [],
}
]
# Check tool calls
msg: AIMessage = cast(AIMessage, result.generations[0].message)
assert len(msg.tool_calls) == 1
assert msg.tool_calls[0]["name"] == "get_weather"
# Check metadata
assert result.generations[0].message.response_metadata["id"] == "resp_123"
assert result.generations[0].message.response_metadata["metadata"] == {
"key1": "value1",
"key2": "value2",
}
assert result.generations[0].message.response_metadata["incomplete_details"] == {
"reason": "max_output_tokens"
}
assert result.generations[0].message.response_metadata["status"] == "completed"
assert result.generations[0].message.response_metadata["user"] == "user_123"
# responses/v1
result = _construct_lc_result_from_responses_api(response)
msg = cast(AIMessage, result.generations[0].message)
assert msg.response_metadata["metadata"] == {"key1": "value1", "key2": "value2"}
assert msg.content == [
{
"type": "text",
"text": "Here's the information you requested:",
"annotations": [],
"id": "msg_123",
},
{
"type": "function_call",
"id": "func_123",
"call_id": "call_123",
"name": "get_weather",
"arguments": '{"location": "New York"}',
},
]
def test__construct_lc_result_from_responses_api_no_usage_metadata() -> None:
"""Test a response without usage metadata."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseOutputMessage(
type="message",
id="msg_123",
content=[
ResponseOutputText(
type="output_text", text="Hello, world!", annotations=[]
)
],
role="assistant",
status="completed",
)
],
# No usage field
)
result = _construct_lc_result_from_responses_api(response)
assert cast(AIMessage, result.generations[0].message).usage_metadata is None
def test__construct_lc_result_from_responses_api_web_search_response() -> None:
"""Test a response with web search output."""
from openai.types.responses.response_function_web_search import (
ResponseFunctionWebSearch,
)
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseFunctionWebSearch(
id="websearch_123",
type="web_search_call",
status="completed",
action=ActionSearch(type="search", query="search query"),
)
],
)
# v0
result = _construct_lc_result_from_responses_api(response, output_version="v0")
assert "tool_outputs" in result.generations[0].message.additional_kwargs
assert len(result.generations[0].message.additional_kwargs["tool_outputs"]) == 1
assert (
result.generations[0].message.additional_kwargs["tool_outputs"][0]["type"]
== "web_search_call"
)
assert (
result.generations[0].message.additional_kwargs["tool_outputs"][0]["id"]
== "websearch_123"
)
assert (
result.generations[0].message.additional_kwargs["tool_outputs"][0]["status"]
== "completed"
)
# responses/v1
result = _construct_lc_result_from_responses_api(response)
assert result.generations[0].message.content == [
{
"type": "web_search_call",
"id": "websearch_123",
"status": "completed",
"action": {"query": "search query", "type": "search"},
}
]
def test__construct_lc_result_from_responses_api_file_search_response() -> None:
"""Test a response with file search output."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseFileSearchToolCall(
id="filesearch_123",
type="file_search_call",
status="completed",
queries=["python code", "langchain"],
results=[
Result(
file_id="file_123",
filename="example.py",
score=0.95,
text="def hello_world() -> None:\n print('Hello, world!')",
attributes={"language": "python", "size": 42},
)
],
)
],
)
# v0
result = _construct_lc_result_from_responses_api(response, output_version="v0")
assert "tool_outputs" in result.generations[0].message.additional_kwargs
assert len(result.generations[0].message.additional_kwargs["tool_outputs"]) == 1
assert (
result.generations[0].message.additional_kwargs["tool_outputs"][0]["type"]
== "file_search_call"
)
assert (
result.generations[0].message.additional_kwargs["tool_outputs"][0]["id"]
== "filesearch_123"
)
assert (
result.generations[0].message.additional_kwargs["tool_outputs"][0]["status"]
== "completed"
)
assert result.generations[0].message.additional_kwargs["tool_outputs"][0][
"queries"
] == ["python code", "langchain"]
assert (
len(
result.generations[0].message.additional_kwargs["tool_outputs"][0][
"results"
]
)
== 1
)
assert (
result.generations[0].message.additional_kwargs["tool_outputs"][0]["results"][
0
]["file_id"]
== "file_123"
)
assert (
result.generations[0].message.additional_kwargs["tool_outputs"][0]["results"][
0
]["score"]
== 0.95
)
# responses/v1
result = _construct_lc_result_from_responses_api(response)
assert result.generations[0].message.content == [
{
"type": "file_search_call",
"id": "filesearch_123",
"status": "completed",
"queries": ["python code", "langchain"],
"results": [
{
"file_id": "file_123",
"filename": "example.py",
"score": 0.95,
"text": "def hello_world() -> None:\n print('Hello, world!')",
"attributes": {"language": "python", "size": 42},
}
],
}
]
def test__construct_lc_result_from_responses_api_mixed_search_responses() -> None:
"""Test a response with both web search and file search outputs."""
response = Response(
id="resp_123",
created_at=1234567890,
model="gpt-4o",
object="response",
parallel_tool_calls=True,
tools=[],
tool_choice="auto",
output=[
ResponseOutputMessage(
type="message",
id="msg_123",
content=[
ResponseOutputText(
type="output_text", text="Here's what I found:", annotations=[]
)
],
role="assistant",
status="completed",
),
ResponseFunctionWebSearch(
id="websearch_123",
type="web_search_call",
status="completed",
action=ActionSearch(type="search", query="search query"),
),
ResponseFileSearchToolCall(
id="filesearch_123",
type="file_search_call",
status="completed",
queries=["python code"],
results=[
Result(
file_id="file_123",
filename="example.py",
score=0.95,
text="def hello_world() -> None:\n print('Hello, world!')",
)
],
),
],
)
# v0
result = _construct_lc_result_from_responses_api(response, output_version="v0")
# Check message content
assert result.generations[0].message.content == [
{"type": "text", "text": "Here's what I found:", "annotations": []}
]
# Check tool outputs
assert "tool_outputs" in result.generations[0].message.additional_kwargs
assert len(result.generations[0].message.additional_kwargs["tool_outputs"]) == 2
# Check web search output
web_search = next(
output
for output in result.generations[0].message.additional_kwargs["tool_outputs"]
if output["type"] == "web_search_call"
)
assert web_search["id"] == "websearch_123"
assert web_search["status"] == "completed"
# Check file search output
file_search = next(
output
for output in result.generations[0].message.additional_kwargs["tool_outputs"]
if output["type"] == "file_search_call"
)
assert file_search["id"] == "filesearch_123"
assert file_search["queries"] == ["python code"]
assert file_search["results"][0]["filename"] == "example.py"
# responses/v1
result = _construct_lc_result_from_responses_api(response)
assert result.generations[0].message.content == [
{
"type": "text",
"text": "Here's what I found:",
"annotations": [],
"id": "msg_123",
},
{
"type": "web_search_call",
"id": "websearch_123",
"status": "completed",
"action": {"type": "search", "query": "search query"},
},
{
"type": "file_search_call",
"id": "filesearch_123",
"queries": ["python code"],
"results": [
{
"file_id": "file_123",
"filename": "example.py",
"score": 0.95,
"text": "def hello_world() -> None:\n print('Hello, world!')",
}
],
"status": "completed",
},
]
def test__construct_responses_api_input_human_message_with_text_blocks_conversion() -> (
None
):
"""Test that human messages with text blocks are properly converted."""
messages: list = [
HumanMessage(content=[{"type": "text", "text": "What's in this image?"}])
]
result = _construct_responses_api_input(messages)
assert len(result) == 1
assert result[0]["role"] == "user"
assert isinstance(result[0]["content"], list)
assert len(result[0]["content"]) == 1
assert result[0]["content"][0]["type"] == "input_text"
assert result[0]["content"][0]["text"] == "What's in this image?"
def test__construct_responses_api_input_multiple_message_components() -> None:
"""Test that human messages with text blocks are properly converted."""
# v0
messages = [
AIMessage(
content=[{"type": "text", "text": "foo"}, {"type": "text", "text": "bar"}],
id="msg_123",
response_metadata={"id": "resp_123"},
)
]
result = _construct_responses_api_input(messages)
assert result == [
{
"type": "message",
"role": "assistant",
"content": [
{"type": "output_text", "text": "foo", "annotations": []},
{"type": "output_text", "text": "bar", "annotations": []},
],
"id": "msg_123",
}
]
# responses/v1
messages = [
AIMessage(
content=[
{"type": "text", "text": "foo", "id": "msg_123"},
{"type": "text", "text": "bar", "id": "msg_123"},
{"type": "refusal", "refusal": "I refuse.", "id": "msg_123"},
{"type": "text", "text": "baz", "id": "msg_234"},
]
)
]
result = _construct_responses_api_input(messages)
assert result == [
{
"type": "message",
"role": "assistant",
"content": [
{"type": "output_text", "text": "foo", "annotations": []},
{"type": "output_text", "text": "bar", "annotations": []},
{"type": "refusal", "refusal": "I refuse."},
],
"id": "msg_123",
},
{
"type": "message",
"role": "assistant",
"content": [{"type": "output_text", "text": "baz", "annotations": []}],
"id": "msg_234",
},
]
def test__construct_responses_api_input_human_message_with_image_url_conversion() -> (
None
):
"""Test that human messages with image_url blocks are properly converted."""
messages: list = [
HumanMessage(
content=[
{"type": "text", "text": "What's in this image?"},
{
"type": "image_url",
"image_url": {
"url": "https://example.com/image.jpg",
"detail": "high",
},
},
]
)
]
result = _construct_responses_api_input(messages)
assert len(result) == 1
assert result[0]["role"] == "user"
assert isinstance(result[0]["content"], list)
assert len(result[0]["content"]) == 2
# Check text block conversion
assert result[0]["content"][0]["type"] == "input_text"
assert result[0]["content"][0]["text"] == "What's in this image?"
# Check image block conversion
assert result[0]["content"][1]["type"] == "input_image"
assert result[0]["content"][1]["image_url"] == "https://example.com/image.jpg"
assert result[0]["content"][1]["detail"] == "high"
def test__construct_responses_api_input_ai_message_with_tool_calls() -> None:
"""Test that AI messages with tool calls are properly converted."""
tool_calls = [
{
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
"type": "tool_call",
}
]
ai_message = AIMessage(
content=[
{
"type": "function_call",
"name": "get_weather",
"arguments": '{"location": "San Francisco"}',
"call_id": "call_123",
"id": "fc_456",
}
],
tool_calls=tool_calls,
)
result = _construct_responses_api_input([ai_message])
assert len(result) == 1
assert result[0]["type"] == "function_call"
assert result[0]["name"] == "get_weather"
assert result[0]["arguments"] == '{"location": "San Francisco"}'
assert result[0]["call_id"] == "call_123"
assert result[0]["id"] == "fc_456"
# Message with only tool calls attribute provided
ai_message = AIMessage(content="", tool_calls=tool_calls)
result = _construct_responses_api_input([ai_message])
assert len(result) == 1
assert result[0]["type"] == "function_call"
assert result[0]["name"] == "get_weather"
assert result[0]["arguments"] == '{"location": "San Francisco"}'
assert result[0]["call_id"] == "call_123"
assert "id" not in result[0]
def test__construct_responses_api_input_ai_message_with_tool_calls_and_content() -> (
None
):
"""Test that AI messages with both tool calls and content are properly converted."""
tool_calls = [
{
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
"type": "tool_call",
}
]
# Content blocks
ai_message = AIMessage(
content=[
{"type": "text", "text": "I'll check the weather for you."},
{
"type": "function_call",
"name": "get_weather",
"arguments": '{"location": "San Francisco"}',
"call_id": "call_123",
"id": "fc_456",
},
],
tool_calls=tool_calls,
)
result = _construct_responses_api_input([ai_message])
assert len(result) == 2
assert result[0]["role"] == "assistant"
assert result[0]["content"] == [
{
"type": "output_text",
"text": "I'll check the weather for you.",
"annotations": [],
}
]
assert result[1]["type"] == "function_call"
assert result[1]["name"] == "get_weather"
assert result[1]["arguments"] == '{"location": "San Francisco"}'
assert result[1]["call_id"] == "call_123"
assert result[1]["id"] == "fc_456"
# String content
ai_message = AIMessage(
content="I'll check the weather for you.", tool_calls=tool_calls
)
result = _construct_responses_api_input([ai_message])
assert len(result) == 2
assert result[0]["role"] == "assistant"
assert result[0]["content"] == [
{
"type": "output_text",
"text": "I'll check the weather for you.",
"annotations": [],
}
]
assert result[1]["type"] == "function_call"
assert result[1]["name"] == "get_weather"
assert result[1]["arguments"] == '{"location": "San Francisco"}'
assert result[1]["call_id"] == "call_123"
assert "id" not in result[1]
def test__construct_responses_api_input_tool_message_conversion() -> None:
"""Test that tool messages are properly converted to function_call_output."""
messages = [
ToolMessage(
content='{"temperature": 72, "conditions": "sunny"}',
tool_call_id="call_123",
)
]
result = _construct_responses_api_input(messages)
assert len(result) == 1
assert result[0]["type"] == "function_call_output"
assert result[0]["output"] == '{"temperature": 72, "conditions": "sunny"}'
assert result[0]["call_id"] == "call_123"
def test__construct_responses_api_input_multiple_message_types() -> None:
"""Test conversion of a conversation with multiple message types."""
messages = [
SystemMessage(content="You are a helpful assistant."),
SystemMessage(
content=[{"type": "text", "text": "You are a very helpful assistant!"}]
),
HumanMessage(content="What's the weather in San Francisco?"),
HumanMessage(
content=[{"type": "text", "text": "What's the weather in San Francisco?"}]
),
AIMessage(
content="",
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
}
],
),
ToolMessage(
content='{"temperature": 72, "conditions": "sunny"}',
tool_call_id="call_123",
),
AIMessage(content="The weather in San Francisco is 72°F and sunny."),
AIMessage(
content=[
{
"type": "text",
"text": "The weather in San Francisco is 72°F and sunny.",
}
]
),
]
messages_copy = [m.model_copy(deep=True) for m in messages]
result = _construct_responses_api_input(messages)
assert len(result) == len(messages)
# Check system message
assert result[0]["role"] == "system"
assert result[0]["content"] == "You are a helpful assistant."
assert result[1]["role"] == "system"
assert result[1]["content"] == [
{"type": "input_text", "text": "You are a very helpful assistant!"}
]
# Check human message
assert result[2]["role"] == "user"
assert result[2]["content"] == "What's the weather in San Francisco?"
assert result[3]["role"] == "user"
assert result[3]["content"] == [
{"type": "input_text", "text": "What's the weather in San Francisco?"}
]
# Check function call
assert result[4]["type"] == "function_call"
assert result[4]["name"] == "get_weather"
assert result[4]["arguments"] == '{"location": "San Francisco"}'
assert result[4]["call_id"] == "call_123"
# Check function call output
assert result[5]["type"] == "function_call_output"
assert result[5]["output"] == '{"temperature": 72, "conditions": "sunny"}'
assert result[5]["call_id"] == "call_123"
assert result[6]["role"] == "assistant"
assert result[6]["content"] == [
{
"type": "output_text",
"text": "The weather in San Francisco is 72°F and sunny.",
"annotations": [],
}
]
assert result[7]["role"] == "assistant"
assert result[7]["content"] == [
{
"type": "output_text",
"text": "The weather in San Francisco is 72°F and sunny.",
"annotations": [],
}
]
# assert no mutation has occurred
assert messages_copy == messages
# Test dict messages
llm = ChatOpenAI(model="o4-mini", use_responses_api=True)
message_dicts: list = [
{"role": "developer", "content": "This is a developer message."},
{
"role": "developer",
"content": [{"type": "text", "text": "This is a developer message!"}],
},
]
payload = llm._get_request_payload(message_dicts)
result = payload["input"]
assert len(result) == 2
assert result[0]["role"] == "developer"
assert result[0]["content"] == "This is a developer message."
assert result[1]["role"] == "developer"
assert result[1]["content"] == [
{"type": "input_text", "text": "This is a developer message!"}
]
def test_service_tier() -> None:
llm = ChatOpenAI(model="o4-mini", service_tier="flex")
payload = llm._get_request_payload([HumanMessage("Hello")])
assert payload["service_tier"] == "flex"
| Foo |
python | apache__airflow | providers/apache/flink/src/airflow/providers/apache/flink/operators/flink_kubernetes.py | {
"start": 1197,
"end": 4663
} | class ____(BaseOperator):
"""
Creates flinkDeployment object in kubernetes cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:FlinkKubernetesOperator`
.. seealso::
For more detail about Flink Deployment Object have a look at the reference:
https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main/docs/custom-resource/reference/#flinkdeployment
:param application_file: Defines Kubernetes 'custom_resource_definition' of 'flinkDeployment' as either a
path to a '.yaml' file, '.json' file, YAML string or JSON string.
:param namespace: kubernetes namespace to put flinkDeployment
:param kubernetes_conn_id: The :ref:`kubernetes connection id <howto/connection:kubernetes>`
for the to Kubernetes cluster.
:param api_group: kubernetes api group of flinkDeployment
:param api_version: kubernetes api version of flinkDeployment
:param in_cluster: run kubernetes client with in_cluster configuration.
:param cluster_context: context that points to kubernetes cluster.
Ignored when in_cluster is True. If None, current-context is used.
:param config_file: The path to the Kubernetes config file. (templated)
If not specified, default value is ``~/.kube/config``
"""
template_fields: Sequence[str] = ("application_file", "namespace")
template_ext: Sequence[str] = (".yaml", ".yml", ".json")
ui_color = "#f4a460"
def __init__(
self,
*,
application_file: str,
namespace: str | None = None,
kubernetes_conn_id: str = "kubernetes_default",
api_group: str = "flink.apache.org",
api_version: str = "v1beta1",
in_cluster: bool | None = None,
cluster_context: str | None = None,
config_file: str | None = None,
plural: str = "flinkdeployments",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.application_file = application_file
self.namespace = namespace
self.kubernetes_conn_id = kubernetes_conn_id
self.api_group = api_group
self.api_version = api_version
self.plural = plural
self.in_cluster = in_cluster
self.cluster_context = cluster_context
self.config_file = config_file
@cached_property
def hook(self) -> KubernetesHook:
hook = KubernetesHook(
conn_id=self.kubernetes_conn_id,
in_cluster=self.in_cluster,
config_file=self.config_file,
cluster_context=self.cluster_context,
)
return hook
@cached_property
def client(self) -> CoreV1Api:
return self.hook.core_v1_client
def execute(self, context: Context):
self.log.info(
"Creating flinkApplication with Context: %s and op_context: %s", self.cluster_context, context
)
self.hook.custom_object_client.list_cluster_custom_object(
group=self.api_group, version=self.api_version, plural=self.plural
)
self.log.info("body=self.application_file: %s", self.application_file)
response = self.hook.create_custom_object(
group=self.api_group,
version=self.api_version,
plural=self.plural,
body=self.application_file,
namespace=self.namespace,
)
return response
| FlinkKubernetesOperator |
python | pytorch__pytorch | torch/_logging/_internal.py | {
"start": 5335,
"end": 30994
} | class ____:
# qualified log names -> currently set log level
log_qname_to_level: dict[str, str] = field(default_factory=dict)
# the set of currently enabled artifacts
artifact_names: set[str] = field(default_factory=set)
def enable_artifact(self, artifact_name) -> None:
self.artifact_names.add(artifact_name)
def is_artifact_enabled(self, name):
return name in self.artifact_names
def enable_log(self, log_qnames, log_level) -> None:
if isinstance(log_qnames, str):
log_qnames = [log_qnames]
for log_qname in log_qnames:
self.log_qname_to_level[log_qname] = log_level
def get_log_level_pairs(self):
"""Returns all qualified module names for which the user requested
explicit logging settings.
.. warning:
This function used to return all loggers, regardless of whether
or not the user specified them or not; it now only returns logs
which were explicitly mentioned by the user (and torch, which
always is implicitly requested when we initialize our logging
subsystem.)
"""
return self.log_qname_to_level.items()
def clear(self) -> None:
self.log_qname_to_level.clear()
self.artifact_names.clear()
log_registry = LogRegistry()
log_state = LogState()
# sample usage: torch._logging.set_logs(**torch._logging.DEFAULT_LOGGING)
DEFAULT_LOGGING = {
"dynamo": logging.INFO,
"aot": logging.INFO,
"inductor": logging.INFO,
"fsdp": logging.INFO,
"ddp_graphs": True,
"graph_breaks": True,
"guards": True,
"recompiles": True,
"dynamic": logging.INFO,
}
def set_logs(
*,
all: Optional[int] = None,
dynamo: Optional[int] = None,
aot: Optional[int] = None,
autograd: Optional[int] = None,
dynamic: Optional[int] = None,
inductor: Optional[int] = None,
distributed: Optional[int] = None,
c10d: Optional[int] = None,
ddp: Optional[int] = None,
fsdp: Optional[int] = None,
dtensor: Optional[int] = None,
onnx: Optional[int] = None,
bytecode: bool = False,
aot_graphs: bool = False,
aot_joint_graph: bool = False,
ddp_graphs: bool = False,
graph: bool = False,
graph_code: bool = False,
graph_code_verbose: bool = False,
graph_breaks: bool = False,
graph_sizes: bool = False,
guards: bool = False,
recompiles: bool = False,
recompiles_verbose: bool = False,
trace_source: bool = False,
trace_call: bool = False,
trace_bytecode: bool = False,
output_code: bool = False,
kernel_code: bool = False,
schedule: bool = False,
perf_hints: bool = False,
pre_grad_graphs: bool = False,
post_grad_graphs: bool = False,
ir_pre_fusion: bool = False,
ir_post_fusion: bool = False,
onnx_diagnostics: bool = False,
fusion: bool = False,
overlap: bool = False,
export: Optional[int] = None,
modules: Optional[dict[str, Union[int, bool]]] = None,
cudagraphs: bool = False,
sym_node: bool = False,
compiled_autograd: bool = False,
compiled_autograd_verbose: bool = False,
cudagraph_static_inputs: bool = False,
benchmarking: bool = False,
autotuning: bool = False,
graph_region_expansion: bool = False,
inductor_metrics: bool = False,
hierarchical_compile: bool = False,
compute_dependencies: bool = False,
) -> None:
"""
Sets the log level for individual components and toggles individual log
artifact types.
.. warning:: This feature is a prototype and may have compatibility
breaking changes in the future.
.. note:: The ``TORCH_LOGS`` environment variable has complete precedence
over this function, so if it was set, this function does nothing.
A component is a set of related features in PyTorch. All of the log
messages emitted from a given component have their own log levels. If the
log level of a particular message has priority greater than or equal to its
component's log level setting, it is emitted. Otherwise, it is suppressed.
This allows you to, for instance, silence large groups of log messages that
are not relevant to you and increase verbosity of logs for components that
are relevant. The expected log level values, ordered from highest to lowest
priority, are:
* ``logging.CRITICAL``
* ``logging.ERROR``
* ``logging.WARNING``
* ``logging.INFO``
* ``logging.DEBUG``
* ``logging.NOTSET``
See documentation for the Python ``logging`` module for more information on
log levels: `<https://docs.python.org/3/library/logging.html#logging-levels>`_
An artifact is a particular type of log message. Each artifact is assigned
to a parent component. A component can emit many different kinds of
artifacts. In general, an artifact is emitted if either its corresponding
setting in the argument list below is turned on or if its parent component
is set to a log level less than or equal to the log level of the artifact.
Keyword args:
all (:class:`Optional[int]`):
The default log level for all components. Default: ``logging.WARN``
dynamo (:class:`Optional[int]`):
The log level for the TorchDynamo component. Default: ``logging.WARN``
aot (:class:`Optional[int]`):
The log level for the AOTAutograd component. Default: ``logging.WARN``
autograd (:class:`Optional[int]`):
The log level for autograd. Default: ``logging.WARN``
inductor (:class:`Optional[int]`):
The log level for the TorchInductor component. Default: ``logging.WARN``
dynamic (:class:`Optional[int]`):
The log level for dynamic shapes. Default: ``logging.WARN``
distributed (:class:`Optional[int]`):
Whether to log c10d communication operations and other debug info from PyTorch Distributed components.
Default: ``logging.WARN``
c10d (:class:`Optional[int]`):
Whether to log c10d communication operations related debug info in PyTorch Distributed components.
Default: ``logging.WARN``
ddp (:class:`Optional[int]`):
Whether to log debug info related to ``DistributedDataParallel``(DDP) from PyTorch Distributed components.
Default: ``logging.WARN``
fsdp (:class:`Optional[int]`):
Whether to log debug info related to ``FullyShardedDataParallel``(FSDP) in PyTorch Distributed components.
Default: ``logging.WARN``
dtensor (:class:`Optional[int]`):
Whether to log debug info related to ``DTensor``(DTensor) in PyTorch Distributed components.
Default: ``logging.WARN``
onnx (:class:`Optional[int]`):
The log level for the ONNX exporter component. Default: ``logging.WARN``
bytecode (:class:`bool`):
Whether to emit the original and generated bytecode from TorchDynamo.
Default: ``False``
aot_graphs (:class:`bool`):
Whether to emit the graphs generated by AOTAutograd. Default: ``False``
aot_joint_graph (:class:`bool`):
Whether to emit the joint forward-backward graph generated by AOTAutograd. Default: ``False``
ddp_graphs (:class:`bool`):
Whether to emit graphs generated by DDPOptimizer. Default: ``False``
graph (:class:`bool`):
Whether to emit the graph captured by TorchDynamo in tabular format.
Default: ``False``
graph_code (:class:`bool`):
Whether to emit the python source of the graph captured by TorchDynamo.
Default: ``False``
graph_code_verbose (:class:`bool`):
Whether to emit verbose/intermediate FX pass logs for graph code. Default: ``False``
graph_breaks (:class:`bool`):
Whether to emit the graph breaks encountered by TorchDynamo.
Default: ``False``
graph_sizes (:class:`bool`):
Whether to emit tensor sizes of the graph captured by TorchDynamo.
Default: ``False``
guards (:class:`bool`):
Whether to emit the guards generated by TorchDynamo for each compiled
function. Default: ``False``
recompiles (:class:`bool`):
Whether to emit a guard failure reason and message every time
TorchDynamo recompiles a function. Default: ``False``
recompiles_verbose (:class:`bool`):
Whether to emit all guard failure reasons when TorchDynamo recompiles
a function, even those that are not actually run. Default: ``False``
trace_source (:class:`bool`):
Whether to emit when TorchDynamo begins tracing a new line. Default: ``False``
trace_call (:class:`bool`):
Whether to emit detailed line location when TorchDynamo creates an FX node
corresponding to function call. Python 3.11+ only. Default: ``False``
trace_bytecode (:class:`bool`):
Whether to emit bytecode instructions and traced stack state as TorchDynamo
traces bytecode. Default: ``False``
output_code (:class:`bool`):
Whether to emit the TorchInductor output code on a per-graph basis. Default: ``False``
kernel_code (:class:`bool`):
Whether to emit the TorchInductor output code on a per-kernel bases. Default: ``False``
schedule (:class:`bool`):
Whether to emit the TorchInductor schedule. Default: ``False``
perf_hints (:class:`bool`):
Whether to emit the TorchInductor perf hints. Default: ``False``
pre_grad_graphs (:class:`bool`):
Whether to emit the graphs before inductor grad passes. Default: ``False``
post_grad_graphs (:class:`bool`):
Whether to emit the graphs generated by after post grad passes. Default: ``False``
ir_pre_fusion (:class:`bool`):
Whether to emit the graphs before inductor fusion passes. Default: ``False``
ir_post_fusion (:class:`bool`):
Whether to emit the graphs after inductor fusion passes. Default: ``False``
onnx_diagnostics (:class:`bool`):
Whether to emit the ONNX exporter diagnostics in logging. Default: ``False``
fusion (:class:`bool`):
Whether to emit detailed Inductor fusion decisions. Default: ``False``
overlap (:class:`bool`):
Whether to emit detailed Inductor compute/comm overlap decisions. Default: ``False``
sym_node (:class:`bool`):
Whether to emit debug info for various SymNode opterations. Default: ``False``
export (:class:`Optional[int]`):
The log level for export. Default: ``logging.WARN``
benchmarking (:class:`bool`):
Whether to emit detailed Inductor benchmarking information. Default: ``False``
modules (dict):
This argument provides an alternate way to specify the above log
component and artifact settings, in the format of a keyword args
dictionary given as a single argument. There are two cases
where this is useful (1) if a new log component or artifact has
been registered but a keyword argument for it has not been added
to this function and (2) if the log level for an unregistered module
needs to be set. This can be done by providing the fully-qualified module
name as the key, with the log level as the value. Default: ``None``
cudagraph_static_inputs (:class:`bool`):
Whether to emit debug info for cudagraph static input detection. Default: ``False``
autotuning (:class:`bool`):
Autotuning choice logs, such as kernel source, perf, and tuning parameters. Default: ``False``
graph_region_expansion (:class:`bool`):
Whether to emit the detailed steps of the duplicate graph region tracker expansion algorithm. Default: ``False``
inductor_metrics (:class:`bool`):
Whether to estimate the runtimes of the nodes in a graph and log them to the metrics table. Default: ``False``
hierarchical_compile (:class:`bool`):
Whether to emit debug info for hierarchical compilation. Default: ``False``
Example::
>>> # xdoctest: +SKIP
>>> import logging
# The following changes the "dynamo" component to emit DEBUG-level
# logs, and to emit "graph_code" artifacts.
>>> torch._logging.set_logs(dynamo=logging.DEBUG, graph_code=True)
# The following enables the logs for a different module
>>> torch._logging.set_logs(modules={"unregistered.module.name": logging.DEBUG})
"""
# ignore if env var is set
if LOG_ENV_VAR in os.environ:
log.warning(
"Using TORCH_LOGS environment variable for log settings, ignoring call to set_logs"
)
return
log_state.clear()
modules = modules or {}
def _set_logs(**kwargs) -> None:
for alias, val in itertools.chain(kwargs.items(), modules.items()): # type: ignore[union-attr]
if val is None:
continue
if log_registry.is_artifact(alias):
if not isinstance(val, bool):
raise ValueError(
f"Expected bool to enable artifact {alias}, received {val}"
)
if val:
log_state.enable_artifact(alias)
elif log_registry.is_log(alias) or alias in log_registry.child_log_qnames:
if val not in logging._levelToName:
raise ValueError(
f"Unrecognized log level for log {alias}: {val}, valid level values "
f"are: {','.join([str(k) for k in logging._levelToName])}"
)
log_state.enable_log(
log_registry.log_alias_to_log_qnames.get(alias, alias), val
)
elif _is_valid_module(alias):
if not _has_registered_parent(alias):
log_registry.register_log(alias, alias)
else:
log_registry.register_child_log(alias)
log_state.enable_log(
log_registry.log_alias_to_log_qnames.get(alias, alias), val
)
else:
raise ValueError(
f"Unrecognized log or artifact name passed to set_logs: {alias}"
)
_init_logs()
_set_logs(
torch=all,
dynamo=dynamo,
aot=aot,
autograd=autograd,
inductor=inductor,
dynamic=dynamic,
bytecode=bytecode,
aot_graphs=aot_graphs,
aot_joint_graph=aot_joint_graph,
ddp_graphs=ddp_graphs,
distributed=distributed,
c10d=c10d,
ddp=ddp,
fsdp=fsdp,
dtensor=dtensor,
graph=graph,
graph_code=graph_code,
graph_code_verbose=graph_code_verbose,
graph_breaks=graph_breaks,
graph_sizes=graph_sizes,
guards=guards,
recompiles=recompiles,
recompiles_verbose=recompiles_verbose,
trace_source=trace_source,
trace_call=trace_call,
trace_bytecode=trace_bytecode,
output_code=output_code,
kernel_code=kernel_code,
schedule=schedule,
perf_hints=perf_hints,
pre_grad_graphs=pre_grad_graphs,
post_grad_graphs=post_grad_graphs,
ir_pre_fusion=ir_pre_fusion,
ir_post_fusion=ir_post_fusion,
onnx=onnx,
onnx_diagnostics=onnx_diagnostics,
fusion=fusion,
overlap=overlap,
sym_node=sym_node,
export=export,
cudagraphs=cudagraphs,
compiled_autograd=compiled_autograd,
compiled_autograd_verbose=compiled_autograd_verbose,
cudagraph_static_inputs=cudagraph_static_inputs,
benchmarking=benchmarking,
autotuning=autotuning,
graph_region_expansion=graph_region_expansion,
inductor_metrics=inductor_metrics,
hierarchical_compile=hierarchical_compile,
compute_dependencies=compute_dependencies,
)
def get_loggers() -> list[logging.Logger]:
"""
Returns: a list of all registered loggers
"""
return [logging.getLogger(qname) for qname in log_registry.get_log_qnames()]
def register_log(setting_name, log_name) -> None:
"""
Enables a log to be controlled by the env var and user API with the setting_name
Args:
setting_name: the shorthand name used in the env var and user API
log_name: the log name that the setting_name is associated with
"""
log_registry.register_log(setting_name, log_name)
def register_artifact(
setting_name, description, visible=False, off_by_default=False, log_format=None
) -> None:
"""
Enables an artifact to be controlled by the env var and user API with name
Args:
setting_name: the shorthand name used in the env var and user API
description: A description of what this outputs
visible: Whether it gets suggested to users by default
off_by_default: whether this artifact should be logged when the ancestor loggers
are enabled at level DEBUG
"""
log_registry.register_artifact_name(
setting_name, description, visible, off_by_default, log_format
)
def getArtifactLogger(module_qname, artifact_name) -> logging.Logger:
if artifact_name not in log_registry.artifact_names:
raise ValueError(
f"Artifact name: {repr(artifact_name)} not registered,"
f"please call register_artifact({repr(artifact_name)}) in torch._logging.registrations."
)
qname = module_qname + f".__{artifact_name}"
log = logging.getLogger(qname)
log.artifact_name = artifact_name # type: ignore[attr-defined]
log_registry.register_artifact_log(qname)
configure_artifact_log(log)
return log
INCR_VERBOSITY_CHAR = "+"
DECR_VERBOSITY_CHAR = "-"
VERBOSITY_REGEX = (
"("
+ "|".join([re.escape(INCR_VERBOSITY_CHAR), re.escape(DECR_VERBOSITY_CHAR)])
+ "?)"
)
def configure_artifact_log(log) -> None:
# If the artifact is off by default, then it should only be logged when explicitly
# enabled; set propagate to False so that this artifact is not propagated
# to its ancestor logger
if log_registry.is_off_by_default(log.artifact_name):
log.propagate = False
# enable artifact logging when explicitly enabled
if log_state.is_artifact_enabled(log.artifact_name):
log.setLevel(logging.DEBUG)
log.propagate = True
# match a comma separated list of loggable names (whitespace allowed after commas)
def _gen_settings_regex():
return re.compile(r"((\+|-)?[\w\.]+,\s*)*(\+|-)?[\w\.]+?")
def _validate_settings(settings):
return re.fullmatch(_gen_settings_regex(), settings) is not None
def help_message(verbose=False):
def pad_to(s, length=30):
assert len(s) <= length
return s + " " * (length - len(s))
if verbose:
printed_artifacts = log_registry.artifact_names
else:
printed_artifacts = log_registry.visible_artifacts
if verbose:
heading = "All registered names"
else:
heading = "Visible registered names (use TORCH_LOGS='+help' for full list)"
lines = (
["all"]
+ sorted(log_registry.log_alias_to_log_qnames.keys())
+ sorted(
[
f"{pad_to(name)}\t{log_registry.artifact_descriptions[name]}"
for name in printed_artifacts
]
)
)
setting_info = " " + "\n ".join(lines)
examples = """
Examples:
TORCH_LOGS="+dynamo,aot" will set the log level of TorchDynamo to
logging.DEBUG and AOT to logging.INFO
TORCH_LOGS="-dynamo,+inductor" will set the log level of TorchDynamo to
logging.ERROR and TorchInductor to logging.DEBUG
TORCH_LOGS="aot_graphs" will enable the aot_graphs artifact
TORCH_LOGS="+dynamo,schedule" will enable set the log level of TorchDynamo
to logging.DEBUG and enable the schedule artifact
TORCH_LOGS="+some.random.module,schedule" will set the log level of
some.random.module to logging.DEBUG and enable the schedule artifact
TORCH_LOGS_FORMAT="%(levelname)s: %(message)s" or any provided format
string will set the output format
Valid keys are "levelname", "message", "pathname", "levelno", "lineno",
"filename" and "name".
TORCH_LOGS_OUT=/tmp/output.txt will output the logs to /tmp/output.txt as
well. This is useful when the output is long.
"""
msg = f"""
TORCH_LOGS Info
{examples}
{heading}
{setting_info}
"""
return msg
def _invalid_settings_err_msg(settings, verbose=False):
valid_settings = (
["all"]
+ list(log_registry.log_alias_to_log_qnames.keys())
+ list(log_registry.artifact_names)
)
valid_settings = ", ".join(sorted(valid_settings))
msg = f"""
Invalid log settings: {settings}, must be a comma separated list of fully
qualified module names, registered log names or registered artifact names.
For more info on various settings, try TORCH_LOGS="help"
Valid settings:
{valid_settings}
"""
return msg
def process_env_var_string_for_windows(env_var_str: str) -> str:
"""
When we setup logging config as guide: https://docs.pytorch.org/docs/stable/logging.html
Such as:
TORCH_LOGS="+schedule,+inductor,+output_code"
On Linux, it shows as:
declare -x SSH_TTY="/dev/pts/0"
declare -x TERM="xterm"
declare -x TORCH_LOGS="+schedule,+inductor,+output_code"
declare -x USER="xu"
On Windows, it shows as:
TORCHINDUCTOR_WINDOWS_TESTS=1
TORCH_LOGS="+schedule,+inductor,+output_code"
UCRTVersion=10.0.22000.0
For Linux, it shows quotes by default, And Windows is not shows quotes.
Besides that, Windows would auto assemble quotes when env var processing.
On Linux, we will get variable: "+schedule,+inductor,+output_code"
On Windows, we will get variable: '"+schedule,+inductor,+output_code"'
So, we need remove the outer quotes for Windows.
"""
_IS_WINDOWS = sys.platform == "win32"
def remove_outer_quotes(s: str) -> str:
if len(s) >= 2 and (
(s[0] == '"' and s[-1] == '"') or (s[0] == "'" and s[-1] == "'")
):
return s[1:-1]
return s
if _IS_WINDOWS:
env_var_str = remove_outer_quotes(env_var_str)
return env_var_str
@functools.lru_cache
def _parse_log_settings(settings):
settings = process_env_var_string_for_windows(settings)
if settings == "":
return {}
if settings == "help":
raise ValueError(help_message(verbose=False))
elif settings == "+help":
raise ValueError(help_message(verbose=True))
if not _validate_settings(settings):
raise ValueError(_invalid_settings_err_msg(settings))
settings = re.sub(r"\s+", "", settings)
log_names = settings.split(",")
def get_name_level_pair(name):
clean_name = name.replace(INCR_VERBOSITY_CHAR, "")
clean_name = clean_name.replace(DECR_VERBOSITY_CHAR, "")
if name[0] == INCR_VERBOSITY_CHAR:
level = logging.DEBUG
elif name[0] == DECR_VERBOSITY_CHAR:
level = logging.ERROR
else:
level = logging.INFO
return clean_name, level
log_state = LogState()
for name in log_names:
name, level = get_name_level_pair(name)
if name == "all":
name = "torch"
if log_registry.is_log(name):
assert level is not None
log_qnames = log_registry.log_alias_to_log_qnames[name]
log_state.enable_log(log_qnames, level)
elif log_registry.is_artifact(name):
log_state.enable_artifact(name)
elif _is_valid_module(name):
if not _has_registered_parent(name):
log_registry.register_log(name, name)
else:
log_registry.register_child_log(name)
log_state.enable_log(name, level)
else:
raise ValueError(_invalid_settings_err_msg(settings))
return log_state
def _is_valid_module(qname):
spec = importlib.util.find_spec(qname)
return spec is not None
def _update_log_state_from_env() -> None:
global log_state
log_setting = os.environ.get(LOG_ENV_VAR, None)
if log_setting is not None:
log_state = _parse_log_settings(log_setting)
def _has_registered_parent(log_qname) -> bool:
cur_log = logging.getLogger(log_qname)
registered_log_qnames = log_registry.get_log_qnames()
while cur_log.parent:
if cur_log.name in registered_log_qnames:
return True
cur_log = cur_log.parent
return False
def make_module_path_relative(abs_path):
"""
Given an absolute filepath corresponding to a Python module which was
loaded via normal import mechanisms using sys.path, convert it into
a relative path relative to one of the Python search paths.
"""
abs_path = pathlib.Path(abs_path).resolve()
for path in sys.path:
try:
rel_path = abs_path.relative_to(path)
except ValueError:
continue
else:
return str(rel_path)
return str(abs_path)
# apply custom formats to artifacts when necessary
| LogState |
python | django__django | tests/queryset_pickle/models.py | {
"start": 907,
"end": 1063
} | class ____(models.Model):
title = models.CharField(max_length=100)
group = models.ForeignKey(Group, models.CASCADE, limit_choices_to=models.Q())
| Event |
python | ansible__ansible | test/integration/targets/module_defaults/action_plugins/debug.py | {
"start": 84,
"end": 452
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super().run(tmp, task_vars)
if self._task.args and 'msg' in self._task.args:
msg = self._task.args.get('msg')
else:
msg = "Hello overridden world!"
result['changes'] = False
result['msg'] = msg
return result
| ActionModule |
python | scipy__scipy | scipy/stats/_new_distributions.py | {
"start": 14995,
"end": 18923
} | class ____(DiscreteDistribution):
r"""Binomial distribution with prescribed success probability and number of trials
The probability density function of the binomial distribution is:
.. math::
f(x) = {n \choose x} p^x (1 - p)^{n-x}
"""
_n_domain = _IntegerInterval(endpoints=(0, inf), inclusive=(False, False))
_p_domain = _RealInterval(endpoints=(0, 1), inclusive=(False, False))
_x_support = _IntegerInterval(endpoints=(0, 'n'), inclusive=(True, True))
_n_param = _RealParameter('n', domain=_n_domain, typical=(10, 20))
_p_param = _RealParameter('p', domain=_p_domain, typical=(0.25, 0.75))
_x_param = _RealParameter('x', domain=_x_support, typical=(0, 10))
_parameterizations = [_Parameterization(_n_param, _p_param)]
_variable = _x_param
def __init__(self, *, n, p, **kwargs):
super().__init__(n=n, p=p, **kwargs)
def _pmf_formula(self, x, *, n, p, **kwargs):
return scu._binom_pmf(x, n, p)
def _logpmf_formula(self, x, *, n, p, **kwargs):
# This implementation is from the ``scipy.stats.binom`` and could be improved
# by using a more numerically sound implementation of the absolute value of
# the binomial coefficient.
combiln = (
special.gammaln(n+1) - (special.gammaln(x+1) + special.gammaln(n-x+1))
)
return combiln + special.xlogy(x, p) + special.xlog1py(n-x, -p)
def _cdf_formula(self, x, *, n, p, **kwargs):
return scu._binom_cdf(x, n, p)
def _logcdf_formula(self, x, *, n, p, **kwargs):
# todo: add this strategy to infrastructure more generally, but allow dist
# author to specify threshold other than median in case median is expensive
median = self._icdf_formula(0.5, n=n, p=p)
return xpx.apply_where(x < median, (x, n, p),
lambda *args: np.log(scu._binom_cdf(*args)),
lambda *args: np.log1p(-scu._binom_sf(*args))
)
def _ccdf_formula(self, x, *, n, p, **kwargs):
return scu._binom_sf(x, n, p)
def _logccdf_formula(self, x, *, n, p, **kwargs):
median = self._icdf_formula(0.5, n=n, p=p)
return xpx.apply_where(x < median, (x, n, p),
lambda *args: np.log1p(-scu._binom_cdf(*args)),
lambda *args: np.log(scu._binom_sf(*args))
)
def _icdf_formula(self, x, *, n, p, **kwargs):
return scu._binom_ppf(x, n, p)
def _iccdf_formula(self, x, *, n, p, **kwargs):
return scu._binom_isf(x, n, p)
def _mode_formula(self, *, n, p, **kwargs):
# https://en.wikipedia.org/wiki/Binomial_distribution#Mode
mode = np.floor((n+1)*p)
mode = np.where(p == 1, mode - 1, mode)
return mode[()]
def _moment_raw_formula(self, order, *, n, p, **kwargs):
# https://en.wikipedia.org/wiki/Binomial_distribution#Higher_moments
if order == 1:
return n*p
if order == 2:
return n*p*(1 - p + n*p)
return None
_moment_raw_formula.orders = [1, 2] # type: ignore[attr-defined]
def _moment_central_formula(self, order, *, n, p, **kwargs):
# https://en.wikipedia.org/wiki/Binomial_distribution#Higher_moments
if order == 1:
return np.zeros_like(n)
if order == 2:
return n*p*(1 - p)
if order == 3:
return n*p*(1 - p)*(1 - 2*p)
if order == 4:
return n*p*(1 - p)*(1 + (3*n - 6)*p*(1 - p))
return None
_moment_central_formula.orders = [1, 2, 3, 4] # type: ignore[attr-defined]
# Distribution classes need only define the summary and beginning of the extended
# summary portion of the class documentation. All other documentation, including
# examples, is generated automatically.
_module = sys.modules[__name__].__dict__
for dist_name in __all__:
_module[dist_name].__doc__ = _combine_docs(_module[dist_name])
| Binomial |
python | huggingface__transformers | src/transformers/models/emu3/modeling_emu3.py | {
"start": 5503,
"end": 8689
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Emu3Config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
@use_kernel_forward_from_hub("RMSNorm")
| Emu3Attention |
python | sqlalchemy__sqlalchemy | examples/generic_associations/table_per_association.py | {
"start": 1121,
"end": 1504
} | class ____(Base):
"""The Address class.
This represents all address records in a
single table.
"""
street: Mapped[str]
city: Mapped[str]
zip: Mapped[str]
def __repr__(self):
return "%s(street=%r, city=%r, zip=%r)" % (
self.__class__.__name__,
self.street,
self.city,
self.zip,
)
| Address |
python | sqlalchemy__sqlalchemy | test/sql/test_external_traversal.py | {
"start": 91980,
"end": 95331
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
"""tests the generative capability of Select"""
__dialect__ = "default"
@classmethod
def setup_test_class(cls):
global t1, t2
t1 = table("table1", column("col1"), column("col2"), column("col3"))
t2 = table("table2", column("col1"), column("col2"), column("col3"))
def test_columns(self):
s = t1.select()
self.assert_compile(
s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1"
)
select_copy = s.add_columns(column("yyy"))
self.assert_compile(
select_copy,
"SELECT table1.col1, table1.col2, table1.col3, yyy FROM table1",
)
is_not(s.selected_columns, select_copy.selected_columns)
is_not(s._raw_columns, select_copy._raw_columns)
self.assert_compile(
s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1"
)
def test_froms(self):
s = t1.select()
self.assert_compile(
s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1"
)
select_copy = s.select_from(t2)
self.assert_compile(
select_copy,
"SELECT table1.col1, table1.col2, "
"table1.col3 FROM table2, table1",
)
self.assert_compile(
s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1"
)
def test_prefixes(self):
s = t1.select()
self.assert_compile(
s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1"
)
select_copy = s.prefix_with("FOOBER")
self.assert_compile(
select_copy,
"SELECT FOOBER table1.col1, table1.col2, "
"table1.col3 FROM table1",
)
self.assert_compile(
s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1"
)
def test_execution_options(self):
s = select().execution_options(foo="bar")
s2 = s.execution_options(bar="baz")
s3 = s.execution_options(foo="not bar")
# The original select should not be modified.
eq_(s.get_execution_options(), dict(foo="bar"))
# s2 should have its execution_options based on s, though.
eq_(s2.get_execution_options(), dict(foo="bar", bar="baz"))
eq_(s3.get_execution_options(), dict(foo="not bar"))
def test_invalid_options(self):
assert_raises(
exc.ArgumentError, select().execution_options, compiled_cache={}
)
assert_raises(
exc.ArgumentError,
select().execution_options,
isolation_level="READ_COMMITTED",
)
# this feature not available yet
def _NOTYET_test_execution_options_in_kwargs(self):
s = select(execution_options=dict(foo="bar"))
s2 = s.execution_options(bar="baz")
# The original select should not be modified.
assert s._execution_options == dict(foo="bar")
# s2 should have its execution_options based on s, though.
assert s2._execution_options == dict(foo="bar", bar="baz")
# this feature not available yet
def _NOTYET_test_execution_options_in_text(self):
s = text("select 42", execution_options=dict(foo="bar"))
assert s._execution_options == dict(foo="bar")
| SelectTest |
python | django__django | django/contrib/gis/db/models/proxy.py | {
"start": 341,
"end": 3174
} | class ____(DeferredAttribute):
def __init__(self, klass, field, load_func=None):
"""
Initialize on the given Geometry or Raster class (not an instance)
and the corresponding field.
"""
self._klass = klass
self._load_func = load_func or klass
super().__init__(field)
def __get__(self, instance, cls=None):
"""
Retrieve the geometry or raster, initializing it using the
corresponding class specified during initialization and the value of
the field. Currently, GEOS or OGR geometries as well as GDALRasters are
supported.
"""
if instance is None:
# Accessed on a class, not an instance
return self
# Getting the value of the field.
try:
geo_value = instance.__dict__[self.field.attname]
except KeyError:
geo_value = super().__get__(instance, cls)
if isinstance(geo_value, self._klass):
geo_obj = geo_value
elif (geo_value is None) or (geo_value == ""):
geo_obj = None
else:
# Otherwise, a geometry or raster object is built using the field's
# contents, and the model's corresponding attribute is set.
geo_obj = self._load_func(geo_value)
setattr(instance, self.field.attname, geo_obj)
return geo_obj
def __set__(self, instance, value):
"""
Retrieve the proxied geometry or raster with the corresponding class
specified during initialization.
To set geometries, use values of None, HEXEWKB, or WKT.
To set rasters, use JSON or dict values.
"""
# The geographic type of the field.
gtype = self.field.geom_type
if gtype == "RASTER" and (
value is None or isinstance(value, (str, dict, self._klass))
):
# For raster fields, ensure input is None or a string, dict, or
# raster instance.
pass
elif isinstance(value, self._klass):
# The geometry type must match that of the field -- unless the
# general GeometryField is used.
if value.srid is None:
# Assigning the field SRID if the geometry has no SRID.
value.srid = self.field.srid
elif value is None or isinstance(value, (str, memoryview)):
# Set geometries with None, WKT, HEX, or WKB
pass
else:
raise TypeError(
"Cannot set %s SpatialProxy (%s) with value of type: %s"
% (instance.__class__.__name__, gtype, type(value))
)
# Setting the objects dictionary with the value, and returning.
instance.__dict__[self.field.attname] = value
return value
| SpatialProxy |
python | walkccc__LeetCode | solutions/1922. Count Good Numbers/1922.py | {
"start": 0,
"end": 330
} | class ____:
def countGoodNumbers(self, n: int) -> int:
MOD = 1_000_000_007
def modPow(x: int, n: int) -> int:
if n == 0:
return 1
if n % 2 == 1:
return x * modPow(x, n - 1) % MOD
return modPow(x * x % MOD, n // 2)
return modPow(4 * 5, n // 2) * (1 if n % 2 == 0 else 5) % MOD
| Solution |
python | walkccc__LeetCode | solutions/2386. Find the K-Sum of an Array/2386.py | {
"start": 0,
"end": 599
} | class ____:
def kSum(self, nums: list[int], k: int) -> int:
maxSum = sum(num for num in nums if num > 0)
absNums = sorted(abs(num) for num in nums)
# (the next maximum sum, the next index i)
maxHeap = [(-(maxSum - absNums[0]), 0)]
nextMaxSum = maxSum
for _ in range(k - 1):
nextMaxSum, i = heapq.heappop(maxHeap)
nextMaxSum *= -1
if i + 1 < len(absNums):
heapq.heappush(maxHeap, (-(nextMaxSum - absNums[i + 1]), i + 1))
heapq.heappush(
maxHeap, (-(nextMaxSum - absNums[i + 1] + absNums[i]), i + 1))
return nextMaxSum
| Solution |
python | sphinx-doc__sphinx | sphinx/builders/singlehtml.py | {
"start": 786,
"end": 8250
} | class ____(StandaloneHTMLBuilder):
"""Builds the whole document tree as a single HTML page."""
name = 'singlehtml'
epilog = __('The HTML page is in %(outdir)s.')
copysource = False
def get_outdated_docs(self) -> str | list[str]: # type: ignore[override]
return 'all documents'
def get_target_uri(self, docname: str, typ: str | None = None) -> str:
if docname in self.env.all_docs:
# all references are on the same page...
return '#document-' + docname
else:
# chances are this is a html_additional_page
return docname + self.out_suffix
def get_relative_uri(self, from_: str, to: str, typ: str | None = None) -> str:
# ignore source
return self.get_target_uri(to, typ)
def fix_refuris(self, tree: Node) -> None:
deprecation_msg = (
"The 'SingleFileHTMLBuilder.fix_refuris' method is no longer used "
'within the builder and is planned for removal in Sphinx 10. '
'Please report malformed URIs generated by the Sphinx singlehtml '
'builder as bugreports.'
)
warnings.warn(deprecation_msg, RemovedInSphinx10Warning, stacklevel=2)
# fix refuris with double anchor
for refnode in tree.findall(nodes.reference):
if 'refuri' not in refnode:
continue
refuri = refnode['refuri']
hashindex = refuri.find('#')
if hashindex < 0:
continue
hashindex = refuri.find('#', hashindex + 1)
if hashindex >= 0:
# all references are on the same page...
refnode['refuri'] = refuri[hashindex:]
def _get_local_toctree(
self, docname: str, collapse: bool = True, **kwargs: Any
) -> str:
if isinstance(includehidden := kwargs.get('includehidden'), str):
if includehidden.lower() == 'false':
kwargs['includehidden'] = False
elif includehidden.lower() == 'true':
kwargs['includehidden'] = True
if kwargs.get('maxdepth') == '': # NoQA: PLC1901
kwargs.pop('maxdepth')
toctree = global_toctree_for_doc(
self.env, docname, self, tags=self.tags, collapse=collapse, **kwargs
)
return self.render_partial(toctree)['fragment']
def assemble_doctree(self) -> nodes.document:
master = self.config.root_doc
tree = self.env.get_doctree(master)
logger.info(darkgreen(master))
tree = inline_all_toctrees(self, set(), master, tree, darkgreen, [master])
tree['docname'] = master
self.env.resolve_references(tree, master, self)
return tree
def assemble_toc_secnumbers(self) -> dict[str, dict[str, tuple[int, ...]]]:
# Assemble toc_secnumbers to resolve section numbers on SingleHTML.
# Merge all secnumbers to single secnumber.
#
# Note: current Sphinx has refid confliction in singlehtml mode.
# To avoid the problem, it replaces key of secnumbers to
# tuple of docname and refid.
#
# There are related codes in inline_all_toctres() and
# HTMLTranslter#add_secnumber().
new_secnumbers: dict[str, tuple[int, ...]] = {}
for docname, secnums in self.env.toc_secnumbers.items():
for id, secnum in secnums.items():
alias = f'{docname}/{id}'
new_secnumbers[alias] = secnum
return {self.config.root_doc: new_secnumbers}
def assemble_toc_fignumbers(
self,
) -> dict[str, dict[str, dict[str, tuple[int, ...]]]]:
# Assemble toc_fignumbers to resolve figure numbers on SingleHTML.
# Merge all fignumbers to single fignumber.
#
# Note: current Sphinx has refid confliction in singlehtml mode.
# To avoid the problem, it replaces key of secnumbers to
# tuple of docname and refid.
#
# There are related codes in inline_all_toctres() and
# HTMLTranslter#add_fignumber().
new_fignumbers: dict[str, dict[str, tuple[int, ...]]] = {}
# {'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, 'bar': {'figure': {'id1': (3,)}}}
for docname, fignumlist in self.env.toc_fignumbers.items():
for figtype, fignums in fignumlist.items():
alias = f'{docname}/{figtype}'
new_fignumbers.setdefault(alias, {})
for id, fignum in fignums.items():
new_fignumbers[alias][id] = fignum
return {self.config.root_doc: new_fignumbers}
def get_doc_context(self, docname: str, body: str, metatags: str) -> dict[str, Any]:
# no relation links...
toctree = global_toctree_for_doc(
self.env, self.config.root_doc, self, tags=self.tags, collapse=False
)
# if there is no toctree, toc is None
if toctree:
toc = self.render_partial(toctree)['fragment']
display_toc = True
else:
toc = ''
display_toc = False
return {
'parents': [],
'prev': None,
'next': None,
'docstitle': None,
'title': self.config.html_title,
'meta': None,
'body': body,
'metatags': metatags,
'rellinks': [],
'sourcename': '',
'toc': toc,
'display_toc': display_toc,
}
def write_documents(self, _docnames: Set[str]) -> None:
self.prepare_writing(self.env.all_docs.keys())
with progress_message(__('assembling single document'), nonl=False):
doctree = self.assemble_doctree()
self.env.toc_secnumbers = self.assemble_toc_secnumbers()
self.env.toc_fignumbers = self.assemble_toc_fignumbers()
with progress_message(__('writing')):
self.write_doc_serialized(self.config.root_doc, doctree)
self.write_doc(self.config.root_doc, doctree)
def finish(self) -> None:
self.write_additional_files()
self.copy_image_files()
self.copy_download_files()
self.copy_static_files()
self.copy_extra_files()
self.write_buildinfo()
self.dump_inventory()
@progress_message(__('writing additional files'))
def write_additional_files(self) -> None:
# no indices or search pages are supported
# additional pages from conf.py
for pagename, template in self.config.html_additional_pages.items():
logger.info(' %s', pagename, nonl=True)
self.handle_page(pagename, {}, template)
if self.config.html_use_opensearch:
logger.info(' opensearch', nonl=True)
self.handle_page(
'opensearch',
{},
'opensearch.xml',
outfilename=self._static_dir / 'opensearch.xml',
)
def setup(app: Sphinx) -> ExtensionMetadata:
app.setup_extension('sphinx.builders.html')
app.add_builder(SingleFileHTMLBuilder)
app.add_config_value(
'singlehtml_sidebars',
lambda self: self.html_sidebars,
'html',
types=frozenset({dict}),
)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| SingleFileHTMLBuilder |
python | spack__spack | lib/spack/spack/environment/list.py | {
"start": 10604,
"end": 10724
} | class ____(SpecListError):
"""Error class for invalid spec constraints at concretize time."""
| InvalidSpecConstraintError |
python | apache__airflow | airflow-core/tests/unit/dag_processing/test_dagbag.py | {
"start": 2433,
"end": 13381
} | class ____:
"""Comprehensive tests for _validate_executor_fields function."""
@patch.object(ExecutorLoader, "lookup_executor_name_by_str")
def test_multi_team_disabled_ignores_bundle_name(self, mock_lookup):
"""Test that when multi_team is disabled, bundle_name is ignored and no team lookup occurs."""
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="t1", executor="test.executor")
# multi_team disabled by default, no need to add conf_vars
_validate_executor_fields(dag, bundle_name="some_bundle")
# Should call ExecutorLoader without team_name (defaults to None)
mock_lookup.assert_called_once_with("test.executor", team_name=None)
@patch("airflow.dag_processing.bundles.manager.DagBundlesManager")
@patch.object(ExecutorLoader, "lookup_executor_name_by_str")
def test_multi_team_enabled_bundle_exists_with_team(self, mock_lookup, mock_manager_class):
"""Test successful team lookup when bundle exists and has team_name."""
# Setup mock bundle manager
mock_bundle_config = mock.MagicMock()
mock_bundle_config.team_name = "test_team"
mock_manager = mock_manager_class.return_value
mock_manager._bundle_config = {"test_bundle": mock_bundle_config}
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="t1", executor="team.executor")
with conf_vars({("core", "multi_team"): "True"}):
_validate_executor_fields(dag, bundle_name="test_bundle")
# Should call ExecutorLoader with team from bundle config
mock_lookup.assert_called_once_with("team.executor", team_name="test_team")
@patch("airflow.dag_processing.bundles.manager.DagBundlesManager")
@patch.object(ExecutorLoader, "lookup_executor_name_by_str")
def test_multi_team_enabled_bundle_exists_no_team(self, mock_lookup, mock_manager_class):
"""Test when bundle exists but has no team_name (None or empty)."""
mock_bundle_config = mock.MagicMock()
mock_bundle_config.team_name = None # No team associated
mock_manager = mock_manager_class.return_value
mock_manager._bundle_config = {"test_bundle": mock_bundle_config}
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="t1", executor="test.executor")
with conf_vars({("core", "multi_team"): "True"}):
_validate_executor_fields(dag, bundle_name="test_bundle")
mock_lookup.assert_called_once_with("test.executor", team_name=None)
@patch.object(ExecutorLoader, "lookup_executor_name_by_str")
def test_multiple_tasks_with_executors(self, mock_lookup):
"""Test that all tasks with executors are validated."""
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="t1", executor="executor1")
BaseOperator(task_id="t2", executor="executor2")
BaseOperator(task_id="t3") # No executor, should be skipped
with conf_vars({("core", "multi_team"): "True"}):
_validate_executor_fields(dag)
# Should be called for each task with executor
assert mock_lookup.call_count == 2
mock_lookup.assert_any_call("executor1", team_name=None)
mock_lookup.assert_any_call("executor2", team_name=None)
@patch("airflow.dag_processing.bundles.manager.DagBundlesManager")
@patch.object(ExecutorLoader, "lookup_executor_name_by_str")
def test_executor_validation_failure_with_team(self, mock_lookup, mock_manager_class):
"""Test executor validation failure when team is associated (team-specific error)."""
mock_bundle_config = mock.MagicMock()
mock_bundle_config.team_name = "test_team"
mock_manager = mock_manager_class.return_value
mock_manager._bundle_config = {"test_bundle": mock_bundle_config}
# ExecutorLoader raises exception
mock_lookup.side_effect = UnknownExecutorException("Executor not found")
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="task1", executor="invalid.executor")
with conf_vars({("core", "multi_team"): "True"}):
with pytest.raises(
UnknownExecutorException,
match=re.escape(
"Task 'task1' specifies executor 'invalid.executor', which is not available "
"for team 'test_team' (the team associated with DAG 'test-dag') or as a global executor. "
"Make sure 'invalid.executor' is configured for team 'test_team' or globally in your "
"[core] executors configuration, or update the task's executor to use one of the "
"configured executors for team 'test_team' or available global executors."
),
):
_validate_executor_fields(dag, bundle_name="test_bundle")
@patch.object(ExecutorLoader, "lookup_executor_name_by_str")
def test_executor_validation_failure_no_team(self, mock_lookup):
"""Test executor validation failure when no team is associated (generic error)."""
mock_lookup.side_effect = UnknownExecutorException("Executor not found")
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="task1", executor="invalid.executor")
with conf_vars({("core", "multi_team"): "True"}):
with pytest.raises(
UnknownExecutorException,
match=re.escape(
"Task 'task1' specifies executor 'invalid.executor', which is not available. "
"Make sure it is listed in your [core] executors configuration, or update the task's "
"executor to use one of the configured executors."
),
):
_validate_executor_fields(dag) # No bundle_name
@patch("airflow.dag_processing.bundles.manager.DagBundlesManager")
@patch.object(ExecutorLoader, "lookup_executor_name_by_str")
def test_global_executor_fallback_success(self, mock_lookup, mock_manager_class):
"""Test that team-specific executor failure falls back to global executor successfully."""
mock_bundle_config = mock.MagicMock()
mock_bundle_config.team_name = "test_team"
mock_manager = mock_manager_class.return_value
mock_manager._bundle_config = {"test_bundle": mock_bundle_config}
# First call (team-specific) fails, second call (global) succeeds
mock_lookup.side_effect = [UnknownExecutorException("Team executor not found"), None]
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="task1", executor="global.executor")
with conf_vars({("core", "multi_team"): "True"}):
# Should not raise exception due to global fallback
_validate_executor_fields(dag, bundle_name="test_bundle")
# Should call lookup twice: first for team, then for global
assert mock_lookup.call_count == 2
mock_lookup.assert_any_call("global.executor", team_name="test_team")
mock_lookup.assert_any_call("global.executor", team_name=None)
@patch("airflow.dag_processing.bundles.manager.DagBundlesManager")
@patch.object(ExecutorLoader, "lookup_executor_name_by_str")
def test_global_executor_fallback_failure(self, mock_lookup, mock_manager_class):
"""Test that when both team-specific and global executors fail, appropriate error is raised."""
mock_bundle_config = mock.MagicMock()
mock_bundle_config.team_name = "test_team"
mock_manager = mock_manager_class.return_value
mock_manager._bundle_config = {"test_bundle": mock_bundle_config}
# Both calls fail
mock_lookup.side_effect = UnknownExecutorException("Executor not found")
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="task1", executor="unknown.executor")
with conf_vars({("core", "multi_team"): "True"}):
with pytest.raises(
UnknownExecutorException,
match=re.escape(
"Task 'task1' specifies executor 'unknown.executor', which is not available "
"for team 'test_team' (the team associated with DAG 'test-dag') or as a global executor. "
"Make sure 'unknown.executor' is configured for team 'test_team' or globally in your "
"[core] executors configuration, or update the task's executor to use one of the "
"configured executors for team 'test_team' or available global executors."
),
):
_validate_executor_fields(dag, bundle_name="test_bundle")
# Should call lookup twice: first for team, then for global fallback
assert mock_lookup.call_count == 2
mock_lookup.assert_any_call("unknown.executor", team_name="test_team")
mock_lookup.assert_any_call("unknown.executor", team_name=None)
@patch("airflow.dag_processing.bundles.manager.DagBundlesManager")
@patch.object(ExecutorLoader, "lookup_executor_name_by_str")
def test_team_specific_executor_success_no_fallback(self, mock_lookup, mock_manager_class):
"""Test that when team-specific executor succeeds, global fallback is not attempted."""
mock_bundle_config = mock.MagicMock()
mock_bundle_config.team_name = "test_team"
mock_manager = mock_manager_class.return_value
mock_manager._bundle_config = {"test_bundle": mock_bundle_config}
# First call (team-specific) succeeds
mock_lookup.return_value = None
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="task1", executor="team.executor")
with conf_vars({("core", "multi_team"): "True"}):
_validate_executor_fields(dag, bundle_name="test_bundle")
# Should only call lookup once for team-specific executor
mock_lookup.assert_called_once_with("team.executor", team_name="test_team")
def test_validate_executor_field_executor_not_configured():
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="t1", executor="test.custom.executor")
with pytest.raises(
UnknownExecutorException,
match=re.escape(
"Task 't1' specifies executor 'test.custom.executor', which is not available. "
"Make sure it is listed in your [core] executors configuration, or update the task's "
"executor to use one of the configured executors."
),
):
_validate_executor_fields(dag)
def test_validate_executor_field():
with DAG("test-dag", schedule=None) as dag:
BaseOperator(task_id="t1", executor="test.custom.executor")
with patch.object(ExecutorLoader, "lookup_executor_name_by_str"):
_validate_executor_fields(dag)
| TestValidateExecutorFields |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/patch_several_dependencies/package.py | {
"start": 217,
"end": 2021
} | class ____(Package):
"""Package that requries multiple patches on a dependency."""
homepage = "http://www.example.com"
url = "http://www.example.com/patch-a-dependency-1.0.tar.gz"
version("2.0", md5="0123456789abcdef0123456789abcdef")
version("1.0", md5="0123456789abcdef0123456789abcdef")
variant("foo", default=False, description="Forces a version on libelf")
# demonstrate all the different ways to patch things
# single patch file in repo
depends_on("libelf", patches="foo.patch")
# The following 3 directives are all under the same when clause, to be combined in
# the metadata for this package class
depends_on("libelf@0.8.10", patches="foo.patch", type="link", when="+foo")
depends_on("libelf", type="build", when="+foo")
depends_on("libelf@0.8:", when="+foo")
# using a list of patches in one depends_on
depends_on(
"libdwarf",
patches=[
patch("bar.patch"), # nested patch directive
patch("baz.patch", when="@20111030"), # and with a conditional
],
when="@1.0", # with a depends_on conditional
)
# URL patches
depends_on(
"fake",
patches=[
# uncompressed URL patch
patch(
"http://example.com/urlpatch.patch",
sha256="abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234",
),
# compressed URL patch requires separate archive sha
patch(
"http://example.com/urlpatch2.patch.gz",
archive_sha256="abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd",
sha256="1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd",
),
],
)
| PatchSeveralDependencies |
python | pytorch__pytorch | torch/ao/nn/sparse/quantized/dynamic/linear.py | {
"start": 326,
"end": 6428
} | class ____(torch.nn.Module):
r"""
A dynamically quantized sparse linear module with float tensor as inputs and outputs.
"""
_version = 1
_op_type = "sparse_dynamic"
_FLOAT_MODULE = torch.nn.Linear
def __init__(
self,
in_features,
out_features,
row_block_size,
col_block_size,
bias=True,
dtype=torch.qint8,
):
super().__init__()
if dtype != torch.qint8:
raise NotImplementedError(
"Only QINT8 is supported for Sparse Quantized Linear Dynamic"
)
self.in_features = in_features
self.out_features = out_features
if bias:
bias = torch.zeros(self.out_features, dtype=torch.float)
else:
bias = None
qweight = torch._empty_affine_quantized(
[out_features, in_features], scale=1, zero_point=0, dtype=torch.qint8
)
self._packed_params = linear.LinearPackedParams(
row_block_size=row_block_size, col_block_size=col_block_size, dtype=dtype
)
self._packed_params.set_weight_bias(
qweight, bias, row_block_size, col_block_size
)
def _get_name(self):
return "SparseQuantizedDynamicLinear"
def extra_repr(self):
return f"in_features={self.in_features}, out_features={self.out_features}, qscheme={self.weight().qscheme()}"
def __repr__(self):
return _hide_packed_params_repr(self, linear.LinearPackedParams)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.ops.sparse.qlinear_dynamic(x, self._packed_params._packed_params)
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + "op_type"] = self._op_type
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
op_type = int(state_dict[prefix + "op_type"])
assert op_type == "sparse", (
f"Cannot load from op_type [{op_type}], expecting [{self._op_type}]"
)
state_dict.pop(prefix + "op_type")
version = local_metadata.get("version", None)
assert version <= self._version
# Is this code valid? In old quantization it seemed to be used to load
# older model
weight = state_dict.pop(prefix + "weight")
bias = state_dict.pop(prefix + "bias")
state_dict.update(
{
prefix + "_packed_params.weight": weight,
prefix + "_packed_params.bias": bias,
}
)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
def _weight_bias(self):
return self._packed_params._weight_bias()
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def set_weight_bias(
self,
w: torch.Tensor,
b: torch.Tensor | None,
row_block_size: int | None,
col_block_size: int | None,
) -> None:
assert row_block_size is not None and col_block_size is not None
self.out_features = w.shape[0]
self.in_features = w.shape[1]
self._packed_params.set_weight_bias(w, b, row_block_size, col_block_size)
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
r"""Create a quantized sparse dynamic module from a float module.
We only care about the convert at this stage, no need for observers just yet.
"""
assert type(mod) is cls._FLOAT_MODULE, (
" nnq."
+ cls.__name__
+ ".from_float only works for "
+ cls._FLOAT_MODULE.__name__
)
# TODO: Need to add options to qconfig to avoid the calibration.
# TODO: Add calibration for the sparsity
assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
if type(mod) is nni.LinearReLU:
mod = mod[0]
# pyrefly: ignore [missing-attribute]
if mod.qconfig is not None and mod.qconfig.weight is not None:
# pyrefly: ignore [not-callable]
weight_observer = mod.qconfig.weight()
else:
# We have the circular import issues if we import the qconfig in the beginning of this file:
# https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
# import until we need it.
from torch.ao.quantization.qconfig import default_dynamic_qconfig
weight_observer = default_dynamic_qconfig.weight()
# It is important to multiply by the mask BEFORE calling the `weight_observer`
# TODO (zaf): Mask might not be part of the qconfig (T83295194)
weight = mod.weight
if getattr(mod.qconfig, "mask", False):
weight = mod.qconfig.mask * mod.weight
weight_observer(weight)
dtype = weight_observer.dtype
assert dtype == torch.qint8, "Weight observer must have dtype torch.qint8"
_w_sc, w_zp = weight_observer.calculate_qparams()
if isinstance(w_zp, torch.Tensor):
assert not torch.any(w_zp.bool()), "All weight zero points must map to 0"
else:
assert w_zp == 0, "Weight zero point must map to 0"
qweight = _quantize_weight(weight.float(), weight_observer)
row_block_size, col_block_size = LinearBlockSparsePattern.block_size()
qlinear = cls(
mod.in_features,
mod.out_features,
row_block_size,
col_block_size,
dtype=dtype,
)
# pyrefly: ignore [bad-argument-type]
qlinear.set_weight_bias(qweight, mod.bias, row_block_size, col_block_size)
return qlinear
| Linear |
python | pytorch__pytorch | torch/testing/_internal/opinfo/core.py | {
"start": 66730,
"end": 67264
} | class ____(SampleRule):
@property
def type(self):
return "skip"
def get_context(self, test_case):
@contextlib.contextmanager
def skipcontext(test_case=test_case):
test_case.skipTest("Skipped!")
yield
return skipcontext()
# Decorator that defines skip / xfail rules for a given test function. If these are
# present, the @ops decorator will apply these for each op and place them onto the
# parametrized test functions for use by e.g. OpInfo.sample_inputs().
| SkipRule |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_concrete.py | {
"start": 42551,
"end": 44986
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global offices_table, refugees_table
refugees_table = Table(
"refugee",
metadata,
Column(
"refugee_fid",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("refugee_name", String(30), key="name"),
)
offices_table = Table(
"office",
metadata,
Column(
"office_fid",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("office_name", String(30), key="name"),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
refugees_table.insert(),
[
dict(refugee_fid=1, name="refugee1"),
dict(refugee_fid=2, name="refugee2"),
],
)
connection.execute(
offices_table.insert(),
[
dict(office_fid=1, name="office1"),
dict(office_fid=2, name="office2"),
],
)
def test_keys(self):
pjoin = polymorphic_union(
{"refugee": refugees_table, "office": offices_table},
"type",
"pjoin",
)
class Location:
pass
class Refugee(Location):
pass
class Office(Location):
pass
location_mapper = self.mapper_registry.map_imperatively(
Location,
pjoin,
polymorphic_on=pjoin.c.type,
polymorphic_identity="location",
)
self.mapper_registry.map_imperatively(
Office,
offices_table,
inherits=location_mapper,
concrete=True,
polymorphic_identity="office",
)
self.mapper_registry.map_imperatively(
Refugee,
refugees_table,
inherits=location_mapper,
concrete=True,
polymorphic_identity="refugee",
)
sess = fixture_session()
eq_(sess.get(Refugee, 1).name, "refugee1")
eq_(sess.get(Refugee, 2).name, "refugee2")
eq_(sess.get(Office, 1).name, "office1")
eq_(sess.get(Office, 2).name, "office2")
| ColKeysTest |
python | apache__airflow | airflow-core/src/airflow/task/weight_rule.py | {
"start": 867,
"end": 1441
} | class ____(str, Enum):
"""Weight rules for task priority calculation."""
DOWNSTREAM = "downstream"
UPSTREAM = "upstream"
ABSOLUTE = "absolute"
@classmethod
def is_valid(cls, weight_rule: str) -> bool:
"""Check if weight rule is valid."""
return weight_rule in cls.all_weight_rules()
@methodtools.lru_cache(maxsize=None)
@classmethod
def all_weight_rules(cls) -> set[str]:
"""Return all weight rules."""
return set(cls.__members__.values())
def __str__(self) -> str:
return self.value
| WeightRule |
python | tensorflow__tensorflow | tensorflow/lite/tools/signature/signature_def_utils_test.py | {
"start": 926,
"end": 3123
} | class ____(tf.test.TestCase):
def testAddSignatureDefToFlatbufferMetadata(self):
"""Test a SavedModel conversion has correct Metadata."""
filename = tf.compat.v1.resource_loader.get_path_to_datafile(
'../../testdata/add.bin')
if not tf.io.gfile.exists(filename):
raise IOError('File "{0}" does not exist in {1}.'.format(
filename,
tf.compat.v1.resource_loader.get_root_dir_with_all_resources()))
with tf.io.gfile.GFile(filename, 'rb') as fp:
tflite_model = bytearray(fp.read())
self.assertIsNotNone(tflite_model, 'TFLite model is none')
sig_input_tensor = meta_graph_pb2.TensorInfo(
dtype=tf.as_dtype(tf.float32).as_datatype_enum,
tensor_shape=tf.TensorShape([1, 8, 8, 3]).as_proto())
sig_input_tensor_signature = {'x': sig_input_tensor}
sig_output_tensor = meta_graph_pb2.TensorInfo(
dtype=tf.as_dtype(tf.float32).as_datatype_enum,
tensor_shape=tf.TensorShape([1, 8, 8, 3]).as_proto())
sig_output_tensor_signature = {'y': sig_output_tensor}
predict_signature_def = (
tf.compat.v1.saved_model.build_signature_def(
sig_input_tensor_signature, sig_output_tensor_signature,
tf.saved_model.PREDICT_METHOD_NAME))
serving_key = tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
signature_def_map = {serving_key: predict_signature_def}
tflite_model = signature_def_utils.set_signature_defs(
tflite_model, signature_def_map)
saved_signature_def_map = signature_def_utils.get_signature_defs(
tflite_model)
signature_def = saved_signature_def_map.get(serving_key)
self.assertIsNotNone(signature_def, 'SignatureDef not found')
self.assertEqual(signature_def.SerializeToString(),
predict_signature_def.SerializeToString())
remove_tflite_model = (
signature_def_utils.clear_signature_defs(tflite_model))
signature_def_map = signature_def_utils.get_signature_defs(
remove_tflite_model)
self.assertIsNone(signature_def_map.get(serving_key),
'SignatureDef found, but should be missing')
if __name__ == '__main__':
tf.test.main()
| SignatureDefUtilsTest |
python | HIPS__autograd | autograd/misc/tracers.py | {
"start": 1663,
"end": 2220
} | class ____(Node):
__slots__ = ["value", "recipe"]
def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
self.value = value
self.recipe = (fun, args, kwargs, zip(parent_argnums, parents))
def initialize_root(self):
self.value = None
self.recipe = (lambda x: x, (), {}, [])
def full_graph(fun, *args, **kwargs):
unary_fun = lambda args: fun(*args, **kwargs)
start_node = FullGraphNode.new_root()
end_value, end_node = trace(start_node, unary_fun, args)
return end_node
| FullGraphNode |
python | getsentry__sentry | src/sentry/users/api/endpoints/user_identity.py | {
"start": 531,
"end": 1338
} | class ____(UserEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, user: User) -> Response:
"""
Retrieve all of a users' identities (NOT AuthIdentities)
`````````````````````````````````
:pparam string user ID: user ID, or 'me'
:auth: required
"""
queryset = Identity.objects.filter(user=user)
provider = request.GET.get("provider")
if provider:
queryset = queryset.filter(idp__type=provider.lower())
return self.paginate(
request=request,
queryset=queryset,
on_results=lambda x: serialize(x, request.user, serializer=IdentitySerializer()),
paginator_cls=OffsetPaginator,
)
| UserIdentityEndpoint |
python | pandas-dev__pandas | asv_bench/benchmarks/join_merge.py | {
"start": 13913,
"end": 16892
} | class ____:
params = [["backward", "forward", "nearest"], [None, 5]]
param_names = ["direction", "tolerance"]
def setup(self, direction, tolerance):
one_count = 200000
two_count = 1000000
df1 = DataFrame(
{
"time": np.random.randint(0, one_count / 20, one_count),
"key": np.random.choice(list(string.ascii_uppercase), one_count),
"key2": np.random.randint(0, 25, one_count),
"value1": np.random.randn(one_count),
}
)
df2 = DataFrame(
{
"time": np.random.randint(0, two_count / 20, two_count),
"key": np.random.choice(list(string.ascii_uppercase), two_count),
"key2": np.random.randint(0, 25, two_count),
"value2": np.random.randn(two_count),
}
)
df1 = df1.sort_values("time")
df2 = df2.sort_values("time")
df1["time32"] = np.int32(df1.time)
df2["time32"] = np.int32(df2.time)
df1["timeu64"] = np.uint64(df1.time)
df2["timeu64"] = np.uint64(df2.time)
self.df1a = df1[["time", "value1"]]
self.df2a = df2[["time", "value2"]]
self.df1b = df1[["time", "key", "value1"]]
self.df2b = df2[["time", "key", "value2"]]
self.df1c = df1[["time", "key2", "value1"]]
self.df2c = df2[["time", "key2", "value2"]]
self.df1d = df1[["time32", "value1"]]
self.df2d = df2[["time32", "value2"]]
self.df1e = df1[["time", "key", "key2", "value1"]]
self.df2e = df2[["time", "key", "key2", "value2"]]
self.df1f = df1[["timeu64", "value1"]]
self.df2f = df2[["timeu64", "value2"]]
def time_on_int(self, direction, tolerance):
merge_asof(
self.df1a, self.df2a, on="time", direction=direction, tolerance=tolerance
)
def time_on_int32(self, direction, tolerance):
merge_asof(
self.df1d, self.df2d, on="time32", direction=direction, tolerance=tolerance
)
def time_on_uint64(self, direction, tolerance):
merge_asof(
self.df1f, self.df2f, on="timeu64", direction=direction, tolerance=tolerance
)
def time_by_object(self, direction, tolerance):
merge_asof(
self.df1b,
self.df2b,
on="time",
by="key",
direction=direction,
tolerance=tolerance,
)
def time_by_int(self, direction, tolerance):
merge_asof(
self.df1c,
self.df2c,
on="time",
by="key2",
direction=direction,
tolerance=tolerance,
)
def time_multiby(self, direction, tolerance):
merge_asof(
self.df1e,
self.df2e,
on="time",
by=["key", "key2"],
direction=direction,
tolerance=tolerance,
)
| MergeAsof |
python | pyparsing__pyparsing | examples/adventureEngine.py | {
"start": 5740,
"end": 6265
} | class ____(Command):
def __init__(self, quals):
super().__init__("DROP", "dropping")
self.subject = quals.item
@staticmethod
def help_description():
return "DROP or LEAVE - drop an object (but fragile items may break)"
def _do_command(self, player):
rm = player.room
subj = Item.items[self.subject]
if subj in player.inv:
rm.add_item(subj)
player.drop(subj)
else:
print(f"You don't have {a_or_an(subj)}.")
| DropCommand |
python | huggingface__transformers | src/transformers/models/altclip/modeling_altclip.py | {
"start": 12450,
"end": 13356
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.self = ALT_ROBERTA_SELF_ATTENTION_CLASSES[config._attn_implementation](config)
self.output = AltRobertaSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.roberta.modeling_roberta.RobertaIntermediate with Roberta->AltRoberta
| AltRobertaAttention |
python | lxml__lxml | src/lxml/tests/test_nsclasses.py | {
"start": 202,
"end": 7252
} | class ____(HelperTestCase):
class default_class(etree.ElementBase):
pass
class maeh_class(etree.ElementBase):
def maeh(self):
return 'maeh'
class bluff_class(etree.ElementBase):
def bluff(self):
return 'bluff'
def setUp(self):
super().setUp()
lookup = etree.ElementNamespaceClassLookup()
self.Namespace = lookup.get_namespace
parser = etree.XMLParser()
parser.set_element_class_lookup(lookup)
etree.set_default_parser(parser)
def tearDown(self):
etree.set_default_parser()
del self.Namespace
super().tearDown()
def test_registry(self):
ns = self.Namespace('ns01')
ns['maeh'] = self.maeh_class
self.Namespace('ns01').clear()
self.Namespace('ns02').update({'maeh' : self.maeh_class})
self.Namespace('ns03').update({'bluff' : self.bluff_class}.items())
self.Namespace('ns02').clear()
self.Namespace('ns03').clear()
def test_ns_classes(self):
bluff_dict = {'bluff' : self.bluff_class}
maeh_dict = {'maeh' : self.maeh_class}
self.Namespace('ns10').update(bluff_dict)
tree = self.parse(b'<bluff xmlns="ns10"><ns11:maeh xmlns:ns11="ns11"/></bluff>')
el = tree.getroot()
self.assertTrue(isinstance(el, etree.ElementBase))
self.assertTrue(hasattr(el, 'bluff'))
self.assertFalse(hasattr(el[0], 'maeh'))
self.assertFalse(hasattr(el[0], 'bluff'))
self.assertEqual(el.bluff(), 'bluff')
del el
gc.collect()
if IS_PYPY:
# PyPy doesn't necessarily clean up the tree immediately.
# Relax the test and use a new tree.
tree = self.parse(b'<bluff xmlns="ns10"><ns11:maeh xmlns:ns11="ns11"/></bluff>')
self.Namespace('ns11').update(maeh_dict)
el = tree.getroot()
self.assertTrue(hasattr(el, 'bluff'))
self.assertTrue(hasattr(el[0], 'maeh'))
self.assertEqual(el.bluff(), 'bluff')
self.assertEqual(el[0].maeh(), 'maeh')
del el
gc.collect()
self.Namespace('ns10').clear()
tree = self.parse(b'<bluff xmlns="ns10"><ns11:maeh xmlns:ns11="ns11"/></bluff>')
el = tree.getroot()
self.assertFalse(hasattr(el, 'bluff'))
self.assertFalse(hasattr(el, 'maeh'))
self.assertFalse(hasattr(el[0], 'bluff'))
self.assertTrue(hasattr(el[0], 'maeh'))
self.Namespace('ns11').clear()
def test_default_tagname(self):
bluff_dict = {
None : self.bluff_class,
'maeh' : self.maeh_class
}
ns = self.Namespace("uri:nsDefClass")
ns.update(bluff_dict)
tree = self.parse(b'''
<test xmlns="bla" xmlns:ns1="uri:nsDefClass" xmlns:ns2="uri:nsDefClass">
<ns2:el1/><ns1:el2/><ns1:maeh/><ns2:maeh/><maeh/>
</test>
''')
el = tree.getroot()
self.assertFalse(isinstance(el, etree.ElementBase))
for child in el[:-1]:
self.assertTrue(isinstance(child, etree.ElementBase), child.tag)
self.assertFalse(isinstance(el[-1], etree.ElementBase))
self.assertTrue(hasattr(el[0], 'bluff'))
self.assertTrue(hasattr(el[1], 'bluff'))
self.assertTrue(hasattr(el[2], 'maeh'))
self.assertTrue(hasattr(el[3], 'maeh'))
self.assertFalse(hasattr(el[4], 'maeh'))
del el
ns.clear()
def test_create_element(self):
bluff_dict = {'bluff' : self.bluff_class}
self.Namespace('ns20').update(bluff_dict)
maeh_dict = {'maeh' : self.maeh_class}
self.Namespace('ns21').update(maeh_dict)
el = etree.Element("{ns20}bluff")
self.assertTrue(hasattr(el, 'bluff'))
child = etree.SubElement(el, "{ns21}maeh")
self.assertTrue(hasattr(child, 'maeh'))
child = etree.SubElement(el, "{ns20}bluff")
self.assertTrue(hasattr(child, 'bluff'))
child = etree.SubElement(el, "{ns21}bluff")
self.assertFalse(hasattr(child, 'bluff'))
self.assertFalse(hasattr(child, 'maeh'))
self.assertTrue(hasattr(el[0], 'maeh'))
self.assertTrue(hasattr(el[1], 'bluff'))
self.assertFalse(hasattr(el[2], 'bluff'))
self.assertFalse(hasattr(el[2], 'maeh'))
self.assertEqual(el.bluff(), 'bluff')
self.assertEqual(el[0].maeh(), 'maeh')
self.assertEqual(el[1].bluff(), 'bluff')
self.Namespace('ns20').clear()
self.Namespace('ns21').clear()
def test_create_element_default(self):
bluff_dict = {None : self.bluff_class}
self.Namespace('ns30').update(bluff_dict)
maeh_dict = {'maeh' : self.maeh_class}
self.Namespace(None).update(maeh_dict)
el = etree.Element("{ns30}bluff")
etree.SubElement(el, "maeh")
self.assertTrue(hasattr(el, 'bluff'))
self.assertTrue(hasattr(el[0], 'maeh'))
self.assertEqual(el.bluff(), 'bluff')
self.assertEqual(el[0].maeh(), 'maeh')
self.Namespace(None).clear()
self.Namespace('ns30').clear()
def test_element_creation(self):
default, bluff, maeh = (
self.default_class, self.bluff_class, self.maeh_class)
class honk(etree.ElementBase):
TAG = 'HONK'
NAMESPACE = 'http://a.b/c'
el = default(
"test",
"text",
bluff(honk, "TaIL", maeh),
maeh("TeXT", bluff, honk(), "TAiL"),
"Tail")
self.assertEqual('default_class', el.tag)
self.assertEqual('testtext', el.text)
self.assertEqual(None, el.tail)
self.assertEqual(2, len(el))
self.assertEqual(7, len(list(el.iter())))
self.assertEqual('bluff_class', el[0].tag)
self.assertEqual('TaIL', el[0][0].tail)
self.assertEqual('TaIL', ''.join(el[0].itertext()))
self.assertEqual('{http://a.b/c}HONK',
el[0][0].tag)
self.assertEqual('maeh_class',
el[0][1].tag)
self.assertEqual('maeh_class', el[1].tag)
self.assertEqual('TeXT', el[1].text)
self.assertEqual('bluff_class', el[1][0].tag)
self.assertEqual('{http://a.b/c}HONK', el[1][1].tag)
self.assertEqual('TAiL', el[1][1].tail)
self.assertEqual('TeXTTAiL',
''.join(el[1].itertext()))
self.assertEqual('Tail', el[1].tail)
self.assertEqual('TAiL', el[1][1].tail)
self.assertEqual('bluff_class', el[1][0].tag)
self.assertEqual('{http://a.b/c}HONK', el[1][1].tag)
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.defaultTestLoader.loadTestsFromTestCase(ETreeNamespaceClassesTestCase)])
suite.addTests(
[make_doctest('element_classes.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| ETreeNamespaceClassesTestCase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.