language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_sns.py | {
"start": 1231,
"end": 3649
} | class ____:
@pytest.fixture(autouse=True)
def _setup_test_cases(self):
self.default_op_kwargs = {
"task_id": TASK_ID,
"target_arn": TARGET_ARN,
"message": MESSAGE,
"subject": SUBJECT,
"message_attributes": MESSAGE_ATTRIBUTES,
}
def test_init(self):
op = SnsPublishOperator(**self.default_op_kwargs)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
op = SnsPublishOperator(
**self.default_op_kwargs,
aws_conn_id=AWS_CONN_ID,
region_name="us-west-1",
verify="/spam/egg.pem",
botocore_config={"read_timeout": 42},
message_deduplication_id="abc",
message_group_id="a",
)
assert op.hook.aws_conn_id == AWS_CONN_ID
assert op.hook._region_name == "us-west-1"
assert op.hook._verify == "/spam/egg.pem"
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
@mock.patch.object(SnsPublishOperator, "hook")
@pytest.mark.parametrize(
("message_deduplication_id_", "message_group_id_"),
[
("abc", "a"),
(None, None),
("abc", None),
(None, "a"),
],
)
def test_execute(self, mocked_hook, message_deduplication_id_, message_group_id_):
hook_response = {"MessageId": "foobar"}
mocked_hook.publish_to_target.return_value = hook_response
op = SnsPublishOperator(
**self.default_op_kwargs,
message_deduplication_id=message_deduplication_id_,
message_group_id=message_group_id_,
)
assert op.execute({}) == hook_response
mocked_hook.publish_to_target.assert_called_once_with(
message=MESSAGE,
message_attributes=MESSAGE_ATTRIBUTES,
subject=SUBJECT,
target_arn=TARGET_ARN,
message_deduplication_id=message_deduplication_id_,
message_group_id=message_group_id_,
)
def test_template_fields(self):
operator = SnsPublishOperator(
**self.default_op_kwargs, message_deduplication_id="abc", message_group_id="a"
)
validate_template_fields(operator)
| TestSnsPublishOperator |
python | pytest-dev__pytest | testing/test_monkeypatch.py | {
"start": 9660,
"end": 9739
} | class ____:
@staticmethod
def hello() -> bool:
return True
| Sample |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 5803,
"end": 6049
} | class ____(OAuth2Error):
"""
The request is missing a required parameter, includes an invalid
parameter value, includes a parameter more than once, or is
otherwise malformed.
"""
error = 'invalid_request'
| InvalidRequestError |
python | PyCQA__pylint | pylint/config/_pylint_config/utils.py | {
"start": 598,
"end": 3558
} | class ____(Exception):
"""Raised whenever a user input is invalid."""
def __init__(self, valid_input: str, input_value: str, *args: object) -> None:
self.valid = valid_input
self.input = input_value
super().__init__(*args)
def should_retry_after_invalid_input(
func: Callable[_P, _ReturnValueT],
) -> Callable[_P, _ReturnValueT]:
"""Decorator that handles InvalidUserInput exceptions and retries."""
def inner_function(*args: _P.args, **kwargs: _P.kwargs) -> _ReturnValueT:
called_once = False
while True:
try:
return func(*args, **kwargs)
except InvalidUserInput as exc:
if called_once and exc.input == "exit()":
print("Stopping 'pylint-config'.")
sys.exit()
print(f"Answer should be one of {exc.valid}.")
print("Type 'exit()' if you want to exit the program.")
called_once = True
return inner_function
@should_retry_after_invalid_input
def get_and_validate_format() -> Literal["toml", "ini"]:
"""Make sure that the output format is either .toml or .ini."""
# pylint: disable-next=bad-builtin
format_type = input(
"Please choose the format of configuration, (T)oml or (I)ni (.cfg): "
).lower()
if format_type not in SUPPORTED_FORMATS:
raise InvalidUserInput(", ".join(sorted(SUPPORTED_FORMATS)), format_type)
if format_type.startswith("t"):
return "toml"
return "ini"
@should_retry_after_invalid_input
def validate_yes_no(question: str, default: Literal["yes", "no"] | None) -> bool:
"""Validate that a yes or no answer is correct."""
question = f"{question} (y)es or (n)o "
if default:
question += f" (default={default}) "
# pylint: disable-next=bad-builtin
answer = input(question).lower()
if not answer and default:
answer = default
if answer not in YES_NO_ANSWERS:
raise InvalidUserInput(", ".join(sorted(YES_NO_ANSWERS)), answer)
return answer.startswith("y")
def get_minimal_setting() -> bool:
"""Ask the user if they want to use the minimal setting."""
return validate_yes_no(
"Do you want a minimal configuration without comments or default values?", "no"
)
def get_and_validate_output_file() -> tuple[bool, Path]:
"""Make sure that the output file is correct."""
to_file = validate_yes_no("Do you want to write the output to a file?", "no")
if not to_file:
return False, Path()
# pylint: disable-next=bad-builtin
file_name = Path(input("What should the file be called: "))
if file_name.exists():
overwrite = validate_yes_no(
f"{file_name} already exists. Are you sure you want to overwrite?", "no"
)
if not overwrite:
return False, file_name
return True, file_name
return True, file_name
| InvalidUserInput |
python | numpy__numpy | numpy/lib/tests/test_twodim_base.py | {
"start": 5663,
"end": 10617
} | class ____:
def test_simple(self):
x = array(
[0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
y = array(
[0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])
xedges = np.linspace(0, 1, 10)
yedges = np.linspace(0, 1, 10)
H = histogram2d(x, y, (xedges, yedges))[0]
answer = array(
[[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
assert_array_equal(H.T, answer)
H = histogram2d(x, y, xedges)[0]
assert_array_equal(H.T, answer)
H, xedges, yedges = histogram2d(list(range(10)), list(range(10)))
assert_array_equal(H, eye(10, 10))
assert_array_equal(xedges, np.linspace(0, 9, 11))
assert_array_equal(yedges, np.linspace(0, 9, 11))
def test_asym(self):
x = array([1, 1, 2, 3, 4, 4, 4, 5])
y = array([1, 3, 2, 0, 1, 2, 3, 4])
H, xed, yed = histogram2d(
x, y, (6, 5), range=[[0, 6], [0, 5]], density=True)
answer = array(
[[0., 0, 0, 0, 0],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 1]])
assert_array_almost_equal(H, answer / 8., 3)
assert_array_equal(xed, np.linspace(0, 6, 7))
assert_array_equal(yed, np.linspace(0, 5, 6))
def test_density(self):
x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])
y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
H, xed, yed = histogram2d(
x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True)
answer = array([[1, 1, .5],
[1, 1, .5],
[.5, .5, .25]]) / 9.
assert_array_almost_equal(H, answer, 3)
def test_all_outliers(self):
r = np.random.rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6
H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
assert_array_equal(H, 0)
def test_empty(self):
a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1]))
assert_array_max_ulp(a, array([[0.]]))
a, edge1, edge2 = histogram2d([], [], bins=4)
assert_array_max_ulp(a, np.zeros((4, 4)))
def test_binparameter_combination(self):
x = array(
[0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
0.59944483, 1])
y = array(
[0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
0.15886423, 1])
edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
H, xe, ye = histogram2d(x, y, (edges, 4))
answer = array(
[[2., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 1., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
H, xe, ye = histogram2d(x, y, (4, edges))
answer = array(
[[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
def test_dispatch(self):
class ShouldDispatch:
def __array_function__(self, function, types, args, kwargs):
return types, args, kwargs
xy = [1, 2]
s_d = ShouldDispatch()
r = histogram2d(s_d, xy)
# Cannot use assert_equal since that dispatches...
assert_(r == ((ShouldDispatch,), (s_d, xy), {}))
r = histogram2d(xy, s_d)
assert_(r == ((ShouldDispatch,), (xy, s_d), {}))
r = histogram2d(xy, xy, bins=s_d)
assert_(r, ((ShouldDispatch,), (xy, xy), {'bins': s_d}))
r = histogram2d(xy, xy, bins=[s_d, 5])
assert_(r, ((ShouldDispatch,), (xy, xy), {'bins': [s_d, 5]}))
assert_raises(Exception, histogram2d, xy, xy, bins=[s_d])
r = histogram2d(xy, xy, weights=s_d)
assert_(r, ((ShouldDispatch,), (xy, xy), {'weights': s_d}))
@pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)])
def test_bad_length(self, x_len, y_len):
x, y = np.ones(x_len), np.ones(y_len)
with pytest.raises(ValueError,
match='x and y must have the same length.'):
histogram2d(x, y)
| TestHistogram2d |
python | jazzband__django-polymorphic | example/orders/models.py | {
"start": 957,
"end": 1385
} | class ____(Payment):
"""
Credit card
"""
MONTH_CHOICES = [(i, n) for i, n in sorted(MONTHS_3.items())]
card_type = models.CharField(max_length=10)
expiry_month = models.PositiveSmallIntegerField(choices=MONTH_CHOICES)
expiry_year = models.PositiveIntegerField()
class Meta:
verbose_name = _("Credit Card Payment")
verbose_name_plural = _("Credit Card Payments")
| CreditCardPayment |
python | getsentry__sentry | tests/sentry/models/test_activity.py | {
"start": 490,
"end": 15634
} | class ____(TestCase):
def test_get_activities_for_group_none(self) -> None:
project = self.create_project(name="test_activities_group")
group = self.create_group(project)
act_for_group = Activity.objects.get_activities_for_group(group=group, num=100)
assert len(act_for_group) == 1
assert act_for_group[0].type == ActivityType.FIRST_SEEN.value
def test_get_activities_for_group_priority(self) -> None:
manager = EventManager(make_event(level=logging.FATAL))
project = self.create_project(name="test_activities_group")
event = manager.save(project.id)
user1 = self.create_user()
group = event.group
assert group is not None
group.refresh_from_db()
activities = [
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_PRIORITY,
user=user1,
data={"priority": PriorityLevel.LOW.to_str()},
send_notification=False,
),
]
act_for_group = Activity.objects.get_activities_for_group(group=group, num=100)
assert len(act_for_group) == 3
assert act_for_group[0] == activities[-1]
assert act_for_group[1] == activities[-2]
assert act_for_group[-1].type == ActivityType.FIRST_SEEN.value
assert act_for_group[-1].data["priority"] == PriorityLevel.HIGH.to_str()
def test_get_activities_for_group_simple_priority_ff_on_dups(self) -> None:
manager = EventManager(make_event(level=logging.FATAL))
project = self.create_project(name="test_activities_group")
event = manager.save(project.id)
user1 = self.create_user()
group = event.group
assert group is not None
group.refresh_from_db()
activities = [
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_PRIORITY,
user=user1,
data={"priority": PriorityLevel.LOW.to_str()},
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_PRIORITY,
user=user1,
data={"priority": PriorityLevel.LOW.to_str()},
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_PRIORITY,
user=user1,
data={"priority": PriorityLevel.MEDIUM.to_str()},
send_notification=False,
),
]
act_for_group = Activity.objects.get_activities_for_group(group=group, num=100)
assert len(act_for_group) == 3
assert act_for_group[0] == activities[-1]
assert act_for_group[1] == activities[-2]
assert act_for_group[-1].type == ActivityType.FIRST_SEEN.value
assert act_for_group[-1].data["priority"] == PriorityLevel.HIGH.to_str()
def test_get_activities_for_group_simple(self) -> None:
project = self.create_project(name="test_activities_group")
group = self.create_group(project)
user1 = self.create_user()
activities = [
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_IGNORED,
user=user1,
data=None,
send_notification=False,
),
]
act_for_group = Activity.objects.get_activities_for_group(group=group, num=100)
assert len(act_for_group) == 3
assert act_for_group[0] == activities[-1]
assert act_for_group[1] == activities[-2]
assert act_for_group[-1].type == ActivityType.FIRST_SEEN.value
def test_get_activities_for_group_collapse_same(self) -> None:
project = self.create_project(name="test_activities_group")
group = self.create_group(project)
user1 = self.create_user()
user2 = self.create_user()
user3 = self.create_user()
activities = [
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_IGNORED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.NOTE,
user=user1,
data={"text": "text", "mentions": []},
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.NOTE,
user=user2,
data={"text": "text", "mentions": []},
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.NOTE,
user=user3,
data={"text": "text", "mentions": []},
send_notification=False,
),
]
act_for_group = Activity.objects.get_activities_for_group(group=group, num=100)
assert len(act_for_group) == 7
assert act_for_group[0] == activities[-1]
assert act_for_group[1] == activities[-2]
assert act_for_group[2] == activities[-3]
assert act_for_group[3] == activities[-4]
assert act_for_group[4] == activities[1]
assert act_for_group[5] == activities[0]
assert act_for_group[-1].type == ActivityType.FIRST_SEEN.value
def test_get_activities_for_group_flip_flop(self) -> None:
project = self.create_project(name="test_activities_group")
group = self.create_group(project)
user1 = self.create_user()
user2 = self.create_user()
user3 = self.create_user()
activities = [
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_IGNORED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user2,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_IGNORED,
user=user2,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user3,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_IGNORED,
user=user3,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_IGNORED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_IGNORED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_UNRESOLVED,
user=user1,
data=None,
send_notification=False,
),
Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_IGNORED,
user=user1,
data=None,
send_notification=False,
),
]
act_for_group = Activity.objects.get_activities_for_group(group=group, num=100)
assert len(act_for_group) == len(activities) + 1
assert act_for_group[-1].type == ActivityType.FIRST_SEEN.value
for pair in chunked(act_for_group[:-1], 2):
assert pair[0].type == ActivityType.SET_IGNORED.value
assert pair[1].type == ActivityType.SET_UNRESOLVED.value
@patch("sentry.tasks.activity.send_activity_notifications.delay")
def test_skips_status_change_notifications_if_disabled(
self, mock_send_activity_notifications: MagicMock
) -> None:
project = self.create_project(name="test_activities_group")
group = self.create_group(project)
# Create an activity that would normally trigger a notification
activity = Activity.objects.create_group_activity(
group=group, type=ActivityType.SET_UNRESOLVED, data=None, send_notification=True
)
mock_send_activity_notifications.assert_called_once_with(activity.id)
mock_send_activity_notifications.reset_mock()
group.type = MetricIssue.type_id
group.save()
# Mock the MetricIssue to disable status change notifications
with patch.object(MetricIssue, "enable_status_change_workflow_notifications", False):
_ = Activity.objects.create_group_activity(
group=group, type=ActivityType.SET_RESOLVED, data=None, send_notification=True
)
mock_send_activity_notifications.assert_not_called()
@patch("sentry.tasks.activity.send_activity_notifications.delay")
def test_skips_workflow_notifications_if_disabled(
self, mock_send_activity_notifications: MagicMock
) -> None:
project = self.create_project(name="test_activities_group")
group = self.create_group(project)
# Create an assignment activity that would normally trigger a notification
activity = Activity.objects.create_group_activity(
group=group,
type=ActivityType.ASSIGNED,
data={"assignee": self.user},
send_notification=True,
)
mock_send_activity_notifications.assert_called_once_with(activity.id)
mock_send_activity_notifications.reset_mock()
group.type = MetricIssue.type_id
group.save()
# Mock the MetricIssue to disable workflow notifications
with patch.object(MetricIssue, "enable_workflow_notifications", False):
_ = Activity.objects.create_group_activity(
group=group,
type=ActivityType.ASSIGNED,
data={"assignee": self.user},
send_notification=True,
)
mock_send_activity_notifications.assert_not_called()
def test_create_group_activity_with_custom_datetime(self) -> None:
project = self.create_project(name="test_custom_datetime")
group = self.create_group(project)
user = self.create_user()
custom_datetime = datetime(2024, 1, 15, 10, 30, 0, tzinfo=timezone.utc)
activity = Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_RESOLVED,
user=user,
data={"reason": "test"},
send_notification=False,
datetime=custom_datetime,
)
assert activity.datetime == custom_datetime
assert activity.type == ActivityType.SET_RESOLVED.value
assert activity.user_id == user.id
def test_create_group_activity_without_custom_datetime(self) -> None:
project = self.create_project(name="test_default_datetime")
group = self.create_group(project)
user = self.create_user()
before = datetime.now(timezone.utc)
activity = Activity.objects.create_group_activity(
group=group,
type=ActivityType.SET_IGNORED,
user=user,
send_notification=False,
)
after = datetime.now(timezone.utc)
assert before <= activity.datetime <= after
| ActivityTest |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 22271,
"end": 22392
} | class ____(AbstractExternal2):
name = models.CharField(max_length=15, unique=True)
| OverrideModelNameUsingExternalModel1 |
python | pytorch__pytorch | torch/distributed/_composable/checkpoint_activation.py | {
"start": 879,
"end": 4801
} | class ____(_State):
enable_hook: bool = False
_ac_generator: Optional[Generator[None, None, None]]
@contract(_CheckpointState)
def checkpoint(module: nn.Module, **kwargs) -> nn.Module:
r"""
This is a composable activation checkpointing API. Unlike functional
activation checkpointing APIs, this one does not require changing model
source code. Unlike ``nn.Module`` wrapper activation checkpointing APIs,
this one does not modify model structure or fully-qualified names either.
Under the hood, it registers activation checkpointing logic as pre- and
post-forward hooks. Hence, this API can be easily applied to any model or
sub-modules in the model.
Args:
module (nn.Module): the target model or sub-module to apply activation
checkpointing.
Example::
>>> # xdoctest: +SKIP
>>> import torch.nn as nn
>>>
>>> class MyModel(nn.Module):
>>> def __init__(self) -> None:
>>> super().__init__()
>>> self.l1 = nn.Linear(10, 10)
>>> self.l2 = nn.Linear(10, 10)
>>>
>>> def forward(self, x):
>>> return self.l2(self.l1(x))
>>>
>>> model = MyModel()
>>> checkpoint(model.l1) # apply activation checkpointing only to l1
>>> model(torch.zeros(2, 10)).sum().backward()
"""
torch._C._log_api_usage_once("torch.distributed.checkpoint")
use_reentrant = kwargs.pop("use_reentrant", False)
if use_reentrant:
raise NotImplementedError(
"use_reentrant=True is not supported in composable checkpoint. "
"Please use torch.utils.checkpoint.checkpoint instead."
)
preserve_rng_state = kwargs.pop("preserve_rng_state", True)
user_context_fns = kwargs.pop("context_fn", None)
determinism_check = kwargs.pop("determinism_check", _DEFAULT_DETERMINISM_MODE)
debug = kwargs.pop("debug", False)
early_stop = kwargs.pop("early_stop", True)
if kwargs:
raise ValueError(
"Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
)
def forward_pre_hook(
module: nn.Module, args: tuple[Any, ...], kwargs: dict[str, Any]
) -> None:
if checkpoint.state(module).enable_hook:
def context_fns():
if user_context_fns is not None:
ctx1, ctx2 = user_context_fns()
return ctx1, _no_hook(module, ctx2)
else:
return nullcontext(), _no_hook(module)
gen = _checkpoint_without_reentrant_generator(
module,
preserve_rng_state,
context_fns,
determinism_check,
debug,
early_stop,
*args,
**kwargs,
)
checkpoint.state(module)._ac_generator = gen
next(gen)
def forward_hook(module: nn.Module, inputs: tuple[Any, ...], output: Any) -> Any:
if checkpoint.state(module).enable_hook:
try:
gen = checkpoint.state(module)._ac_generator
assert gen is not None
next(gen)
except StopIteration:
pass
else:
raise RuntimeError(
"Expected non-reentrant activation checkpoint generator to be exhausted, but it was not!"
)
# Ensure that we no longer hold on to the generator. always_call=True helps ensure we
# clear this even in the case of exception in fwd pass.
checkpoint.state(module)._ac_generator = None
checkpoint.state(module).enable_hook = True
module.register_forward_pre_hook(forward_pre_hook, with_kwargs=True)
module.register_forward_hook(forward_hook, prepend=True, always_call=True)
return module
| _CheckpointState |
python | pydantic__pydantic | pydantic/v1/main.py | {
"start": 3641,
"end": 13188
} | class ____(ABCMeta):
@no_type_check # noqa C901
def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901
fields: Dict[str, ModelField] = {}
config = BaseConfig
validators: 'ValidatorListDict' = {}
pre_root_validators, post_root_validators = [], []
private_attributes: Dict[str, ModelPrivateAttr] = {}
base_private_attributes: Dict[str, ModelPrivateAttr] = {}
slots: SetStr = namespace.get('__slots__', ())
slots = {slots} if isinstance(slots, str) else set(slots)
class_vars: SetStr = set()
hash_func: Optional[Callable[[Any], int]] = None
for base in reversed(bases):
if _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel:
fields.update(smart_deepcopy(base.__fields__))
config = inherit_config(base.__config__, config)
validators = inherit_validators(base.__validators__, validators)
pre_root_validators += base.__pre_root_validators__
post_root_validators += base.__post_root_validators__
base_private_attributes.update(base.__private_attributes__)
class_vars.update(base.__class_vars__)
hash_func = base.__hash__
resolve_forward_refs = kwargs.pop('__resolve_forward_refs__', True)
allowed_config_kwargs: SetStr = {
key
for key in dir(config)
if not (key.startswith('__') and key.endswith('__')) # skip dunder methods and attributes
}
config_kwargs = {key: kwargs.pop(key) for key in kwargs.keys() & allowed_config_kwargs}
config_from_namespace = namespace.get('Config')
if config_kwargs and config_from_namespace:
raise TypeError('Specifying config in two places is ambiguous, use either Config attribute or class kwargs')
config = inherit_config(config_from_namespace, config, **config_kwargs)
validators = inherit_validators(extract_validators(namespace), validators)
vg = ValidatorGroup(validators)
for f in fields.values():
f.set_config(config)
extra_validators = vg.get_validators(f.name)
if extra_validators:
f.class_validators.update(extra_validators)
# re-run prepare to add extra validators
f.populate_validators()
prepare_config(config, name)
untouched_types = ANNOTATED_FIELD_UNTOUCHED_TYPES
def is_untouched(v: Any) -> bool:
return isinstance(v, untouched_types) or v.__class__.__name__ == 'cython_function_or_method'
if (namespace.get('__module__'), namespace.get('__qualname__')) != ('pydantic.main', 'BaseModel'):
annotations = resolve_annotations(namespace.get('__annotations__', {}), namespace.get('__module__', None))
# annotation only fields need to come first in fields
for ann_name, ann_type in annotations.items():
if is_classvar(ann_type):
class_vars.add(ann_name)
elif is_finalvar_with_default_val(ann_type, namespace.get(ann_name, Undefined)):
class_vars.add(ann_name)
elif is_valid_field(ann_name):
validate_field_name(bases, ann_name)
value = namespace.get(ann_name, Undefined)
allowed_types = get_args(ann_type) if is_union(get_origin(ann_type)) else (ann_type,)
if (
is_untouched(value)
and ann_type != PyObject
and not any(
lenient_issubclass(get_origin(allowed_type), Type) for allowed_type in allowed_types
)
):
continue
fields[ann_name] = ModelField.infer(
name=ann_name,
value=value,
annotation=ann_type,
class_validators=vg.get_validators(ann_name),
config=config,
)
elif ann_name not in namespace and config.underscore_attrs_are_private:
private_attributes[ann_name] = PrivateAttr()
untouched_types = UNTOUCHED_TYPES + config.keep_untouched
for var_name, value in namespace.items():
can_be_changed = var_name not in class_vars and not is_untouched(value)
if isinstance(value, ModelPrivateAttr):
if not is_valid_private_name(var_name):
raise NameError(
f'Private attributes "{var_name}" must not be a valid field name; '
f'Use sunder or dunder names, e. g. "_{var_name}" or "__{var_name}__"'
)
private_attributes[var_name] = value
elif config.underscore_attrs_are_private and is_valid_private_name(var_name) and can_be_changed:
private_attributes[var_name] = PrivateAttr(default=value)
elif is_valid_field(var_name) and var_name not in annotations and can_be_changed:
validate_field_name(bases, var_name)
inferred = ModelField.infer(
name=var_name,
value=value,
annotation=annotations.get(var_name, Undefined),
class_validators=vg.get_validators(var_name),
config=config,
)
if var_name in fields:
if lenient_issubclass(inferred.type_, fields[var_name].type_):
inferred.type_ = fields[var_name].type_
else:
raise TypeError(
f'The type of {name}.{var_name} differs from the new default value; '
f'if you wish to change the type of this field, please use a type annotation'
)
fields[var_name] = inferred
_custom_root_type = ROOT_KEY in fields
if _custom_root_type:
validate_custom_root_type(fields)
vg.check_for_unused()
if config.json_encoders:
json_encoder = partial(custom_pydantic_encoder, config.json_encoders)
else:
json_encoder = pydantic_encoder
pre_rv_new, post_rv_new = extract_root_validators(namespace)
if hash_func is None:
hash_func = generate_hash_function(config.frozen)
exclude_from_namespace = fields | private_attributes.keys() | {'__slots__'}
new_namespace = {
'__config__': config,
'__fields__': fields,
'__exclude_fields__': {
name: field.field_info.exclude for name, field in fields.items() if field.field_info.exclude is not None
}
or None,
'__include_fields__': {
name: field.field_info.include for name, field in fields.items() if field.field_info.include is not None
}
or None,
'__validators__': vg.validators,
'__pre_root_validators__': unique_list(
pre_root_validators + pre_rv_new,
name_factory=lambda v: v.__name__,
),
'__post_root_validators__': unique_list(
post_root_validators + post_rv_new,
name_factory=lambda skip_on_failure_and_v: skip_on_failure_and_v[1].__name__,
),
'__schema_cache__': {},
'__json_encoder__': staticmethod(json_encoder),
'__custom_root_type__': _custom_root_type,
'__private_attributes__': {**base_private_attributes, **private_attributes},
'__slots__': slots | private_attributes.keys(),
'__hash__': hash_func,
'__class_vars__': class_vars,
**{n: v for n, v in namespace.items() if n not in exclude_from_namespace},
}
cls = super().__new__(mcs, name, bases, new_namespace, **kwargs)
# set __signature__ attr only for model class, but not for its instances
cls.__signature__ = ClassAttribute('__signature__', generate_model_signature(cls.__init__, fields, config))
if not _is_base_model_class_defined:
# Cython does not understand the `if TYPE_CHECKING:` condition in the
# BaseModel's body (where annotations are set), so clear them manually:
getattr(cls, '__annotations__', {}).clear()
if resolve_forward_refs:
cls.__try_update_forward_refs__()
# preserve `__set_name__` protocol defined in https://peps.python.org/pep-0487
# for attributes not in `new_namespace` (e.g. private attributes)
for name, obj in namespace.items():
if name not in new_namespace:
set_name = getattr(obj, '__set_name__', None)
if callable(set_name):
set_name(cls, name)
return cls
def __instancecheck__(self, instance: Any) -> bool:
"""
Avoid calling ABC _abc_subclasscheck unless we're pretty sure.
See #3829 and python/cpython#92810
"""
return hasattr(instance, '__post_root_validators__') and super().__instancecheck__(instance)
object_setattr = object.__setattr__
| ModelMetaclass |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/ir.py | {
"start": 32488,
"end": 43753
} | class ____(IR):
"""Sink a dataframe to a file."""
__slots__ = ("cloud_options", "kind", "options", "parquet_options", "path")
_non_child = (
"schema",
"kind",
"path",
"parquet_options",
"options",
"cloud_options",
)
kind: str
"""The type of file to write to. Eg. Parquet, CSV, etc."""
path: str
"""The path to write to"""
parquet_options: ParquetOptions
"""GPU-specific configuration options"""
cloud_options: dict[str, Any] | None
"""Cloud-related authentication options, currently ignored."""
options: dict[str, Any]
"""Sink options from Polars"""
def __init__(
self,
schema: Schema,
kind: str,
path: str,
parquet_options: ParquetOptions,
options: dict[str, Any],
cloud_options: dict[str, Any],
df: IR,
):
self.schema = schema
self.kind = kind
self.path = path
self.parquet_options = parquet_options
self.options = options
self.cloud_options = cloud_options
self.children = (df,)
self._non_child_args = (schema, kind, path, parquet_options, options)
if self.cloud_options is not None and any(
self.cloud_options.get(k) is not None
for k in ("config", "credential_provider")
):
raise NotImplementedError(
"Write to cloud storage"
) # pragma: no cover; no test yet
sync_on_close = options.get("sync_on_close")
if sync_on_close not in {"None", None}:
raise NotImplementedError(
f"sync_on_close='{sync_on_close}' is not supported."
) # pragma: no cover; no test yet
child_schema = df.schema.values()
if kind == "Csv":
if not all(
plc.io.csv.is_supported_write_csv(dtype.plc_type)
for dtype in child_schema
):
# Nested types are unsupported in polars and libcudf
raise NotImplementedError(
"Contains unsupported types for CSV writing"
) # pragma: no cover
serialize = options["serialize_options"]
if options["include_bom"]:
raise NotImplementedError("include_bom is not supported.")
for key in (
"date_format",
"time_format",
"datetime_format",
"float_scientific",
"float_precision",
):
if serialize[key] is not None:
raise NotImplementedError(f"{key} is not supported.")
if serialize["quote_style"] != "Necessary":
raise NotImplementedError("Only quote_style='Necessary' is supported.")
if chr(serialize["quote_char"]) != '"':
raise NotImplementedError("Only quote_char='\"' is supported.")
elif kind == "Parquet":
compression = options["compression"]
if isinstance(compression, dict):
if len(compression) != 1:
raise NotImplementedError(
"Compression dict with more than one entry."
) # pragma: no cover
compression, compression_level = next(iter(compression.items()))
options["compression"] = compression
if compression_level is not None:
raise NotImplementedError(
"Setting compression_level is not supported."
)
if compression == "Lz4Raw":
compression = "Lz4"
options["compression"] = compression
if (
compression != "Uncompressed"
and not plc.io.parquet.is_supported_write_parquet(
getattr(plc.io.types.CompressionType, compression.upper())
)
):
raise NotImplementedError(
f"Compression type '{compression}' is not supported."
)
elif (
kind == "Json"
): # pragma: no cover; options are validated on the polars side
if not all(
plc.io.json.is_supported_write_json(dtype.plc_type)
for dtype in child_schema
):
# Nested types are unsupported in polars and libcudf
raise NotImplementedError(
"Contains unsupported types for JSON writing"
) # pragma: no cover
shared_writer_options = {"sync_on_close", "maintain_order", "mkdir"}
if set(options) - shared_writer_options:
raise NotImplementedError("Unsupported options passed JSON writer.")
else:
raise NotImplementedError(
f"Unhandled sink kind: {kind}"
) # pragma: no cover
def get_hashable(self) -> Hashable:
"""
Hashable representation of the node.
The option dictionary is serialised for hashing purposes.
"""
schema_hash = tuple(self.schema.items()) # pragma: no cover
return (
type(self),
schema_hash,
self.kind,
self.path,
self.parquet_options,
json.dumps(self.options),
json.dumps(self.cloud_options),
) # pragma: no cover
@classmethod
def _write_csv(
cls, target: plc.io.SinkInfo, options: dict[str, Any], df: DataFrame
) -> None:
"""Write CSV data to a sink."""
serialize = options["serialize_options"]
csv_writer_options = (
plc.io.csv.CsvWriterOptions.builder(target, df.table)
.include_header(options["include_header"])
.names(df.column_names if options["include_header"] else [])
.na_rep(serialize["null"])
.line_terminator(serialize["line_terminator"])
.inter_column_delimiter(chr(serialize["separator"]))
.build()
)
plc.io.csv.write_csv(csv_writer_options, stream=df.stream)
@classmethod
def _write_json(cls, target: plc.io.SinkInfo, df: DataFrame) -> None:
"""Write Json data to a sink."""
metadata = plc.io.TableWithMetadata(
df.table, [(col, []) for col in df.column_names]
)
options = (
plc.io.json.JsonWriterOptions.builder(target, df.table)
.lines(val=True)
.na_rep("null")
.include_nulls(val=True)
.metadata(metadata)
.utf8_escaped(val=False)
.build()
)
plc.io.json.write_json(options, stream=df.stream)
@staticmethod
def _make_parquet_metadata(df: DataFrame) -> plc.io.types.TableInputMetadata:
"""Create TableInputMetadata and set column names."""
metadata = plc.io.types.TableInputMetadata(df.table)
for i, name in enumerate(df.column_names):
metadata.column_metadata[i].set_name(name)
return metadata
@overload
@staticmethod
def _apply_parquet_writer_options(
builder: plc.io.parquet.ChunkedParquetWriterOptionsBuilder,
options: dict[str, Any],
) -> plc.io.parquet.ChunkedParquetWriterOptionsBuilder: ...
@overload
@staticmethod
def _apply_parquet_writer_options(
builder: plc.io.parquet.ParquetWriterOptionsBuilder,
options: dict[str, Any],
) -> plc.io.parquet.ParquetWriterOptionsBuilder: ...
@staticmethod
def _apply_parquet_writer_options(
builder: plc.io.parquet.ChunkedParquetWriterOptionsBuilder
| plc.io.parquet.ParquetWriterOptionsBuilder,
options: dict[str, Any],
) -> (
plc.io.parquet.ChunkedParquetWriterOptionsBuilder
| plc.io.parquet.ParquetWriterOptionsBuilder
):
"""Apply writer options to the builder."""
compression = options.get("compression")
if compression and compression != "Uncompressed":
compression_type = getattr(
plc.io.types.CompressionType, compression.upper()
)
builder = builder.compression(compression_type)
if (data_page_size := options.get("data_page_size")) is not None:
builder = builder.max_page_size_bytes(data_page_size)
if (row_group_size := options.get("row_group_size")) is not None:
builder = builder.row_group_size_rows(row_group_size)
return builder
@classmethod
def _write_parquet(
cls,
target: plc.io.SinkInfo,
parquet_options: ParquetOptions,
options: dict[str, Any],
df: DataFrame,
) -> None:
metadata: plc.io.types.TableInputMetadata = cls._make_parquet_metadata(df)
builder: (
plc.io.parquet.ChunkedParquetWriterOptionsBuilder
| plc.io.parquet.ParquetWriterOptionsBuilder
)
if (
parquet_options.chunked
and parquet_options.n_output_chunks != 1
and df.table.num_rows() != 0
):
chunked_builder = plc.io.parquet.ChunkedParquetWriterOptions.builder(
target
).metadata(metadata)
chunked_builder = cls._apply_parquet_writer_options(
chunked_builder, options
)
chunked_writer_options = chunked_builder.build()
writer = plc.io.parquet.ChunkedParquetWriter.from_options(
chunked_writer_options, stream=df.stream
)
# TODO: Can be based on a heuristic that estimates chunk size
# from the input table size and available GPU memory.
num_chunks = parquet_options.n_output_chunks
table_chunks = plc.copying.split(
df.table,
[i * df.table.num_rows() // num_chunks for i in range(1, num_chunks)],
stream=df.stream,
)
for chunk in table_chunks:
writer.write(chunk)
writer.close([])
else:
builder = plc.io.parquet.ParquetWriterOptions.builder(
target, df.table
).metadata(metadata)
builder = cls._apply_parquet_writer_options(builder, options)
writer_options = builder.build()
plc.io.parquet.write_parquet(writer_options, stream=df.stream)
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="Sink")
def do_evaluate(
cls,
schema: Schema,
kind: str,
path: str,
parquet_options: ParquetOptions,
options: dict[str, Any],
df: DataFrame,
*,
context: IRExecutionContext,
) -> DataFrame:
"""Write the dataframe to a file."""
target = plc.io.SinkInfo([path])
if options.get("mkdir", False):
Path(path).parent.mkdir(parents=True, exist_ok=True)
if kind == "Csv":
cls._write_csv(target, options, df)
elif kind == "Parquet":
cls._write_parquet(target, parquet_options, options, df)
elif kind == "Json":
cls._write_json(target, df)
return DataFrame([], stream=df.stream)
| Sink |
python | huggingface__transformers | src/transformers/models/hiera/modeling_hiera.py | {
"start": 19044,
"end": 21348
} | class ____(nn.Module):
def __init__(
self,
config,
hidden_size: int,
hidden_size_output: int,
num_heads: int,
drop_path: float = 0.0,
query_stride: int = 1,
window_size: int = 0,
use_mask_unit_attn: bool = False,
) -> None:
super().__init__()
self.hidden_size = hidden_size
self.hidden_size_output = hidden_size_output
self.query_stride = query_stride
self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.attn = HieraMaskUnitAttention(
hidden_size=hidden_size,
hidden_size_output=hidden_size_output,
num_heads=num_heads,
query_stride=query_stride,
window_size=window_size,
use_mask_unit_attn=use_mask_unit_attn,
)
self.layernorm_after = nn.LayerNorm(hidden_size_output, eps=config.layer_norm_eps)
self.mlp = HieraMlp(config, hidden_size_output)
self.drop_path = HieraDropPath(drop_path) if drop_path > 0 else nn.Identity()
if hidden_size != hidden_size_output:
self.proj = nn.Linear(hidden_size, hidden_size_output)
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
batch_size, seq_len, _ = hidden_states.shape
# Attention + Q Pooling
hidden_states_norm = self.layernorm_before(hidden_states)
if self.hidden_size != self.hidden_size_output:
hidden_states = self.proj(hidden_states_norm)
# Refer to unroll to see how this performs a maxpool-Nd
hidden_states = (
hidden_states.view(batch_size, self.query_stride, -1, self.hidden_size_output).max(dim=1).values
)
(hidden_states_norm, attn_weights) = self.attn(hidden_states_norm, output_attentions=output_attentions)
hidden_states = hidden_states + self.drop_path(hidden_states_norm)
residual = hidden_states
hidden_states = self.layernorm_after(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.drop_path(hidden_states)
return (hidden_states, attn_weights)
| HieraLayer |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/sensors/dataprep.py | {
"start": 1188,
"end": 1969
} | class ____(BaseSensorOperator):
"""
Check the status of the Dataprep task to be finished.
:param job_group_id: ID of the job group to check
"""
template_fields: Sequence[str] = ("job_group_id",)
def __init__(
self,
*,
job_group_id: int | str,
dataprep_conn_id: str = "dataprep_default",
**kwargs,
):
super().__init__(**kwargs)
self.job_group_id = job_group_id
self.dataprep_conn_id = dataprep_conn_id
def poke(self, context: Context) -> bool:
hooks = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
status = hooks.get_job_group_status(job_group_id=int(self.job_group_id))
return status != JobGroupStatuses.IN_PROGRESS
| DataprepJobGroupIsFinishedSensor |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 30293,
"end": 30395
} | class ____(float, AnsibleTaggedObject):
__slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
| _AnsibleTaggedFloat |
python | ApeWorX__ape | src/ape/managers/networks.py | {
"start": 5165,
"end": 36400
} | class ____(BaseManager, ExtraAttributesMixin):
"""
The set of all blockchain network ecosystems registered from the plugin system.
Typically, you set the provider via the ``--network`` command line option.
However, use this singleton for more granular access to networks.
Usage example::
from ape import networks
# "networks" is the NetworkManager singleton
with networks.ethereum.mainnet.use_provider("node"):
...
"""
_active_provider: Optional["ProviderAPI"] = None
_default_ecosystem_name: Optional[str] = None
# For adhoc adding custom networks, or incorporating some defined
# in other projects' configs.
_custom_networks: list[dict] = []
@log_instead_of_fail(default="<NetworkManager>")
def __repr__(self) -> str:
provider = self.active_provider
class_name = NetworkManager.__name__
content = f"{class_name} active_provider={provider!r}" if provider else class_name
return f"<{content}>"
@property
def active_provider(self) -> Optional["ProviderAPI"]:
"""
The currently connected provider if one exists. Otherwise, returns ``None``.
"""
return self._active_provider
@active_provider.setter
def active_provider(self, new_value: "ProviderAPI"):
self._active_provider = new_value
@property
def connected(self) -> bool:
"""
``True`` when there is an active provider.
"""
return self.active_provider is not None
@property
def network(self) -> "NetworkAPI":
"""
The current network if connected to one.
Raises:
:class:`~ape.exceptions.ProviderNotConnectedError`: When there is
no active provider at runtime.
Returns:
:class:`~ape.api.networks.NetworkAPI`
"""
return self.provider.network
@property
def ecosystem(self) -> "EcosystemAPI":
"""
The current ecosystem if connected to one.
Raises:
:class:`~ape.exceptions.ProviderNotConnectedError`: When there is
no active provider at runtime.
Returns:
:class:`~ape.api.providers.ProviderAPI`
"""
return self.network.ecosystem
@cached_property
def running_nodes(self) -> NodeProcessMap:
"""
All running development nodes managed by Ape.
"""
path = self.config_manager.DATA_FOLDER / "processes" / "nodes.json"
try:
return NodeProcessMap.model_validate_file(path)
except ValidationError:
path.unlink(missing_ok=True)
return NodeProcessMap.model_validate_file(path)
def get_running_node(self, pid: int) -> "SubprocessProvider":
"""
Get a running subprocess provider for the given ``pid``.
Args:
pid (int): The process ID.
Returns:
class:`~ape.api.providers.SubprocessProvider`
"""
if not (data := self.running_nodes.get(pid)):
raise NetworkError(f"No running node for pid '{pid}'.")
uri: Optional[Union[str, Path]] = None
if ipc := data.ipc_path:
if ipc.exists():
uri = ipc
else:
uri = data.http_uri or data.ws_uri
if uri is None:
NetworkError(f"Cannot connect to node on PID '{pid}': Missing URI data.")
# In this case, we want the more connectable network choice.
network_parts = data.network_choice.split(":")
network_choice = f"{':'.join(network_parts[:2])}:{uri}"
provider_settings: dict = {
network_parts[0]: {
network_parts[1]: {
"ipc_path": data.ipc_path,
"http_uri": data.http_uri,
"ws_uri": data.ws_uri,
"uri": None,
}
}
}
provider = self.get_provider_from_choice(
network_choice=network_choice, provider_settings=provider_settings or None
)
# If this is not a subprocess provider, it may be ok to proceed.
# However, the rest of Ape will assume it is.
return provider # type: ignore[return-value]
def kill_node_process(self, *process_ids: int) -> dict[int, NodeProcessData]:
"""
Kill a node process managed by Ape.
Args:
*process_ids (int): The process ID to kill.
Returns:
dict[str, :class:`~ape.managers.networks.NodeProcessData`]: The process data
of all terminated processes.
"""
if not self.running_nodes:
return {}
pids_killed = {}
for pid in process_ids:
if not (data := self.running_nodes.nodes.get(pid)):
continue
try:
provider = self.get_running_node(pid)
except Exception:
# Still try to kill the process (below).
pass
else:
# Gracefully disconnect _before_ killing process.
provider.disconnect()
try:
os.kill(pid, signal.SIGTERM)
except Exception:
pass
else:
pids_killed[pid] = data
self.running_nodes.remove_processes(*process_ids)
return pids_killed
def get_request_headers(
self, ecosystem_name: str, network_name: str, provider_name: str
) -> "RPCHeaders":
"""
All request headers to be used when connecting to this network.
"""
ecosystem = self.get_ecosystem(ecosystem_name)
network = ecosystem.get_network(network_name)
provider = network.get_provider(provider_name)
headers = self.config_manager._get_request_headers()
for obj in (ecosystem, network, provider):
for key, value in obj._get_request_headers().items():
headers[key] = value
return headers
def fork(
self,
provider_name: Optional[str] = None,
provider_settings: Optional[dict] = None,
block_number: Optional[int] = None,
) -> ProviderContextManager:
"""
Fork the currently connected network.
Args:
provider_name (str, optional): The name of the provider to get. Defaults to ``None``.
When ``None``, returns the default provider.
provider_settings (dict, optional): Settings to apply to the provider. Defaults to
``None``.
block_number (Optional[int]): Optionally specify the block number you wish to fork.
Negative block numbers are relative to HEAD. Defaults to the configured fork
block number or HEAD.
Returns:
:class:`~ape.api.networks.ProviderContextManager`
"""
network_name = self.network.name
is_fork_already = network_name.endswith("-fork")
forked_network_name = network_name if is_fork_already else f"{network_name}-fork"
try:
forked_network = self.ecosystem.get_network(forked_network_name)
except NetworkNotFoundError as err:
raise NetworkError(f"Unable to fork network '{network_name}'.") from err
provider_settings = provider_settings or {}
if is_fork_already and "host" not in provider_settings:
# Forking a fork- to ensure is using a different Port,
# use the "auto-port" feature.
provider_settings["host"] = "auto"
fork_settings = {}
if block_number is not None:
# Negative block_number means relative to HEAD
if block_number < 0:
latest_block_number = self.provider.get_block("latest").number or 0
block_number = latest_block_number + block_number
if block_number < 0:
# If the block number is still negative, they have forked past genesis.
raise NetworkError("Unable to fork past genesis block.")
# Ensure block_number is set in config for this network
fork_settings["block_number"] = block_number
if uri := self.provider.connection_str:
fork_settings["upstream_provider"] = uri
_dict_overlay(
provider_settings,
{"fork": {self.ecosystem.name: {self.network.name: fork_settings}}},
)
shared_kwargs: dict = {
"provider_settings": provider_settings,
"disconnect_after": True,
}
return (
forked_network.use_provider(provider_name, **shared_kwargs)
if provider_name
else forked_network.use_default_provider(**shared_kwargs)
)
@property
def ecosystem_names(self) -> set[str]:
"""
The set of all ecosystem names in ``ape``.
"""
return set(self.ecosystems)
@property
def network_names(self) -> set[str]:
"""
The set of all network names in ``ape``.
"""
return {n for e in self.ecosystems.values() for n in e.networks}
@property
def provider_names(self) -> set[str]:
"""
The set of all provider names in ``ape``.
"""
return set(
provider
for ecosystem in self.ecosystems.values()
for network in ecosystem.networks.values()
for provider in network.providers
)
@property
def custom_networks(self) -> list[dict]:
"""
Custom network data defined in various ape-config files
or added adhoc to the network manager.
"""
return [*self._custom_networks_from_config, *self._custom_networks]
@cached_property
def _custom_networks_from_config(self) -> list[dict]:
return [
n.model_dump(by_alias=True)
for n in self.config_manager.get_config("networks").get("custom", [])
]
@property
def ecosystems(self) -> dict[str, "EcosystemAPI"]:
"""
All the registered ecosystems in ``ape``, such as ``ethereum``.
"""
return {
**self._evmchains_ecosystems,
**self._plugin_ecosystems,
**self._custom_ecosystems,
}
@cached_property
def _plugin_ecosystems(self) -> dict[str, "EcosystemAPI"]:
# Load plugins (possibly for first time).
plugins: list[tuple] = self.plugin_manager.ecosystems
return {n: cls(name=n) for n, cls in plugins}
@cached_property
def _custom_ecosystems(self) -> dict[str, "EcosystemAPI"]:
custom_networks: list = self.custom_networks
plugin_ecosystems = self._plugin_ecosystems
evm_chains = self._evmchains_ecosystems
custom_ecosystems: dict[str, EcosystemAPI] = {}
for custom_network in custom_networks:
ecosystem_name = custom_network["ecosystem"]
if (
ecosystem_name in plugin_ecosystems
or ecosystem_name in evm_chains
or ecosystem_name in custom_ecosystems
):
# Already included in a prior network.
continue
base_ecosystem_name = (
custom_network.get("base_ecosystem_plugin") or self.default_ecosystem_name
)
if base_ecosystem_name not in plugin_ecosystems:
name = custom_network.get("name", "?")
if eco := custom_network.get("ecosystem"):
name = f"{eco}:{name}"
msg = (
f"Custom network '{name}' specified unknown base-ecosystem class "
f"'{base_ecosystem_name}'. Are you missing plugin 'ape-{base_ecosystem_name}'?"
)
raise NetworkError(msg)
existing_cls = plugin_ecosystems[base_ecosystem_name]
ecosystem_cls = existing_cls.model_copy(
update={"name": ecosystem_name},
cache_clear=("_networks_from_plugins", "_networks_from_evmchains"),
)
custom_ecosystems[ecosystem_name] = ecosystem_cls
return custom_ecosystems
@cached_property
def _evmchains_ecosystems(self) -> dict[str, "EcosystemAPI"]:
ecosystems: dict[str, EcosystemAPI] = {}
for name in PUBLIC_CHAIN_META:
ecosystem_name = name.lower().replace(" ", "-")
symbol = None
for net in PUBLIC_CHAIN_META[ecosystem_name].values():
if not (native_currency := net.get("nativeCurrency")):
continue
if "symbol" not in native_currency:
continue
symbol = native_currency["symbol"]
break
symbol = symbol or "ETH"
# Is an EVM chain, can automatically make a class using evm-chains.
evm_class = self._plugin_ecosystems["ethereum"].__class__
ecosystems[name] = evm_class(name=ecosystem_name, fee_token_symbol=symbol)
return ecosystems
def create_custom_provider(
self,
connection_str: str,
provider_cls: type["ProviderAPI"] = EthereumNodeProvider,
provider_name: Optional[str] = None,
) -> "ProviderAPI":
"""
Create a custom connection to a URI using the EthereumNodeProvider provider.
**NOTE**: This provider will assume EVM-like behavior and this is generally not recommended.
Use plugins when possible!
Args:
connection_str (str): The connection string of the node, such as its URI
when using HTTP.
provider_cls (type[:class:`~ape.api.providers.ProviderAPI`]): Defaults to
:class:`~ape_ethereum.providers.EthereumNodeProvider`.
provider_name (Optional[str]): The name of the provider. Defaults to best guess.
Returns:
:class:`~ape.api.providers.ProviderAPI`: The Geth provider
implementation that comes with Ape.
"""
network = self.ethereum.custom_network
if provider_name is None:
if issubclass(provider_cls, EthereumNodeProvider):
name = "node"
elif cls_name := getattr(provider_cls, "name", None):
name = cls_name
elif cls_name := provider_cls.__name__:
name = cls_name.lower()
else:
# Would be unusual for this to happen though.
name = "provider"
else:
name = provider_name
provider_settings: dict = {}
if connection_str.startswith("https://") or connection_str.startswith("http://"):
provider_settings["uri"] = connection_str
elif connection_str.endswith(".ipc"):
provider_settings["ipc_path"] = connection_str
else:
raise NetworkError(f"Scheme for '{connection_str}' not yet supported.")
return (provider_cls or EthereumNodeProvider)(
name=name,
network=network,
provider_settings=provider_settings,
data_folder=self.ethereum.data_folder / name,
)
def __iter__(self) -> Iterator[str]:
"""
All the managed ecosystems in ``ape``, as an iterable.
Returns:
Iterator[:class:`~ape.api.networks.EcosystemAPI`]
"""
yield from self.ecosystems
def __ape_extra_attributes__(self) -> Iterator[ExtraModelAttributes]:
yield ExtraModelAttributes(
name="ecosystems",
attributes=lambda: self.ecosystems,
include_getitem=True,
)
@only_raise_attribute_error
def __getattr__(self, attr_name: str) -> "EcosystemAPI":
"""
Get an ecosystem via ``.`` access.
Args:
attr_name (str): The name of the ecosystem.
Returns:
:class:`~ape.api.networks.EcosystemAPI`
Usage example::
eth = networks.ethereum
"""
return get_attribute_with_extras(self, attr_name)
def get_network_choices(
self,
ecosystem_filter: Optional[Union[list[str], str]] = None,
network_filter: Optional[Union[list[str], str]] = None,
provider_filter: Optional[Union[list[str], str]] = None,
) -> Iterator[str]:
"""
The set of all possible network choices available as a "network selection"
e.g. ``--network [ECOSYSTEM:NETWORK:PROVIDER]``.
Each value is in the form ``ecosystem:network:provider`` and shortened options also
appear in the list. For example, ``::node`` would default to ``:ethereum:local:node``
and both will be in the returned list. The values come from each
:class:`~ape.api.providers.ProviderAPI` that is installed.
Use the CLI command ``ape networks list`` to list all the possible network
combinations.
Args:
ecosystem_filter (Optional[Union[list[str], str]]): Get only the specified ecosystems.
Defaults to getting all ecosystems.
network_filter (Optional[Union[list[str], str]]): Get only the specified networks.
Defaults to getting all networks in ecosystems.
provider_filter (Optional[Union[list[str], str]]): Get only the specified providers.
Defaults to getting all providers in networks.
Returns:
Iterator[str]: An iterator over all the network-choice possibilities.
"""
ecosystem_filter = _validate_filter(ecosystem_filter, self.ecosystem_names)
network_filter = _validate_filter(network_filter, self.network_names)
provider_filter = _validate_filter(provider_filter, self.provider_names)
ecosystem_items = self.ecosystems
if ecosystem_filter:
ecosystem_items = {n: e for n, e in ecosystem_items.items() if n in ecosystem_filter}
for ecosystem_name, ecosystem in ecosystem_items.items():
network_items = ecosystem.networks
if network_filter:
network_items = {n: net for n, net in network_items.items() if n in network_filter}
if not network_items:
continue
ecosystem_has_providers = False
for network_name, network in network_items.items():
providers = network.providers
if provider_filter:
providers = [n for n in providers if n in provider_filter]
network_has_providers = len(providers) > 0
if not ecosystem_has_providers:
# Only check if we still haven't found any
ecosystem_has_providers = network_has_providers
if not network_has_providers:
continue
for provider_name in providers:
if (
ecosystem_name == self.default_ecosystem.name
and network_name == ecosystem.default_network_name
):
yield f"::{provider_name}"
if ecosystem_name == self.default_ecosystem.name:
yield f":{network_name}:{provider_name}"
if network_name == ecosystem.default_network_name:
yield f"{ecosystem_name}::{provider_name}"
# Always include the full path as an option.
yield f"{ecosystem_name}:{network_name}:{provider_name}"
# Providers were yielded if we reached this point.
if ecosystem_name == self.default_ecosystem.name:
yield f":{network_name}"
yield f"{ecosystem_name}:{network_name}"
if ecosystem_has_providers:
yield ecosystem_name
def get_ecosystem(self, ecosystem_name: str) -> "EcosystemAPI":
"""
Get the ecosystem for the given name.
Args:
ecosystem_name (str): The name of the ecosystem to get.
Raises:
:class:`~ape.exceptions.NetworkError`: When the ecosystem is not found.
Returns:
:class:`~ape.api.networks.EcosystemAPI`
"""
# NOTE: This method purposely avoids "just checking self.ecosystems"
# for performance reasons and exiting the search as early as possible.
ecosystem_name = ecosystem_name.lower().replace(" ", "-")
try:
return self._plugin_ecosystems[ecosystem_name]
except KeyError:
pass
# Check if custom.
try:
return self._custom_ecosystems[ecosystem_name]
except KeyError:
pass
if ecosystem := self._get_ecosystem_from_evmchains(ecosystem_name):
return ecosystem
raise EcosystemNotFoundError(ecosystem_name, options=self.ecosystem_names)
def _get_ecosystem_from_evmchains(self, ecosystem_name: str) -> Optional["EcosystemAPI"]:
if ecosystem_name not in PUBLIC_CHAIN_META:
return None
symbol = None
for net in PUBLIC_CHAIN_META[ecosystem_name].values():
if not (native_currency := net.get("nativeCurrency")):
continue
if "symbol" not in native_currency:
continue
symbol = native_currency["symbol"]
break
symbol = symbol or "ETH"
# Is an EVM chain, can automatically make a class using evm-chains.
evm_class = self._plugin_ecosystems["ethereum"].__class__
return evm_class(name=ecosystem_name, fee_token_symbol=symbol)
def get_provider_from_choice(
self,
network_choice: Optional[str] = None,
provider_settings: Optional[dict] = None,
) -> "ProviderAPI":
"""
Get a :class:`~ape.api.providers.ProviderAPI` from a network choice.
A network choice is any value returned from
:meth:`~ape.managers.networks.NetworkManager.get_network_choices`. Use the
CLI command ``ape networks list`` to list all the possible network
combinations.
Raises:
:class:`~ape.exceptions.NetworkError`: When the given network choice does not
match any known network.
Args:
network_choice (str, optional): The network choice
(see :meth:`~ape.managers.networks.NetworkManager.get_network_choices`).
Defaults to the default ecosystem, network, and provider combination.
provider_settings (dict, optional): Settings for the provider. Defaults to None.
Returns:
:class:`~ape.api.providers.ProviderAPI`
"""
if network_choice is None:
default_network = self.default_ecosystem.default_network
return default_network.get_provider(provider_settings=provider_settings)
elif network_choice.startswith("pid://"):
# Was given a process ID (already running node on local machine).
pid_str = network_choice[len("pid://") :]
if not pid_str.isdigit():
raise ValueError(f"Invalid PID: {pid_str}")
return self.get_running_node(int(pid_str))
elif _is_adhoc_url(network_choice):
# Custom network w/o ecosystem & network spec.
return self.create_custom_provider(network_choice)
selections = network_choice.split(":")
# NOTE: Handle case when URI is passed e.g. "http://..."
if len(selections) > 3:
provider_value = ":".join(selections[2:])
selections[2] = provider_value
selections = selections[:3]
if _is_adhoc_url(provider_value):
selections[1] = selections[1] or "custom"
if selections == network_choice or len(selections) == 1:
# Either split didn't work (in which case it matches the start)
# or there was nothing after the ``:`` (e.g. "ethereum:")
ecosystem = self.get_ecosystem(selections[0] or self.default_ecosystem.name)
# By default, the "local" network should be specified for
# any ecosystem (this should not correspond to a production chain)
default_network = ecosystem.default_network
return default_network.get_provider(provider_settings=provider_settings)
elif len(selections) == 2:
# Only ecosystem and network were specified, not provider
ecosystem_name, network_name = selections
ecosystem = self.get_ecosystem(ecosystem_name or self.default_ecosystem.name)
network = ecosystem.get_network(network_name or ecosystem.default_network_name)
return network.get_provider(provider_settings=provider_settings)
elif len(selections) == 3:
# Everything is specified, use specified provider for ecosystem and network
ecosystem_name, network_name, provider_name = selections
ecosystem = (
self.get_ecosystem(ecosystem_name) if ecosystem_name else self.default_ecosystem
)
network = ecosystem.get_network(network_name or ecosystem.default_network_name)
return network.get_provider(
provider_name=provider_name, provider_settings=provider_settings
)
else:
# NOTE: Might be unreachable
raise NetworkError("Invalid network selection.")
def parse_network_choice(
self,
network_choice: Optional[str] = None,
provider_settings: Optional[dict] = None,
disconnect_after: bool = False,
disconnect_on_exit: bool = True,
) -> ProviderContextManager:
"""
Parse a network choice into a context manager for managing a temporary
connection to a provider. See
:meth:`~ape.managers.networks.NetworkManager.get_network_choices` for all
available choices (or use CLI command ``ape networks list``).
Raises:
:class:`~ape.exceptions.NetworkError`: When the given network choice does not
match any known network.
Args:
network_choice (str, optional): The network choice
(see :meth:`~ape.managers.networks.NetworkManager.get_network_choices`).
Defaults to the default ecosystem, network, and provider combination.
provider_settings (dict, optional): Settings for the provider. Defaults to None.
disconnect_after (bool): Set to True to terminate the connection completely
at the end of context. NOTE: May only work if the network was also started
from this session.
disconnect_on_exit (bool): Whether to disconnect on the exit of the python
session. Defaults to ``True``.
Returns:
:class:`~api.api.networks.ProviderContextManager`
"""
provider = self.get_provider_from_choice(
network_choice=network_choice, provider_settings=provider_settings
)
return ProviderContextManager(
provider=provider,
disconnect_after=disconnect_after,
disconnect_on_exit=disconnect_on_exit,
)
@property
def default_ecosystem_name(self) -> str:
if name := self._default_ecosystem_name:
return name
return self.local_project.config.default_ecosystem or "ethereum"
@cached_property
def default_ecosystem(self) -> "EcosystemAPI":
"""
The default ecosystem. Call
:meth:`~ape.managers.networks.NetworkManager.set_default_ecosystem` to
change the default ecosystem. If a default is not set and there is
only a single ecosystem installed, such as Ethereum, then get
that ecosystem.
"""
return self.get_ecosystem(self.default_ecosystem_name)
def set_default_ecosystem(self, ecosystem_name: str):
"""
Change the default ecosystem.
Raises:
:class:`~ape.exceptions.NetworkError`: When the given ecosystem name is unknown.
Args:
ecosystem_name (str): The name of the ecosystem to set
as the default.
"""
if ecosystem_name in self.ecosystem_names:
self._default_ecosystem_name = ecosystem_name
else:
raise EcosystemNotFoundError(ecosystem_name, options=self.ecosystem_names)
@property
def network_data(self) -> dict:
"""
Get a dictionary containing data about networks in the ecosystem.
**NOTE**: The keys are added in an opinionated order for nicely
translating into ``yaml``.
Returns:
dict
"""
return self.get_network_data()
def get_network_data(
self,
ecosystem_filter: Optional[Collection[str]] = None,
network_filter: Optional[Collection[str]] = None,
provider_filter: Optional[Collection[str]] = None,
):
data: dict = {"ecosystems": []}
for ecosystem_name in self:
if ecosystem_filter and ecosystem_name not in ecosystem_filter:
continue
ecosystem_data = self._get_ecosystem_data(
ecosystem_name,
network_filter=network_filter,
provider_filter=provider_filter,
)
data["ecosystems"].append(ecosystem_data)
return data
def _get_ecosystem_data(
self,
ecosystem_name: str,
network_filter: Optional[Collection[str]] = None,
provider_filter: Optional[Collection[str]] = None,
) -> dict:
ecosystem = self[ecosystem_name]
ecosystem_data: dict = {"name": str(ecosystem_name)}
# Only add isDefault key when True
if ecosystem_name == self.default_ecosystem.name:
ecosystem_data["isDefault"] = True
ecosystem_data["networks"] = []
networks = getattr(self, ecosystem_name).networks
for network_name in networks:
if network_filter and network_name not in network_filter:
continue
network_data = ecosystem.get_network_data(network_name, provider_filter=provider_filter)
ecosystem_data["networks"].append(network_data)
return ecosystem_data
def _invalidate_cache(self):
# NOTE: Called when changing config programmatically.
self.__dict__.pop("_custom_ecosystems", None)
self.__dict__.pop("_custom_networks_from_config", None)
self._custom_networks = []
def _validate_filter(arg: Optional[Union[list[str], str]], options: set[str]):
filters = arg or []
if isinstance(filters, str):
filters = [filters]
for _filter in filters:
if _filter not in options:
raise NetworkError(f"Unknown option '{_filter}'.")
return filters
def _is_adhoc_url(value: str) -> bool:
return (
value.startswith("http://")
or value.startswith("https://")
or value.startswith("ws://")
or value.startswith("wss://")
or (value.endswith(".ipc") and ":" not in value)
)
| NetworkManager |
python | pytorch__pytorch | torch/nn/modules/batchnorm.py | {
"start": 10139,
"end": 13971
} | class ____(_BatchNorm):
r"""Applies Batch Normalization over a 2D or 3D input.
Method described in the paper
`Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`__ .
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
The mean and standard-deviation are calculated per-dimension over
the mini-batches and :math:`\gamma` and :math:`\beta` are learnable parameter vectors
of size `C` (where `C` is the number of features or channels of the input). By default, the
elements of :math:`\gamma` are set to 1 and the elements of :math:`\beta` are set to 0.
At train time in the forward pass, the variance is calculated via the biased estimator,
equivalent to ``torch.var(input, correction=0)``. However, the value stored in the
moving average of the variance is calculated via the unbiased estimator, equivalent to
``torch.var(input, correction=1)``.
Also by default, during training this layer keeps running estimates of its
computed mean and variance, which are then used for normalization during
evaluation. The running estimates are kept with a default :attr:`momentum`
of 0.1.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
keep running estimates, and batch statistics are instead used during
evaluation time as well.
.. note::
This :attr:`momentum` argument is different from one used in optimizer
classes and the conventional notion of momentum. Mathematically, the
update rule for running statistics here is
:math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
new observed value.
Because the Batch Normalization is done over the `C` dimension, computing statistics
on `(N, L)` slices, it's common terminology to call this Temporal Batch Normalization.
Args:
num_features: number of features or channels :math:`C` of the input
eps: a value added to the denominator for numerical stability.
Default: 1e-5
momentum: the value used for the running_mean and running_var
computation. Can be set to ``None`` for cumulative moving average
(i.e. simple average). Default: 0.1
affine: a boolean value that when set to ``True``, this module has
learnable affine parameters. Default: ``True``
track_running_stats: a boolean value that when set to ``True``, this
module tracks the running mean and variance, and when set to ``False``,
this module does not track such statistics, and initializes statistics
buffers :attr:`running_mean` and :attr:`running_var` as ``None``.
When these buffers are ``None``, this module always uses batch statistics.
in both training and eval modes. Default: ``True``
Shape:
- Input: :math:`(N, C)` or :math:`(N, C, L)`, where :math:`N` is the batch size,
:math:`C` is the number of features or channels, and :math:`L` is the sequence length
- Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)
Examples::
>>> # With Learnable Parameters
>>> m = nn.BatchNorm1d(100)
>>> # Without Learnable Parameters
>>> m = nn.BatchNorm1d(100, affine=False)
>>> input = torch.randn(20, 100)
>>> output = m(input)
"""
def _check_input_dim(self, input) -> None:
if input.dim() != 2 and input.dim() != 3:
raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")
# pyrefly: ignore [inconsistent-inheritance]
| BatchNorm1d |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/completion/deduplicate.py | {
"start": 204,
"end": 1436
} | class ____(Completer):
"""
Wrapper around a completer that removes duplicates. Only the first unique
completions are kept.
Completions are considered to be a duplicate if they result in the same
document text when they would be applied.
"""
def __init__(self, completer: Completer) -> None:
self.completer = completer
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
# Keep track of the document strings we'd get after applying any completion.
found_so_far: set[str] = set()
for completion in self.completer.get_completions(document, complete_event):
text_if_applied = (
document.text[: document.cursor_position + completion.start_position]
+ completion.text
+ document.text[document.cursor_position :]
)
if text_if_applied == document.text:
# Don't include completions that don't have any effect at all.
continue
if text_if_applied in found_so_far:
continue
found_so_far.add(text_if_applied)
yield completion
| DeduplicateCompleter |
python | pytorch__pytorch | torch/testing/_internal/common_fsdp.py | {
"start": 25624,
"end": 31270
} | class ____(NestedWrappedModule):
def __init__(
self,
group: dist.ProcessGroup,
wrap_fsdp: bool,
device_init_mode: DEVICEInitMode,
delay_before_free_ms: int,
deterministic: bool,
**fsdp_kwargs,
):
super().__init__(
group=group,
wrap_fsdp=wrap_fsdp,
device_init_mode=device_init_mode,
deterministic=deterministic,
)
self.group = group
self.delay_before_free_ms = delay_before_free_ms
self.wrap_fsdp = wrap_fsdp
self.move_to_device = device_init_mode == DEVICEInitMode.DEVICE_BEFORE
if deterministic:
# Give each rank different expert parameters
torch.manual_seed(42 + self.rank)
d_expert = 23
d_shared = 12
d_input = 8
expert = _move_to_device(nn.Linear(d_expert, d_shared), self.move_to_device)
self.num_expert_params = sum(p.numel() for p in expert.parameters())
for p in expert.parameters():
p.expert = True # type: ignore[attr-defined]
if deterministic:
# Keep all other parameters the same across ranks
torch.manual_seed(0)
shared = _move_to_device(nn.Linear(d_shared, d_expert), self.move_to_device)
if wrap_fsdp:
# we create a process group of size 1 for the expert params
expert_group = torch.distributed.new_group(
[group.rank()]
) # world size 1 means no shard
expert = FSDP(expert, expert_group, **fsdp_kwargs) # type: ignore[assignment]
shared = FSDP(shared, group, **fsdp_kwargs) # type: ignore[assignment]
self.module = nn.Sequential(
_move_to_device(nn.Linear(d_input, d_shared), self.move_to_device),
shared,
expert,
_move_to_device(nn.Linear(d_shared, d_input), self.move_to_device),
)
def forward(self, x):
if self.delay_before_free_ms > 0:
expert = self.module[2]
if isinstance(expert, FSDP):
orig_reshard = torch.distributed.fsdp._runtime_utils._reshard
def _delayed_reshard(*args, **kwargs):
if TEST_CUDA:
torch.cuda._sleep(
int(self.delay_before_free_ms * get_cycles_per_ms())
)
elif TEST_HPU or TEST_XPU:
time.sleep(self.delay_before_free_ms / 1000)
return orig_reshard(*args, **kwargs)
# This patch covers any `import torch..._reshard` uses.
with mock.patch(
"torch.distributed.fsdp._runtime_utils._reshard", _delayed_reshard
):
return self.module(x)
return self.module(x)
def run_backward(self, loss):
loss.backward()
# Manually reduce gradients if not wrapped in FullyShardedDataParallel
if not self.wrap_fsdp:
with torch.no_grad():
for p in self.parameters():
if hasattr(p, "expert"):
continue # these params don't need grad reduction
if p.grad is not None:
p.grad.div_(self.world_size)
torch.distributed.all_reduce(p.grad, group=self.group)
@staticmethod
def init(
group: dist.ProcessGroup,
fsdp_init_mode: FSDPInitMode,
device_init_mode: DEVICEInitMode,
fsdp_kwargs: Optional[dict[str, Any]] = None,
deterministic: bool = False,
delay_before_free_ms: int = 0,
):
"""
Initializes a :class:`MixtureOfExperts` instance.
Args:
fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap
any modules with FSDP. If ``RECURSIVE``, then wraps some nested
modules with FSDP, including the expert and shared layers, but
not the top-level module. The model may later be wrapped with a
top-level FSDP external to this method if desired.
device_init_mode (DEVICEInitMode): Determines model movement to DEVICE.
fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments
forwarded to the FSDP constructor.
deterministic (bool): Whether to make the model deterministic
across constructions.
delay_before_free_ms (int): Delay before resharding expert
parameters in the forward pass (in ms).
"""
if fsdp_kwargs is None:
fsdp_kwargs = {}
if fsdp_init_mode == FSDPInitMode.NO_FSDP:
return MixtureOfExperts(
group,
wrap_fsdp=False,
device_init_mode=device_init_mode,
delay_before_free_ms=delay_before_free_ms,
deterministic=deterministic,
)
elif fsdp_init_mode == FSDPInitMode.RECURSIVE:
# Does not wrap with top-level FSDP
fsdp_model = MixtureOfExperts(
group,
wrap_fsdp=True,
device_init_mode=device_init_mode,
delay_before_free_ms=delay_before_free_ms,
deterministic=deterministic,
**fsdp_kwargs,
)
if device_init_mode == DEVICEInitMode.DEVICE_AFTER:
fsdp_model = fsdp_model.to(DEVICE_TYPE)
return fsdp_model
raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}")
| MixtureOfExperts |
python | huggingface__transformers | tests/models/kosmos2/test_modeling_kosmos2.py | {
"start": 20164,
"end": 35115
} | class ____(unittest.TestCase):
def run_example(self, prompt, image, model, processor):
inputs = processor(text=prompt, images=image, return_tensors="pt", padding=True).to(torch_device)
generation_outputs = model.generate(
pixel_values=inputs["pixel_values"],
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
image_embeds=None,
image_embeds_position_mask=inputs["image_embeds_position_mask"],
use_cache=True,
max_new_tokens=128,
output_scores=True,
return_dict_in_generate=True,
)
scores = generation_outputs.scores
generated_ids = generation_outputs.sequences
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)
# Specify `cleanup_and_extract=False` in order to see the raw model generation.
processed_text = [processor.post_process_generation(x, cleanup_and_extract=False) for x in generated_text]
# By default, the generated text is cleanup and the entities are extracted.
final_text_with_entities = [processor.post_process_generation(x) for x in generated_text]
return scores, generated_ids, generated_text, processed_text, final_text_with_entities
def test_snowman_image_captioning(self):
url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png"
image = Image.open(requests.get(url, stream=True).raw)
image.save("new_image.jpg")
image = Image.open("new_image.jpg")
model = AutoModelForImageTextToText.from_pretrained("microsoft/kosmos-2-patch14-224").to(torch_device)
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
prompt = "<grounding>An image of"
scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example(
prompt, image, model, processor
)
processed_text = processed_text[0]
final_text, entities = final_text_with_entities[0]
atol = 1e-4 if (IS_ROCM_SYSTEM or IS_XPU_SYSTEM) else 1e-5
np.testing.assert_allclose(
torch.concat(scores[1:4])[:3, :3].to("cpu").numpy(),
np.array(
[
[-1.5672581195831299, -5.007406711578369, 4.36448860168457],
[-2.147017002105713, -4.966302871704102, 4.592559337615967],
[-0.9352350831031799, -4.688288688659668, 6.240612983703613],
]
),
atol=atol,
)
np.testing.assert_allclose(
torch.concat(scores[-3:])[-3:, -3:].to("cpu").numpy(),
np.array(
[
[2.9916205406188965, 2.481820583343506, 4.646594524383545],
[-2.8381078243255615, -2.9687185287475586, -2.6926779747009277],
[-2.8909168243408203, -3.2228589057922363, -1.7056822776794434],
]
),
atol=1e-5,
)
# fmt: off
EXPECTED_IDS = [
[
0, 64003, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 64004, 64012, 712, 1648, 9, 64007, 10, 43867, 64008,
64009, 64057, 64876, 64010, 5950, 597, 32, 64007, 10, 646, 64008, 64009, 64018, 64924, 64010, 4, 2
]
]
# fmt: on
self.assertListEqual(generated_ids.to("cpu").numpy().tolist(), EXPECTED_IDS)
EXPECTED_PROCESSED_TEXT = (
"<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> "
"warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>."
)
self.assertEqual(processed_text, EXPECTED_PROCESSED_TEXT)
self.assertEqual(final_text, "An image of a snowman warming himself by a fire.")
EXPECTED_ENTITIES = [
("a snowman", (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]),
("a fire", (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)]),
]
self.assertListEqual(entities, EXPECTED_ENTITIES)
# test with the detail caption generation
prompt = "<grounding>Describe this image in detail:"
scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example(
prompt, image, model, processor
)
processed_text = processed_text[0]
final_text, entities = final_text_with_entities[0]
np.testing.assert_allclose(
torch.concat(scores[1:4])[:3, :3].to("cpu").numpy(),
np.array(
[
[-0.9093570113182068, -4.578373908996582, 5.96360969543457],
[2.452126979827881, -4.090598106384277, 8.738677024841309],
[-0.7624598741531372, -4.771658897399902, 6.576295852661133],
]
),
atol=atol,
)
np.testing.assert_allclose(
torch.concat(scores[-3:])[-3:, -3:].to("cpu").numpy(),
np.array(
[
[-1.673659086227417, -2.162452220916748, -1.95430588722229],
[-2.006824493408203, -2.2038745880126953, -1.24686861038208],
[-3.2783470153808594, -2.814181089401245, -1.390632152557373],
]
),
atol=1e-5,
)
# fmt: off
EXPECTED_IDS_LONG = [
[
0, 64003, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 64004, 64012, 34645, 247, 38, 1648, 12, 3391, 55,
24, 1648, 1338, 10, 43867, 1280, 32, 64007, 10, 30879, 64008, 64009, 64018, 65020, 64010, 12, 5, 1842,
4, 71, 17, 1679, 64007, 10, 3958, 64008, 64009, 64061, 64263, 64010, 6, 64007, 15719, 64008, 64009,
64253, 64617, 64010, 6, 8, 64007, 9626, 64008, 64009, 64413, 64545, 64010, 6, 23, 64007, 10, 4363,
64008, 64009, 64623, 64885, 64010, 2255, 8, 64007, 10, 3486, 64008, 64009, 64809, 65036, 64010, 1560,
2255, 4, 24, 43867, 1684, 7, 27, 3774, 5, 10356, 9, 5, 646, 6, 8, 22, 1684, 7, 30, 10, 2007, 8, 16239,
4337, 4, 2
]
]
# fmt: on
self.assertListEqual(generated_ids.to("cpu").numpy().tolist(), EXPECTED_IDS_LONG)
EXPECTED_PROCESSED_TEXT_LONG = (
"<grounding> Describe this image in detail: The image features a snowman sitting by<phrase> a campfire"
"</phrase><object><patch_index_0005><patch_index_1007></object> in the snow. He is wearing<phrase> a hat"
"</phrase><object><patch_index_0048><patch_index_0250></object>,<phrase> scarf</phrase><object>"
"<patch_index_0240><patch_index_0604></object>, and<phrase> gloves</phrase><object><patch_index_0400>"
"<patch_index_0532></object>, with<phrase> a pot</phrase><object><patch_index_0610><patch_index_0872>"
"</object> nearby and<phrase> a cup</phrase><object><patch_index_0796><patch_index_1023></object> placed "
"nearby. The snowman appears to be enjoying the warmth of the fire, and it appears to have a warm and cozy "
"atmosphere."
)
self.assertEqual(processed_text, EXPECTED_PROCESSED_TEXT_LONG)
EXPECTED_FINAL_TEXT_LONG = (
"Describe this image in detail: The image features a snowman sitting by a campfire in the snow. He is "
"wearing a hat, scarf, and gloves, with a pot nearby and a cup placed nearby. The snowman appears to be "
"enjoying the warmth of the fire, and it appears to have a warm and cozy atmosphere."
)
self.assertEqual(final_text, EXPECTED_FINAL_TEXT_LONG)
EXPECTED_ENTITIES_LONG = [
("a campfire", (71, 81), [(0.171875, 0.015625, 0.484375, 0.984375)]),
("a hat", (109, 114), [(0.515625, 0.046875, 0.828125, 0.234375)]),
("scarf", (116, 121), [(0.515625, 0.234375, 0.890625, 0.578125)]),
("gloves", (127, 133), [(0.515625, 0.390625, 0.640625, 0.515625)]),
("a pot", (140, 145), [(0.078125, 0.609375, 0.265625, 0.859375)]),
("a cup", (157, 162), [(0.890625, 0.765625, 0.984375, 0.984375)]),
]
self.assertListEqual(entities, EXPECTED_ENTITIES_LONG)
def test_snowman_image_captioning_batch(self):
url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.png"
image = Image.open(requests.get(url, stream=True).raw)
image.save("new_image.jpg")
image = Image.open("new_image.jpg")
model = AutoModelForImageTextToText.from_pretrained("microsoft/kosmos-2-patch14-224").to(torch_device)
prompt = ["<grounding>Describe this image in detail:", "<grounding>An image of"]
# left padding
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left")
scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example(
prompt, [image] * len(prompt), model, processor
)
all_final_text = [x[0] for x in final_text_with_entities]
all_entities = [x[1] for x in final_text_with_entities]
# left padding gives identical results as non-padding
EXPECTED_PROCESSED_TEXT_0 = (
"<grounding> Describe this image in detail: The image features a snowman sitting by<phrase> a campfire"
"</phrase><object><patch_index_0005><patch_index_1007></object> in the snow. He is wearing<phrase> a hat"
"</phrase><object><patch_index_0048><patch_index_0250></object>,<phrase> scarf</phrase><object>"
"<patch_index_0240><patch_index_0604></object>, and<phrase> gloves</phrase><object><patch_index_0400>"
"<patch_index_0532></object>, with<phrase> a pot</phrase><object><patch_index_0610><patch_index_0872>"
"</object> nearby and<phrase> a cup</phrase><object><patch_index_0796><patch_index_1023></object> placed "
"nearby. The snowman appears to be enjoying the warmth of the fire, and it appears to have a warm and cozy "
"atmosphere."
)
EXPECTED_PROCESSED_TEXT_1 = (
"<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> "
"warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>."
)
self.assertListEqual(processed_text, [EXPECTED_PROCESSED_TEXT_0, EXPECTED_PROCESSED_TEXT_1])
EXPECTED_FINAL_TEXT_0 = (
"Describe this image in detail: The image features a snowman sitting by a campfire in the snow. He is "
"wearing a hat, scarf, and gloves, with a pot nearby and a cup placed nearby. The snowman appears to be "
"enjoying the warmth of the fire, and it appears to have a warm and cozy atmosphere."
)
EXPECTED_FINAL_TEXT_1 = "An image of a snowman warming himself by a fire."
self.assertListEqual(all_final_text, [EXPECTED_FINAL_TEXT_0, EXPECTED_FINAL_TEXT_1])
EXPECTED_ENTITIES_0 = [
("a campfire", (71, 81), [(0.171875, 0.015625, 0.484375, 0.984375)]),
("a hat", (109, 114), [(0.515625, 0.046875, 0.828125, 0.234375)]),
("scarf", (116, 121), [(0.515625, 0.234375, 0.890625, 0.578125)]),
("gloves", (127, 133), [(0.515625, 0.390625, 0.640625, 0.515625)]),
("a pot", (140, 145), [(0.078125, 0.609375, 0.265625, 0.859375)]),
("a cup", (157, 162), [(0.890625, 0.765625, 0.984375, 0.984375)]),
]
EXPECTED_ENTITIES_1 = [
("a snowman", (12, 21), [(0.390625, 0.046875, 0.984375, 0.828125)]),
("a fire", (41, 47), [(0.171875, 0.015625, 0.484375, 0.890625)]),
]
self.assertListEqual(all_entities, [EXPECTED_ENTITIES_0, EXPECTED_ENTITIES_1])
# right padding
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
scores, generated_ids, generated_text, processed_text, final_text_with_entities = self.run_example(
prompt, [image] * len(prompt), model, processor
)
all_final_text = [x[0] for x in final_text_with_entities]
all_entities = [x[1] for x in final_text_with_entities]
# For right padding, only the non-padded sequences will give the same results as non-padding
self.assertEqual(processed_text[0], EXPECTED_PROCESSED_TEXT_0)
self.assertEqual(all_final_text[0], EXPECTED_FINAL_TEXT_0)
self.assertListEqual(all_entities[0], EXPECTED_ENTITIES_0)
@slow
def test_inference_interpolate_pos_encoding(self):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
model = Kosmos2Model.from_pretrained("microsoft/kosmos-2-patch14-224").to(torch_device)
processor = AutoProcessor.from_pretrained(
"microsoft/kosmos-2-patch14-224", size={"shortest_edge": 180}, crop_size={"height": 180, "width": 180}
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device)
# interpolate_pos_encodiung false should return value error
with self.assertRaises(ValueError, msg="doesn't match model"):
with torch.no_grad():
model(**inputs, interpolate_pos_encoding=False)
# forward pass
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
# verify the logits
expected_shape = torch.Size((1, 145, 1024))
self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[0.9148, -1.4148, 3.8040], [3.3443, 1.9478, 0.2080], [1.6604, 2.8184, -0.3618]]
).to(torch_device)
torch.testing.assert_close(
outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-2, atol=1e-2
)
| Kosmos2ModelIntegrationTest |
python | crytic__slither | slither/tools/read_storage/read_storage.py | {
"start": 1397,
"end": 1717
} | class ____:
name: str
type_string: str
slot: int
size: int
offset: int
value: Optional[Union[int, bool, str, ChecksumAddress]] = None
# For structure and array, str->SlotInfo
elems: Union[Elem, NestedElem] = dataclasses.field(default_factory=lambda: {}) # type: ignore[assignment]
| SlotInfo |
python | ray-project__ray | python/ray/train/huggingface/transformers/_transformers_utils.py | {
"start": 763,
"end": 3431
} | class ____(TrainerCallback):
"""A simple callback to report checkpoints and metrics to Ray Train.
This callback is a subclass of `transformers.TrainerCallback
<https://huggingface.co/docs/transformers/main/en/main_classes/callback#transformers.TrainerCallback>`_
and overrides the `TrainerCallback.on_save()` method. After
a new checkpoint get saved, it fetches the latest metric dictionary
from `TrainerState.log_history` and reports it with the latest checkpoint
to Ray Train.
Checkpoints will be saved in the following structure::
checkpoint_00000*/ Ray Train Checkpoint
└─ checkpoint/ Hugging Face Transformers Checkpoint
For customized reporting and checkpointing logic, implement your own
`transformers.TrainerCallback` following this user
guide: :ref:`Saving and Loading Checkpoints <train-dl-saving-checkpoints>`.
Note that users should ensure that the logging, evaluation, and saving frequencies
are properly configured so that the monitoring metric is always up-to-date
when `transformers.Trainer` saves a checkpoint.
Suppose the monitoring metric is reported from evaluation stage:
Some valid configurations:
- evaluation_strategy == save_strategy == "epoch"
- evaluation_strategy == save_strategy == "steps", save_steps % eval_steps == 0
Some invalid configurations:
- evaluation_strategy != save_strategy
- evaluation_strategy == save_strategy == "steps", save_steps % eval_steps != 0
"""
CHECKPOINT_NAME = "checkpoint"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
record_extra_usage_tag(TagKey.TRAIN_TRANSFORMERS_RAYTRAINREPORTCALLBACK, "1")
def on_save(self, args, state, control, **kwargs):
"""Event called after a checkpoint save."""
with TemporaryDirectory() as tmpdir:
# Aggregate all the logged metrics
metrics = {}
for log in state.log_history:
metrics.update(log)
# Copy ckpt files and construct a Ray Train Checkpoint
source_ckpt_path = transformers.trainer.get_last_checkpoint(args.output_dir)
if source_ckpt_path is not None:
target_ckpt_path = Path(tmpdir, self.CHECKPOINT_NAME).as_posix()
shutil.copytree(source_ckpt_path, target_ckpt_path)
checkpoint = Checkpoint.from_directory(tmpdir)
else:
checkpoint = None
# Report latest metrics and checkpoint to Ray Train
ray.train.report(metrics=metrics, checkpoint=checkpoint)
| RayTrainReportCallback |
python | dagster-io__dagster | python_modules/dagster/dagster/_serdes/config_class.py | {
"start": 957,
"end": 1407
} | class ____(NamedTupleSerializer["ConfigurableClassData"]):
def pack_items(self, *args, **kwargs):
for k, v in super().pack_items(*args, **kwargs):
if k == "module_name":
yield k, convert_dagster_submodule_name(v, "public") # pyright: ignore[reportArgumentType]
else:
yield k, v
@whitelist_for_serdes(serializer=ConfigurableClassDataSerializer)
@public
| ConfigurableClassDataSerializer |
python | pytorch__pytorch | test/distributed/tensor/test_utils.py | {
"start": 22402,
"end": 30735
} | class ____(DTensorTestBase):
@property
def world_size(self):
return 4
@with_comms
def test_1d_mesh_strided_sharding(self):
mesh_1d = init_device_mesh(self.device_type, (self.world_size,))
# Test 1: 1-d tensor over 1-d mesh
x = torch.arange(2 * self.world_size, device=self.device_type)
"""
contiguous sharding: [0, 1 | 2, 3 | 4, 5 | 6, 7]
"""
shard_placement = _StridedShard(0, split_factor=1) # same as Shard(0)
tensor_list, _ = shard_placement._split_tensor(x, self.world_size)
shard_x = tensor_list[self.rank]
self.assertEqual(shard_x, x.view(self.world_size, -1)[self.rank])
# shard_to_replicate
full_tensor = shard_placement._to_replicate_tensor(
shard_x,
mesh_1d,
mesh_dim=0,
current_logical_shape=list(x.shape),
)
self.assertEqual(full_tensor, x)
"""
strided sharding: [0, 4 | 1, 5 | 2, 6 | 3, 7]
"""
shard_placement = _StridedShard(0, split_factor=2)
tensor_list, _ = shard_placement._split_tensor(x, self.world_size)
shard_x = tensor_list[self.rank]
self.assertEqual(
shard_x, x.view(-1, self.world_size).swapdims(-1, 0)[self.rank]
)
# shard_to_replicate
full_tensor = shard_placement._to_replicate_tensor(
shard_x,
mesh_1d,
mesh_dim=0,
current_logical_shape=list(x.shape),
)
self.assertEqual(full_tensor, x)
@with_comms
def test_2d_mesh_strided_sharding(self):
# Test 2: 1-d tensor over 2-d mesh
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dim0", "dim1")
)
mesh_dim0_size = mesh_2d["dim0"].size()
mesh_dim1_size = mesh_2d["dim1"].size()
mesh_dim0_local_rank = mesh_2d["dim0"].get_local_rank(mesh_dim=0)
mesh_dim1_local_rank = mesh_2d["dim1"].get_local_rank(mesh_dim=0)
x = torch.arange(2 * self.world_size, device=self.device_type)
"""
contiguous sharding: [
[ 0, 1 | 2, 3 ],
[ 4, 5 | 6, 7 ],
]
"""
# shard on mesh dim-0
shard_placement_dim0 = _StridedShard(0, split_factor=1) # same as Shard(0)
tensor_list, _ = shard_placement_dim0._split_tensor(x, mesh_dim0_size)
expected_shard_dim0 = x.view(mesh_dim0_size, -1)[mesh_dim0_local_rank]
shard_x = tensor_list[mesh_dim0_local_rank]
self.assertEqual(shard_x, expected_shard_dim0)
# shard on mesh dim-1
shard_placement_dim1 = _StridedShard(0, split_factor=1) # same as Shard(0)
tensor_list, _ = shard_placement_dim1._split_tensor(shard_x, mesh_dim1_size)
expected_shard_dim1 = shard_x.view(mesh_dim1_size, -1)[mesh_dim1_local_rank]
shard_x = tensor_list[mesh_dim1_local_rank]
self.assertEqual(shard_x, expected_shard_dim1)
# shard_to_replicate on mesh dim-1
full_tensor = shard_placement_dim1._to_replicate_tensor(
shard_x,
mesh_2d,
mesh_dim=1,
current_logical_shape=list(expected_shard_dim0.shape),
)
self.assertEqual(full_tensor, expected_shard_dim0)
# shard_to_replicate on mesh dim-0
full_tensor = shard_placement_dim0._to_replicate_tensor(
full_tensor,
mesh_2d,
mesh_dim=0,
current_logical_shape=list(x.shape),
)
self.assertEqual(full_tensor, x)
"""
strided sharding: [
[ 0, 1 | 4, 5 ],
[ 2, 3 | 6, 7 ],
]
"""
split_factor = 2
# shard on mesh dim-0
shard_placement_dim0 = _StridedShard(0, split_factor=split_factor)
tensor_list, _ = shard_placement_dim0._split_tensor(x, mesh_dim0_size)
shard_x = tensor_list[mesh_dim0_local_rank]
expected_shard_dim0 = (
torch.tensor([0, 1, 4, 5], device=self.device_type)
if mesh_dim0_local_rank == 0
else torch.tensor([2, 3, 6, 7], device=self.device_type)
)
self.assertEqual(shard_x, expected_shard_dim0)
# shard on mesh dim-1
shard_placement_dim1 = _StridedShard(0, split_factor=1) # same as Shard(0)
tensor_list, _ = shard_placement_dim1._split_tensor(shard_x, mesh_dim1_size)
shard_x = tensor_list[mesh_dim1_local_rank]
expected_shard_dim1 = expected_shard_dim0.view(mesh_dim1_size, -1)[
mesh_dim1_local_rank
]
self.assertEqual(shard_x, expected_shard_dim1)
# shard_to_replicate on mesh dim-1
full_tensor = shard_placement_dim1._to_replicate_tensor(
shard_x,
mesh_2d,
mesh_dim=1,
current_logical_shape=list(expected_shard_dim0.shape),
)
self.assertEqual(full_tensor, expected_shard_dim0)
# shard_to_replicate on mesh dim-0
full_tensor = shard_placement_dim0._to_replicate_tensor(
full_tensor,
mesh_2d,
mesh_dim=0,
current_logical_shape=list(x.shape),
)
self.assertEqual(full_tensor, x)
@with_comms
def test_2d_mesh_2d_tensor_strided_sharding(self):
# Test 2: 1-d tensor over 2-d mesh
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dim0", "dim1")
)
mesh_dim0_size = mesh_2d["dim0"].size()
mesh_dim1_size = mesh_2d["dim1"].size()
mesh_dim0_local_rank = mesh_2d["dim0"].get_local_rank(mesh_dim=0)
mesh_dim1_local_rank = mesh_2d["dim1"].get_local_rank(mesh_dim=0)
x = torch.arange(2 * self.world_size, device=self.device_type).reshape(2, -1)
"""
strided sharding:
rank 0: [[0], [4]]
rank 1: [[2], [6]]
rank 2: [[1], [5]]
rank 3: [[3], [7]]
"""
split_factor = 2
# shard on mesh dim-0
shard_placement_dim0 = _StridedShard(1, split_factor=split_factor)
tensor_list, _ = shard_placement_dim0._split_tensor(x, mesh_dim0_size)
shard_x = tensor_list[mesh_dim0_local_rank]
expected_shard_dim0 = (
torch.tensor([[0, 2], [4, 6]], device=self.device_type)
if mesh_dim0_local_rank == 0
else torch.tensor([[1, 3], [5, 7]], device=self.device_type)
)
self.assertEqual(shard_x, expected_shard_dim0)
# shard on mesh dim-1
shard_placement_dim1 = _StridedShard(1, split_factor=1) # same as Shard(1)
tensor_list, _ = shard_placement_dim1._split_tensor(shard_x, mesh_dim1_size)
shard_x = tensor_list[mesh_dim1_local_rank]
expected_shard_dim1 = [
torch.tensor(value, device=self.device_type)
for value in [[[0], [4]], [[2], [6]], [[1], [5]], [[3], [7]]]
][self.rank]
self.assertEqual(shard_x, expected_shard_dim1)
# shard_to_replicate on mesh dim-1
full_tensor = shard_placement_dim1._to_replicate_tensor(
shard_x,
mesh_2d,
mesh_dim=1,
current_logical_shape=list(expected_shard_dim0.shape),
)
self.assertEqual(full_tensor, expected_shard_dim0)
# shard_to_replicate on mesh dim-0
full_tensor = shard_placement_dim0._to_replicate_tensor(
full_tensor,
mesh_2d,
mesh_dim=0,
current_logical_shape=list(x.shape),
)
self.assertEqual(full_tensor, x)
@with_comms
def test_2d_mesh_uneven_strided_shard(self):
mesh = init_device_mesh(
self.device_type,
(self.world_size // 2, 2),
mesh_dim_names=("fsdp", "tp"),
)
for size in (2, 3, 5, 11):
tensor = torch.arange(size, device=self.device_type).view(1, -1)
dtensor = distribute_tensor(
tensor,
device_mesh=mesh,
placements=(Replicate(), Replicate()),
).redistribute(
mesh, placements=(_StridedShard(dim=1, split_factor=2), Shard(1))
)
self.assertEqual(dtensor.full_tensor(), tensor)
| TestStridedSharding |
python | great-expectations__great_expectations | great_expectations/metrics/column/descriptive_stats.py | {
"start": 379,
"end": 570
} | class ____(ColumnMetric[ColumnDescriptiveStatsResult]):
"""Summary statistics for a column: min, mean, max, standard deviation"""
name = "column.descriptive_stats"
| ColumnDescriptiveStats |
python | huggingface__transformers | tests/models/video_llama_3/test_modeling_video_llama_3.py | {
"start": 25854,
"end": 32772
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `VideoLlama3ForConditionalGeneration`.
"""
all_model_classes = (
(
VideoLlama3Model,
VideoLlama3ForConditionalGeneration,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"image-text-to-text": VideoLlama3ForConditionalGeneration}
test_pruning = False
test_head_masking = False
_is_composite = True
def setUp(self):
self.model_tester = VideoLlama3VisionText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=VideoLlama3Config, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images don't match number of image tokens in the text.
Also we need to test multi-image cases when one prompt has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
curr_input_dict = copy.deepcopy(input_dict)
_ = model(**curr_input_dict) # successfull forward with no modifications
# remove one image but leave the image token in text
patch_size = config.vision_config.patch_size
one_img_length = (self.model_tester.image_size**2) // (patch_size**2)
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-one_img_length:, ...]
curr_input_dict["image_grid_thw"] = curr_input_dict["image_grid_thw"][-1:, ...]
curr_input_dict["image_merge_sizes"] = curr_input_dict["image_merge_sizes"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:one_img_length]
image_grid_thw = curr_input_dict["image_grid_thw"][:1]
image_merge_sizes = curr_input_dict["image_merge_sizes"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(
input_ids=input_ids,
pixel_values=pixel_values,
image_grid_thw=image_grid_thw,
image_merge_sizes=image_merge_sizes,
)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0)
image_merge_sizes = torch.cat([image_merge_sizes, image_merge_sizes], dim=0)
_ = model(
input_ids=input_ids,
pixel_values=pixel_values,
image_grid_thw=image_grid_thw,
image_merge_sizes=image_merge_sizes,
)
def attention_mask_padding_matches_padding_free_with_position_ids(
self, attn_implementation: str, fa_kwargs: bool = False
):
max_new_tokens = 30
for model_class in self.all_generative_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
dummy_input = inputs_dict[model_class.main_input_name]
if dummy_input.dtype in [torch.float32, torch.float16]:
dummy_input = dummy_input.to(torch.bfloat16)
# make sure that all models have enough positions for generation
if hasattr(config, "max_position_embeddings"):
config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
if 0 in inputs_dict["attention_mask"][:, -1]:
inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1)
dummy_attention_mask = inputs_dict["attention_mask"]
inputs_dict["input_ids"][~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id
model = (
model_class.from_pretrained(
tmpdirname,
dtype=torch.bfloat16,
attn_implementation=attn_implementation,
)
.to(torch_device)
.eval()
)
# flatten
padfree_positions = torch.cat(
[torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()]
)
padfree_positions = padfree_positions.long().unsqueeze(0).to(torch_device)
padfree_inputs_dict = {
"pixel_values": inputs_dict["pixel_values"],
"image_grid_thw": inputs_dict["image_grid_thw"],
"image_merge_sizes": inputs_dict["image_merge_sizes"],
"input_ids": inputs_dict["input_ids"][dummy_attention_mask.bool()].unsqueeze(0),
"position_ids": padfree_positions,
}
if fa_kwargs:
cu_seq_lens = [0] + dummy_attention_mask.sum(1).tolist()
cu_seq_lens = torch.tensor(cu_seq_lens, device=torch_device)
max_length = cu_seq_lens.diff().max().item()
padfree_inputs_dict.update(
{
"cu_seq_lens_q": cu_seq_lens.cumsum(-1).to(dtype=torch.int32),
"cu_seq_lens_k": cu_seq_lens.cumsum(-1).to(dtype=torch.int32),
"max_length_q": max_length,
"max_length_k": max_length,
}
)
# We need to do simple forward without cache in roder to trigger packed SDPA/FLEX/EAGER path
res_padded = model(**inputs_dict, use_cache=False)
res_padfree = model(**padfree_inputs_dict, use_cache=False)
logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()]
logits_padfree = res_padfree.logits[0]
# acceptable numerical instability
tol = torch.finfo(torch.bfloat16).eps
torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol)
@require_torch
@slow
| VideoLlama3ModelTest |
python | apache__airflow | airflow-core/tests/unit/models/test_dagrun.py | {
"start": 3411,
"end": 106387
} | class ____:
@pytest.fixture(autouse=True)
def setup_test_cases(self):
self._clean_db()
yield
self._clean_db()
@staticmethod
def _clean_db():
db.clear_db_runs()
db.clear_db_pools()
db.clear_db_dags()
db.clear_db_dag_bundles()
db.clear_db_variables()
db.clear_db_assets()
db.clear_db_xcom()
db.clear_db_dags()
@staticmethod
def create_dag_run(
dag: SerializedDAG,
*,
task_states: Mapping[str, TaskInstanceState] | None = None,
logical_date: datetime.datetime | None = None,
is_backfill: bool = False,
state: DagRunState = DagRunState.RUNNING,
session: Session,
):
now = timezone.utcnow()
logical_date = pendulum.instance(logical_date or now)
if is_backfill:
run_type = DagRunType.BACKFILL_JOB
data_interval = infer_automated_data_interval(dag.timetable, logical_date)
else:
run_type = DagRunType.MANUAL
data_interval = dag.timetable.infer_manual_data_interval(run_after=logical_date)
dag_run = dag.create_dagrun(
run_id=dag.timetable.generate_run_id(
run_type=run_type,
run_after=logical_date,
data_interval=data_interval,
),
run_type=run_type,
logical_date=logical_date,
data_interval=data_interval,
run_after=data_interval.end,
start_date=now,
state=state,
triggered_by=DagRunTriggeredByType.TEST,
session=session,
)
if task_states is not None:
for task_id, task_state in task_states.items():
ti = dag_run.get_task_instance(task_id)
if TYPE_CHECKING:
assert ti
ti.set_state(task_state, session)
session.flush()
return dag_run
def test_clear_task_instances_for_backfill_running_dagrun(self, dag_maker, session):
now = timezone.utcnow()
state = DagRunState.RUNNING
dag_id = "test_clear_task_instances_for_backfill_running_dagrun"
with dag_maker(dag_id=dag_id) as dag:
EmptyOperator(task_id="backfill_task_0")
self.create_dag_run(dag, logical_date=now, is_backfill=True, state=state, session=session)
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.flush()
dr0 = session.query(DagRun).filter(DagRun.dag_id == dag_id, DagRun.logical_date == now).first()
assert dr0.state == state
assert dr0.clear_number < 1
@pytest.mark.parametrize("state", [DagRunState.SUCCESS, DagRunState.FAILED])
def test_clear_task_instances_for_backfill_finished_dagrun(self, dag_maker, state, session):
now = timezone.utcnow()
dag_id = "test_clear_task_instances_for_backfill_finished_dagrun"
with dag_maker(dag_id=dag_id) as dag:
EmptyOperator(task_id="backfill_task_0")
self.create_dag_run(dag, logical_date=now, is_backfill=True, state=state, session=session)
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.flush()
dr0 = session.query(DagRun).filter(DagRun.dag_id == dag_id, DagRun.logical_date == now).first()
assert dr0.state == DagRunState.QUEUED
assert dr0.clear_number == 1
def test_dagrun_find(self, session):
now = timezone.utcnow()
dag_id1 = "test_dagrun_find_externally_triggered"
dag_run = DagRun(
dag_id=dag_id1,
run_id=dag_id1,
run_type=DagRunType.MANUAL,
logical_date=now,
start_date=now,
state=DagRunState.RUNNING,
)
session.add(dag_run)
dag_id2 = "test_dagrun_find_not_externally_triggered"
dag_run = DagRun(
dag_id=dag_id2,
run_id=dag_id2,
run_type=DagRunType.SCHEDULED,
logical_date=now,
start_date=now,
state=DagRunState.RUNNING,
)
session.add(dag_run)
session.commit()
assert len(DagRun.find(dag_id=dag_id1, run_type=DagRunType.MANUAL)) == 1
assert len(DagRun.find(run_id=dag_id1)) == 1
assert len(DagRun.find(run_id=[dag_id1, dag_id2])) == 2
assert len(DagRun.find(logical_date=[now, now])) == 2
assert len(DagRun.find(logical_date=now)) == 2
assert len(DagRun.find(dag_id=dag_id1, run_type=DagRunType.SCHEDULED)) == 0
assert len(DagRun.find(dag_id=dag_id2, run_type=DagRunType.MANUAL)) == 0
assert len(DagRun.find(dag_id=dag_id2)) == 1
def test_dagrun_find_duplicate(self, session):
now = timezone.utcnow()
dag_id = "test_dagrun_find_duplicate"
dag_run = DagRun(
dag_id=dag_id,
run_id=dag_id,
run_type=DagRunType.MANUAL,
logical_date=now,
start_date=now,
state=DagRunState.RUNNING,
)
session.add(dag_run)
session.commit()
assert DagRun.find_duplicate(dag_id=dag_id, run_id=dag_id) is not None
assert DagRun.find_duplicate(dag_id=dag_id, run_id=dag_id) is not None
assert DagRun.find_duplicate(dag_id=dag_id, run_id=None) is None
def test_dagrun_success_when_all_skipped(self, dag_maker, session):
"""
Tests that a DAG run succeeds when all tasks are skipped
"""
with dag_maker(
dag_id="test_dagrun_success_when_all_skipped",
schedule=datetime.timedelta(days=1),
start_date=timezone.datetime(2017, 1, 1),
) as dag:
dag_task1 = ShortCircuitOperator(task_id="test_short_circuit_false", python_callable=bool)
dag_task2 = EmptyOperator(task_id="test_state_skipped1")
dag_task3 = EmptyOperator(task_id="test_state_skipped2")
dag_task1.set_downstream(dag_task2)
dag_task2.set_downstream(dag_task3)
initial_task_states = {
"test_short_circuit_false": TaskInstanceState.SUCCESS,
"test_state_skipped1": TaskInstanceState.SKIPPED,
"test_state_skipped2": TaskInstanceState.SKIPPED,
}
dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
dag_run.update_state()
assert dag_run.state == DagRunState.SUCCESS
def test_dagrun_not_stuck_in_running_when_all_tasks_instances_are_removed(self, dag_maker, session):
"""
Tests that a DAG run succeeds when all tasks are removed
"""
with dag_maker(
dag_id="test_dagrun_success_when_all_skipped",
schedule=datetime.timedelta(days=1),
start_date=timezone.datetime(2017, 1, 1),
) as dag:
dag_task1 = ShortCircuitOperator(task_id="test_short_circuit_false", python_callable=bool)
dag_task2 = EmptyOperator(task_id="test_state_skipped1")
dag_task3 = EmptyOperator(task_id="test_state_skipped2")
dag_task1.set_downstream(dag_task2)
dag_task2.set_downstream(dag_task3)
initial_task_states = {
"test_short_circuit_false": TaskInstanceState.REMOVED,
"test_state_skipped1": TaskInstanceState.REMOVED,
"test_state_skipped2": TaskInstanceState.REMOVED,
}
dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
dag_run.update_state()
assert dag_run.state == DagRunState.SUCCESS
def test_dagrun_success_conditions(self, dag_maker, session):
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag_maker(schedule=datetime.timedelta(days=1), session=session):
op1 = EmptyOperator(task_id="A")
op2 = EmptyOperator(task_id="B")
op3 = EmptyOperator(task_id="C")
op4 = EmptyOperator(task_id="D")
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
dr = dag_maker.create_dagrun()
# op1 = root
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=TaskInstanceState.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op3 = dr.get_task_instance(task_id=op3.task_id)
ti_op4 = dr.get_task_instance(task_id=op4.task_id)
# root is successful, but unfinished tasks
dr.update_state()
assert dr.state == DagRunState.RUNNING
# one has failed, but root is successful
ti_op2.set_state(state=TaskInstanceState.FAILED, session=session)
ti_op3.set_state(state=TaskInstanceState.SUCCESS, session=session)
ti_op4.set_state(state=TaskInstanceState.SUCCESS, session=session)
dr.update_state()
assert dr.state == DagRunState.SUCCESS
def test_dagrun_deadlock(self, dag_maker, session):
with dag_maker(schedule=datetime.timedelta(days=1), session=session):
op1 = EmptyOperator(task_id="A")
op2 = EmptyOperator(task_id="B")
op2.trigger_rule = TriggerRule.ONE_FAILED
op2.set_upstream(op1)
dr = dag_maker.create_dagrun()
ti_op1: TI = dr.get_task_instance(task_id=op1.task_id, session=session)
ti_op2: TI = dr.get_task_instance(task_id=op2.task_id, session=session)
ti_op1.set_state(state=TaskInstanceState.SUCCESS, session=session)
ti_op2.set_state(state=None, session=session)
dr.update_state(session=session)
assert dr.state == DagRunState.RUNNING
ti_op2.set_state(state=None, session=session)
ti_op2.task.trigger_rule = "invalid"
dr.update_state(session=session)
assert dr.state == DagRunState.FAILED
def test_dagrun_no_deadlock_with_restarting(self, dag_maker, session):
with dag_maker(schedule=datetime.timedelta(days=1)):
op1 = EmptyOperator(task_id="upstream_task")
op2 = EmptyOperator(task_id="downstream_task")
op2.set_upstream(op1)
dr = dag_maker.create_dagrun()
upstream_ti = dr.get_task_instance(task_id="upstream_task")
upstream_ti.set_state(TaskInstanceState.RESTARTING, session=session)
dr.update_state()
assert dr.state == DagRunState.RUNNING
def test_dagrun_no_deadlock_with_depends_on_past(self, dag_maker, session):
with dag_maker(schedule=datetime.timedelta(days=1)):
EmptyOperator(task_id="dop", depends_on_past=True)
EmptyOperator(task_id="tc", max_active_tis_per_dag=1)
dr = dag_maker.create_dagrun(
run_id="test_dagrun_no_deadlock_1",
run_type=DagRunType.SCHEDULED,
start_date=DEFAULT_DATE,
)
next_date = DEFAULT_DATE + datetime.timedelta(days=1)
dr2 = dag_maker.create_dagrun(
run_id="test_dagrun_no_deadlock_2",
start_date=DEFAULT_DATE + datetime.timedelta(days=1),
logical_date=next_date,
)
ti1_op1 = dr.get_task_instance(task_id="dop")
dr2.get_task_instance(task_id="dop")
ti2_op1 = dr.get_task_instance(task_id="tc")
dr.get_task_instance(task_id="tc")
ti1_op1.set_state(state=TaskInstanceState.RUNNING, session=session)
dr.update_state()
dr2.update_state()
assert dr.state == DagRunState.RUNNING
assert dr2.state == DagRunState.RUNNING
ti2_op1.set_state(state=TaskInstanceState.RUNNING, session=session)
dr.update_state()
dr2.update_state()
assert dr.state == DagRunState.RUNNING
assert dr2.state == DagRunState.RUNNING
def test_dagrun_success_callback(self, dag_maker, session):
def on_success_callable(context):
assert context["dag_run"].dag_id == "test_dagrun_success_callback"
with dag_maker(
dag_id="test_dagrun_success_callback",
schedule=datetime.timedelta(days=1),
start_date=datetime.datetime(2017, 1, 1),
on_success_callback=on_success_callable,
) as dag:
dag_task1 = EmptyOperator(task_id="test_state_succeeded1")
dag_task2 = EmptyOperator(task_id="test_state_succeeded2")
dag_task1.set_downstream(dag_task2)
initial_task_states = {
"test_state_succeeded1": TaskInstanceState.SUCCESS,
"test_state_succeeded2": TaskInstanceState.SUCCESS,
}
dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
with mock.patch.object(dag_run, "handle_dag_callback") as handle_dag_callback:
_, callback = dag_run.update_state()
assert handle_dag_callback.mock_calls == [mock.call(dag=dag, success=True, reason="success")]
assert dag_run.state == DagRunState.SUCCESS
# Callbacks are not added until handle_callback = False is passed to dag_run.update_state()
assert callback is None
def test_dagrun_failure_callback(self, dag_maker, session):
def on_failure_callable(context):
assert context["dag_run"].dag_id == "test_dagrun_failure_callback"
with dag_maker(
dag_id="test_dagrun_failure_callback",
schedule=datetime.timedelta(days=1),
start_date=datetime.datetime(2017, 1, 1),
on_failure_callback=on_failure_callable,
) as dag:
dag_task1 = EmptyOperator(task_id="test_state_succeeded1")
dag_task2 = EmptyOperator(task_id="test_state_failed2")
initial_task_states = {
"test_state_succeeded1": TaskInstanceState.SUCCESS,
"test_state_failed2": TaskInstanceState.FAILED,
}
dag_task1.set_downstream(dag_task2)
dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
with mock.patch.object(dag_run, "handle_dag_callback") as handle_dag_callback:
_, callback = dag_run.update_state()
assert handle_dag_callback.mock_calls == [mock.call(dag=dag, success=False, reason="task_failure")]
assert dag_run.state == DagRunState.FAILED
# Callbacks are not added until handle_callback = False is passed to dag_run.update_state()
assert callback is None
def test_on_success_callback_when_task_skipped(self, session, testing_dag_bundle):
mock_on_success = mock.MagicMock()
mock_on_success.__name__ = "mock_on_success"
dag = DAG(
dag_id="test_dagrun_update_state_with_handle_callback_success",
start_date=datetime.datetime(2017, 1, 1),
on_success_callback=mock_on_success,
schedule=datetime.timedelta(days=1),
)
_ = EmptyOperator(task_id="test_state_succeeded1", dag=dag)
# Create DagModel directly with bundle_name
dag_model = DagModel(
dag_id=dag.dag_id,
bundle_name="testing",
)
session.merge(dag_model)
session.flush()
scheduler_dag = sync_dag_to_db(dag, session=session)
scheduler_dag.on_success_callback = mock_on_success
initial_task_states = {
"test_state_succeeded1": TaskInstanceState.SKIPPED,
}
dag_run = self.create_dag_run(scheduler_dag, task_states=initial_task_states, session=session)
_, _ = dag_run.update_state(execute_callbacks=True)
task = dag_run.get_task_instances()[0]
assert task.state == TaskInstanceState.SKIPPED
assert dag_run.state == DagRunState.SUCCESS
mock_on_success.assert_called_once()
def test_start_dr_spans_if_needed_new_span(self, dag_maker, session):
with dag_maker(
dag_id="test_start_dr_spans_if_needed_new_span",
schedule=datetime.timedelta(days=1),
start_date=datetime.datetime(2017, 1, 1),
) as dag:
dag_task1 = EmptyOperator(task_id="test_task1")
dag_task2 = EmptyOperator(task_id="test_task2")
dag_task1.set_downstream(dag_task2)
initial_task_states = {
"test_task1": TaskInstanceState.QUEUED,
"test_task2": TaskInstanceState.QUEUED,
}
dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
active_spans = ThreadSafeDict()
dag_run.set_active_spans(active_spans)
tis = dag_run.get_task_instances()
assert dag_run.active_spans is not None
assert dag_run.active_spans.get("dr:" + str(dag_run.id)) is None
assert dag_run.span_status == SpanStatus.NOT_STARTED
dag_run.start_dr_spans_if_needed(tis=tis)
assert dag_run.span_status == SpanStatus.ACTIVE
assert dag_run.active_spans.get("dr:" + str(dag_run.id)) is not None
def test_start_dr_spans_if_needed_span_with_continuance(self, dag_maker, session):
with dag_maker(
dag_id="test_start_dr_spans_if_needed_span_with_continuance",
schedule=datetime.timedelta(days=1),
start_date=datetime.datetime(2017, 1, 1),
) as dag:
dag_task1 = EmptyOperator(task_id="test_task1")
dag_task2 = EmptyOperator(task_id="test_task2")
dag_task1.set_downstream(dag_task2)
initial_task_states = {
"test_task1": TaskInstanceState.RUNNING,
"test_task2": TaskInstanceState.QUEUED,
}
dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
active_spans = ThreadSafeDict()
dag_run.set_active_spans(active_spans)
dag_run.span_status = SpanStatus.NEEDS_CONTINUANCE
tis = dag_run.get_task_instances()
first_ti = tis[0]
first_ti.span_status = SpanStatus.NEEDS_CONTINUANCE
assert dag_run.active_spans is not None
assert dag_run.active_spans.get("dr:" + str(dag_run.id)) is None
assert dag_run.active_spans.get("ti:" + first_ti.id) is None
assert dag_run.span_status == SpanStatus.NEEDS_CONTINUANCE
assert first_ti.span_status == SpanStatus.NEEDS_CONTINUANCE
dag_run.start_dr_spans_if_needed(tis=tis)
assert dag_run.span_status == SpanStatus.ACTIVE
assert first_ti.span_status == SpanStatus.ACTIVE
assert dag_run.active_spans.get("dr:" + str(dag_run.id)) is not None
assert dag_run.active_spans.get("ti:" + first_ti.id) is not None
def test_end_dr_span_if_needed(self, testing_dag_bundle, dag_maker, session):
with dag_maker(
dag_id="test_end_dr_span_if_needed",
schedule=datetime.timedelta(days=1),
start_date=datetime.datetime(2017, 1, 1),
) as dag:
dag_task1 = EmptyOperator(task_id="test_task1")
dag_task2 = EmptyOperator(task_id="test_task2")
dag_task1.set_downstream(dag_task2)
initial_task_states = {
"test_task1": TaskInstanceState.SUCCESS,
"test_task2": TaskInstanceState.SUCCESS,
}
dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
active_spans = ThreadSafeDict()
dag_run.set_active_spans(active_spans)
from airflow.traces.tracer import Trace
dr_span = Trace.start_root_span(span_name="test_span", start_as_current=False)
active_spans.set("dr:" + str(dag_run.id), dr_span)
assert dag_run.active_spans is not None
assert dag_run.active_spans.get("dr:" + str(dag_run.id)) is not None
dag_run.end_dr_span_if_needed()
assert dag_run.span_status == SpanStatus.ENDED
assert dag_run.active_spans.get("dr:" + str(dag_run.id)) is None
def test_end_dr_span_if_needed_with_span_from_another_scheduler(
self, testing_dag_bundle, dag_maker, session
):
with dag_maker(
dag_id="test_end_dr_span_if_needed_with_span_from_another_scheduler",
schedule=datetime.timedelta(days=1),
start_date=datetime.datetime(2017, 1, 1),
) as dag:
dag_task1 = EmptyOperator(task_id="test_task1")
dag_task2 = EmptyOperator(task_id="test_task2")
dag_task1.set_downstream(dag_task2)
initial_task_states = {
"test_task1": TaskInstanceState.SUCCESS,
"test_task2": TaskInstanceState.SUCCESS,
}
dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
active_spans = ThreadSafeDict()
dag_run.set_active_spans(active_spans)
dag_run.span_status = SpanStatus.ACTIVE
assert dag_run.active_spans is not None
assert dag_run.active_spans.get("dr:" + str(dag_run.id)) is None
dag_run.end_dr_span_if_needed()
assert dag_run.span_status == SpanStatus.SHOULD_END
def test_dagrun_update_state_with_handle_callback_success(self, testing_dag_bundle, dag_maker, session):
def on_success_callable(context):
assert context["dag_run"].dag_id == "test_dagrun_update_state_with_handle_callback_success"
relative_fileloc = "test_dagrun_update_state_with_handle_callback_success.py"
with dag_maker(
dag_id="test_dagrun_update_state_with_handle_callback_success",
schedule=datetime.timedelta(days=1),
start_date=datetime.datetime(2017, 1, 1),
on_success_callback=on_success_callable,
) as dag:
dag_task1 = EmptyOperator(task_id="test_state_succeeded1")
dag_task2 = EmptyOperator(task_id="test_state_succeeded2")
dag_task1.set_downstream(dag_task2)
dm = DagModel.get_dagmodel(dag.dag_id, session=session)
dm.relative_fileloc = relative_fileloc
session.merge(dm)
session.commit()
initial_task_states = {
"test_state_succeeded1": TaskInstanceState.SUCCESS,
"test_state_succeeded2": TaskInstanceState.SUCCESS,
}
dag.relative_fileloc = relative_fileloc
SerializedDagModel.write_dag(LazyDeserializedDAG.from_dag(dag), bundle_name="dag_maker")
session.commit()
dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
dag_run.dag_model = dm
_, callback = dag_run.update_state(execute_callbacks=False)
assert dag_run.state == DagRunState.SUCCESS
# Callbacks are not added until handle_callback = False is passed to dag_run.update_state()
assert callback == DagCallbackRequest(
filepath=dag_run.dag.relative_fileloc,
dag_id="test_dagrun_update_state_with_handle_callback_success",
run_id=dag_run.run_id,
is_failure_callback=False,
bundle_name="dag_maker",
bundle_version=None,
context_from_server=DagRunContext(
dag_run=dag_run,
last_ti=dag_run.get_last_ti(dag, session),
),
msg="success",
)
def test_dagrun_update_state_with_handle_callback_failure(self, testing_dag_bundle, dag_maker, session):
def on_failure_callable(context):
assert context["dag_run"].dag_id == "test_dagrun_update_state_with_handle_callback_failure"
relative_fileloc = "test_dagrun_update_state_with_handle_callback_failure.py"
with dag_maker(
dag_id="test_dagrun_update_state_with_handle_callback_failure",
schedule=datetime.timedelta(days=1),
start_date=datetime.datetime(2017, 1, 1),
on_failure_callback=on_failure_callable,
) as dag:
dag_task1 = EmptyOperator(task_id="test_state_succeeded1")
dag_task2 = EmptyOperator(task_id="test_state_failed2")
dag_task1.set_downstream(dag_task2)
dm = DagModel.get_dagmodel(dag.dag_id, session=session)
dm.relative_fileloc = relative_fileloc
session.merge(dm)
session.commit()
initial_task_states = {
"test_state_succeeded1": TaskInstanceState.SUCCESS,
"test_state_failed2": TaskInstanceState.FAILED,
}
dag.relative_fileloc = relative_fileloc
SerializedDagModel.write_dag(LazyDeserializedDAG.from_dag(dag), bundle_name="dag_maker")
session.commit()
dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
dag_run.dag_model = dm
_, callback = dag_run.update_state(execute_callbacks=False)
assert dag_run.state == DagRunState.FAILED
# Callbacks are not added until handle_callback = False is passed to dag_run.update_state()
assert callback == DagCallbackRequest(
filepath=dag.relative_fileloc,
dag_id="test_dagrun_update_state_with_handle_callback_failure",
run_id=dag_run.run_id,
is_failure_callback=True,
msg="task_failure",
bundle_name="dag_maker",
bundle_version=None,
context_from_server=DagRunContext(
dag_run=dag_run,
last_ti=dag_run.get_last_ti(dag, session),
),
)
def test_dagrun_set_state_end_date(self, dag_maker, session):
with dag_maker(schedule=datetime.timedelta(days=1), start_date=DEFAULT_DATE):
pass
dr = dag_maker.create_dagrun()
# Initial end_date should be NULL
# DagRunState.SUCCESS and DagRunState.FAILED are all ending state and should set end_date
# DagRunState.RUNNING set end_date back to NULL
session.add(dr)
session.commit()
assert dr.end_date is None
dr.set_state(DagRunState.SUCCESS)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(DagRun.run_id == dr.run_id).one()
assert dr_database.end_date is not None
assert dr.end_date == dr_database.end_date
dr.set_state(DagRunState.RUNNING)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(DagRun.run_id == dr.run_id).one()
assert dr_database.end_date is None
dr.set_state(DagRunState.FAILED)
session.merge(dr)
session.commit()
dr_database = session.query(DagRun).filter(DagRun.run_id == dr.run_id).one()
assert dr_database.end_date is not None
assert dr.end_date == dr_database.end_date
def test_dagrun_update_state_end_date(self, dag_maker, session):
# A -> B
with dag_maker(schedule=datetime.timedelta(days=1)):
op1 = EmptyOperator(task_id="A")
op2 = EmptyOperator(task_id="B")
op1.set_upstream(op2)
dr = dag_maker.create_dagrun()
# Initial end_date should be NULL
# DagRunState.SUCCESS and DagRunState.FAILED are all ending state and should set end_date
# DagRunState.RUNNING set end_date back to NULL
session.merge(dr)
session.commit()
assert dr.end_date is None
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=TaskInstanceState.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op2.set_state(state=TaskInstanceState.SUCCESS, session=session)
dr.update_state()
dr_database = session.query(DagRun).filter(DagRun.run_id == dr.run_id).one()
assert dr_database.end_date is not None
assert dr.end_date == dr_database.end_date
ti_op1.set_state(state=TaskInstanceState.RUNNING, session=session)
ti_op2.set_state(state=TaskInstanceState.RUNNING, session=session)
dr.update_state()
dr_database = session.query(DagRun).filter(DagRun.run_id == dr.run_id).one()
assert dr._state == DagRunState.RUNNING
assert dr.end_date is None
assert dr_database.end_date is None
ti_op1.set_state(state=TaskInstanceState.FAILED, session=session)
ti_op2.set_state(state=TaskInstanceState.FAILED, session=session)
dr.update_state()
dr_database = session.query(DagRun).filter(DagRun.run_id == dr.run_id).one()
assert dr_database.end_date is not None
assert dr.end_date == dr_database.end_date
def test_get_task_instance_on_empty_dagrun(self, dag_maker, session):
"""
Make sure that a proper value is returned when a dagrun has no task instances
"""
with dag_maker(
dag_id="test_get_task_instance_on_empty_dagrun",
schedule=datetime.timedelta(days=1),
start_date=timezone.datetime(2017, 1, 1),
) as dag:
ShortCircuitOperator(task_id="test_short_circuit_false", python_callable=lambda: False)
now = timezone.utcnow()
# Don't use create_dagrun since it will create the task instances too which we
# don't want
dag_run = DagRun(
dag_id=dag.dag_id,
run_id="test_get_task_instance_on_empty_dagrun",
run_type=DagRunType.MANUAL,
logical_date=now,
start_date=now,
state=DagRunState.RUNNING,
)
session.add(dag_run)
session.commit()
ti = dag_run.get_task_instance("test_short_circuit_false")
assert ti is None
def test_get_latest_runs(self, dag_maker, session):
    """DagRun.get_latest_runs returns the run with the newest logical date per DAG."""
    with dag_maker(
        dag_id="test_latest_runs_1", schedule=datetime.timedelta(days=1), start_date=DEFAULT_DATE
    ) as dag:
        ...
    earlier = timezone.datetime(2015, 1, 1)
    later = timezone.datetime(2015, 1, 2)
    self.create_dag_run(dag, logical_date=earlier, session=session)
    self.create_dag_run(dag, logical_date=later, session=session)
    latest_runs = DagRun.get_latest_runs(session)
    session.close()
    # Only our DAG is of interest; other DAGs may be present in the table.
    for run in latest_runs:
        if run.dag_id != "test_latest_runs_1":
            continue
        assert run.logical_date == later
def test_removed_task_instances_can_be_restored(self, dag_maker, session):
    """A TI survives its task being removed from and later re-added to the DAG."""

    def create_dag():
        # Helper so the same dag_id can be re-declared with different task sets.
        return dag_maker(
            dag_id="test_task_restoration",
            schedule=datetime.timedelta(days=1),
            start_date=DEFAULT_DATE,
        )

    with create_dag() as dag:
        EmptyOperator(task_id="flaky_task", owner="test")
    dagrun = self.create_dag_run(dag, session=session)
    flaky_ti = dagrun.get_task_instances()[0]
    assert flaky_ti.task_id == "flaky_task"
    assert flaky_ti.state is None

    # Re-declare the DAG without the task; verify_integrity must not crash and
    # the orphaned TI keeps its (None) state.
    with create_dag() as dag:
        pass
    dagrun.dag = dag
    dag_version_id = DagVersion.get_latest_version(dag.dag_id, session=session).id
    dagrun.verify_integrity(dag_version_id=dag_version_id)
    flaky_ti.refresh_from_db()
    assert flaky_ti.state is None

    # Re-add the task: the pre-existing TI is restored rather than recreated,
    # still with its state intact.
    with create_dag() as dag:
        EmptyOperator(task_id="flaky_task", owner="test")
    dagrun.verify_integrity(dag_version_id=dag_version_id)
    flaky_ti.refresh_from_db()
    assert flaky_ti.state is None
def test_already_added_task_instances_can_be_ignored(self, dag_maker, session):
    """verify_integrity tolerates TIs that already exist in the DB even when it cannot see them."""
    with dag_maker("triggered_dag", schedule=datetime.timedelta(days=1), start_date=DEFAULT_DATE) as dag:
        ...
    dag.add_task(EmptyOperator(task_id="first_task", owner="test"))
    dagrun = self.create_dag_run(dag, session=session)
    first_ti = dagrun.get_task_instances()[0]
    assert first_ti.task_id == "first_task"
    assert first_ti.state is None

    # Lets assume that the above TI was added into DB by webserver, but if scheduler
    # is running the same method at the same time it would find 0 TIs for this dag
    # and proceeds further to create TIs. Hence mocking DagRun.get_task_instances
    # method to return an empty list of TIs.
    with mock.patch.object(DagRun, "get_task_instances") as mock_gtis:
        mock_gtis.return_value = []
        dagrun.verify_integrity(
            dag_version_id=DagVersion.get_latest_version(dag.dag_id, session=session).id
        )
        # The existing TI must be left untouched (state still unset).
        first_ti.refresh_from_db()
        assert first_ti.state is None
@pytest.mark.parametrize("state", State.task_states)
@mock.patch.object(settings, "task_instance_mutation_hook", autospec=True)
def test_task_instance_mutation_hook(self, mock_hook, dag_maker, session, state):
    """settings.task_instance_mutation_hook runs on TI creation and again on verify_integrity."""

    def mutate_task_instance(task_instance):
        # Toggle the queue so each hook invocation is observable.
        if task_instance.queue == "queue1":
            task_instance.queue = "queue2"
        else:
            task_instance.queue = "queue1"

    mock_hook.side_effect = mutate_task_instance
    with dag_maker(
        "test_task_instance_mutation_hook",
        schedule=datetime.timedelta(days=1),
        start_date=DEFAULT_DATE,
    ) as dag:
        EmptyOperator(task_id="task_to_mutate", owner="test", queue="queue1")
    dagrun = self.create_dag_run(dag, session=session)
    task = dagrun.get_task_instances()[0]
    task.state = state
    session.merge(task)
    session.commit()
    # Hook already ran once during TI creation: queue1 -> queue2.
    assert task.queue == "queue2"
    dagrun.verify_integrity(dag_version_id=DagVersion.get_latest_version(dag.dag_id, session=session).id)
    task = dagrun.get_task_instances()[0]
    # verify_integrity applied the hook again: queue2 -> queue1.
    assert task.queue == "queue1"
@pytest.mark.parametrize(
    ("prev_ti_state", "is_ti_schedulable"),
    [
        (TaskInstanceState.SUCCESS, True),
        (TaskInstanceState.SKIPPED, True),
        (TaskInstanceState.RUNNING, False),
        (TaskInstanceState.FAILED, False),
        (None, False),
    ],
)
def test_depends_on_past(self, dag_maker, session, prev_ti_state, is_ti_schedulable):
    """A depends_on_past task is only schedulable if the previous run's TI succeeded or was skipped."""
    # DAG tests depends_on_past dependencies
    with dag_maker(
        dag_id="test_depends_on_past", schedule=datetime.timedelta(days=1), session=session
    ) as dag:
        BaseOperator(
            task_id="test_dop_task",
            depends_on_past=True,
        )
    task = dag.tasks[0]

    dag_run_1: DagRun = dag_maker.create_dagrun(
        logical_date=timezone.datetime(2016, 1, 1, 0, 0, 0),
        run_type=DagRunType.SCHEDULED,
    )
    dag_run_2: DagRun = dag_maker.create_dagrun(
        logical_date=timezone.datetime(2016, 1, 2, 0, 0, 0),
        run_type=DagRunType.SCHEDULED,
    )

    # Drive the parametrized state into the previous run's TI.
    prev_ti = TI(task, run_id=dag_run_1.run_id, dag_version_id=dag_run_1.created_dag_version_id)
    prev_ti.refresh_from_db(session=session)
    prev_ti.set_state(prev_ti_state, session=session)
    session.flush()

    # NOTE(review): dag_version_id is taken from dag_run_1 here, not dag_run_2 —
    # looks like a copy-paste leftover; presumably harmless since refresh_from_db
    # reloads the row. TODO confirm and use dag_run_2.created_dag_version_id.
    ti = TI(task, run_id=dag_run_2.run_id, dag_version_id=dag_run_1.created_dag_version_id)
    ti.refresh_from_db(session=session)
    decision = dag_run_2.task_instance_scheduling_decisions(session=session)
    schedulable_tis = [ti.task_id for ti in decision.schedulable_tis]
    assert ("test_dop_task" in schedulable_tis) == is_ti_schedulable
@pytest.mark.parametrize(
    ("prev_ti_state", "is_ti_schedulable"),
    [
        (TaskInstanceState.SUCCESS, True),
        (TaskInstanceState.SKIPPED, True),
        (TaskInstanceState.RUNNING, False),
        (TaskInstanceState.FAILED, False),
        (None, False),
    ],
)
def test_wait_for_downstream(self, dag_maker, session, prev_ti_state, is_ti_schedulable):
    """A wait_for_downstream upstream task is schedulable only when the previous run's downstream finished OK."""
    dag_id = "test_wait_for_downstream"
    with dag_maker(dag_id=dag_id, session=session, serialized=True) as dag:
        dag_wfd_upstream = EmptyOperator(
            task_id="upstream_task",
            wait_for_downstream=True,
        )
        dag_wfd_downstream = EmptyOperator(task_id="downstream_task")
        dag_wfd_upstream >> dag_wfd_downstream

    upstream, downstream = dag.tasks

    # For ti.set_state() to work, the DagRun has to exist,
    # Otherwise ti.previous_ti returns an unpersisted TI
    dag_run_1: DagRun = dag_maker.create_dagrun(
        logical_date=timezone.datetime(2016, 1, 1, 0, 0, 0),
        run_type=DagRunType.SCHEDULED,
    )
    dag_run_2: DagRun = dag_maker.create_dagrun(
        logical_date=timezone.datetime(2016, 1, 2, 0, 0, 0),
        run_type=DagRunType.SCHEDULED,
    )

    ti = dag_run_2.get_task_instance(task_id=upstream.task_id, session=session)
    # Operate on serialized operator since it is Scheduler code
    ti.task = dag.task_dict[ti.task_id]
    prev_ti_downstream = dag_run_1.get_task_instance(task_id=downstream.task_id, session=session)
    prev_ti_upstream = ti.get_previous_ti(session=session)
    assert ti
    assert prev_ti_upstream
    assert prev_ti_downstream
    # Previous upstream succeeded; the state of the previous *downstream* is the
    # parametrized variable that drives the wait_for_downstream decision.
    prev_ti_upstream.state = TaskInstanceState.SUCCESS
    prev_ti_downstream.state = prev_ti_state
    session.flush()

    decision = dag_run_2.task_instance_scheduling_decisions(session=session)
    schedulable_tis = [ti.task_id for ti in decision.schedulable_tis]
    assert (upstream.task_id in schedulable_tis) == is_ti_schedulable
@pytest.mark.parametrize("state", [DagRunState.QUEUED, DagRunState.RUNNING])
def test_next_dagruns_to_examine_only_unpaused(self, session, state, testing_dag_bundle):
    """
    Check that "next_dagruns_to_examine" ignores runs from paused/inactive DAGs
    and gets running/queued dagruns
    """
    dag = DAG(dag_id="test_dags", schedule=datetime.timedelta(days=1), start_date=DEFAULT_DATE)
    EmptyOperator(task_id="dummy", dag=dag, owner="airflow")

    orm_dag = DagModel(
        dag_id=dag.dag_id,
        bundle_name="testing",
        has_task_concurrency_limits=False,
        next_dagrun=DEFAULT_DATE,
        next_dagrun_create_after=DEFAULT_DATE + datetime.timedelta(days=1),
        is_stale=False,
    )
    session.add(orm_dag)
    session.flush()
    scheduler_dag = sync_dag_to_db(dag, session=session)
    dr = scheduler_dag.create_dagrun(
        run_id=scheduler_dag.timetable.generate_run_id(
            run_type=DagRunType.SCHEDULED,
            run_after=DEFAULT_DATE,
            data_interval=infer_automated_data_interval(scheduler_dag.timetable, DEFAULT_DATE),
        ),
        run_type=DagRunType.SCHEDULED,
        state=state,
        logical_date=DEFAULT_DATE,
        data_interval=infer_automated_data_interval(scheduler_dag.timetable, DEFAULT_DATE),
        run_after=DEFAULT_DATE,
        # Only RUNNING runs have started; QUEUED ones get no start_date yet.
        start_date=DEFAULT_DATE if state == DagRunState.RUNNING else None,
        session=session,
        triggered_by=DagRunTriggeredByType.TEST,
    )

    # Pick the examine-function that matches the parametrized run state.
    if state == DagRunState.RUNNING:
        func = DagRun.get_running_dag_runs_to_examine
    else:
        func = DagRun.get_queued_dag_runs_to_set_running
    runs = func(session).all()
    assert runs == [dr]

    # Pausing the DAG must exclude its runs from the result set.
    orm_dag.is_paused = True
    session.merge(orm_dag)
    session.commit()

    runs = func(session).all()
    assert runs == []
@mock.patch.object(Stats, "timing")
def test_no_scheduling_delay_for_nonscheduled_runs(self, stats_mock, session, testing_dag_bundle):
    """
    Tests that dag scheduling delay stat is not called if the dagrun is not a scheduled run.
    This case is manual run. Simple test for coherence check.
    """
    dag = DAG(dag_id="test_dagrun_stats", schedule=datetime.timedelta(days=1), start_date=DEFAULT_DATE)
    dag_task = EmptyOperator(task_id="dummy", dag=dag)

    # Create DagModel directly with bundle_name
    dag_model = DagModel(
        dag_id=dag.dag_id,
        bundle_name="testing",
    )
    session.merge(dag_model)
    session.flush()

    scheduler_dag = sync_dag_to_db(dag, session=session)
    initial_task_states = {dag_task.task_id: TaskInstanceState.SUCCESS}
    # Manual (non-scheduled) run per the docstring: the delay metric must not fire.
    dag_run = self.create_dag_run(scheduler_dag, task_states=initial_task_states, session=session)
    dag_run.update_state(session=session)
    assert call(f"dagrun.{dag.dag_id}.first_task_scheduling_delay") not in stats_mock.mock_calls
@pytest.mark.parametrize(
    ("schedule", "expected"),
    [
        ("*/5 * * * *", True),
        (None, False),
        ("@once", False),
    ],
)
def test_emit_scheduling_delay(self, session, schedule, expected, testing_dag_bundle):
    """
    Tests that dag scheduling delay stat is set properly once running scheduled dag.
    dag_run.update_state() invokes the _emit_true_scheduling_delay_stats_for_finished_state method.
    Only cron-style schedules are expected to emit the metric (see parametrize table).
    """
    dag = DAG(dag_id="test_emit_dag_stats", start_date=DEFAULT_DATE, schedule=schedule)
    dag_task = EmptyOperator(task_id="dummy", dag=dag, owner="airflow")
    expected_stat_tags = {"dag_id": f"{dag.dag_id}", "run_type": DagRunType.SCHEDULED}

    scheduler_dag = sync_dag_to_db(dag, session=session)
    try:
        info = scheduler_dag.next_dagrun_info(None)
        orm_dag_kwargs = {
            "dag_id": dag.dag_id,
            "bundle_name": "testing",
            "has_task_concurrency_limits": False,
            "is_stale": False,
        }
        # Some timetables (e.g. schedule=None) report no next run.
        if info is not None:
            orm_dag_kwargs.update(
                {
                    "next_dagrun": info.logical_date,
                    "next_dagrun_data_interval": info.data_interval,
                    "next_dagrun_create_after": info.run_after,
                },
            )
        orm_dag = DagModel(**orm_dag_kwargs)
        session.merge(orm_dag)
        session.flush()
        dag_run = scheduler_dag.create_dagrun(
            run_id=scheduler_dag.timetable.generate_run_id(
                run_type=DagRunType.SCHEDULED,
                run_after=dag.start_date,
                data_interval=infer_automated_data_interval(scheduler_dag.timetable, dag.start_date),
            ),
            run_type=DagRunType.SCHEDULED,
            state=DagRunState.SUCCESS,
            logical_date=dag.start_date,
            data_interval=infer_automated_data_interval(scheduler_dag.timetable, dag.start_date),
            run_after=dag.start_date,
            start_date=dag.start_date,
            triggered_by=DagRunTriggeredByType.TEST,
            session=session,
        )
        ti = dag_run.get_task_instance(dag_task.task_id, session)
        ti.set_state(TaskInstanceState.SUCCESS, session)
        session.flush()

        # Only capture Stats.timing calls made during update_state.
        with mock.patch.object(Stats, "timing") as stats_mock:
            dag_run.update_state(session)

        metric_name = f"dagrun.{dag.dag_id}.first_task_scheduling_delay"

        if expected:
            # Delay = first task's start minus the end of the data interval.
            true_delay = ti.start_date - dag_run.data_interval_end
            sched_delay_stat_call = call(metric_name, true_delay, tags=expected_stat_tags)
            sched_delay_stat_call_with_tags = call(
                "dagrun.first_task_scheduling_delay", true_delay, tags=expected_stat_tags
            )
            assert sched_delay_stat_call in stats_mock.mock_calls
            assert sched_delay_stat_call_with_tags in stats_mock.mock_calls
        else:
            # Assert that we never passed the metric
            sched_delay_stat_call = call(
                metric_name,
                mock.ANY,
            )
            assert sched_delay_stat_call not in stats_mock.mock_calls
    finally:
        # Don't write anything to the DB
        session.rollback()
        session.close()
def test_states_sets(self, dag_maker, session):
    """State.success_states / State.failed_states classify TI states as expected."""
    with dag_maker(
        dag_id="test_dagrun_states", schedule=datetime.timedelta(days=1), start_date=DEFAULT_DATE
    ) as dag:
        ok_task = EmptyOperator(task_id="dummy")
        bad_task = EmptyOperator(task_id="dummy2")
    seed_states = {
        ok_task.task_id: TaskInstanceState.SUCCESS,
        bad_task.task_id: TaskInstanceState.FAILED,
    }
    dag_run = self.create_dag_run(dag=dag, task_states=seed_states, session=session)
    # Each TI's state must fall into the matching State classification set.
    assert dag_run.get_task_instance(ok_task.task_id).state in State.success_states
    assert dag_run.get_task_instance(bad_task.task_id).state in State.failed_states
def test_update_state_one_unfinished(self, dag_maker, session):
    """
    Previously this lived in test_scheduler_job.py
    It only really tested the behavior of DagRun.update_state.
    As far as I can tell, it checks that if you null out the state on a TI of a finished dag,
    and then you call ``update_state``, then the DR will be set to running.
    """
    with dag_maker(session=session) as dag:
        PythonOperator(task_id="t1", python_callable=lambda: print)
        PythonOperator(task_id="t2", python_callable=lambda: print)
    dr = dag_maker.create_dagrun(state=DagRunState.FAILED)
    for ti in dr.get_task_instances(session=session):
        ti.state = TaskInstanceState.FAILED
    session.commit()
    session.expunge_all()
    # Reload fresh from the DB to avoid stale identity-map state.
    dr = session.get(DagRun, dr.id)
    assert dr.state == DagRunState.FAILED
    # Null out one TI's state on the otherwise-finished run.
    ti = dr.get_task_instance("t1", session=session)
    ti.state = State.NONE
    session.commit()
    dr = session.get(DagRun, dr.id)
    assert dr.state == DagRunState.FAILED
    dr.dag = dag
    dr.update_state(session=session)
    session.commit()
    dr = session.get(DagRun, dr.id)
    # One unfinished TI flips the run back to RUNNING.
    assert dr.state == State.RUNNING
def test_dag_run_dag_versions_method(self, dag_maker, session):
    """DagRun.dag_versions agrees with the versions recorded on the DagModel."""
    with dag_maker(
        "test_dag_run_dag_versions", schedule=datetime.timedelta(days=1), start_date=DEFAULT_DATE
    ):
        EmptyOperator(task_id="empty")
    run = dag_maker.create_dagrun()
    dag_model = session.query(DagModel).options(joinedload(DagModel.dag_versions)).one()
    first_run_version = run.dag_versions[0]
    assert first_run_version.id == dag_model.dag_versions[0].id
def test_dag_run_version_number(self, dag_maker, session):
    """dag_run.version_number follows the most recent dag_version among the run's TIs."""
    with dag_maker(
        "test_dag_run_version_number", schedule=datetime.timedelta(days=1), start_date=DEFAULT_DATE
    ):
        EmptyOperator(task_id="empty") >> EmptyOperator(task_id="empty2")
    dag_run = dag_maker.create_dagrun()
    tis = dag_run.task_instances
    tis[0].set_state(TaskInstanceState.SUCCESS)
    # Attach a newer dag version to the second TI only.
    dag_v = DagVersion.write_dag(dag_id=dag_run.dag_id, bundle_name="testing", version_number=2)
    tis[1].dag_version = dag_v
    session.merge(tis[1])
    session.flush()
    dag_run = session.query(DagRun).filter(DagRun.run_id == dag_run.run_id).one()
    # Check that dag_run.version_number returns the version number of
    # the latest task instance dag_version
    assert dag_run.version_number == dag_v.version_number
def test_dag_run_dag_versions_with_null_created_dag_version(self, dag_maker, session):
    """Test that dag_versions returns empty list when created_dag_version is None and bundle_version is populated."""
    with dag_maker(
        "test_dag_run_null_created_dag_version",
        schedule=datetime.timedelta(days=1),
        start_date=DEFAULT_DATE,
    ):
        EmptyOperator(task_id="empty")
    dag_run = dag_maker.create_dagrun()
    # Simulate a run tracked only by bundle_version, with no created dag version.
    dag_run.bundle_version = "some_bundle_version"
    dag_run.created_dag_version_id = None
    dag_run.created_dag_version = None
    session.merge(dag_run)
    session.flush()

    # This should return empty list, not [None]
    assert dag_run.dag_versions == []
    assert isinstance(dag_run.dag_versions, list)
    assert len(dag_run.dag_versions) == 0
def test_dagrun_success_deadline(self, dag_maker, session):
    """A DAG carrying a far-future DeadlineAlert still reaches SUCCESS and triggers the success callback path."""

    def on_success_callable(context):
        assert context["dag_run"].dag_id == "test_dagrun_success_callback"

    # Deadline a year out so it can never fire during the test.
    future_date = datetime.datetime.now() + datetime.timedelta(days=365)
    with dag_maker(
        dag_id="test_dagrun_success_callback",
        schedule=datetime.timedelta(days=1),
        on_success_callback=on_success_callable,
        deadline=DeadlineAlert(
            reference=DeadlineReference.FIXED_DATETIME(future_date),
            interval=datetime.timedelta(hours=1),
            callback=AsyncCallback(empty_callback_for_deadline),
        ),
    ) as dag:
        dag_task1 = EmptyOperator(task_id="test_state_succeeded1")
        dag_task2 = EmptyOperator(task_id="test_state_succeeded2")
        dag_task1.set_downstream(dag_task2)

    initial_task_states = {
        "test_state_succeeded1": TaskInstanceState.SUCCESS,
        "test_state_succeeded2": TaskInstanceState.SUCCESS,
    }

    # Scheduler uses Serialized DAG -- so use that instead of the Actual DAG.
    dag_run = self.create_dag_run(dag=dag, task_states=initial_task_states, session=session)
    dag_run = session.merge(dag_run)
    dag_run.dag = dag
    with mock.patch.object(dag_run, "handle_dag_callback") as handle_dag_callback:
        _, callback = dag_run.update_state()
    assert handle_dag_callback.mock_calls == [mock.call(dag=dag, success=True, reason="success")]
    assert dag_run.state == DagRunState.SUCCESS
    # Callbacks are not added until handle_callback = False is passed to dag_run.update_state()
    assert callback is None
@pytest.mark.parametrize(
    ("run_type", "expected_tis"),
    [
        pytest.param(DagRunType.MANUAL, 1, id="manual"),
        pytest.param(DagRunType.BACKFILL_JOB, 3, id="backfill"),
    ],
)
@mock.patch.object(Stats, "incr")
def test_verify_integrity_task_start_and_end_date(Stats_incr, dag_maker, session, run_type, expected_tis):
    """Test that tasks with specific dates are only created for backfill runs"""
    # Module-level test (no ``self``): first argument is the Stats.incr mock
    # injected by the @mock.patch.object decorator.
    with dag_maker("test", schedule=datetime.timedelta(days=1), start_date=DEFAULT_DATE) as dag:
        EmptyOperator(task_id="without")
        # start_date after / end_date before the run's logical date: out of window.
        EmptyOperator(task_id="with_start_date", start_date=DEFAULT_DATE + datetime.timedelta(1))
        EmptyOperator(task_id="with_end_date", end_date=DEFAULT_DATE - datetime.timedelta(1))
    dag_run = DagRun(
        dag_id=dag.dag_id,
        run_type=run_type,
        logical_date=DEFAULT_DATE,
        run_id=DagRun.generate_run_id(run_type=run_type, logical_date=DEFAULT_DATE, run_after=DEFAULT_DATE),
    )
    dag_run.dag = dag
    session.add(dag_run)
    session.flush()
    dag_version_id = DagVersion.get_latest_version(dag.dag_id, session=session).id
    dag_run.verify_integrity(dag_version_id=dag_version_id, session=session)

    tis = dag_run.task_instances
    assert len(tis) == expected_tis

    # TI creation must also be counted in both metric flavors.
    Stats_incr.assert_any_call(
        "task_instance_created_EmptyOperator", expected_tis, tags={"dag_id": "test", "run_type": run_type}
    )
    Stats_incr.assert_any_call(
        "task_instance_created",
        expected_tis,
        tags={"dag_id": "test", "run_type": run_type, "task_type": "EmptyOperator"},
    )
@pytest.mark.parametrize("is_noop", [True, False])
def test_expand_mapped_task_instance_at_create(is_noop, dag_maker, session):
    """Literal-mapped task instances are expanded eagerly when the dagrun is created."""
    with mock.patch("airflow.settings.task_instance_mutation_hook") as hook_mock:
        hook_mock.is_noop = is_noop
        expansion_values = [1, 2, 3, 4]
        with dag_maker(session=session, dag_id="test_dag"):
            mapped = MockOperator.partial(task_id="task_2").expand(arg2=expansion_values)
        dagrun = dag_maker.create_dagrun()
        ti_query = session.query(TI.map_index).filter_by(
            task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dagrun.run_id
        )
        found = ti_query.order_by(TI.map_index).all()
        # One TI per literal element, indexed 0..len-1.
        assert found == [(i,) for i in range(len(expansion_values))]
@pytest.mark.parametrize("is_noop", [True, False])
def test_expand_mapped_task_instance_task_decorator(is_noop, dag_maker, session):
    """Eager expansion at dagrun creation also applies to @task-decorated functions."""
    with mock.patch("airflow.settings.task_instance_mutation_hook") as hook_mock:
        hook_mock.is_noop = is_noop

        @task
        def mynameis(arg):
            print(arg)

        values = [1, 2, 3, 4]
        with dag_maker(session=session, dag_id="test_dag"):
            mynameis.expand(arg=values)

        dagrun = dag_maker.create_dagrun()
        rows = (
            session.query(TI.map_index)
            .filter_by(task_id="mynameis", dag_id=dagrun.dag_id, run_id=dagrun.run_id)
            .order_by(TI.map_index)
            .all()
        )
        # One TI per literal element, indexed 0..3.
        assert rows == [(n,) for n in range(4)]
def test_mapped_literal_verify_integrity(dag_maker, session):
    """Test that when the length of a mapped literal changes we remove extra TIs"""

    @task
    def task_2(arg2): ...

    with dag_maker(session=session):
        task_2.expand(arg2=[1, 2, 3, 4])

    dr = dag_maker.create_dagrun()
    query = (
        select(TI.map_index, TI.state)
        .filter_by(task_id="task_2", dag_id=dr.dag_id, run_id=dr.run_id)
        .order_by(TI.map_index)
    )
    indices = session.execute(query).all()
    assert indices == [(0, None), (1, None), (2, None), (3, None)]

    # Now "change" the DAG and we should see verify_integrity REMOVE some TIs
    with dag_maker(session=session):
        task_2.expand(arg2=[1, 2])

    # Update it to use the new serialized DAG
    dr.dag = dag_maker.dag
    dag_version_id = DagVersion.get_latest_version(dag_id=dr.dag_id, session=session).id
    dr.verify_integrity(dag_version_id=dag_version_id, session=session)

    indices = session.execute(query).all()
    # Surplus map indices 2 and 3 are flagged REMOVED rather than deleted.
    assert indices == [(0, None), (1, None), (2, TaskInstanceState.REMOVED), (3, TaskInstanceState.REMOVED)]
def test_mapped_literal_to_xcom_arg_verify_integrity(dag_maker, session):
    """Test that when we change from literal to a XComArg the TIs are removed"""

    @task
    def task_2(arg2): ...

    with dag_maker(session=session):
        task_2.expand(arg2=[1, 2, 3, 4])

    dr = dag_maker.create_dagrun()

    # Redefine the DAG: task_2 now expands over an upstream XCom, not a literal.
    with dag_maker(session=session):
        t1 = BaseOperator(task_id="task_1")
        task_2.expand(arg2=t1.output)

    dr.dag = dag_maker.dag
    dag_version_id = DagVersion.get_latest_version(dag_id=dr.dag_id, session=session).id
    dr.verify_integrity(dag_version_id=dag_version_id, session=session)

    indices = (
        session.query(TI.map_index, TI.state)
        .filter_by(task_id="task_2", dag_id=dr.dag_id, run_id=dr.run_id)
        .order_by(TI.map_index)
        .all()
    )
    # The literal-expanded TIs can no longer be resolved, so all become REMOVED.
    assert indices == [
        (0, TaskInstanceState.REMOVED),
        (1, TaskInstanceState.REMOVED),
        (2, TaskInstanceState.REMOVED),
        (3, TaskInstanceState.REMOVED),
    ]
def test_mapped_literal_length_increase_adds_additional_ti(dag_maker, session):
    """Test that when the length of mapped literal increases, additional ti is added"""

    @task
    def task_2(arg2): ...

    with dag_maker(session=session, serialized=True):
        task_2.expand(arg2=[1, 2, 3, 4])

    dr = dag_maker.create_dagrun()
    query = (
        select(TI.map_index, TI.state)
        .filter_by(task_id="task_2", dag_id=dr.dag_id, run_id=dr.run_id)
        .order_by(TI.map_index)
    )
    indices = session.execute(query).all()
    assert sorted(indices) == [
        (0, State.NONE),
        (1, State.NONE),
        (2, State.NONE),
        (3, State.NONE),
    ]

    # Now "increase" the length of literal
    with dag_maker(session=session, serialized=True) as dag:
        task_2.expand(arg2=[1, 2, 3, 4, 5])

    dr.dag = dag

    # Every mapped task is revised at task_instance_scheduling_decision
    dr.task_instance_scheduling_decisions()

    # A fifth TI (map_index 4) has been created for the new element.
    indices = session.execute(query).all()
    assert sorted(indices) == [
        (0, State.NONE),
        (1, State.NONE),
        (2, State.NONE),
        (3, State.NONE),
        (4, State.NONE),
    ]
def test_mapped_literal_length_reduction_adds_removed_state(dag_maker, session):
    """Test that when the length of mapped literal reduces, removed state is added"""

    @task
    def task_2(arg2): ...

    with dag_maker(session=session):
        task_2.expand(arg2=[1, 2, 3, 4])

    dr = dag_maker.create_dagrun()
    query = (
        select(TI.map_index, TI.state)
        .filter_by(task_id="task_2", dag_id=dr.dag_id, run_id=dr.run_id)
        .order_by(TI.map_index)
    )
    indices = session.execute(query).all()
    assert sorted(indices) == [
        (0, State.NONE),
        (1, State.NONE),
        (2, State.NONE),
        (3, State.NONE),
    ]

    # Shrink the literal from four elements to two.
    with dag_maker(session=session):
        task_2.expand(arg2=[1, 2])

    dr.dag = dag_maker.dag
    # Since we change the literal on the dag file itself, the dag_hash will
    # change which will have the scheduler verify the dr integrity
    dag_version_id = DagVersion.get_latest_version(dag_id=dr.dag_id, session=session).id
    dr.verify_integrity(dag_version_id=dag_version_id, session=session)

    # Surplus map indices 2 and 3 are marked REMOVED, not deleted.
    indices = session.execute(query).all()
    assert sorted(indices) == [
        (0, State.NONE),
        (1, State.NONE),
        (2, State.REMOVED),
        (3, State.REMOVED),
    ]
def test_mapped_length_increase_at_runtime_adds_additional_tis(dag_maker, session):
    """Test that when the length of mapped literal increases at runtime, additional ti is added"""
    # Variable.set(key="arg1", value=[1, 2, 3])

    @task
    def task_1():
        # Behave as if we did this
        # return Variable.get("arg1", deserialize_json=True)
        ...

    with dag_maker(session=session) as dag:

        @task
        def task_2(arg2): ...

        task_2.expand(arg2=task_1())

    dr: DagRun = dag_maker.create_dagrun()
    ti = dr.get_task_instance(task_id="task_1", session=session)
    assert ti
    ti.state = TaskInstanceState.SUCCESS
    # Behave as if TI ran after: Variable.set(key="arg1", value=[1, 2, 3])
    session.add(TaskMap.from_task_instance_xcom(ti, [1, 2, 3]))
    session.flush()
    decision = dr.task_instance_scheduling_decisions(session=session)
    indices = [(ti.task_id, ti.map_index) for ti in decision.schedulable_tis]
    assert indices == [("task_2", 0), ("task_2", 1), ("task_2", 2)]

    # Now "clear" and "increase" the length of literal
    dag.clear()

    # "Run" the first task again to get the new lengths
    ti = dr.get_task_instance(task_id="task_1", session=session)
    assert ti
    # Behave as if we did and re-ran the task: Variable.set(key="arg1", value=[1, 2, 3, 4])
    session.merge(TaskMap.from_task_instance_xcom(ti, [1, 2, 3, 4]))
    ti.state = TaskInstanceState.SUCCESS
    session.flush()

    # this would be called by the localtask job
    decision = dr.task_instance_scheduling_decisions(session=session)
    indices = [(ti.task_id, ti.state, ti.map_index) for ti in decision.schedulable_tis]
    # A fourth mapped TI (map_index 3) is now schedulable.
    assert sorted(indices) == [
        ("task_2", None, 0),
        ("task_2", None, 1),
        ("task_2", None, 2),
        ("task_2", None, 3),
    ]
def test_mapped_literal_length_reduction_at_runtime_adds_removed_state(dag_maker, session):
    """
    Test that when the length of mapped literal reduces at runtime, the missing task instances
    are marked as removed
    """

    @task
    def task_1():
        # return Variable.get("arg1", deserialize_json=True)
        ...

    with dag_maker(session=session) as dag:

        @task
        def task_2(arg2): ...

        task_2.expand(arg2=task_1())

    dr: DagRun = dag_maker.create_dagrun()
    ti = dr.get_task_instance(task_id="task_1", session=session)
    assert ti
    ti.state = TaskInstanceState.SUCCESS
    # Behave as if TI ran after: Variable.set(key="arg1", value=[1, 2, 3])
    session.add(TaskMap.from_task_instance_xcom(ti, [1, 2, 3]))
    session.flush()
    dr.task_instance_scheduling_decisions(session=session)
    query = (
        select(TI.map_index, TI.state)
        .filter_by(task_id="task_2", dag_id=dr.dag_id, run_id=dr.run_id)
        .order_by(TI.map_index)
    )
    indices = session.execute(query).all()
    assert indices == [(0, None), (1, None), (2, None)]

    # Now "clear" and "reduce" the length of literal
    dag.clear()

    # "Run" the first task again to get the new lengths
    ti = dr.get_task_instance(task_id="task_1", session=session)
    assert ti
    # Behave as if we did and re-ran the task: Variable.set(key="arg1", value=[1, 2])
    session.merge(TaskMap.from_task_instance_xcom(ti, [1, 2]))
    ti.state = TaskInstanceState.SUCCESS
    session.flush()

    dag_version_id = DagVersion.get_latest_version(dag.dag_id, session=session).id
    dr.verify_integrity(dag_version_id=dag_version_id, session=session)
    indices = session.execute(query).all()
    # The surplus map index 2 is flagged REMOVED rather than deleted.
    assert sorted(indices) == [
        (0, State.NONE),
        (1, State.NONE),
        (2, TaskInstanceState.REMOVED),
    ]
def test_mapped_literal_faulty_state_in_db(dag_maker, session):
    """
    This test tries to recreate a faulty state in the database and checks if we can recover from it.
    The state that happens is that there exists mapped task instances and the unmapped task instance.
    So we have instances with map_index [-1, 0, 1]. The -1 task instances should be removed in this case.
    """
    with dag_maker(session=session) as dag:

        @task
        def task_1():
            return [1, 2]

        @task
        def task_2(arg2): ...

        task_2.expand(arg2=task_1())

    dr = dag_maker.create_dagrun()
    ti = dr.get_task_instance(task_id="task_1")
    ti.run()
    decision = dr.task_instance_scheduling_decisions()
    assert len(decision.schedulable_tis) == 2

    # We insert a faulty record
    session.add(TaskInstance(task=dag.get_task("task_2"), run_id=dr.run_id, dag_version_id=ti.dag_version_id))
    session.flush()

    # Still only the two mapped TIs are schedulable; the stray unmapped row is not.
    decision = dr.task_instance_scheduling_decisions()
    assert len(decision.schedulable_tis) == 2
def test_calls_to_verify_integrity_with_mapped_task_zero_length_at_runtime(dag_maker, session, caplog):
    """
    Test zero length reduction in mapped task at runtime with calls to dagrun.verify_integrity
    """
    import logging

    with dag_maker(session=session) as dag:

        @task
        def task_1():
            # return Variable.get("arg1", deserialize_json=True)
            ...

        @task
        def task_2(arg2): ...

        task_2.expand(arg2=task_1())

    dr: DagRun = dag_maker.create_dagrun()
    ti = dr.get_task_instance(task_id="task_1", session=session)
    assert ti
    # "Run" task_1
    ti.state = TaskInstanceState.SUCCESS
    # Behave as if TI ran after: Variable.set(key="arg1", value=[1, 2, 3])
    session.add(TaskMap.from_task_instance_xcom(ti, [1, 2, 3]))
    session.flush()
    decision = dr.task_instance_scheduling_decisions(session=session)
    ti_2 = decision.schedulable_tis[0]
    assert ti_2
    query = (
        select(TI.map_index, TI.state)
        .filter_by(task_id="task_2", dag_id=dr.dag_id, run_id=dr.run_id)
        .order_by(TI.map_index)
    )
    indices = session.execute(query).all()
    assert sorted(indices) == [(0, State.NONE), (1, State.NONE), (2, State.NONE)]

    # Now "clear" and "reduce" the length to empty list
    dag.clear()

    # We don't execute task anymore, but this is what we are
    # simulating happened:
    # Variable.set(key="arg1", value=[])
    session.merge(TaskMap.from_task_instance_xcom(ti, []))
    session.flush()

    # Run the first task again to get the new lengths
    with caplog.at_level(logging.DEBUG):
        # Run verify_integrity as a whole and assert the tasks were removed
        dag_version = DagVersion.get_latest_version(dag.dag_id)
        dr.verify_integrity(dag_version_id=dag_version.id, session=session)
    # Zero-length expansion marks every previously-expanded TI as REMOVED.
    indices = session.execute(query).all()
    assert indices == [
        (0, TaskInstanceState.REMOVED),
        (1, TaskInstanceState.REMOVED),
        (2, TaskInstanceState.REMOVED),
    ]
def test_mapped_mixed_literal_not_expanded_at_create(dag_maker, session):
    """A task mapped over both a literal and an XComArg stays unexpanded (map_index -1) at creation."""
    values = [1, 2, 3, 4]
    with dag_maker(session=session):
        # Renamed from ``task`` to avoid shadowing the @task decorator imported above.
        upstream = BaseOperator(task_id="task_1")
        mapped = MockOperator.partial(task_id="task_2").expand(arg1=values, arg2=upstream.output)
    dagrun = dag_maker.create_dagrun()
    ti_rows = (
        session.query(TI.map_index, TI.state)
        .filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dagrun.run_id)
        .order_by(TI.map_index)
    )
    assert ti_rows.all() == [(-1, None)]
    # Verify_integrity shouldn't change the result now that the TIs exist
    latest_version = DagVersion.get_latest_version(dag_id=dagrun.dag_id, session=session)
    dagrun.verify_integrity(dag_version_id=latest_version.id, session=session)
    assert ti_rows.all() == [(-1, None)]
def test_mapped_task_group_expands_at_create(dag_maker, session):
    """Operators inside a literal-mapped task group expand once per group instance at dagrun creation."""
    literal = [[1, 2], [3, 4]]

    with dag_maker(session=session):

        @task_group
        def tg(x):
            # Normal operator in mapped task group, expands to 2 tis.
            MockOperator(task_id="t1")
            # Mapped operator expands *again* against mapped task group arguments to 4 tis.
            with pytest.raises(NotImplementedError) as ctx:
                MockOperator.partial(task_id="t2").expand(arg1=literal)
            assert str(ctx.value) == "operator expansion in an expanded task group is not yet supported"
            # Normal operator referencing mapped task group arguments does not further expand, only 2 tis.
            MockOperator(task_id="t3", arg1=x)
            # It can expand *again* (since each item in x is a list) but this is not done at parse time.
            with pytest.raises(NotImplementedError) as ctx:
                MockOperator.partial(task_id="t4").expand(arg1=x)
            assert str(ctx.value) == "operator expansion in an expanded task group is not yet supported"

        tg.expand(x=literal)

    dr = dag_maker.create_dagrun()
    query = (
        session.query(TI.task_id, TI.map_index, TI.state)
        .filter_by(dag_id=dr.dag_id, run_id=dr.run_id)
        .order_by(TI.task_id, TI.map_index)
    )
    # Commented-out rows are the t2/t4 expansions that would exist once nested
    # expansion is supported.
    assert query.all() == [
        ("tg.t1", 0, None),
        ("tg.t1", 1, None),
        # ("tg.t2", 0, None),
        # ("tg.t2", 1, None),
        # ("tg.t2", 2, None),
        # ("tg.t2", 3, None),
        ("tg.t3", 0, None),
        ("tg.t3", 1, None),
        # ("tg.t4", -1, None),
    ]
def test_mapped_task_group_empty_operator(dag_maker, session):
    """
    Test that dynamic task inside a dynamic task group only marks
    the corresponding downstream EmptyOperator as success.
    """
    literal = [1, 2, 3]

    with dag_maker(session=session) as dag:

        @task_group
        def tg(x):
            @task
            def t1(x):
                return x

            t2 = EmptyOperator(task_id="t2")

            @task
            def t3(x):
                return x

            t1(x) >> t2 >> t3(x)

        tg.expand(x=literal)

    dr = dag_maker.create_dagrun()

    t2_task = dag.get_task("tg.t2")
    t2_0 = dr.get_task_instance(task_id="tg.t2", map_index=0)
    t2_0.refresh_from_task(t2_task)
    assert t2_0.state is None

    t2_1 = dr.get_task_instance(task_id="tg.t2", map_index=1)
    t2_1.refresh_from_task(t2_task)
    assert t2_1.state is None

    # Scheduling only the map_index-0 EmptyOperator must not touch its siblings.
    dr.schedule_tis([t2_0])
    t2_0 = dr.get_task_instance(task_id="tg.t2", map_index=0)
    assert t2_0.state == TaskInstanceState.SUCCESS
    t2_1 = dr.get_task_instance(task_id="tg.t2", map_index=1)
    assert t2_1.state is None
def test_ti_scheduling_mapped_zero_length(dag_maker, session):
    """
    A task mapped against a zero-length XCom should be skipped immediately.

    The unexpanded (map_index=-1) placeholder TI goes straight to SKIPPED and
    is reported as finished by the scheduling decision.
    """
    with dag_maker(session=session):
        task = BaseOperator(task_id="task_1")
        mapped = MockOperator.partial(task_id="task_2").expand(arg2=task.output)
    dr: DagRun = dag_maker.create_dagrun()
    ti1, ti2 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
    ti1.state = TaskInstanceState.SUCCESS
    # Simulate task_1 having produced an empty (length=0) mapping result.
    session.add(
        TaskMap(dag_id=dr.dag_id, task_id=ti1.task_id, run_id=dr.run_id, map_index=-1, length=0, keys=None)
    )
    session.flush()
    decision = dr.task_instance_scheduling_decisions(session=session)
    # ti1 finished execution. ti2 goes directly to finished state because it's
    # expanded against a zero-length XCom.
    assert decision.finished_tis == [ti1, ti2]
    indices = (
        session.query(TI.map_index, TI.state)
        .filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id)
        .order_by(TI.map_index)
        .all()
    )
    assert indices == [(-1, TaskInstanceState.SKIPPED)]
@pytest.mark.parametrize("trigger_rule", [TriggerRule.ALL_DONE, TriggerRule.ALL_SUCCESS])
def test_mapped_task_upstream_failed(dag_maker, session, trigger_rule):
    """
    When the upstream producing the expansion input fails, the unexpanded
    mapped task is marked UPSTREAM_FAILED and the dag run ends up FAILED,
    regardless of the consumer's trigger rule.
    """
    from airflow.providers.standard.operators.python import PythonOperator
    with dag_maker(session=session) as dag:
        @dag.task
        def make_list():
            return [f'echo "{a!r}"' for a in [1, 2, {"a": "b"}]]
        def consumer(*args):
            print(repr(args))
        PythonOperator.partial(
            task_id="consumer",
            trigger_rule=trigger_rule,
            python_callable=consumer,
        ).expand(op_args=make_list())
    dr = dag_maker.create_dagrun()
    _, make_list_ti = sorted(dr.task_instances, key=lambda ti: ti.task_id)
    make_list_ti.state = TaskInstanceState.FAILED
    session.flush()
    tis, _ = dr.update_state(execute_callbacks=False, session=session)
    assert tis == []
    tis = sorted(dr.task_instances, key=lambda ti: ti.task_id)
    # The consumer never expanded (map_index stays -1) since its input failed.
    assert sorted((ti.task_id, ti.map_index, ti.state) for ti in tis) == [
        ("consumer", -1, TaskInstanceState.UPSTREAM_FAILED),
        ("make_list", -1, TaskInstanceState.FAILED),
    ]
    # Bug/possible source of optimization: The DR isn't marked as failed until
    # in the loop that marks the last task as UPSTREAM_FAILED
    tis, _ = dr.update_state(execute_callbacks=False, session=session)
    assert tis == []
    assert dr.state == DagRunState.FAILED
def test_mapped_task_all_finish_before_downstream(dag_maker, session):
    """
    A downstream task only becomes schedulable once *all* of its upstream
    mapped task instances have finished.
    """
    with dag_maker(session=session) as dag:
        @dag.task
        def make_list():
            return [1, 2]
        @dag.task
        def double(value):
            return value * 2
        @dag.task
        def consumer(value):
            ...
            # result = list(value)
        consumer(value=double.expand(value=make_list()))
    dr: DagRun = dag_maker.create_dagrun()
    def _task_ids(tis):
        # Convenience: project a list of TIs to their task_ids for assertions.
        return [ti.task_id for ti in tis]
    # The first task is always make_list.
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert _task_ids(decision.schedulable_tis) == ["make_list"]
    # After make_list is run, double is expanded.
    ti = decision.schedulable_tis[0]
    ti.state = TaskInstanceState.SUCCESS
    session.add(TaskMap.from_task_instance_xcom(ti, [1, 2]))
    session.flush()
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert _task_ids(decision.schedulable_tis) == ["double", "double"]
    # Running just one of the mapped tis does not make downstream schedulable.
    ti = decision.schedulable_tis[0]
    ti.state = TaskInstanceState.SUCCESS
    session.flush()
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert _task_ids(decision.schedulable_tis) == ["double"]
    # Downstream is schedulable after all mapped tis are run.
    ti = decision.schedulable_tis[0]
    ti.state = TaskInstanceState.SUCCESS
    session.flush()
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert _task_ids(decision.schedulable_tis) == ["consumer"]
def test_schedule_tis_map_index(dag_maker, session):
    """
    ``DagRun.schedule_tis`` must schedule only the TIs it is given, keyed by
    map_index, without disturbing sibling map indices of the same task.
    """
    with dag_maker(session=session, dag_id="test"):
        task = BaseOperator(task_id="task_1")
    dr = DagRun(dag_id="test", run_id="test", run_type=DagRunType.MANUAL)
    dag_version = DagVersion.get_latest_version(dag_id=dr.dag_id)
    ti0 = TI(
        task=task,
        run_id=dr.run_id,
        map_index=0,
        state=TaskInstanceState.SUCCESS,
        dag_version_id=dag_version.id,
    )
    ti1 = TI(task=task, run_id=dr.run_id, map_index=1, state=None, dag_version_id=dag_version.id)
    ti2 = TI(
        task=task,
        run_id=dr.run_id,
        map_index=2,
        state=TaskInstanceState.SUCCESS,
        dag_version_id=dag_version.id,
    )
    session.add_all((dr, ti0, ti1, ti2))
    session.flush()
    # Only one TI was passed in, so the return value (count scheduled) is 1.
    assert dr.schedule_tis((ti1,), session=session) == 1
    session.refresh(ti0)
    session.refresh(ti1)
    session.refresh(ti2)
    assert ti0.state == TaskInstanceState.SUCCESS
    assert ti1.state == TaskInstanceState.SCHEDULED
    assert ti2.state == TaskInstanceState.SUCCESS
@pytest.mark.xfail(reason="We can't keep this behaviour with remote workers where scheduler can't reach xcom")
@pytest.mark.need_serialized_dag
def test_schedule_tis_start_trigger(dag_maker, session):
    """
    Test that an operator with start_trigger_args set can be directly deferred during scheduling.
    """
    class TestOperator(BaseOperator):
        start_trigger_args = StartTriggerArgs(
            trigger_cls="airflow.triggers.testing.SuccessTrigger",
            trigger_kwargs=None,
            next_method="execute_complete",
            timeout=None,
        )
        start_from_trigger = True
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # NOTE(review): this mutates the class-level StartTriggerArgs
            # shared by all instances — presumably intentional for this test.
            self.start_trigger_args.trigger_kwargs = {}
        def execute_complete(self):
            pass
    with dag_maker(session=session):
        TestOperator(task_id="test_task")
    dr: DagRun = dag_maker.create_dagrun()
    ti = dr.get_task_instance("test_task")
    assert ti.state is None
    ti.task = dr.dag.get_task("test_task")
    # Scheduling should defer the TI immediately instead of queueing it.
    dr.schedule_tis((ti,), session=session)
    assert ti.state == TaskInstanceState.DEFERRED
def test_schedule_tis_empty_operator_try_number(dag_maker, session: Session):
    """
    When empty operator is not actually run, then we need to increment the try_number,
    since ordinarily it's incremented when scheduled, but empty operator is generally not scheduled.
    """
    with dag_maker(session=session):
        BashOperator(task_id="real_task", bash_command="echo 1")
        EmptyOperator(task_id="empty_task")
    dr: DagRun = dag_maker.create_dagrun(session=session)
    session.commit()
    tis = dr.task_instances
    dr.schedule_tis(tis, session=session)
    session.commit()
    # Drop the identity map so the assertions below read fresh DB state.
    session.expunge_all()
    tis = dr.get_task_instances(session=session)
    real_ti = next(x for x in tis if x.task_id == "real_task")
    empty_ti = next(x for x in tis if x.task_id == "empty_task")
    # Both paths (scheduled vs. short-circuited empty) end up with try_number 1.
    assert real_ti.try_number == 1
    assert empty_ti.try_number == 1
@pytest.mark.xfail(reason="We can't keep this behaviour with remote workers where scheduler can't reach xcom")
def test_schedule_tis_start_trigger_through_expand(dag_maker, session):
    """
    Test that an operator with start_trigger_args set can be directly deferred during scheduling.

    Here ``start_from_trigger`` is mapped per-instance, so only the TI whose
    expanded value is True should be deferred.
    """
    class TestOperator(BaseOperator):
        start_trigger_args = StartTriggerArgs(
            trigger_cls="airflow.triggers.testing.SuccessTrigger",
            trigger_kwargs={},
            next_method="execute_complete",
            timeout=None,
        )
        start_from_trigger = False
        def __init__(self, *args, start_from_trigger: bool = False, **kwargs):
            super().__init__(*args, **kwargs)
            self.start_from_trigger = start_from_trigger
        def execute_complete(self):
            pass
    with dag_maker(session=session):
        TestOperator.partial(task_id="test_task").expand(start_from_trigger=[True, False])
    dr: DagRun = dag_maker.create_dagrun()
    dr.schedule_tis(dr.task_instances, session=session)
    tis = [(ti.state, ti.map_index) for ti in dr.task_instances]
    # map_index 0 expanded with True -> deferred; map_index 1 with False -> untouched.
    assert tis[0] == (TaskInstanceState.DEFERRED, 0)
    assert tis[1] == (None, 1)
def test_mapped_expand_kwargs(dag_maker):
    """
    ``expand_kwargs`` against a pure literal expands at parse time, while any
    dependence on a single XCom defers expansion until the upstream runs.
    """
    with dag_maker():
        @task
        def task_0():
            return {"arg1": "a", "arg2": "b"}
        @task
        def task_1(args_0):
            return [args_0, {"arg1": "y"}, {"arg2": "z"}]
        args_0 = task_0()
        args_list = task_1(args_0=args_0)
        MockOperator.partial(task_id="task_2").expand_kwargs(args_list)
        MockOperator.partial(task_id="task_3").expand_kwargs(
            [{"arg1": "a", "arg2": "b"}, {"arg1": "y"}, {"arg2": "z"}],
        )
        MockOperator.partial(task_id="task_4").expand_kwargs([args_0, {"arg1": "y"}, {"arg2": "z"}])
    dr: DagRun = dag_maker.create_dagrun()
    tis = {(ti.task_id, ti.map_index): ti for ti in dr.task_instances}
    # task_2 is not expanded yet since it relies on one single XCom input.
    # task_3 and task_4 received a pure literal and can be expanded right away.
    # task_4 relies on an XCom input in the list, but can also be expanded.
    assert sorted(map_index for (task_id, map_index) in tis if task_id == "task_2") == [-1]
    assert sorted(map_index for (task_id, map_index) in tis if task_id == "task_3") == [0, 1, 2]
    assert sorted(map_index for (task_id, map_index) in tis if task_id == "task_4") == [0, 1, 2]
    tis[("task_0", -1)].run()
    tis[("task_1", -1)].run()
    # With the upstreams available, everything should get expanded now.
    decision = dr.task_instance_scheduling_decisions()
    assert {(ti.task_id, ti.map_index): ti.state for ti in decision.schedulable_tis} == {
        ("task_2", 0): None,
        ("task_2", 1): None,
        ("task_2", 2): None,
        ("task_3", 0): None,
        ("task_3", 1): None,
        ("task_3", 2): None,
        ("task_4", 0): None,
        ("task_4", 1): None,
        ("task_4", 2): None,
    }
def test_mapped_skip_upstream_not_deadlock(dag_maker):
    """
    Two chained tasks mapped over empty lists should both be skipped (not
    deadlock the run), and the dag run should finish as SUCCESS.
    """
    with dag_maker() as dag:
        @dag.task
        def add_one(x: int):
            return x + 1
        @dag.task
        def say_hi():
            print("Hi")
        added_values = add_one.expand(x=[])
        added_more_values = add_one.expand(x=[])
        say_hi() >> added_values
        added_values >> added_more_values
    dr = dag_maker.create_dagrun()
    session = dag_maker.session
    tis = {ti.task_id: ti for ti in dr.task_instances}
    tis["say_hi"].state = TaskInstanceState.SUCCESS
    session.flush()
    # Each update_state pass resolves one more step of the chain.
    dr.update_state(session=session)  # expands the mapped tasks
    dr.update_state(session=session)  # marks the task as skipped
    dr.update_state(session=session)  # marks dagrun as success
    assert dr.state == DagRunState.SUCCESS
    assert tis["add_one__1"].state == TaskInstanceState.SKIPPED
def test_schedulable_task_exist_when_rerun_removed_upstream_mapped_task(session, dag_maker):
    """
    Downstream mapped TIs stay schedulable when some upstream mapped TIs were
    REMOVED (e.g. after a rerun with a shorter expansion) and the rest
    succeeded; the dag run must not be considered failed.
    """
    from airflow.sdk import task
    @task
    def do_something(i):
        return 1
    @task
    def do_something_else(i):
        return 1
    with dag_maker():
        nums = do_something.expand(i=[i + 1 for i in range(5)])
        do_something_else.expand(i=nums)
    dr = dag_maker.create_dagrun()
    tis = dr.get_task_instances()
    for ti in tis:
        if ti.task_id == "do_something_else":
            # Manually pre-expand the downstream into 5 map indices.
            ti.map_index = 0
            task = ti.task
            for map_index in range(1, 5):
                ti_new = TI(task, run_id=dr.run_id, map_index=map_index, dag_version_id=ti.dag_version_id)
                session.add(ti_new)
                ti_new.dag_run = dr
        else:
            # run tasks "do_something" to get XCOMs for correct downstream length
            ti.run()
    session.flush()
    tis = dr.get_task_instances()
    for ti in tis:
        if ti.task_id == "do_something":
            if ti.map_index > 2:
                ti.state = TaskInstanceState.REMOVED
            else:
                ti.state = TaskInstanceState.SUCCESS
            session.merge(ti)
    session.commit()
    # The Upstream is done with 2 removed tis and 3 success tis
    (tis, _) = dr.update_state()
    assert len(tis) == 3
    assert dr.state != DagRunState.FAILED
@pytest.mark.parametrize(
    ("partial_params", "mapped_params", "expected"),
    [
        pytest.param(None, [{"a": 1}], 1, id="simple"),
        pytest.param({"b": 2}, [{"a": 1}], 1, id="merge"),
        pytest.param({"b": 2}, [{"a": 1, "b": 3}], 1, id="override"),
    ],
)
def test_mapped_expand_against_params(dag_maker, partial_params, mapped_params, expected):
    """
    Expanding over ``params`` (with or without partial params to merge or
    override) yields one schedulable TI per mapped params dict.
    """
    with dag_maker():
        BaseOperator.partial(task_id="t", params=partial_params).expand(params=mapped_params)
    dr: DagRun = dag_maker.create_dagrun()
    decision = dr.task_instance_scheduling_decisions()
    assert len(decision.schedulable_tis) == expected
def test_mapped_task_group_expands(dag_maker, session):
    """
    A task group expanded over an XCom and a literal produces the full cross
    product of map indices once the XCom-producing upstream has run.
    """
    with dag_maker(session=session):
        @task_group
        def tg(x, y):
            return MockOperator(task_id="task_2", arg1=x, arg2=y)
        task_1 = BaseOperator(task_id="task_1")
        tg.expand(x=task_1.output, y=[1, 2, 3])
    dr: DagRun = dag_maker.create_dagrun()
    # Not expanding task_2 yet since it depends on result from task_1.
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert {(ti.task_id, ti.map_index, ti.state) for ti in decision.tis} == {
        ("task_1", -1, None),
        ("tg.task_2", -1, None),
    }
    # Simulate task_1 execution to produce TaskMap.
    (ti_1,) = decision.schedulable_tis
    assert ti_1.task_id == "task_1"
    ti_1.state = TaskInstanceState.SUCCESS
    session.add(TaskMap.from_task_instance_xcom(ti_1, ["a", "b"]))
    session.flush()
    # Now task_2 in mapped task group is expanded: 2 (xcom) * 3 (literal) = 6.
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert {(ti.task_id, ti.map_index, ti.state) for ti in decision.schedulable_tis} == {
        ("tg.task_2", 0, None),
        ("tg.task_2", 1, None),
        ("tg.task_2", 2, None),
        ("tg.task_2", 3, None),
        ("tg.task_2", 4, None),
        ("tg.task_2", 5, None),
    }
@pytest.mark.parametrize("rerun_length", [0, 1, 2, 3])
def test_mapped_task_rerun_with_different_length_of_args(session, dag_maker, rerun_length):
    """
    Re-running a mapped task whose upstream now yields a different number of
    items must re-expand to the new length (shrinking or growing).
    """
    @task
    def generate_mapping_args():
        # First try produces 2 items; the rerun produces ``rerun_length``.
        context = get_current_context()
        if context["ti"].try_number == 0:
            args = [i for i in range(2)]
        else:
            args = [i for i in range(rerun_length)]
        return args
    @task
    def mapped_print_value(arg):
        return arg
    with dag_maker(session=session):
        args = generate_mapping_args()
        mapped_print_value.expand(arg=args)
    # First Run
    dr = dag_maker.create_dagrun()
    dag_maker.run_ti("generate_mapping_args", dr)
    decision = dr.task_instance_scheduling_decisions(session=session)
    for ti in decision.schedulable_tis:
        dag_maker.run_ti(ti.task_id, dr, map_index=ti.map_index)
    clear_task_instances(dr.get_task_instances(), session=session)
    # Second Run
    ti = dr.get_task_instance(task_id="generate_mapping_args", session=session)
    ti.try_number += 1
    session.merge(ti)
    dag_maker.run_ti("generate_mapping_args", dr)
    # Check if the new mapped task instances are correctly scheduled
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == rerun_length
    assert all([ti.task_id == "mapped_print_value" for ti in decision.schedulable_tis])
    # Check if mapped task rerun successfully
    for ti in decision.schedulable_tis:
        dag_maker.run_ti(ti.task_id, dr, map_index=ti.map_index)
    query = select(TI).where(
        TI.dag_id == dr.dag_id,
        TI.run_id == dr.run_id,
        TI.task_id == "mapped_print_value",
        TI.state == TaskInstanceState.SUCCESS,
    )
    success_tis = session.execute(query).all()
    assert len(success_tis) == rerun_length
def test_operator_mapped_task_group_receives_value(dag_maker, session):
    """
    Tasks chained inside a mapped task group run once per group expansion, and
    a downstream task outside the group aggregates the group's results as a
    single (unmapped) TI.
    """
    with dag_maker(session=session):
        @task
        def t(value): ...
        @task_group
        def tg(va):
            # Each expanded group has one t1 and t2 each.
            t1 = t.override(task_id="t1")(va)
            t2 = t.override(task_id="t2")(t1)
            # Expanding an operator inside an already-expanded group is not
            # supported and must raise at parse time.
            with pytest.raises(NotImplementedError) as ctx:
                t.override(task_id="t4").expand(value=va)
            assert str(ctx.value) == "operator expansion in an expanded task group is not yet supported"
            return t2
        # The group is mapped by 3.
        t2 = tg.expand(va=[["a", "b"], [4], ["z"]])
        # Aggregates results from task group.
        t.override(task_id="t3")(t2)
    dr: DagRun = dag_maker.create_dagrun()
    results = set()
    decision = dr.task_instance_scheduling_decisions(session=session)
    for ti in decision.schedulable_tis:
        results.add((ti.task_id, ti.map_index))
        ti.state = TaskInstanceState.SUCCESS
    session.flush()
    assert results == {("tg.t1", 0), ("tg.t1", 1), ("tg.t1", 2)}
    results.clear()
    decision = dr.task_instance_scheduling_decisions(session=session)
    for ti in decision.schedulable_tis:
        results.add((ti.task_id, ti.map_index))
        ti.state = TaskInstanceState.SUCCESS
    session.flush()
    assert results == {("tg.t2", 0), ("tg.t2", 1), ("tg.t2", 2)}
    results.clear()
    decision = dr.task_instance_scheduling_decisions(session=session)
    for ti in decision.schedulable_tis:
        results.add((ti.task_id, ti.map_index))
        ti.state = TaskInstanceState.SUCCESS
    session.flush()
    # The aggregator outside the group runs exactly once (map_index -1).
    assert results == {("t3", -1)}
def test_mapping_against_empty_list(dag_maker, session):
    """
    A chain of three tasks each mapped over an empty list is skipped one task
    per ``update_state`` pass, and the dag run finishes as SUCCESS.
    """
    with dag_maker(session=session):
        @task
        def add_one(x: int):
            return x + 1
        @task
        def say_hi():
            print("Hi")
        @task
        def say_bye():
            print("Bye")
        added_values = add_one.expand(x=[])
        added_more_values = add_one.expand(x=[])
        added_more_more_values = add_one.expand(x=[])
        say_hi() >> say_bye() >> added_values
        added_values >> added_more_values >> added_more_more_values
    dr: DagRun = dag_maker.create_dagrun()
    tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)}
    say_hi_ti = tis["say_hi"]
    say_bye_ti = tis["say_bye"]
    say_hi_ti.state = TaskInstanceState.SUCCESS
    say_bye_ti.state = TaskInstanceState.SUCCESS
    session.merge(say_hi_ti)
    session.merge(say_bye_ti)
    session.flush()
    dr.update_state(session=session)
    dr.update_state(session=session)  # marks first empty mapped task as skipped
    dr.update_state(session=session)  # marks second empty mapped task as skipped
    dr.update_state(session=session)  # marks the third empty mapped task as skipped and dagrun as success
    tis = {ti.task_id: ti.state for ti in dr.get_task_instances(session=session)}
    assert tis["say_hi"] == TaskInstanceState.SUCCESS
    assert tis["say_bye"] == TaskInstanceState.SUCCESS
    assert tis["add_one"] == TaskInstanceState.SKIPPED
    assert tis["add_one__1"] == TaskInstanceState.SKIPPED
    assert tis["add_one__2"] == TaskInstanceState.SKIPPED
    assert dr.state == State.SUCCESS
def test_mapped_task_depends_on_past(dag_maker, session):
    """
    ``depends_on_past`` on a mapped task blocks the later run's TIs until the
    earlier run's corresponding TIs have finished.
    """
    with dag_maker(session=session):
        @task(depends_on_past=True)
        def print_value(value):
            print(value)
        print_value.expand_kwargs([{"value": i} for i in range(2)])
    dr1: DagRun = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
    dr2: DagRun = dag_maker.create_dagrun_after(dr1, run_type=DagRunType.SCHEDULED)
    # print_value in dr2 is not ready yet since the task depends on past.
    decision = dr2.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == 0
    # Run print_value in dr1.
    decision = dr1.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == 2
    for ti in decision.schedulable_tis:
        ti.state = TaskInstanceState.SUCCESS
    session.flush()
    # Now print_value in dr2 can run
    decision = dr2.task_instance_scheduling_decisions(session=session)
    assert len(decision.schedulable_tis) == 2
    for ti in decision.schedulable_tis:
        ti.state = TaskInstanceState.SUCCESS
    session.flush()
    # Both runs are finished now.
    decision = dr1.task_instance_scheduling_decisions(session=session)
    assert len(decision.unfinished_tis) == 0
    decision = dr2.task_instance_scheduling_decisions(session=session)
    assert len(decision.unfinished_tis) == 0
def test_xcom_map_skip_raised(dag_maker, session):
    """
    When one mapped upstream TI ends up SKIPPED, a downstream task with
    trigger_rule=ALL_DONE must still become schedulable.
    """
    result = None
    with dag_maker(session=session) as dag:
        # Note: this doesn't actually run this dag, the callbacks are for reference only.
        @dag.task()
        def push():
            return ["a", "b", "c"]
        @dag.task()
        def forward(value):
            return value
        @dag.task(trigger_rule=TriggerRule.ALL_DONE)
        def collect(value):
            nonlocal result
            result = list(value)
        def skip_c(v):
            ...
            # if v == "c":
            #     raise AirflowSkipException
            # return {"value": v}
        collect(value=forward.expand_kwargs(push().map(skip_c)))
    dr: DagRun = dag_maker.create_dagrun(session=session)
    def _task_ids(tis):
        # Convenience: project TIs to (task_id, map_index) pairs for assertions.
        return [(ti.task_id, ti.map_index) for ti in tis]
    # Check that when forward w/ map_index=2 ends up skipping, that the collect task can still be
    # scheduled!
    # Run "push".
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert _task_ids(decision.schedulable_tis) == [("push", -1)]
    ti = decision.schedulable_tis[0]
    ti.state = TaskInstanceState.SUCCESS
    session.add(TaskMap.from_task_instance_xcom(ti, push.function()))
    session.flush()
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert _task_ids(decision.schedulable_tis) == [
        ("forward", 0),
        ("forward", 1),
        ("forward", 2),
    ]
    # Run "forward". "c"/index 2 is skipped. Runtime behaviour checked in test_xcom_map_raise_to_skip in
    # TaskSDK
    for ti, state in zip(
        decision.schedulable_tis,
        [TaskInstanceState.SUCCESS, TaskInstanceState.SUCCESS, TaskInstanceState.SKIPPED],
    ):
        ti.state = state
    session.flush()
    # Now "collect" should only get "a" and "b".
    decision = dr.task_instance_scheduling_decisions(session=session)
    assert _task_ids(decision.schedulable_tis) == [("collect", -1)]
def test_clearing_task_and_moving_from_non_mapped_to_mapped(dag_maker, session):
    """
    Test that clearing a task and moving from non-mapped to mapped clears existing
    references in XCom, TaskInstanceNote, TaskReschedule and
    RenderedTaskInstanceFields. To be able to test this, RenderedTaskInstanceFields
    was not used in the test since it would require that the task is expanded first.
    """
    from airflow.models.xcom import XComModel
    @task
    def printx(x):
        print(x)
    with dag_maker() as dag:
        printx.expand(x=[1])
    dr1: DagRun = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)
    ti = dr1.get_task_instances()[0]
    filter_kwargs = dict(dag_id=ti.dag_id, task_id=ti.task_id, run_id=ti.run_id, map_index=ti.map_index)
    ti = session.query(TaskInstance).filter_by(**filter_kwargs).one()
    tr = TaskReschedule(
        ti_id=ti.id,
        start_date=timezone.datetime(2017, 1, 1),
        end_date=timezone.datetime(2017, 1, 2),
        reschedule_date=timezone.datetime(2017, 1, 1),
    )
    # mimicking a case where task moved from non-mapped to mapped
    # in that case, it would have map_index of -1 even though mapped
    ti.map_index = -1
    ti.note = "sample note"
    session.merge(ti)
    session.flush()
    # Purposely omitted RenderedTaskInstanceFields because the ti need
    # to be expanded but here we are mimicking and made it map_index -1
    session.add(tr)
    XComModel.set(key="test", value="value", task_id=ti.task_id, dag_id=dag.dag_id, run_id=ti.run_id)
    session.commit()
    for table in [TaskInstanceNote, TaskReschedule, XComModel]:
        assert session.query(table).count() == 1
    # Re-expanding during scheduling should purge the stale -1 references.
    dr1.task_instance_scheduling_decisions(session)
    for table in [TaskInstanceNote, TaskReschedule, XComModel]:
        assert session.query(table).count() == 0
def test_dagrun_with_note(dag_maker, session):
    """
    Setting ``DagRun.note`` persists a DagRunNote row, and deleting the dag
    run cascades the note's deletion.
    """
    with dag_maker():
        @task
        def the_task():
            print("Hi")
        the_task()
    dr: DagRun = dag_maker.create_dagrun()
    dr.note = "dag run with note"
    session.add(dr)
    session.commit()
    dr_note = session.query(DagRunNote).filter(DagRunNote.dag_run_id == dr.id).one()
    assert dr_note.content == "dag run with note"
    session.delete(dr)
    session.commit()
    assert session.query(DagRun).filter(DagRun.id == dr.id).one_or_none() is None
    assert session.query(DagRunNote).filter(DagRunNote.dag_run_id == dr.id).one_or_none() is None
@pytest.mark.parametrize(
    ("dag_run_state", "on_failure_fail_dagrun"), [[DagRunState.SUCCESS, False], [DagRunState.FAILED, True]]
)
def test_teardown_failure_behaviour_on_dagrun(dag_maker, session, dag_run_state, on_failure_fail_dagrun):
    """
    A failed teardown fails the dag run only when its
    ``on_failure_fail_dagrun`` flag is set; otherwise the run still succeeds.
    """
    with dag_maker():
        @teardown(on_failure_fail_dagrun=on_failure_fail_dagrun)
        def teardowntask():
            print(1)
        @task
        def mytask():
            print(1)
        mytask() >> teardowntask()
    dr = dag_maker.create_dagrun()
    ti1 = dr.get_task_instance(task_id="mytask")
    td1 = dr.get_task_instance(task_id="teardowntask")
    ti1.state = State.SUCCESS
    td1.state = State.FAILED
    session.merge(ti1)
    session.merge(td1)
    session.flush()
    dr.update_state()
    session.flush()
    dr = session.query(DagRun).one()
    assert dr.state == dag_run_state
@pytest.mark.parametrize(
    ("dag_run_state", "on_failure_fail_dagrun"), [[DagRunState.SUCCESS, False], [DagRunState.FAILED, True]]
)
def test_teardown_failure_on_non_leaf_behaviour_on_dagrun(
    dag_maker, session, dag_run_state, on_failure_fail_dagrun
):
    """
    Like test_teardown_failure_behaviour_on_dagrun, but the flagged teardown is
    not a leaf (it feeds another teardown); its ``on_failure_fail_dagrun``
    still decides the dag run state.
    """
    with dag_maker():
        @teardown(on_failure_fail_dagrun=on_failure_fail_dagrun)
        def teardowntask():
            print(1)
        @teardown
        def teardowntask2():
            print(1)
        @task
        def mytask():
            print(1)
        mytask() >> teardowntask() >> teardowntask2()
    dr = dag_maker.create_dagrun()
    ti1 = dr.get_task_instance(task_id="mytask")
    td1 = dr.get_task_instance(task_id="teardowntask")
    td2 = dr.get_task_instance(task_id="teardowntask2")
    ti1.state = State.SUCCESS
    td1.state = State.FAILED
    td2.state = State.FAILED
    session.merge(ti1)
    session.merge(td1)
    session.merge(td2)
    session.flush()
    dr.update_state()
    session.flush()
    dr = session.query(DagRun).one()
    assert dr.state == dag_run_state
def test_work_task_failure_when_setup_teardown_are_successful(dag_maker, session):
    """
    A failed work task fails the dag run even when its surrounding setup and
    teardown both succeed.
    """
    with dag_maker():
        @setup
        def setuptask():
            print(2)
        @teardown
        def teardown_task():
            print(1)
        @task
        def mytask():
            print(1)
        with setuptask() >> teardown_task():
            mytask()
    dr = dag_maker.create_dagrun()
    s1 = dr.get_task_instance(task_id="setuptask")
    td1 = dr.get_task_instance(task_id="teardown_task")
    t1 = dr.get_task_instance(task_id="mytask")
    s1.state = TaskInstanceState.SUCCESS
    td1.state = TaskInstanceState.SUCCESS
    t1.state = TaskInstanceState.FAILED
    session.merge(s1)
    session.merge(td1)
    session.merge(t1)
    session.flush()
    dr.update_state()
    session.flush()
    dr = session.query(DagRun).one()
    assert dr.state == DagRunState.FAILED
def test_failure_of_leaf_task_not_connected_to_teardown_task(dag_maker, session):
    """
    A failed leaf work task fails the dag run even when unrelated setup and
    teardown tasks (not wired to it) succeed.
    """
    with dag_maker():
        @setup
        def setuptask():
            print(2)
        @teardown
        def teardown_task():
            print(1)
        @task
        def mytask():
            print(1)
        # Note: no dependencies between these three tasks.
        setuptask()
        teardown_task()
        mytask()
    dr = dag_maker.create_dagrun()
    s1 = dr.get_task_instance(task_id="setuptask")
    td1 = dr.get_task_instance(task_id="teardown_task")
    t1 = dr.get_task_instance(task_id="mytask")
    s1.state = TaskInstanceState.SUCCESS
    td1.state = TaskInstanceState.SUCCESS
    t1.state = TaskInstanceState.FAILED
    session.merge(s1)
    session.merge(td1)
    session.merge(t1)
    session.flush()
    dr.update_state()
    session.flush()
    dr = session.query(DagRun).one()
    assert dr.state == DagRunState.FAILED
@pytest.mark.parametrize(
    ("input", "expected"),
    [
        (["s1 >> w1 >> t1"], {"w1"}),  # t1 ignored
        (["s1 >> w1 >> t1", "s1 >> t1"], {"w1"}),  # t1 ignored; properly wired to setup
        (["s1 >> w1"], {"w1"}),  # no teardown
        (["s1 >> w1 >> t1_"], {"t1_"}),  # t1_ is natural leaf and OFFD=True;
        (["s1 >> w1 >> t1_", "s1 >> t1_"], {"t1_"}),  # t1_ is natural leaf and OFFD=True; wired to setup
        (["s1 >> w1 >> t1_ >> w2", "s1 >> t1_"], {"w2"}),  # t1_ is not a natural leaf so excluded anyway
        (["t1 >> t2"], {"t2"}),  # all teardowns -- default to "leaves"
        (["w1 >> t1_ >> t2"], {"t1_"}),  # teardown to teardown
    ],
)
def test_tis_considered_for_state(dag_maker, session, input, expected):
    """
    We use a convenience notation to wire up test scenarios:
    t<num> -- teardown task
    t<num>_ -- teardown task with on_failure_fail_dagrun = True
    s<num> -- setup task
    w<num> -- work task (a.k.a. normal task)
    In the test input, each line is a statement. We'll automatically create the tasks and wire them up
    as indicated in the test input.
    """
    @teardown
    def teardown_task():
        print(1)
    @task
    def work_task():
        print(1)
    @setup
    def setup_task():
        print(1)
    def make_task(task_id, dag):
        """
        Task factory helper.
        Will give a setup, teardown, work, or teardown-with-dagrun-failure task depending on input.
        """
        if task_id.startswith("s"):
            factory = setup_task
        elif task_id.startswith("w"):
            factory = work_task
        elif task_id.endswith("_"):
            factory = teardown_task.override(on_failure_fail_dagrun=True)
        else:
            factory = teardown_task
        # Reuse an already-created task so a task_id appearing in several
        # statements refers to the same node.
        return dag.task_dict.get(task_id) or factory.override(task_id=task_id)()
    with dag_maker() as dag:
        for line in input:
            tasks = [make_task(x, dag_maker.dag) for x in line.split(" >> ")]
            # Chain the statement's tasks left-to-right: a >> b >> c.
            reduce(lambda x, y: x >> y, tasks)
    dr = dag_maker.create_dagrun()
    tis = dr.task_instance_scheduling_decisions(session).tis
    tis_for_state = {x.task_id for x in dr._tis_for_dagrun_state(dag=dag, tis=tis)}
    assert tis_for_state == expected
@pytest.mark.parametrize(
    ("pattern", "run_id", "result"),
    [
        ["^[A-Z]", "ABC", True],
        ["^[A-Z]", "abc", False],
        ["^[0-9]", "123", True],
        # The below params tests that user configuration does not affect internally generated
        # run_ids
        ["", "scheduled__2023-01-01T00:00:00+00:00", True],
        ["", "manual__2023-01-01T00:00:00+00:00", True],
        ["", "asset_triggered__2023-01-01T00:00:00+00:00", True],
        ["", "scheduled_2023-01-01T00", False],
        ["", "manual_2023-01-01T00", False],
        ["", "asset_triggered_2023-01-01T00", False],
        ["^[0-9]", "scheduled__2023-01-01T00:00:00+00:00", True],
        ["^[0-9]", "manual__2023-01-01T00:00:00+00:00", True],
        ["^[a-z]", "asset_triggered__2023-01-01T00:00:00+00:00", True],
    ],
)
def test_dag_run_id_config(session, dag_maker, pattern, run_id, result):
    """
    ``[scheduler] allowed_run_id_pattern`` validates user-supplied run_ids at
    dag run creation; internally generated run_ids always pass.
    """
    with conf_vars({("scheduler", "allowed_run_id_pattern"): pattern}):
        with dag_maker():
            pass
        run_type = DagRunType.from_run_id(run_id)
        if result:
            dag_maker.create_dagrun(run_id=run_id, run_type=run_type)
        else:
            with pytest.raises(ValueError, match=r"The run_id provided '.+' does not match regex pattern"):
                dag_maker.create_dagrun(run_id=run_id, run_type=run_type)
def _get_states(dr):
"""
For a given dag run, get a dict of states.
Example::
{
"my_setup": "success",
"my_teardown": {0: "success", 1: "success", 2: "success"},
"my_work": "failed",
}
"""
ti_dict = defaultdict(dict)
for ti in dr.get_task_instances():
if ti.map_index == -1:
ti_dict[ti.task_id] = ti.state
else:
ti_dict[ti.task_id][ti.map_index] = ti.state
return dict(ti_dict)
@pytest.mark.db_test
@pytest.mark.need_serialized_dag(False)
def test_teardown_and_fail_fast(dag_maker):
    """
    when fail_fast enabled, teardowns should run according to their setups.
    in this case, the second teardown skips because its setup skips.
    """
    from airflow.sdk import task as task_decorator
    from airflow.sdk.definitions.taskgroup import TaskGroup
    with dag_maker(fail_fast=True) as dag:
        # Two identical task groups; tg_1 fails its work task, which under
        # fail_fast skips everything in the downstream tg_2.
        for num in (1, 2):
            with TaskGroup(f"tg_{num}"):
                @task_decorator
                def my_setup():
                    print("setting up multiple things")
                    return [1, 2, 3]
                @task_decorator
                def my_work(val):
                    print(f"doing work with multiple things: {val}")
                    raise ValueError("this fails")
                    return val
                @task_decorator
                def my_teardown():
                    print("teardown")
                s = my_setup()
                t = my_teardown().as_teardown(setups=s)
                with t:
                    my_work(s)
        tg1, tg2 = dag.task_group.children.values()
        tg1 >> tg2
    dr = dag.test()
    states = _get_states(dr)
    assert states == {
        "tg_1.my_setup": "success",
        "tg_1.my_teardown": "success",
        "tg_1.my_work": "failed",
        "tg_2.my_setup": "skipped",
        "tg_2.my_teardown": "skipped",
        "tg_2.my_work": "skipped",
    }
| TestDagRun |
python | tensorflow__tensorflow | tensorflow/python/training/optimizer_test.py | {
"start": 1521,
"end": 13768
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testBasic(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
# Note that for eager execution, minimize expects a function instead of a
# Tensor.
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64), name='global_step_%d' % i)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd_op.minimize(loss, global_step, [var0, var1])
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_deprecated_v1
def testAggregationMethod(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(
cost,
global_step, [var0, var1],
aggregation_method=gradients_util.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_deprecated_v1
def testPrecomputedGradient(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
cost = 5 * var0 + 3 * var1
grad_loss = constant_op.constant([42, -42], dtype=dtype)
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(
cost, global_step, [var0, var1], grad_loss=grad_loss)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
self.evaluate(var0))
self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testNoVariables(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# pylint: disable=cell-var-from-loop
def loss():
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype, trainable=False, name='a')
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtype, trainable=False, name='b')
return 5 * var0 + var1
# pylint: enable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegex(ValueError, 'No.*variables'):
sgd_op.minimize(loss)
@test_util.run_in_graph_and_eager_modes
def testNoGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b%d' % i)
# pylint: disable=cell-var-from-loop
def loss():
return 5 * var0
# pylint: enable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegex(ValueError, 'No gradients'):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_Minimize(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
def loss():
return constant_op.constant(5.0)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegex(ValueError,
'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_ApplyGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegex(ValueError,
'No gradients provided for any variable'):
sgd_op.apply_gradients([(None, var0), (None, var1)])
@test_util.run_in_graph_and_eager_modes
def testGradientsAsVariables(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b%d' % i)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
grads_and_vars = sgd_op.compute_gradients(loss, [var0, var1])
# Convert gradients to tf.Variables
converted_grads = [
resource_variable_ops.ResourceVariable(array_ops.zeros([2], dtype),
name='c_%d_%d' % (i, j))
for j, gv in enumerate(grads_and_vars)
]
convert_ops = [
state_ops.assign(converted_grads[j], gv[0])
for j, gv in enumerate(grads_and_vars)
]
self.evaluate(variables.global_variables_initializer())
# Run convert_ops to achieve the gradients converting
self.evaluate(convert_ops)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
opt_op = sgd_op.apply_gradients(converted_grads_and_vars)
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testComputeGradientsWithTensors(self):
x = ops.convert_to_tensor(1.0)
def f():
return x * x
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
grads_and_vars = sgd_op.compute_gradients(f, [x])
self.assertEqual(1, len(grads_and_vars))
grad, x_as_var = grads_and_vars[0]
self.assertIs(x, x_as_var)
self.assertEqual(2.0, self.evaluate(grad))
with self.assertRaises(NotImplementedError):
sgd_op.apply_gradients(grads_and_vars)
@test_util.run_deprecated_v1
def testTrainOp(self):
with self.cached_session():
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([3.0, 4.0])
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
self.assertTrue(opt_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
@test_util.run_deprecated_v1
def testConstraint(self):
constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
with self.cached_session():
var0 = variables.Variable([1.0, 2.0],
constraint=constraint_01)
var1 = variables.Variable([3.0, 4.0],
constraint=constraint_0)
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([-0.1, -0.1], self.evaluate(var0))
self.assertAllClose([0., 0.], self.evaluate(var1))
@test_util.run_deprecated_v1
def testGetSlotUnderDistributedStrategy(self):
# Only run this test in graph mode so we don't need actual GPU.
ds = mirrored_strategy.MirroredStrategy(
['CPU:0', 'GPU:0'],
cross_device_ops=cross_device_ops.HierarchicalCopyAllReduce())
# We need an optimizer that creates slots.
optimizer = adam.AdamOptimizer()
def f():
v = variables.Variable([1.0])
self.assertTrue(distribute_utils.is_distributed_variable(v))
# Slot variables are created in the first call to apply_gradients.
optimizer.apply_gradients([(ops.convert_to_tensor([1.0]), v)])
self.assertTrue(optimizer.get_slot_names())
for name in optimizer.get_slot_names():
slot = optimizer.get_slot(v, name)
self.assertIsNotNone(slot)
self.assertTrue(distribute_utils.is_distributed_variable(slot))
ds.run(f)
if __name__ == '__main__':
test.main()
| OptimizerTest |
python | langchain-ai__langchain | libs/partners/perplexity/langchain_perplexity/chat_models.py | {
"start": 3064,
"end": 21446
} | class ____(BaseChatModel):
"""`Perplexity AI` Chat models API.
Setup:
To use, you should have the environment variable `PPLX_API_KEY` set to your API key.
Any parameters that are valid to be passed to the openai.create call
can be passed in, even if not explicitly saved on this class.
```bash
export PPLX_API_KEY=your_api_key
```
Key init args - completion params:
model:
Name of the model to use. e.g. "sonar"
temperature:
Sampling temperature to use.
max_tokens:
Maximum number of tokens to generate.
streaming:
Whether to stream the results or not.
Key init args - client params:
pplx_api_key:
API key for PerplexityChat API.
request_timeout:
Timeout for requests to PerplexityChat completion API.
max_retries:
Maximum number of retries to make when generating.
See full list of supported init args and their descriptions in the params section.
Instantiate:
```python
from langchain_perplexity import ChatPerplexity
model = ChatPerplexity(model="sonar", temperature=0.7)
```
Invoke:
```python
messages = [("system", "You are a chatbot."), ("user", "Hello!")]
model.invoke(messages)
```
Invoke with structured output:
```python
from pydantic import BaseModel
class StructuredOutput(BaseModel):
role: str
content: str
model.with_structured_output(StructuredOutput)
model.invoke(messages)
```
Invoke with perplexity-specific params:
```python
model.invoke(messages, extra_body={"search_recency_filter": "week"})
```
Stream:
```python
for chunk in model.stream(messages):
print(chunk.content)
```
Token usage:
```python
response = model.invoke(messages)
response.usage_metadata
```
Response metadata:
```python
response = model.invoke(messages)
response.response_metadata
```
""" # noqa: E501
client: Any = None
model: str = "sonar"
"""Model name."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
pplx_api_key: SecretStr | None = Field(
default_factory=secret_from_env("PPLX_API_KEY", default=None), alias="api_key"
)
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
request_timeout: float | tuple[float, float] | None = Field(None, alias="timeout")
"""Timeout for requests to PerplexityChat completion API."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
max_tokens: int | None = None
"""Maximum number of tokens to generate."""
model_config = ConfigDict(populate_by_name=True)
@property
def lc_secrets(self) -> dict[str, str]:
return {"pplx_api_key": "PPLX_API_KEY"}
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not a default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
try:
self.client = openai.OpenAI(
api_key=self.pplx_api_key.get_secret_value()
if self.pplx_api_key
else None,
base_url="https://api.perplexity.ai",
)
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
return self
@model_validator(mode="after")
def _set_model_profile(self) -> Self:
"""Set model profile if not overridden."""
if self.profile is None:
self.profile = _get_default_model_profile(self.model)
return self
@property
def _default_params(self) -> dict[str, Any]:
"""Get the default parameters for calling PerplexityChat API."""
return {
"max_tokens": self.max_tokens,
"stream": self.streaming,
"temperature": self.temperature,
**self.model_kwargs,
}
def _convert_message_to_dict(self, message: BaseMessage) -> dict[str, Any]:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _create_message_dicts(
self, messages: list[BaseMessage], stop: list[str] | None
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
params = dict(self._invocation_params)
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [self._convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _convert_delta_to_message_chunk(
self, _dict: Mapping[str, Any], default_class: type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
additional_kwargs: dict = {}
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
if "name" in function_call and function_call["name"] is None:
function_call["name"] = ""
additional_kwargs["function_call"] = function_call
if _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = _dict["tool_calls"]
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"])
elif role == "tool" or default_class == ToolMessageChunk:
return ToolMessageChunk(content=content, tool_call_id=_dict["tool_call_id"])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role) # type: ignore[arg-type]
else:
return default_class(content=content) # type: ignore[call-arg]
def _stream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
default_chunk_class = AIMessageChunk
params.pop("stream", None)
if stop:
params["stop_sequences"] = stop
stream_resp = self.client.chat.completions.create(
messages=message_dicts, stream=True, **params
)
first_chunk = True
prev_total_usage: UsageMetadata | None = None
added_model_name: bool = False
added_search_queries: bool = False
for chunk in stream_resp:
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
# Collect standard usage metadata (transform from aggregate to delta)
if total_usage := chunk.get("usage"):
lc_total_usage = _create_usage_metadata(total_usage)
if prev_total_usage:
usage_metadata: UsageMetadata | None = subtract_usage(
lc_total_usage, prev_total_usage
)
else:
usage_metadata = lc_total_usage
prev_total_usage = lc_total_usage
else:
usage_metadata = None
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
additional_kwargs = {}
if first_chunk:
additional_kwargs["citations"] = chunk.get("citations", [])
for attr in ["images", "related_questions", "search_results"]:
if attr in chunk:
additional_kwargs[attr] = chunk[attr]
generation_info = {}
if (model_name := chunk.get("model")) and not added_model_name:
generation_info["model_name"] = model_name
added_model_name = True
# Add num_search_queries to generation_info if present
if total_usage := chunk.get("usage"):
if num_search_queries := total_usage.get("num_search_queries"):
if not added_search_queries:
generation_info["num_search_queries"] = num_search_queries
added_search_queries = True
chunk = self._convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
if isinstance(chunk, AIMessageChunk) and usage_metadata:
chunk.usage_metadata = usage_metadata
if first_chunk:
chunk.additional_kwargs |= additional_kwargs
first_chunk = False
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
if stream_iter:
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.client.chat.completions.create(messages=message_dicts, **params)
if usage := getattr(response, "usage", None):
usage_dict = usage.model_dump()
usage_metadata = _create_usage_metadata(usage_dict)
else:
usage_metadata = None
usage_dict = {}
additional_kwargs = {}
for attr in ["citations", "images", "related_questions", "search_results"]:
if hasattr(response, attr):
additional_kwargs[attr] = getattr(response, attr)
# Build response_metadata with model_name and num_search_queries
response_metadata: dict[str, Any] = {
"model_name": getattr(response, "model", self.model)
}
if num_search_queries := usage_dict.get("num_search_queries"):
response_metadata["num_search_queries"] = num_search_queries
message = AIMessage(
content=response.choices[0].message.content,
additional_kwargs=additional_kwargs,
usage_metadata=usage_metadata,
response_metadata=response_metadata,
)
return ChatResult(generations=[ChatGeneration(message=message)])
@property
def _invocation_params(self) -> Mapping[str, Any]:
"""Get the parameters used to invoke the model."""
pplx_creds: dict[str, Any] = {"model": self.model}
return {**pplx_creds, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "perplexitychat"
def with_structured_output(
self,
schema: _DictOrPydanticClass | None = None,
*,
method: Literal["json_schema"] = "json_schema",
include_raw: bool = False,
strict: bool | None = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, _DictOrPydantic]:
"""Model wrapper that returns outputs formatted to match the given schema for Preplexity.
Currently, Perplexity only supports "json_schema" method for structured output
as per their [official documentation](https://docs.perplexity.ai/guides/structured-outputs).
Args:
schema: The output schema. Can be passed in as:
- a JSON Schema,
- a `TypedDict` class,
- or a Pydantic class
method: The method for steering model generation, currently only support:
- `'json_schema'`: Use the JSON Schema to parse the model output
include_raw:
If `False` then only the parsed structured output is returned.
If an error occurs during model output parsing it will be raised.
If `True` then both the raw model response (a `BaseMessage`) and the
parsed model response will be returned.
If an error occurs during output parsing it will be caught and returned
as well.
The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
`'parsing_error'`.
strict:
Unsupported: whether to enable strict schema adherence when generating
the output. This parameter is included for compatibility with other
chat models, but is currently ignored.
kwargs: Additional keyword args aren't supported.
Returns:
A `Runnable` that takes same inputs as a
`langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
`False` and `schema` is a Pydantic class, `Runnable` outputs an instance
of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
`False` then `Runnable` outputs a `dict`.
If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
- `'raw'`: `BaseMessage`
- `'parsed'`: `None` if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: `BaseException | None`
""" # noqa: E501
if method in ("function_calling", "json_mode"):
method = "json_schema"
if method == "json_schema":
if schema is None:
raise ValueError(
"schema must be specified when method is not 'json_schema'. "
"Received None."
)
is_pydantic_schema = _is_pydantic_class(schema)
response_format = convert_to_json_schema(schema)
llm = self.bind(
response_format={
"type": "json_schema",
"json_schema": {"schema": response_format},
},
ls_structured_output_format={
"kwargs": {"method": method},
"schema": response_format,
},
)
output_parser = (
ReasoningStructuredOutputParser(pydantic_object=schema) # type: ignore[arg-type]
if is_pydantic_schema
else ReasoningJsonOutputParser()
)
else:
raise ValueError(
f"Unrecognized method argument. Expected 'json_schema' Received:\
'{method}'"
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser
| ChatPerplexity |
python | doocs__leetcode | solution/1100-1199/1186.Maximum Subarray Sum with One Deletion/Solution.py | {
"start": 0,
"end": 482
} | class ____:
def maximumSum(self, arr: List[int]) -> int:
n = len(arr)
left = [0] * n
right = [0] * n
s = 0
for i, x in enumerate(arr):
s = max(s, 0) + x
left[i] = s
s = 0
for i in range(n - 1, -1, -1):
s = max(s, 0) + arr[i]
right[i] = s
ans = max(left)
for i in range(1, n - 1):
ans = max(ans, left[i - 1] + right[i + 1])
return ans
| Solution |
python | getsentry__sentry | src/sentry/issues/endpoints/group_user_reports.py | {
"start": 537,
"end": 1630
} | class ____(GroupEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, group) -> Response:
"""
List User Reports
`````````````````
Returns a list of user reports for an issue.
:pparam string issue_id: the ID of the issue to retrieve.
:pparam string key: the tag key to look the values up for.
:auth: required
"""
try:
environment = get_environment(request, group.organization.id)
except Environment.DoesNotExist:
report_list = UserReport.objects.none()
else:
report_list = UserReport.objects.filter(group_id=group.id)
if environment is not None:
report_list = report_list.filter(environment_id=environment.id)
return self.paginate(
request=request,
queryset=report_list,
order_by="-date_added",
on_results=lambda x: serialize(x, request.user),
paginator_cls=DateTimePaginator,
)
| GroupUserReportsEndpoint |
python | pytorch__pytorch | torch/_dynamo/exc.py | {
"start": 2902,
"end": 3129
} | class ____(TorchDynamoException):
def __init__(self, name: str) -> None:
super().__init__(
f"Invalid backend: {name!r}, see `torch._dynamo.list_backends()` for available backends."
)
| InvalidBackend |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 35933,
"end": 36026
} | class ____(Operator):
__slots__ = ()
_description = "equality"
_op = operator.eq
| Eq |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/parameterTypes/progress.py | {
"start": 343,
"end": 508
} | class ____(Parameter):
"""
Displays a progress bar whose value can be set between 0 and 100
"""
itemClass = ProgressBarParameterItem
| ProgressBarParameter |
python | davidhalter__parso | test/normalizer_issue_files/E30not.py | {
"start": 1492,
"end": 1747
} | class ____(object):
pass
if __name__ == '__main__':
foo()
# Okay
classification_errors = None
# Okay
defined_properly = True
# Okay
defaults = {}
defaults.update({})
# Okay
def foo(x):
classification = x
definitely = not classification
| Bar |
python | py-pdf__pypdf | pypdf/errors.py | {
"start": 232,
"end": 410
} | class ____(Exception):
"""
Raised when a required dependency (a library or module that pypdf depends on)
is not available or cannot be imported.
"""
| DependencyError |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/fifo_queue_test.py | {
"start": 16396,
"end": 30608
} | class ____(test.TestCase):
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.FIFOQueue(100, dtypes_lib.int32, ())
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.FIFOQueue(total_count, dtypes_lib.int32, ())
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(self.evaluate(dequeued_t))
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testDoesNotLoseValue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(1, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(self.evaluate(size_t), [1])
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(self.evaluate(q1_size_t), [1])
self.assertEqual(self.evaluate(q2_size_t), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(self.evaluate(q1_size_t), [0])
self.assertEqual(self.evaluate(q2_size_t), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(self.evaluate(q1_size_t), [1])
self.assertEqual(self.evaluate(q2_size_t), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(self.evaluate(q1_size_t), [0])
self.assertEqual(self.evaluate(q2_size_t), [0])
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_f")
q_f_2 = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = []
for _ in range(num_queues):
qlist.append(data_flow_ops.FIFOQueue(10, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in range(20):
index = np.random.randint(num_queues)
q = data_flow_ops.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
q2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32)
enq_q = data_flow_ops.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.uint16, dtypes_lib.bool, dtypes_lib.complex64,
dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = self.evaluate(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testEnqueueDequeueOneComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=((),), names="f")
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegex(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue(10.0)
with self.assertRaisesRegex(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0,))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 12})
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
enqueue_op = q.enqueue({"f": 10.0})
enqueue_op2 = q.enqueue({"f": 20.0})
enqueue_op3 = q.enqueue({"f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegex(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many([40.0, 50.0])
# The dictionary keys must match the queue component names.
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": 12})
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
self.evaluate(enqueue_op)
self.evaluate(enqueue_op2)
self.evaluate(enqueue_op3)
self.evaluate(enqueue_op4)
f = sess.run(dequeue["f"])
self.assertEqual(10.0, f)
f = sess.run(dequeue_2["f"])
self.assertEqual([20.0, 30.0], list(f))
f = sess.run(dequeue_2["f"])
self.assertEqual([40.0, 50.0], list(f))
def testEnqueueDequeueMultipleComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32, dtypes_lib.string),
shapes=((), (), ()),
names=("f", "i", "s"))
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegex(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0, 123, "aa"))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 10.0})
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 12, "s": "aa"})
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegex(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
with self.assertRaisesRegex(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"],
"x": [1, 2]
})
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"]
})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
self.evaluate(enqueue_op)
self.evaluate(enqueue_op2)
self.evaluate(enqueue_op3)
self.evaluate(enqueue_op4)
i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
self.assertEqual(123, i)
self.assertEqual(10.0, f)
self.assertEqual(compat.as_bytes("aa"), s)
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([124, 125], list(i))
self.assertTrue([20.0, 30.0], list(f))
self.assertTrue([compat.as_bytes("bb"), compat.as_bytes("cc")], list(s))
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([126, 127], list(i))
self.assertTrue([40.0, 50.0], list(f))
self.assertTrue([compat.as_bytes("dd"), compat.as_bytes("ee")], list(s))
def testBatchSizeMismatch(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueWrongShapeAtRuntime(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
r"Expected \[3,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongShape(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,3,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
self.evaluate(dequeued_t)
@test_util.run_v1_only(
"These tests are heavily reliant on Session for parallelism. We can likely "
"convert them to run in 2.x/eager, but it may be difficult.")
| UnconvertedFIFOQueueTests |
python | pypa__pip | src/pip/_vendor/tomli/_parser.py | {
"start": 10261,
"end": 25778
} | class ____:
def __init__(self) -> None:
self.data = NestedDict()
self.flags = Flags()
def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
try:
while src[pos] in chars:
pos += 1
except IndexError:
pass
return pos
def skip_until(
src: str,
pos: Pos,
expect: str,
*,
error_on: frozenset[str],
error_on_eof: bool,
) -> Pos:
try:
new_pos = src.index(expect, pos)
except ValueError:
new_pos = len(src)
if error_on_eof:
raise TOMLDecodeError(f"Expected {expect!r}", src, new_pos) from None
if not error_on.isdisjoint(src[pos:new_pos]):
while src[pos] not in error_on:
pos += 1
raise TOMLDecodeError(f"Found invalid character {src[pos]!r}", src, pos)
return new_pos
def skip_comment(src: str, pos: Pos) -> Pos:
try:
char: str | None = src[pos]
except IndexError:
char = None
if char == "#":
return skip_until(
src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
)
return pos
def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
while True:
pos_before_skip = pos
pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
pos = skip_comment(src, pos)
if pos == pos_before_skip:
return pos
def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
pos += 1 # Skip "["
pos = skip_chars(src, pos, TOML_WS)
pos, key = parse_key(src, pos)
if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
raise TOMLDecodeError(f"Cannot declare {key} twice", src, pos)
out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
try:
out.data.get_or_create_nest(key)
except KeyError:
raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None
if not src.startswith("]", pos):
raise TOMLDecodeError(
"Expected ']' at the end of a table declaration", src, pos
)
return pos + 1, key
def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
pos += 2 # Skip "[["
pos = skip_chars(src, pos, TOML_WS)
pos, key = parse_key(src, pos)
if out.flags.is_(key, Flags.FROZEN):
raise TOMLDecodeError(f"Cannot mutate immutable namespace {key}", src, pos)
# Free the namespace now that it points to another empty list item...
out.flags.unset_all(key)
# ...but this key precisely is still prohibited from table declaration
out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
try:
out.data.append_nest_to_list(key)
except KeyError:
raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None
if not src.startswith("]]", pos):
raise TOMLDecodeError(
"Expected ']]' at the end of an array declaration", src, pos
)
return pos + 2, key
def key_value_rule(
src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat
) -> Pos:
pos, key, value = parse_key_value_pair(src, pos, parse_float, nest_lvl=0)
key_parent, key_stem = key[:-1], key[-1]
abs_key_parent = header + key_parent
relative_path_cont_keys = (header + key[:i] for i in range(1, len(key)))
for cont_key in relative_path_cont_keys:
# Check that dotted key syntax does not redefine an existing table
if out.flags.is_(cont_key, Flags.EXPLICIT_NEST):
raise TOMLDecodeError(f"Cannot redefine namespace {cont_key}", src, pos)
# Containers in the relative path can't be opened with the table syntax or
# dotted key/value syntax in following table sections.
out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST)
if out.flags.is_(abs_key_parent, Flags.FROZEN):
raise TOMLDecodeError(
f"Cannot mutate immutable namespace {abs_key_parent}", src, pos
)
try:
nest = out.data.get_or_create_nest(abs_key_parent)
except KeyError:
raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None
if key_stem in nest:
raise TOMLDecodeError("Cannot overwrite a value", src, pos)
# Mark inline table and array namespaces recursively immutable
if isinstance(value, (dict, list)):
out.flags.set(header + key, Flags.FROZEN, recursive=True)
nest[key_stem] = value
return pos
def parse_key_value_pair(
src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int
) -> tuple[Pos, Key, Any]:
pos, key = parse_key(src, pos)
try:
char: str | None = src[pos]
except IndexError:
char = None
if char != "=":
raise TOMLDecodeError("Expected '=' after a key in a key/value pair", src, pos)
pos += 1
pos = skip_chars(src, pos, TOML_WS)
pos, value = parse_value(src, pos, parse_float, nest_lvl)
return pos, key, value
def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]:
pos, key_part = parse_key_part(src, pos)
key: Key = (key_part,)
pos = skip_chars(src, pos, TOML_WS)
while True:
try:
char: str | None = src[pos]
except IndexError:
char = None
if char != ".":
return pos, key
pos += 1
pos = skip_chars(src, pos, TOML_WS)
pos, key_part = parse_key_part(src, pos)
key += (key_part,)
pos = skip_chars(src, pos, TOML_WS)
def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]:
try:
char: str | None = src[pos]
except IndexError:
char = None
if char in BARE_KEY_CHARS:
start_pos = pos
pos = skip_chars(src, pos, BARE_KEY_CHARS)
return pos, src[start_pos:pos]
if char == "'":
return parse_literal_str(src, pos)
if char == '"':
return parse_one_line_basic_str(src, pos)
raise TOMLDecodeError("Invalid initial character for a key part", src, pos)
def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]:
pos += 1
return parse_basic_str(src, pos, multiline=False)
def parse_array(
src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int
) -> tuple[Pos, list[Any]]:
pos += 1
array: list[Any] = []
pos = skip_comments_and_array_ws(src, pos)
if src.startswith("]", pos):
return pos + 1, array
while True:
pos, val = parse_value(src, pos, parse_float, nest_lvl)
array.append(val)
pos = skip_comments_and_array_ws(src, pos)
c = src[pos : pos + 1]
if c == "]":
return pos + 1, array
if c != ",":
raise TOMLDecodeError("Unclosed array", src, pos)
pos += 1
pos = skip_comments_and_array_ws(src, pos)
if src.startswith("]", pos):
return pos + 1, array
def parse_inline_table(
src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int
) -> tuple[Pos, dict[str, Any]]:
pos += 1
nested_dict = NestedDict()
flags = Flags()
pos = skip_chars(src, pos, TOML_WS)
if src.startswith("}", pos):
return pos + 1, nested_dict.dict
while True:
pos, key, value = parse_key_value_pair(src, pos, parse_float, nest_lvl)
key_parent, key_stem = key[:-1], key[-1]
if flags.is_(key, Flags.FROZEN):
raise TOMLDecodeError(f"Cannot mutate immutable namespace {key}", src, pos)
try:
nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
except KeyError:
raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None
if key_stem in nest:
raise TOMLDecodeError(f"Duplicate inline table key {key_stem!r}", src, pos)
nest[key_stem] = value
pos = skip_chars(src, pos, TOML_WS)
c = src[pos : pos + 1]
if c == "}":
return pos + 1, nested_dict.dict
if c != ",":
raise TOMLDecodeError("Unclosed inline table", src, pos)
if isinstance(value, (dict, list)):
flags.set(key, Flags.FROZEN, recursive=True)
pos += 1
pos = skip_chars(src, pos, TOML_WS)
def parse_basic_str_escape(
src: str, pos: Pos, *, multiline: bool = False
) -> tuple[Pos, str]:
escape_id = src[pos : pos + 2]
pos += 2
if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
# Skip whitespace until next non-whitespace character or end of
# the doc. Error if non-whitespace is found before newline.
if escape_id != "\\\n":
pos = skip_chars(src, pos, TOML_WS)
try:
char = src[pos]
except IndexError:
return pos, ""
if char != "\n":
raise TOMLDecodeError("Unescaped '\\' in a string", src, pos)
pos += 1
pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
return pos, ""
if escape_id == "\\u":
return parse_hex_char(src, pos, 4)
if escape_id == "\\U":
return parse_hex_char(src, pos, 8)
try:
return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
except KeyError:
raise TOMLDecodeError("Unescaped '\\' in a string", src, pos) from None
def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]:
return parse_basic_str_escape(src, pos, multiline=True)
def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]:
hex_str = src[pos : pos + hex_len]
if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
raise TOMLDecodeError("Invalid hex value", src, pos)
pos += hex_len
hex_int = int(hex_str, 16)
if not is_unicode_scalar_value(hex_int):
raise TOMLDecodeError(
"Escaped character is not a Unicode scalar value", src, pos
)
return pos, chr(hex_int)
def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]:
pos += 1 # Skip starting apostrophe
start_pos = pos
pos = skip_until(
src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
)
return pos + 1, src[start_pos:pos] # Skip ending apostrophe
def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]:
pos += 3
if src.startswith("\n", pos):
pos += 1
if literal:
delim = "'"
end_pos = skip_until(
src,
pos,
"'''",
error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
error_on_eof=True,
)
result = src[pos:end_pos]
pos = end_pos + 3
else:
delim = '"'
pos, result = parse_basic_str(src, pos, multiline=True)
# Add at maximum two extra apostrophes/quotes if the end sequence
# is 4 or 5 chars long instead of just 3.
if not src.startswith(delim, pos):
return pos, result
pos += 1
if not src.startswith(delim, pos):
return pos, result + delim
pos += 1
return pos, result + (delim * 2)
def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
if multiline:
error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
parse_escapes = parse_basic_str_escape_multiline
else:
error_on = ILLEGAL_BASIC_STR_CHARS
parse_escapes = parse_basic_str_escape
result = ""
start_pos = pos
while True:
try:
char = src[pos]
except IndexError:
raise TOMLDecodeError("Unterminated string", src, pos) from None
if char == '"':
if not multiline:
return pos + 1, result + src[start_pos:pos]
if src.startswith('"""', pos):
return pos + 3, result + src[start_pos:pos]
pos += 1
continue
if char == "\\":
result += src[start_pos:pos]
pos, parsed_escape = parse_escapes(src, pos)
result += parsed_escape
start_pos = pos
continue
if char in error_on:
raise TOMLDecodeError(f"Illegal character {char!r}", src, pos)
pos += 1
def parse_value( # noqa: C901
src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int
) -> tuple[Pos, Any]:
if nest_lvl > MAX_INLINE_NESTING:
# Pure Python should have raised RecursionError already.
# This ensures mypyc binaries eventually do the same.
raise RecursionError( # pragma: no cover
"TOML inline arrays/tables are nested more than the allowed"
f" {MAX_INLINE_NESTING} levels"
)
try:
char: str | None = src[pos]
except IndexError:
char = None
# IMPORTANT: order conditions based on speed of checking and likelihood
# Basic strings
if char == '"':
if src.startswith('"""', pos):
return parse_multiline_str(src, pos, literal=False)
return parse_one_line_basic_str(src, pos)
# Literal strings
if char == "'":
if src.startswith("'''", pos):
return parse_multiline_str(src, pos, literal=True)
return parse_literal_str(src, pos)
# Booleans
if char == "t":
if src.startswith("true", pos):
return pos + 4, True
if char == "f":
if src.startswith("false", pos):
return pos + 5, False
# Arrays
if char == "[":
return parse_array(src, pos, parse_float, nest_lvl + 1)
# Inline tables
if char == "{":
return parse_inline_table(src, pos, parse_float, nest_lvl + 1)
# Dates and times
datetime_match = RE_DATETIME.match(src, pos)
if datetime_match:
try:
datetime_obj = match_to_datetime(datetime_match)
except ValueError as e:
raise TOMLDecodeError("Invalid date or datetime", src, pos) from e
return datetime_match.end(), datetime_obj
localtime_match = RE_LOCALTIME.match(src, pos)
if localtime_match:
return localtime_match.end(), match_to_localtime(localtime_match)
# Integers and "normal" floats.
# The regex will greedily match any type starting with a decimal
# char, so needs to be located after handling of dates and times.
number_match = RE_NUMBER.match(src, pos)
if number_match:
return number_match.end(), match_to_number(number_match, parse_float)
# Special floats
first_three = src[pos : pos + 3]
if first_three in {"inf", "nan"}:
return pos + 3, parse_float(first_three)
first_four = src[pos : pos + 4]
if first_four in {"-inf", "+inf", "-nan", "+nan"}:
return pos + 4, parse_float(first_four)
raise TOMLDecodeError("Invalid value", src, pos)
def is_unicode_scalar_value(codepoint: int) -> bool:
return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
"""A decorator to make `parse_float` safe.
`parse_float` must not return dicts or lists, because these types
would be mixed with parsed TOML tables and arrays, thus confusing
the parser. The returned decorated callable raises `ValueError`
instead of returning illegal types.
"""
# The default `float` callable never returns illegal types. Optimize it.
if parse_float is float:
return float
def safe_parse_float(float_str: str) -> Any:
float_value = parse_float(float_str)
if isinstance(float_value, (dict, list)):
raise ValueError("parse_float must not return dicts or lists")
return float_value
return safe_parse_float
| Output |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_fields.py | {
"start": 385,
"end": 5313
} | class ____(AliasBaseTest):
def setUp(self):
super().setUp()
self.storage = utils.TemporaryStorage()
# Save a test image.
self.create_image(self.storage, 'avatars/avatar.jpg')
# Set the test model to use the current temporary storage.
for name in ('avatar', 'picture'):
field = models.TestModel._meta.get_field(name)
field.storage = self.storage
field.thumbnail_storage = self.storage
def tearDown(self):
self.storage.delete_temporary_storage()
super().tearDown()
def test_generate_thumbnail(self):
instance = models.TestModel(avatar='avatars/avatar.jpg')
thumb = instance.avatar.generate_thumbnail({'size': (300, 300)})
self.assertEqual((thumb.width, thumb.height), (300, 225))
def test_generate_thumbnail_bad_image(self):
text_file = ContentFile("Lorem ipsum dolor sit amet. Not an image.")
self.storage.save('avatars/invalid.jpg', text_file)
instance = models.TestModel(avatar='avatars/invalid.jpg')
generate = lambda: instance.avatar.generate_thumbnail(
{'size': (300, 300)})
self.assertRaises(NoSourceGenerator, generate)
def test_generate_thumbnail_alias_bad_image(self):
text_file = ContentFile("Lorem ipsum dolor sit amet. Not an image.")
self.storage.save('avatars/invalid.jpg', text_file)
instance = models.TestModel(avatar='avatars/invalid.jpg')
generate = lambda: instance.avatar['small']
self.assertRaises(InvalidImageFormatError, generate)
def test_generate_thumbnail_alias_0x0_size(self):
instance = models.TestModel(avatar='avatars/avatar.jpg')
self.assertRaises(
EasyThumbnailsError,
instance.avatar.generate_thumbnail, {'size': (0, 0)})
def test_delete(self):
instance = models.TestModel(avatar='avatars/avatar.jpg')
source_path = instance.avatar.path
thumb_paths = (
instance.avatar.get_thumbnail({'size': (300, 300)}).path,
instance.avatar.get_thumbnail({'size': (200, 200)}).path,
instance.avatar.get_thumbnail({'size': (100, 100)}).path,
)
self.assertTrue(os.path.exists(source_path))
for path in thumb_paths:
self.assertTrue(os.path.exists(path))
instance.avatar.delete(save=False)
self.assertFalse(os.path.exists(source_path))
for path in thumb_paths:
self.assertFalse(os.path.exists(path))
def test_delete_thumbnails(self):
instance = models.TestModel(avatar='avatars/avatar.jpg')
source_path = instance.avatar.path
thumb_paths = (
instance.avatar.get_thumbnail({'size': (300, 300)}).path,
instance.avatar.get_thumbnail({'size': (200, 200)}).path,
instance.avatar.get_thumbnail({'size': (100, 100)}).path,
)
self.assertTrue(os.path.exists(source_path))
for path in thumb_paths:
self.assertTrue(os.path.exists(path))
instance.avatar.delete_thumbnails()
self.assertTrue(os.path.exists(source_path))
for path in thumb_paths:
self.assertFalse(os.path.exists(path))
def test_get_thumbnails(self):
instance = models.TestModel(avatar='avatars/avatar.jpg')
instance.avatar.get_thumbnail({'size': (300, 300)})
instance.avatar.get_thumbnail({'size': (200, 200)})
self.assertEqual(len(list(instance.avatar.get_thumbnails())), 2)
def test_serialization(self):
instance = models.TestModel(avatar='avatars/avatar.jpg')
self.assertEqual('/media/avatars/avatar.jpg.100x100_q85.jpg', instance.avatar['small'].url)
new_instance = pickle.loads(pickle.dumps(instance))
self.assertEqual('/media/avatars/avatar.jpg.100x100_q85.jpg', new_instance.avatar['small'].url)
def _read_filefield(self, field):
if DJANGO_VERSION < (2, 0):
try:
return field.file.read()
finally:
field.file.close()
with field.open('rb') as fd:
return fd.read()
def test_saving_image_field_with_resize_source(self):
# Ensure that saving ThumbnailerImageField with resize_source enabled
# using instance.field.save() does not fail
instance = models.TestModel(avatar='avatars/avatar.jpg')
instance.picture.save(
'file.jpg', ContentFile(self._read_filefield(instance.avatar)), save=False)
self.assertEqual(instance.picture.width, 10)
def test_saving_image_field_with_resize_source_different_ext(self):
instance = models.TestModel(avatar='avatars/avatar.jpg')
instance.picture.save(
'file.gif', ContentFile(self._read_filefield(instance.avatar)), save=False)
self.assertEqual(instance.picture.name, 'pictures/file.jpg')
| ThumbnailerFieldTest |
python | pypa__warehouse | warehouse/oidc/models/github.py | {
"start": 11874,
"end": 14140
} | class ____(GitHubPublisherMixin, OIDCPublisher):
__tablename__ = "github_oidc_publishers"
__mapper_args__ = {"polymorphic_identity": "github_oidc_publishers"}
__table_args__ = (
UniqueConstraint(
"repository_name",
"repository_owner",
"workflow_filename",
"environment",
name="_github_oidc_publisher_uc",
),
)
id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True), ForeignKey(OIDCPublisher.id), primary_key=True
)
def verify_url(self, url: str) -> bool:
"""
Verify a given URL against this GitHub's publisher information
In addition to the generic Trusted Publisher verification logic in
the parent class, the GitHub Trusted Publisher allows URLs hosted
on `github.io` for the configured repository, i.e:
`https://${OWNER}.github.io/${REPO_NAME}/`.
As with the generic verification, we allow subpaths of the `.io` URL,
but we normalize using `rfc3986` to reject things like
`https://${OWNER}.github.io/${REPO_NAME}/../malicious`, which would
resolve to a URL outside the `/$REPO_NAME` path.
The suffix `.git` in repo URLs is ignored, since `github.com/org/repo.git`
always redirects to `github.com/org/repo`. This does not apply to subpaths,
like `github.com/org/repo.git/issues`, which do not redirect to the correct URL.
GitHub uses case-insensitive owner/repo slugs - so we perform a case-insensitive
comparison.
"""
docs_url = (
f"https://{self.repository_owner}.github.io/{self.repository_name}".lower()
)
normalized_url_prefixes = (self.publisher_base_url.lower(), docs_url)
for prefix in normalized_url_prefixes:
if url.lower().startswith(prefix):
url = prefix + url[len(prefix) :]
break
url_for_generic_check = url.removesuffix("/").removesuffix(".git")
if verify_url_from_reference(
reference_url=self.publisher_base_url.lower(),
url=url_for_generic_check,
):
return True
return verify_url_from_reference(reference_url=docs_url, url=url)
| GitHubPublisher |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_incident_groupopenperiod.py | {
"start": 2089,
"end": 4524
} | class ____(
OrganizationIncidentGroupOpenPeriodAPITestCase
):
def test_get_with_incident_id_filter(self) -> None:
response = self.get_success_response(
self.organization.slug, incident_id=str(self.incident_1.id)
)
assert response.data == serialize(self.igop_1, self.user)
def test_get_with_incident_identifier_filter(self) -> None:
response = self.get_success_response(
self.organization.slug, incident_identifier=str(self.incident_1.identifier)
)
assert response.data == serialize(self.igop_1, self.user)
def test_get_with_group_id_filter(self) -> None:
response = self.get_success_response(self.organization.slug, group_id=str(self.group_2.id))
assert response.data == serialize(self.igop_2, self.user)
def test_get_with_open_period_id_filter(self) -> None:
response = self.get_success_response(
self.organization.slug, open_period_id=str(self.open_period_3.id)
)
assert response.data == serialize(self.igop_3, self.user)
def test_get_with_multiple_filters(self) -> None:
response = self.get_success_response(
self.organization.slug,
incident_id=str(self.incident_1.id),
group_id=str(self.group_1.id),
)
assert response.data == serialize(self.igop_1, self.user)
def test_get_with_multiple_filters_with_invalid_filter(self) -> None:
self.get_error_response(
self.organization.slug,
incident_id=str(self.incident_1.id),
group_id="99999",
)
def test_get_with_nonexistent_incident_id(self) -> None:
self.get_error_response(self.organization.slug, incident_id="99999", status_code=404)
def test_get_with_nonexistent_incident_identifier(self) -> None:
self.get_error_response(
self.organization.slug, incident_identifier="99999", status_code=404
)
def test_get_with_nonexistent_group_id(self) -> None:
self.get_error_response(self.organization.slug, group_id="99999", status_code=404)
def test_get_with_nonexistent_open_period_id(self) -> None:
self.get_error_response(self.organization.slug, open_period_id="99999", status_code=404)
def test_no_filter_provided(self) -> None:
self.get_error_response(self.organization.slug, status_code=400)
| OrganizationIncidentGroupOpenPeriodIndexGetTest |
python | huggingface__transformers | src/transformers/models/roberta/modular_roberta.py | {
"start": 1653,
"end": 5728
} | class ____(BertEmbeddings):
def __init__(self, config):
super().__init__(config)
del self.pad_token_id
del self.position_embeddings
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
past_key_values_length: int = 0,
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
# NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
| RobertaEmbeddings |
python | spyder-ide__spyder | spyder/plugins/run/api.py | {
"start": 730,
"end": 1319
} | class ____(dict):
def __init__(self, identifier: str):
super().__init__()
def __setattr__(self, key: str, value: str) -> None:
if key not in self:
self[key] = value
def __getattribute__(self, key: str) -> str:
if key in self:
return self[key]
return super().__getattribute__(key)
RunContext = RunContextType('context')
RunContext.File = 'file'
RunContext.Cell = 'cell'
RunContext.Selection = 'selection'
RunResultFormat = RunContextType('result display format')
RunResultFormat.NoDisplay = 'no_display'
| RunContextType |
python | python-visualization__folium | folium/plugins/semicircle.py | {
"start": 196,
"end": 2991
} | class ____(JSCSSMixin, Marker):
"""Add a marker in the shape of a semicircle, similar to the Circle class.
Use (direction and arc) or (start_angle and stop_angle), not both.
Parameters
----------
location: tuple[float, float]
Latitude and Longitude pair (Northing, Easting)
radius: float
Radius of the circle, in meters.
direction: int, default None
Direction angle in degrees
arc: int, default None
Arc angle in degrees.
start_angle: int, default None
Start angle in degrees
stop_angle: int, default None
Stop angle in degrees.
popup: str or folium.Popup, optional
Input text or visualization for object displayed when clicking.
tooltip: str or folium.Tooltip, optional
Display a text when hovering over the object.
**kwargs
For additional arguments see :func:`folium.vector_layers.path_options`
Uses Leaflet plugin https://github.com/jieter/Leaflet-semicircle
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = L.semiCircle(
{{ this.location|tojson }},
{{ this.options|tojavascript }}
)
{%- if this.direction %}
.setDirection({{ this.direction[0] }}, {{ this.direction[1] }})
{%- endif %}
.addTo({{ this._parent.get_name() }});
{% endmacro %}
"""
)
default_js = [
(
"semicirclejs",
"https://cdn.jsdelivr.net/npm/leaflet-semicircle@2.0.4/Semicircle.min.js",
)
]
def __init__(
self,
location,
radius,
direction=None,
arc=None,
start_angle=None,
stop_angle=None,
popup=None,
tooltip=None,
**kwargs
):
super().__init__(location, popup=popup, tooltip=tooltip)
self._name = "SemiCircle"
self.direction = (
(direction, arc) if direction is not None and arc is not None else None
)
self.options = path_options(line=False, radius=radius, **kwargs)
self.options.update(
dict(
start_angle=start_angle,
stop_angle=stop_angle,
)
)
self.options = remove_empty(**self.options)
if not (
(direction is None and arc is None)
and (start_angle is not None and stop_angle is not None)
or (direction is not None and arc is not None)
and (start_angle is None and stop_angle is None)
):
raise ValueError(
"Invalid arguments. Either provide direction and arc OR start_angle and stop_angle"
)
| SemiCircle |
python | langchain-ai__langchain | libs/text-splitters/tests/unit_tests/test_text_splitters.py | {
"start": 23891,
"end": 116913
} | class ____
{
static void Main()
{
int age = 30; // Change the age value as needed
// Categorize the age without any console output
if (age < 18)
{
// Age is under 18
}
else if (age >= 18 && age < 65)
{
// Age is an adult
}
else
{
// Age is a senior citizen
}
}
}
"""
chunks = splitter.split_text(code)
assert chunks == [
"using System;",
"class Program\n{",
"static void",
"Main()",
"{",
"int age",
"= 30; // Change",
"the age value",
"as needed",
"//",
"Categorize the",
"age without any",
"console output",
"if (age",
"< 18)",
"{",
"//",
"Age is under 18",
"}",
"else if",
"(age >= 18 &&",
"age < 65)",
"{",
"//",
"Age is an adult",
"}",
"else",
"{",
"//",
"Age is a senior",
"citizen",
"}\n }",
"}",
]
def test_cpp_code_splitter() -> None:
    """C++ source is split on language-aware separators into tiny chunks."""
    cpp_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.CPP, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    sample = """
#include <iostream>
int main() {
std::cout << "Hello, World!" << std::endl;
return 0;
}
"""
    expected = [
        "#include",
        "<iostream>",
        "int main() {",
        "std::cout",
        '<< "Hello,',
        'World!" <<',
        "std::endl;",
        "return 0;\n}",
    ]
    assert cpp_splitter.split_text(sample) == expected
def test_scala_code_splitter() -> None:
    """Scala source is split on language-aware separators into tiny chunks."""
    scala_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.SCALA, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    sample = """
object HelloWorld {
def main(args: Array[String]): Unit = {
println("Hello, World!")
}
}
"""
    expected = [
        "object",
        "HelloWorld {",
        "def",
        "main(args:",
        "Array[String]):",
        "Unit = {",
        'println("Hello,',
        'World!")',
        "}\n}",
    ]
    assert scala_splitter.split_text(sample) == expected
def test_ruby_code_splitter() -> None:
    """Ruby source is split on language-aware separators into tiny chunks."""
    ruby_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.RUBY, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    sample = """
def hello_world
puts "Hello, World!"
end
hello_world
"""
    expected = [
        "def hello_world",
        'puts "Hello,',
        'World!"',
        "end",
        "hello_world",
    ]
    assert ruby_splitter.split_text(sample) == expected
def test_php_code_splitter() -> None:
    """PHP source is split on language-aware separators into CHUNK_SIZE chunks."""
    splitter = RecursiveCharacterTextSplitter.from_language(
        Language.PHP, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    code = """
<?php
function hello_world() {
echo "Hello, World!";
}
hello_world();
?>
"""
    chunks = splitter.split_text(code)
    assert chunks == [
        "<?php",
        "function",
        "hello_world() {",
        "echo",
        '"Hello,',
        'World!";',
        "}",
        "hello_world();",
        "?>",
    ]
def test_swift_code_splitter() -> None:
    """Swift source is split on language-aware separators into tiny chunks."""
    swift_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.SWIFT, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    sample = """
func helloWorld() {
print("Hello, World!")
}
helloWorld()
"""
    expected = [
        "func",
        "helloWorld() {",
        'print("Hello,',
        'World!")',
        "}",
        "helloWorld()",
    ]
    assert swift_splitter.split_text(sample) == expected
def test_rust_code_splitter() -> None:
    """Rust source is split on language-aware separators into tiny chunks."""
    rust_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.RUST, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    sample = """
fn main() {
println!("Hello, World!");
}
"""
    expected = ["fn main() {", 'println!("Hello', ",", 'World!");', "}"]
    assert rust_splitter.split_text(sample) == expected
def test_markdown_code_splitter() -> None:
    """Markdown splits on headers, lists, horizontal rules and code fences."""
    splitter = RecursiveCharacterTextSplitter.from_language(
        Language.MARKDOWN, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    code = """
# Sample Document
## Section
This is the content of the section.
## Lists
- Item 1
- Item 2
- Item 3
### Horizontal lines
***********
____________
-------------------
#### Code blocks
```
This is a code block
# sample code
a = 1
b = 2
```
"""
    chunks = splitter.split_text(code)
    assert chunks == [
        "# Sample",
        "Document",
        "## Section",
        "This is the",
        "content of the",
        "section.",
        "## Lists",
        "- Item 1",
        "- Item 2",
        "- Item 3",
        "### Horizontal",
        "lines",
        "***********",
        "____________",
        "---------------",
        "----",
        "#### Code",
        "blocks",
        "```",
        "This is a code",
        "block",
        "# sample code",
        "a = 1\nb = 2",
        "```",
    ]
    # Special test for special characters
    code = "harry\n***\nbabylon is"
    chunks = splitter.split_text(code)
    assert chunks == ["harry", "***\nbabylon is"]
def test_latex_code_splitter() -> None:
    """LaTeX text is split on LaTeX-aware separators such as \\chapter."""
    latex_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.LATEX, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    sample = """
Hi Harrison!
\\chapter{1}
"""
    assert latex_splitter.split_text(sample) == ["Hi Harrison!", "\\chapter{1}"]
def test_html_code_splitter() -> None:
    """HTML splits on tag-aware separators, keeping small sibling tags together."""
    splitter = RecursiveCharacterTextSplitter.from_language(
        Language.HTML, chunk_size=60, chunk_overlap=0
    )
    code = """
<h1>Sample Document</h1>
<h2>Section</h2>
<p id="1234">Reference content.</p>
<h2>Lists</h2>
<ul>
<li>Item 1</li>
<li>Item 2</li>
<li>Item 3</li>
</ul>
<h3>A block</h3>
<div class="amazing">
<p>Some text</p>
<p>Some more text</p>
</div>
"""
    chunks = splitter.split_text(code)
    assert chunks == [
        "<h1>Sample Document</h1>\n <h2>Section</h2>",
        '<p id="1234">Reference content.</p>',
        "<h2>Lists</h2>\n <ul>",
        "<li>Item 1</li>\n <li>Item 2</li>",
        "<li>Item 3</li>\n </ul>",
        "<h3>A block</h3>",
        '<div class="amazing">',
        "<p>Some text</p>",
        "<p>Some more text</p>\n </div>",
    ]
def test_md_header_text_splitter_1() -> None:
    """Test markdown splitter by header: Case 1.

    One ``#`` header with two ``##`` subsections: content is grouped under
    the nearest headers, which are recorded in metadata and stripped from
    the page content.
    """
    markdown_document = (
        "# Foo\n\n"
        " ## Bar\n\n"
        "Hi this is Jim\n\n"
        "Hi this is Joe\n\n"
        " ## Baz\n\n"
        " Hi this is Molly"
    )
    headers_to_split_on = [
        ("#", "Header 1"),
        ("##", "Header 2"),
    ]
    markdown_splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=headers_to_split_on,
    )
    output = markdown_splitter.split_text(markdown_document)
    expected_output = [
        Document(
            page_content="Hi this is Jim  \nHi this is Joe",
            metadata={"Header 1": "Foo", "Header 2": "Bar"},
        ),
        Document(
            page_content="Hi this is Molly",
            metadata={"Header 1": "Foo", "Header 2": "Baz"},
        ),
    ]
    assert output == expected_output
def test_md_header_text_splitter_2() -> None:
    """Test markdown splitter by header: Case 2.

    Three header levels: a ``###`` section nested under ``##`` carries all
    three header values in its metadata.
    """
    markdown_document = (
        "# Foo\n\n"
        " ## Bar\n\n"
        "Hi this is Jim\n\n"
        "Hi this is Joe\n\n"
        " ### Boo \n\n"
        " Hi this is Lance \n\n"
        " ## Baz\n\n"
        " Hi this is Molly"
    )
    headers_to_split_on = [
        ("#", "Header 1"),
        ("##", "Header 2"),
        ("###", "Header 3"),
    ]
    markdown_splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=headers_to_split_on,
    )
    output = markdown_splitter.split_text(markdown_document)
    expected_output = [
        Document(
            page_content="Hi this is Jim  \nHi this is Joe",
            metadata={"Header 1": "Foo", "Header 2": "Bar"},
        ),
        Document(
            page_content="Hi this is Lance",
            metadata={"Header 1": "Foo", "Header 2": "Bar", "Header 3": "Boo"},
        ),
        Document(
            page_content="Hi this is Molly",
            metadata={"Header 1": "Foo", "Header 2": "Baz"},
        ),
    ]
    assert output == expected_output
def test_md_header_text_splitter_3() -> None:
    """Test markdown splitter by header: Case 3.

    Four header levels: metadata accumulates one entry per enclosing header
    and resets when a shallower header reappears.
    """
    markdown_document = (
        "# Foo\n\n"
        " ## Bar\n\n"
        "Hi this is Jim\n\n"
        "Hi this is Joe\n\n"
        " ### Boo \n\n"
        " Hi this is Lance \n\n"
        " #### Bim \n\n"
        " Hi this is John \n\n"
        " ## Baz\n\n"
        " Hi this is Molly"
    )
    headers_to_split_on = [
        ("#", "Header 1"),
        ("##", "Header 2"),
        ("###", "Header 3"),
        ("####", "Header 4"),
    ]
    markdown_splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=headers_to_split_on,
    )
    output = markdown_splitter.split_text(markdown_document)
    expected_output = [
        Document(
            page_content="Hi this is Jim  \nHi this is Joe",
            metadata={"Header 1": "Foo", "Header 2": "Bar"},
        ),
        Document(
            page_content="Hi this is Lance",
            metadata={"Header 1": "Foo", "Header 2": "Bar", "Header 3": "Boo"},
        ),
        Document(
            page_content="Hi this is John",
            metadata={
                "Header 1": "Foo",
                "Header 2": "Bar",
                "Header 3": "Boo",
                "Header 4": "Bim",
            },
        ),
        Document(
            page_content="Hi this is Molly",
            metadata={"Header 1": "Foo", "Header 2": "Baz"},
        ),
    ]
    assert output == expected_output
def test_md_header_text_splitter_preserve_headers_1() -> None:
    """Test markdown splitter by header: Preserve Headers.

    With ``strip_headers=False`` the header lines stay in the page content;
    ``##`` headers are not split on (only ``#`` is configured) and so remain
    inline.
    """
    markdown_document = (
        "# Foo\n\n"
        " ## Bat\n\n"
        "Hi this is Jim\n\n"
        "Hi Joe\n\n"
        "## Baz\n\n"
        "# Bar\n\n"
        "This is Alice\n\n"
        "This is Bob"
    )
    headers_to_split_on = [
        ("#", "Header 1"),
    ]
    markdown_splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=headers_to_split_on,
        strip_headers=False,
    )
    output = markdown_splitter.split_text(markdown_document)
    expected_output = [
        Document(
            page_content="# Foo  \n## Bat  \nHi this is Jim  \nHi Joe  \n## Baz",
            metadata={"Header 1": "Foo"},
        ),
        Document(
            page_content="# Bar  \nThis is Alice  \nThis is Bob",
            metadata={"Header 1": "Bar"},
        ),
    ]
    assert output == expected_output
def test_md_header_text_splitter_preserve_headers_2() -> None:
    """Test markdown splitter by header: Preserve Headers.

    With ``strip_headers=False`` and three configured levels, every split
    document starts with its own header line; a header with no body (Buz)
    still produces a document.
    """
    markdown_document = (
        "# Foo\n\n"
        " ## Bar\n\n"
        "Hi this is Jim\n\n"
        "Hi this is Joe\n\n"
        "### Boo \n\n"
        "Hi this is Lance\n\n"
        "## Baz\n\n"
        "Hi this is Molly\n"
        " ## Buz\n"
        "# Bop"
    )
    headers_to_split_on = [
        ("#", "Header 1"),
        ("##", "Header 2"),
        ("###", "Header 3"),
    ]
    markdown_splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=headers_to_split_on,
        strip_headers=False,
    )
    output = markdown_splitter.split_text(markdown_document)
    expected_output = [
        Document(
            page_content="# Foo  \n## Bar  \nHi this is Jim  \nHi this is Joe",
            metadata={"Header 1": "Foo", "Header 2": "Bar"},
        ),
        Document(
            page_content="### Boo  \nHi this is Lance",
            metadata={"Header 1": "Foo", "Header 2": "Bar", "Header 3": "Boo"},
        ),
        Document(
            page_content="## Baz  \nHi this is Molly",
            metadata={"Header 1": "Foo", "Header 2": "Baz"},
        ),
        Document(
            page_content="## Buz",
            metadata={"Header 1": "Foo", "Header 2": "Buz"},
        ),
        Document(page_content="# Bop", metadata={"Header 1": "Bop"}),
    ]
    assert output == expected_output
@pytest.mark.parametrize("fence", [("```"), ("~~~")])
def test_md_header_text_splitter_fenced_code_block(fence: str) -> None:
    """Test markdown splitter by header: Fenced code block.

    A ``#`` line inside a fenced code block (either fence style) must not
    be treated as a header.
    """
    markdown_document = (
        f"# This is a Header\n\n{fence}\nfoo()\n# Not a header\nbar()\n{fence}"
    )
    headers_to_split_on = [
        ("#", "Header 1"),
        ("##", "Header 2"),
    ]
    markdown_splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=headers_to_split_on,
    )
    output = markdown_splitter.split_text(markdown_document)
    expected_output = [
        Document(
            page_content=f"{fence}\nfoo()\n# Not a header\nbar()\n{fence}",
            metadata={"Header 1": "This is a Header"},
        ),
    ]
    assert output == expected_output
@pytest.mark.parametrize(("fence", "other_fence"), [("```", "~~~"), ("~~~", "```")])
def test_md_header_text_splitter_fenced_code_block_interleaved(
    fence: str, other_fence: str
) -> None:
    """Test markdown splitter by header: Interleaved fenced code block.

    The opposite fence style appearing inside an open fence must not close
    it, so the enclosed ``#`` lines stay non-headers.
    """
    markdown_document = (
        "# This is a Header\n\n"
        f"{fence}\n"
        "foo\n"
        "# Not a header\n"
        f"{other_fence}\n"
        "# Not a header\n"
        f"{fence}"
    )
    headers_to_split_on = [
        ("#", "Header 1"),
        ("##", "Header 2"),
    ]
    markdown_splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=headers_to_split_on,
    )
    output = markdown_splitter.split_text(markdown_document)
    expected_output = [
        Document(
            page_content=(
                f"{fence}\nfoo\n# Not a header\n{other_fence}\n# Not a header\n{fence}"
            ),
            metadata={"Header 1": "This is a Header"},
        ),
    ]
    assert output == expected_output
@pytest.mark.parametrize("characters", ["\ufeff"])
def test_md_header_text_splitter_with_invisible_characters(characters: str) -> None:
    """Test markdown splitter by header: invisible leading characters.

    An invisible character (e.g. a BOM) immediately before ``#`` must not
    prevent header detection.
    """
    markdown_document = f"{characters}# Foo\n\nfoo()\n{characters}## Bar\n\nbar()"
    headers_to_split_on = [
        ("#", "Header 1"),
        ("##", "Header 2"),
    ]
    markdown_splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=headers_to_split_on,
    )
    output = markdown_splitter.split_text(markdown_document)
    expected_output = [
        Document(
            page_content="foo()",
            metadata={"Header 1": "Foo"},
        ),
        Document(
            page_content="bar()",
            metadata={"Header 1": "Foo", "Header 2": "Bar"},
        ),
    ]
    assert output == expected_output
def test_md_header_text_splitter_with_custom_headers() -> None:
    """Test markdown splitter with custom header patterns like **Header**.

    ``custom_header_patterns`` maps non-``#`` markers to header levels so
    bold / bold-italic lines behave like level-1 / level-2 headers.
    """
    markdown_document = """**Chapter 1**
This is the content for chapter 1.
***Section 1.1***
This is the content for section 1.1.
**Chapter 2**
This is the content for chapter 2.
***Section 2.1***
This is the content for section 2.1.
"""
    headers_to_split_on = [
        ("**", "Bold Header"),
        ("***", "Bold Italic Header"),
    ]
    custom_header_patterns = {
        "**": 1,  # Level 1 headers
        "***": 2,  # Level 2 headers
    }
    markdown_splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=headers_to_split_on,
        custom_header_patterns=custom_header_patterns,
    )
    output = markdown_splitter.split_text(markdown_document)
    expected_output = [
        Document(
            page_content="This is the content for chapter 1.",
            metadata={"Bold Header": "Chapter 1"},
        ),
        Document(
            page_content="This is the content for section 1.1.",
            metadata={"Bold Header": "Chapter 1", "Bold Italic Header": "Section 1.1"},
        ),
        Document(
            page_content="This is the content for chapter 2.",
            metadata={"Bold Header": "Chapter 2"},
        ),
        Document(
            page_content="This is the content for section 2.1.",
            metadata={"Bold Header": "Chapter 2", "Bold Italic Header": "Section 2.1"},
        ),
    ]
    assert output == expected_output
def test_md_header_text_splitter_mixed_headers() -> None:
    """Test markdown splitter with both standard and custom headers.

    ``#``/``##`` and ``**``/``***`` markers share the same two levels, so a
    custom header can replace a standard one of equal level in metadata.
    """
    markdown_document = """# Standard Header 1
Content under standard header.
**Custom Header 1**
Content under custom header.
## Standard Header 2
Content under standard header 2.
***Custom Header 2***
Content under custom header 2.
"""
    headers_to_split_on = [
        ("#", "Header 1"),
        ("##", "Header 2"),
        ("**", "Bold Header"),
        ("***", "Bold Italic Header"),
    ]
    custom_header_patterns = {
        "**": 1,  # Same level as #
        "***": 2,  # Same level as ##
    }
    markdown_splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=headers_to_split_on,
        custom_header_patterns=custom_header_patterns,
    )
    output = markdown_splitter.split_text(markdown_document)
    expected_output = [
        Document(
            page_content="Content under standard header.",
            metadata={"Header 1": "Standard Header 1"},
        ),
        Document(
            page_content="Content under custom header.",
            metadata={"Bold Header": "Custom Header 1"},
        ),
        Document(
            page_content="Content under standard header 2.",
            metadata={
                "Bold Header": "Custom Header 1",
                "Header 2": "Standard Header 2",
            },
        ),
        Document(
            page_content="Content under custom header 2.",
            metadata={
                "Bold Header": "Custom Header 1",
                "Bold Italic Header": "Custom Header 2",
            },
        ),
    ]
    assert output == expected_output
EXPERIMENTAL_MARKDOWN_DOCUMENT = (
"# My Header 1\n"
"Content for header 1\n"
"## Header 2\n"
"Content for header 2\n"
"### Header 3\n"
"Content for header 3\n"
"## Header 2 Again\n"
"This should be tagged with Header 1 and Header 2 Again\n"
"```python\n"
"def func_definition():\n"
" print('Keep the whitespace consistent')\n"
"```\n"
"# Header 1 again\n"
"We should also split on the horizontal line\n"
"----\n"
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
)
def test_experimental_markdown_syntax_text_splitter() -> None:
    """Test experimental markdown syntax splitter.

    Default configuration: headers are stripped into metadata, the fenced
    code block is kept whole with ``Code`` metadata, and the horizontal
    rule starts a new document with unchanged header metadata.
    """
    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter()
    output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)
    expected_output = [
        Document(
            page_content="Content for header 1\n",
            metadata={"Header 1": "My Header 1"},
        ),
        Document(
            page_content="Content for header 2\n",
            metadata={"Header 1": "My Header 1", "Header 2": "Header 2"},
        ),
        Document(
            page_content="Content for header 3\n",
            metadata={
                "Header 1": "My Header 1",
                "Header 2": "Header 2",
                "Header 3": "Header 3",
            },
        ),
        Document(
            page_content="This should be tagged with Header 1 and Header 2 Again\n",
            metadata={"Header 1": "My Header 1", "Header 2": "Header 2 Again"},
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n   "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2 Again",
            },
        ),
        Document(
            page_content="We should also split on the horizontal line\n",
            metadata={"Header 1": "Header 1 again"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Header 1": "Header 1 again"},
        ),
    ]
    assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_header_configuration() -> None:
    """Test experimental markdown syntax splitter.

    With only ``#`` configured (custom metadata key), ``##``/``###`` lines
    are left inline in the content instead of causing splits.
    """
    headers_to_split_on = [("#", "Encabezamiento 1")]
    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(
        headers_to_split_on=headers_to_split_on
    )
    output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)
    expected_output = [
        Document(
            page_content=(
                "Content for header 1\n"
                "## Header 2\n"
                "Content for header 2\n"
                "### Header 3\n"
                "Content for header 3\n"
                "## Header 2 Again\n"
                "This should be tagged with Header 1 and Header 2 Again\n"
            ),
            metadata={"Encabezamiento 1": "My Header 1"},
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n   "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={"Code": "python", "Encabezamiento 1": "My Header 1"},
        ),
        Document(
            page_content="We should also split on the horizontal line\n",
            metadata={"Encabezamiento 1": "Header 1 again"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Encabezamiento 1": "Header 1 again"},
        ),
    ]
    assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_with_headers() -> None:
    """Test experimental markdown syntax splitter.

    With ``strip_headers=False`` each split document keeps its header line
    at the top of the page content.
    """
    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(strip_headers=False)
    output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)
    expected_output = [
        Document(
            page_content="# My Header 1\nContent for header 1\n",
            metadata={"Header 1": "My Header 1"},
        ),
        Document(
            page_content="## Header 2\nContent for header 2\n",
            metadata={"Header 1": "My Header 1", "Header 2": "Header 2"},
        ),
        Document(
            page_content="### Header 3\nContent for header 3\n",
            metadata={
                "Header 1": "My Header 1",
                "Header 2": "Header 2",
                "Header 3": "Header 3",
            },
        ),
        Document(
            page_content=(
                "## Header 2 Again\n"
                "This should be tagged with Header 1 and Header 2 Again\n"
            ),
            metadata={"Header 1": "My Header 1", "Header 2": "Header 2 Again"},
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n   "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2 Again",
            },
        ),
        Document(
            page_content=(
                "# Header 1 again\nWe should also split on the horizontal line\n"
            ),
            metadata={"Header 1": "Header 1 again"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Header 1": "Header 1 again"},
        ),
    ]
    assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_split_lines() -> None:
    """Test experimental markdown syntax splitter.

    With ``return_each_line=True`` every non-empty line becomes its own
    Document, including the individual lines of the fenced code block.
    """
    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(return_each_line=True)
    output = markdown_splitter.split_text(EXPERIMENTAL_MARKDOWN_DOCUMENT)
    expected_output = [
        Document(
            page_content="Content for header 1", metadata={"Header 1": "My Header 1"}
        ),
        Document(
            page_content="Content for header 2",
            metadata={"Header 1": "My Header 1", "Header 2": "Header 2"},
        ),
        Document(
            page_content="Content for header 3",
            metadata={
                "Header 1": "My Header 1",
                "Header 2": "Header 2",
                "Header 3": "Header 3",
            },
        ),
        Document(
            page_content="This should be tagged with Header 1 and Header 2 Again",
            metadata={"Header 1": "My Header 1", "Header 2": "Header 2 Again"},
        ),
        Document(
            page_content="```python",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2 Again",
            },
        ),
        Document(
            page_content="def func_definition():",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2 Again",
            },
        ),
        Document(
            page_content="   print('Keep the whitespace consistent')",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2 Again",
            },
        ),
        Document(
            page_content="```",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1",
                "Header 2": "Header 2 Again",
            },
        ),
        Document(
            page_content="We should also split on the horizontal line",
            metadata={"Header 1": "Header 1 again"},
        ),
        Document(
            page_content="This will be a new doc but with the same header metadata",
            metadata={"Header 1": "Header 1 again"},
        ),
        Document(
            page_content="And it includes a new paragraph",
            metadata={"Header 1": "Header 1 again"},
        ),
    ]
    assert output == expected_output
EXPERIMENTAL_MARKDOWN_DOCUMENTS = [
(
"# My Header 1 From Document 1\n"
"Content for header 1 from Document 1\n"
"## Header 2 From Document 1\n"
"Content for header 2 from Document 1\n"
"```python\n"
"def func_definition():\n"
" print('Keep the whitespace consistent')\n"
"```\n"
"# Header 1 again From Document 1\n"
"We should also split on the horizontal line\n"
"----\n"
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
(
"# My Header 1 From Document 2\n"
"Content for header 1 from Document 2\n"
"## Header 2 From Document 2\n"
"Content for header 2 from Document 2\n"
"```python\n"
"def func_definition():\n"
" print('Keep the whitespace consistent')\n"
"```\n"
"# Header 1 again From Document 2\n"
"We should also split on the horizontal line\n"
"----\n"
"This will be a new doc but with the same header metadata\n\n"
"And it includes a new paragraph"
),
]
def test_experimental_markdown_syntax_text_splitter_on_multi_files() -> None:
    """Test ExperimentalMarkdownSyntaxTextSplitter on multiple files.

    Test experimental markdown syntax splitter split on default called consecutively
    on two files.
    """
    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter()
    output = []
    # Reuse a single splitter instance: internal state must reset per call.
    for experimental_markdown_document in EXPERIMENTAL_MARKDOWN_DOCUMENTS:
        output += markdown_splitter.split_text(experimental_markdown_document)
    expected_output = [
        Document(
            page_content="Content for header 1 from Document 1\n",
            metadata={"Header 1": "My Header 1 From Document 1"},
        ),
        Document(
            page_content="Content for header 2 from Document 1\n",
            metadata={
                "Header 1": "My Header 1 From Document 1",
                "Header 2": "Header 2 From Document 1",
            },
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n   "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 1",
                "Header 2": "Header 2 From Document 1",
            },
        ),
        Document(
            page_content="We should also split on the horizontal line\n",
            metadata={"Header 1": "Header 1 again From Document 1"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Header 1": "Header 1 again From Document 1"},
        ),
        Document(
            page_content="Content for header 1 from Document 2\n",
            metadata={"Header 1": "My Header 1 From Document 2"},
        ),
        Document(
            page_content="Content for header 2 from Document 2\n",
            metadata={
                "Header 1": "My Header 1 From Document 2",
                "Header 2": "Header 2 From Document 2",
            },
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n   "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 2",
                "Header 2": "Header 2 From Document 2",
            },
        ),
        Document(
            page_content="We should also split on the horizontal line\n",
            metadata={"Header 1": "Header 1 again From Document 2"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Header 1": "Header 1 again From Document 2"},
        ),
    ]
    assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_split_lines_on_multi_files() -> (
    None
):
    """Test ExperimentalMarkdownSyntaxTextSplitter split lines on multiple files.

    Test experimental markdown syntax splitter split on each line called consecutively
    on two files.
    """
    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(return_each_line=True)
    output = []
    # Reuse a single splitter instance: internal state must reset per call.
    for experimental_markdown_document in EXPERIMENTAL_MARKDOWN_DOCUMENTS:
        output += markdown_splitter.split_text(experimental_markdown_document)
    expected_output = [
        Document(
            page_content="Content for header 1 from Document 1",
            metadata={"Header 1": "My Header 1 From Document 1"},
        ),
        Document(
            page_content="Content for header 2 from Document 1",
            metadata={
                "Header 1": "My Header 1 From Document 1",
                "Header 2": "Header 2 From Document 1",
            },
        ),
        Document(
            page_content="```python",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 1",
                "Header 2": "Header 2 From Document 1",
            },
        ),
        Document(
            page_content="def func_definition():",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 1",
                "Header 2": "Header 2 From Document 1",
            },
        ),
        Document(
            page_content="   print('Keep the whitespace consistent')",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 1",
                "Header 2": "Header 2 From Document 1",
            },
        ),
        Document(
            page_content="```",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 1",
                "Header 2": "Header 2 From Document 1",
            },
        ),
        Document(
            page_content="We should also split on the horizontal line",
            metadata={"Header 1": "Header 1 again From Document 1"},
        ),
        Document(
            page_content="This will be a new doc but with the same header metadata",
            metadata={"Header 1": "Header 1 again From Document 1"},
        ),
        Document(
            page_content="And it includes a new paragraph",
            metadata={"Header 1": "Header 1 again From Document 1"},
        ),
        Document(
            page_content="Content for header 1 from Document 2",
            metadata={"Header 1": "My Header 1 From Document 2"},
        ),
        Document(
            page_content="Content for header 2 from Document 2",
            metadata={
                "Header 1": "My Header 1 From Document 2",
                "Header 2": "Header 2 From Document 2",
            },
        ),
        Document(
            page_content="```python",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 2",
                "Header 2": "Header 2 From Document 2",
            },
        ),
        Document(
            page_content="def func_definition():",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 2",
                "Header 2": "Header 2 From Document 2",
            },
        ),
        Document(
            page_content="   print('Keep the whitespace consistent')",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 2",
                "Header 2": "Header 2 From Document 2",
            },
        ),
        Document(
            page_content="```",
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 2",
                "Header 2": "Header 2 From Document 2",
            },
        ),
        Document(
            page_content="We should also split on the horizontal line",
            metadata={"Header 1": "Header 1 again From Document 2"},
        ),
        Document(
            page_content="This will be a new doc but with the same header metadata",
            metadata={"Header 1": "Header 1 again From Document 2"},
        ),
        Document(
            page_content="And it includes a new paragraph",
            metadata={"Header 1": "Header 1 again From Document 2"},
        ),
    ]
    assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_with_header_on_multi_files() -> (
    None
):
    """Test ExperimentalMarkdownSyntaxTextSplitter with header on multiple files.

    Test experimental markdown splitter by header called consecutively on two files.
    """
    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(strip_headers=False)
    output = []
    # Reuse a single splitter instance: internal state must reset per call.
    for experimental_markdown_document in EXPERIMENTAL_MARKDOWN_DOCUMENTS:
        output += markdown_splitter.split_text(experimental_markdown_document)
    expected_output = [
        Document(
            page_content="# My Header 1 From Document 1\n"
            "Content for header 1 from Document 1\n",
            metadata={"Header 1": "My Header 1 From Document 1"},
        ),
        Document(
            page_content="## Header 2 From Document 1\n"
            "Content for header 2 from Document 1\n",
            metadata={
                "Header 1": "My Header 1 From Document 1",
                "Header 2": "Header 2 From Document 1",
            },
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n   "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 1",
                "Header 2": "Header 2 From Document 1",
            },
        ),
        Document(
            page_content="# Header 1 again From Document 1\n"
            "We should also split on the horizontal line\n",
            metadata={"Header 1": "Header 1 again From Document 1"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Header 1": "Header 1 again From Document 1"},
        ),
        Document(
            page_content="# My Header 1 From Document 2\n"
            "Content for header 1 from Document 2\n",
            metadata={"Header 1": "My Header 1 From Document 2"},
        ),
        Document(
            page_content="## Header 2 From Document 2\n"
            "Content for header 2 from Document 2\n",
            metadata={
                "Header 1": "My Header 1 From Document 2",
                "Header 2": "Header 2 From Document 2",
            },
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n   "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={
                "Code": "python",
                "Header 1": "My Header 1 From Document 2",
                "Header 2": "Header 2 From Document 2",
            },
        ),
        Document(
            page_content="# Header 1 again From Document 2\n"
            "We should also split on the horizontal line\n",
            metadata={"Header 1": "Header 1 again From Document 2"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Header 1": "Header 1 again From Document 2"},
        ),
    ]
    assert output == expected_output
def test_experimental_markdown_syntax_text_splitter_header_config_on_multi_files() -> (
    None
):
    """Test ExperimentalMarkdownSyntaxTextSplitter header config on multiple files.

    Test experimental markdown splitter by header configuration called consecutively
    on two files.
    """
    headers_to_split_on = [("#", "Encabezamiento 1")]
    markdown_splitter = ExperimentalMarkdownSyntaxTextSplitter(
        headers_to_split_on=headers_to_split_on
    )
    output = []
    # Reuse a single splitter instance: internal state must reset per call.
    for experimental_markdown_document in EXPERIMENTAL_MARKDOWN_DOCUMENTS:
        output += markdown_splitter.split_text(experimental_markdown_document)
    expected_output = [
        Document(
            page_content="Content for header 1 from Document 1\n"
            "## Header 2 From Document 1\n"
            "Content for header 2 from Document 1\n",
            metadata={"Encabezamiento 1": "My Header 1 From Document 1"},
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n   "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={
                "Code": "python",
                "Encabezamiento 1": "My Header 1 From Document 1",
            },
        ),
        Document(
            page_content="We should also split on the horizontal line\n",
            metadata={"Encabezamiento 1": "Header 1 again From Document 1"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Encabezamiento 1": "Header 1 again From Document 1"},
        ),
        Document(
            page_content="Content for header 1 from Document 2\n"
            "## Header 2 From Document 2\n"
            "Content for header 2 from Document 2\n",
            metadata={"Encabezamiento 1": "My Header 1 From Document 2"},
        ),
        Document(
            page_content=(
                "```python\ndef func_definition():\n   "
                "print('Keep the whitespace consistent')\n```\n"
            ),
            metadata={
                "Code": "python",
                "Encabezamiento 1": "My Header 1 From Document 2",
            },
        ),
        Document(
            page_content="We should also split on the horizontal line\n",
            metadata={"Encabezamiento 1": "Header 1 again From Document 2"},
        ),
        Document(
            page_content=(
                "This will be a new doc but with the same header metadata\n\n"
                "And it includes a new paragraph"
            ),
            metadata={"Encabezamiento 1": "Header 1 again From Document 2"},
        ),
    ]
    assert output == expected_output
def test_solidity_code_splitter() -> None:
    """Solidity source is split on language-aware separators into tiny chunks."""
    sol_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.SOL, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    sample = """pragma solidity ^0.8.20;
contract HelloWorld {
function add(uint a, uint b) pure public returns(uint) {
return a + b;
}
}
"""
    expected = [
        "pragma solidity",
        "^0.8.20;",
        "contract",
        "HelloWorld {",
        "function",
        "add(uint a,",
        "uint b) pure",
        "public",
        "returns(uint) {",
        "return a",
        "+ b;",
        "}\n }",
    ]
    assert sol_splitter.split_text(sample) == expected
def test_lua_code_splitter() -> None:
    """Lua source is split on language-aware separators into CHUNK_SIZE chunks."""
    splitter = RecursiveCharacterTextSplitter.from_language(
        Language.LUA, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    code = """
local variable = 10
function add(a, b)
return a + b
end
if variable > 5 then
for i=1, variable do
while i < variable do
repeat
print(i)
i = i + 1
until i >= variable
end
end
end
"""
    chunks = splitter.split_text(code)
    assert chunks == [
        "local variable",
        "= 10",
        "function add(a,",
        "b)",
        "return a +",
        "b",
        "end",
        "if variable > 5",
        "then",
        "for i=1,",
        "variable do",
        "while i",
        "< variable do",
        "repeat",
        "print(i)",
        "i = i + 1",
        "until i >=",
        "variable",
        "end",
        "end\nend",
    ]
def test_haskell_code_splitter() -> None:
    """Haskell source is chunked along the HASKELL-specific separators."""
    haskell_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.HASKELL, chunk_size=CHUNK_SIZE, chunk_overlap=0
    )
    haskell_code = """
main :: IO ()
main = do
putStrLn "Hello, World!"
-- Some sample functions
add :: Int -> Int -> Int
add x y = x + y
"""
    # Expected chunks account for the indentation and newlines of the source.
    expected_chunks = [
        "main ::",
        "IO ()",
        "main = do",
        "putStrLn",
        '"Hello, World!"',
        "--",
        "Some sample",
        "functions",
        "add :: Int ->",
        "Int -> Int",
        "add x y = x",
        "+ y",
    ]
    assert haskell_splitter.split_text(haskell_code) == expected_chunks
@pytest.fixture
def html_header_splitter_splitter_factory() -> Callable[
    [list[tuple[str, str]]], HTMLHeaderTextSplitter
]:
    """Provide a factory building `HTMLHeaderTextSplitter` instances.

    Lets each test construct splitters for arbitrary header configurations.
    """

    def _build(
        headers_to_split_on: list[tuple[str, str]],
    ) -> HTMLHeaderTextSplitter:
        return HTMLHeaderTextSplitter(headers_to_split_on=headers_to_split_on)

    return _build
@pytest.mark.parametrize(
    ("headers_to_split_on", "html_input", "expected_documents", "test_case"),
    [
        (
            # Test Case 1: Split on h1 and h2
            [("h1", "Header 1"), ("h2", "Header 2")],
            """
<html>
<body>
<h1>Introduction</h1>
<p>This is the introduction.</p>
<h2>Background</h2>
<p>Background information.</p>
<h1>Conclusion</h1>
<p>Final thoughts.</p>
</body>
</html>
""",
            [
                Document(
                    page_content="Introduction", metadata={"Header 1": "Introduction"}
                ),
                Document(
                    page_content="This is the introduction.",
                    metadata={"Header 1": "Introduction"},
                ),
                Document(
                    page_content="Background",
                    metadata={"Header 1": "Introduction", "Header 2": "Background"},
                ),
                Document(
                    page_content="Background information.",
                    metadata={"Header 1": "Introduction", "Header 2": "Background"},
                ),
                Document(
                    page_content="Conclusion", metadata={"Header 1": "Conclusion"}
                ),
                Document(
                    page_content="Final thoughts.", metadata={"Header 1": "Conclusion"}
                ),
            ],
            "Simple headers and paragraphs",
        ),
        (
            # Test Case 2: Nested headers with h1, h2, and h3
            [("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3")],
            """
<html>
<body>
<div>
<h1>Main Title</h1>
<div>
<h2>Subsection</h2>
<p>Details of subsection.</p>
<div>
<h3>Sub-subsection</h3>
<p>More details.</p>
</div>
</div>
</div>
<h1>Another Main Title</h1>
<p>Content under another main title.</p>
</body>
</html>
""",
            [
                Document(
                    page_content="Main Title", metadata={"Header 1": "Main Title"}
                ),
                Document(
                    page_content="Subsection",
                    metadata={"Header 1": "Main Title", "Header 2": "Subsection"},
                ),
                Document(
                    page_content="Details of subsection.",
                    metadata={"Header 1": "Main Title", "Header 2": "Subsection"},
                ),
                Document(
                    page_content="Sub-subsection",
                    metadata={
                        "Header 1": "Main Title",
                        "Header 2": "Subsection",
                        "Header 3": "Sub-subsection",
                    },
                ),
                Document(
                    page_content="More details.",
                    metadata={
                        "Header 1": "Main Title",
                        "Header 2": "Subsection",
                        "Header 3": "Sub-subsection",
                    },
                ),
                Document(
                    page_content="Another Main Title",
                    metadata={"Header 1": "Another Main Title"},
                ),
                Document(
                    page_content="Content under another main title.",
                    metadata={"Header 1": "Another Main Title"},
                ),
            ],
            "Nested headers with h1, h2, and h3",
        ),
        (
            # Test Case 3: No headers
            [("h1", "Header 1")],
            """
<html>
<body>
<p>Paragraph one.</p>
<p>Paragraph two.</p>
<div>
<p>Paragraph three.</p>
</div>
</body>
</html>
""",
            [
                Document(
                    page_content="Paragraph one. \nParagraph two. \nParagraph three.",
                    metadata={},
                )
            ],
            "No headers present",
        ),
        (
            # Test Case 4: Multiple headers of the same level
            [("h1", "Header 1")],
            """
<html>
<body>
<h1>Chapter 1</h1>
<p>Content of chapter 1.</p>
<h1>Chapter 2</h1>
<p>Content of chapter 2.</p>
<h1>Chapter 3</h1>
<p>Content of chapter 3.</p>
</body>
</html>
""",
            [
                Document(page_content="Chapter 1", metadata={"Header 1": "Chapter 1"}),
                Document(
                    page_content="Content of chapter 1.",
                    metadata={"Header 1": "Chapter 1"},
                ),
                Document(page_content="Chapter 2", metadata={"Header 1": "Chapter 2"}),
                Document(
                    page_content="Content of chapter 2.",
                    metadata={"Header 1": "Chapter 2"},
                ),
                Document(page_content="Chapter 3", metadata={"Header 1": "Chapter 3"}),
                Document(
                    page_content="Content of chapter 3.",
                    metadata={"Header 1": "Chapter 3"},
                ),
            ],
            "Multiple headers of the same level",
        ),
        (
            # Test Case 5: Headers with no content
            [("h1", "Header 1"), ("h2", "Header 2")],
            """
<html>
<body>
<h1>Header 1</h1>
<h2>Header 2</h2>
<h1>Header 3</h1>
</body>
</html>
""",
            [
                Document(page_content="Header 1", metadata={"Header 1": "Header 1"}),
                Document(
                    page_content="Header 2",
                    metadata={"Header 1": "Header 1", "Header 2": "Header 2"},
                ),
                Document(page_content="Header 3", metadata={"Header 1": "Header 3"}),
            ],
            "Headers with no associated content",
        ),
    ],
)
@pytest.mark.requires("bs4")
def test_html_header_text_splitter(
    html_header_splitter_splitter_factory: Callable[
        [list[tuple[str, str]]], HTMLHeaderTextSplitter
    ],
    headers_to_split_on: list[tuple[str, str]],
    html_input: str,
    expected_documents: list[Document],
    test_case: str,
) -> None:
    """Test the HTML header text splitter.

    Args:
        html_header_splitter_splitter_factory : Factory function to create the HTML
            header splitter.
        headers_to_split_on: List of headers to split on.
        html_input: The HTML input string to be split.
        expected_documents: List of expected Document objects.
        test_case: Description of the test case.

    Raises:
        AssertionError: If the number of documents or their content/metadata
            does not match the expected values.
    """
    splitter = html_header_splitter_splitter_factory(headers_to_split_on)
    docs = splitter.split_text(html_input)

    assert len(docs) == len(expected_documents), (
        f"Test Case '{test_case}' Failed: Number of documents mismatch. "
        f"Expected {len(expected_documents)}, got {len(docs)}."
    )
    for idx, (doc, expected) in enumerate(
        zip(docs, expected_documents, strict=False), start=1
    ):
        assert doc.page_content == expected.page_content, (
            f"Test Case '{test_case}' Failed at Document {idx}: "
            f"Content mismatch.\nExpected: {expected.page_content}"
            # Fix: this fragment previously lacked the f-prefix, so the literal
            # text "{doc.page_content}" appeared in failure messages.
            f"\nGot: {doc.page_content}"
        )
        assert doc.metadata == expected.metadata, (
            f"Test Case '{test_case}' Failed at Document {idx}: "
            f"Metadata mismatch.\nExpected: {expected.metadata}\nGot: {doc.metadata}"
        )
@pytest.mark.parametrize(
    ("headers_to_split_on", "html_content", "expected_output", "test_case"),
    [
        (
            # Test Case A: Split on h1 and h2 with h3 in content
            [("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3")],
            """
<!DOCTYPE html>
<html>
<body>
<div>
<h1>Foo</h1>
<p>Some intro text about Foo.</p>
<div>
<h2>Bar main section</h2>
<p>Some intro text about Bar.</p>
<h3>Bar subsection 1</h3>
<p>Some text about the first subtopic of Bar.</p>
<h3>Bar subsection 2</h3>
<p>Some text about the second subtopic of Bar.</p>
</div>
<div>
<h2>Baz</h2>
<p>Some text about Baz</p>
</div>
<br>
<p>Some concluding text about Foo</p>
</div>
</body>
</html>
""",
            [
                Document(metadata={"Header 1": "Foo"}, page_content="Foo"),
                Document(
                    metadata={"Header 1": "Foo"},
                    page_content="Some intro text about Foo.",
                ),
                Document(
                    metadata={"Header 1": "Foo", "Header 2": "Bar main section"},
                    page_content="Bar main section",
                ),
                Document(
                    metadata={"Header 1": "Foo", "Header 2": "Bar main section"},
                    page_content="Some intro text about Bar.",
                ),
                Document(
                    metadata={
                        "Header 1": "Foo",
                        "Header 2": "Bar main section",
                        "Header 3": "Bar subsection 1",
                    },
                    page_content="Bar subsection 1",
                ),
                Document(
                    metadata={
                        "Header 1": "Foo",
                        "Header 2": "Bar main section",
                        "Header 3": "Bar subsection 1",
                    },
                    page_content="Some text about the first subtopic of Bar.",
                ),
                Document(
                    metadata={
                        "Header 1": "Foo",
                        "Header 2": "Bar main section",
                        "Header 3": "Bar subsection 2",
                    },
                    page_content="Bar subsection 2",
                ),
                Document(
                    metadata={
                        "Header 1": "Foo",
                        "Header 2": "Bar main section",
                        "Header 3": "Bar subsection 2",
                    },
                    page_content="Some text about the second subtopic of Bar.",
                ),
                Document(
                    metadata={"Header 1": "Foo", "Header 2": "Baz"}, page_content="Baz"
                ),
                Document(
                    metadata={"Header 1": "Foo"},
                    page_content=(
                        "Some text about Baz \nSome concluding text about Foo"
                    ),
                ),
            ],
            "Test Case A: Split on h1, h2, and h3 with nested headers",
        ),
        (
            # Test Case B: Split on h1 only without any headers
            [("h1", "Header 1")],
            """
<html>
<body>
<p>Paragraph one.</p>
<p>Paragraph two.</p>
<p>Paragraph three.</p>
</body>
</html>
""",
            [
                Document(
                    metadata={},
                    page_content="Paragraph one. \nParagraph two. \nParagraph three.",
                )
            ],
            "Test Case B: Split on h1 only without any headers",
        ),
    ],
)
@pytest.mark.requires("bs4")
def test_additional_html_header_text_splitter(
    html_header_splitter_splitter_factory: Callable[
        [list[tuple[str, str]]], HTMLHeaderTextSplitter
    ],
    headers_to_split_on: list[tuple[str, str]],
    html_content: str,
    expected_output: list[Document],
    test_case: str,
) -> None:
    """Test the HTML header text splitter.

    Args:
        html_header_splitter_splitter_factory: Factory function to create the HTML
            header splitter.
        headers_to_split_on: List of headers to split on.
        html_content: HTML content to be split.
        expected_output: Expected list of `Document` objects.
        test_case: Description of the test case.

    Raises:
        AssertionError: If the number of documents or their content/metadata
            does not match the expected output.
    """
    splitter = html_header_splitter_splitter_factory(headers_to_split_on)
    docs = splitter.split_text(html_content)

    assert len(docs) == len(expected_output), (
        f"{test_case} Failed: Number of documents mismatch. "
        f"Expected {len(expected_output)}, got {len(docs)}."
    )
    for idx, (doc, expected) in enumerate(
        zip(docs, expected_output, strict=False), start=1
    ):
        assert doc.page_content == expected.page_content, (
            f"{test_case} Failed at Document {idx}: "
            f"Content mismatch.\nExpected: {expected.page_content}\n"
            # Fix: this fragment previously lacked the f-prefix, so the literal
            # text "{doc.page_content}" appeared in failure messages.
            f"Got: {doc.page_content}"
        )
        assert doc.metadata == expected.metadata, (
            f"{test_case} Failed at Document {idx}: "
            f"Metadata mismatch.\nExpected: {expected.metadata}\nGot: {doc.metadata}"
        )
@pytest.mark.parametrize(
    ("headers_to_split_on", "html_content", "expected_output", "test_case"),
    [
        (
            # Test Case C: Split on h1, h2, and h3 with no headers present
            [("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3")],
            """
<html>
<body>
<p>Just some random text without headers.</p>
<div>
<span>More text here.</span>
</div>
</body>
</html>
""",
            [
                Document(
                    page_content="Just some random text without headers."
                    " \nMore text here.",
                    metadata={},
                )
            ],
            "Test Case C: Split on h1, h2, and h3 without any headers",
        )
    ],
)
@pytest.mark.requires("bs4")
def test_html_no_headers_with_multiple_splitters(
    html_header_splitter_splitter_factory: Callable[
        [list[tuple[str, str]]], HTMLHeaderTextSplitter
    ],
    headers_to_split_on: list[tuple[str, str]],
    html_content: str,
    expected_output: list[Document],
    test_case: str,
) -> None:
    """Test HTML content splitting without headers using multiple splitters.

    Args:
        html_header_splitter_splitter_factory: Factory to create the HTML header
            splitter.
        headers_to_split_on: List of headers to split on.
        html_content: HTML content to be split.
        expected_output: Expected list of `Document` objects after splitting.
        test_case: Description of the test case.

    Raises:
        AssertionError: If the number of documents or their content/metadata
            does not match the expected output.
    """
    splitter = html_header_splitter_splitter_factory(headers_to_split_on)
    docs = splitter.split_text(html_content)

    assert len(docs) == len(expected_output), (
        f"{test_case} Failed: Number of documents mismatch. "
        f"Expected {len(expected_output)}, got {len(docs)}."
    )
    for idx, (doc, expected) in enumerate(
        zip(docs, expected_output, strict=False), start=1
    ):
        assert doc.page_content == expected.page_content, (
            f"{test_case} Failed at Document {idx}: "
            f"Content mismatch.\nExpected: {expected.page_content}\n"
            # Fix: this fragment previously lacked the f-prefix, so the literal
            # text "{doc.page_content}" appeared in failure messages.
            f"Got: {doc.page_content}"
        )
        assert doc.metadata == expected.metadata, (
            f"{test_case} Failed at Document {idx}: "
            f"Metadata mismatch.\nExpected: {expected.metadata}\nGot: {doc.metadata}"
        )
def test_split_text_on_tokens() -> None:
    """Token splitting honors tokens_per_chunk and chunk_overlap."""
    sample = "foo bar baz 123"
    char_tokenizer = Tokenizer(
        chunk_overlap=3,
        tokens_per_chunk=7,
        decode=lambda token_ids: "".join(chr(t) for t in token_ids),
        encode=lambda txt: [ord(ch) for ch in txt],
    )
    assert split_text_on_tokens(text=sample, tokenizer=char_tokenizer) == [
        "foo bar",
        "bar baz",
        "baz 123",
    ]
def test_decode_returns_no_chunks() -> None:
    """If decode yields only empty strings, the result is [] rather than ['']."""
    char_tokenizer = Tokenizer(
        chunk_overlap=3,
        tokens_per_chunk=7,
        decode=lambda _: "",
        encode=lambda txt: [ord(ch) for ch in txt],
    )
    empty_result: list[Any] = split_text_on_tokens(
        text="foo bar baz 123", tokenizer=char_tokenizer
    )
    assert empty_result == []
@pytest.mark.requires("bs4")
@pytest.mark.requires("lxml")
def test_section_aware_happy_path_splitting_based_on_header_1_2() -> None:
    """HTMLSectionSplitter groups content under its nearest h1/h2 header."""
    html_string = """<!DOCTYPE html>
<html>
<body>
<div>
<h1>Foo</h1>
<p>Some intro text about Foo.</p>
<div>
<h2>Bar main section</h2>
<p>Some intro text about Bar.</p>
<h3>Bar subsection 1</h3>
<p>Some text about the first subtopic of Bar.</p>
<h3>Bar subsection 2</h3>
<p>Some text about the second subtopic of Bar.</p>
</div>
<div>
<h2>Baz</h2>
<p>Some text about Baz</p>
</div>
<br>
<p>Some concluding text about Foo</p>
</div>
</body>
</html>"""

    section_splitter = HTMLSectionSplitter(
        headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
    )
    docs = section_splitter.split_text(html_string)

    assert len(docs) == 3
    intro, bar, baz = docs

    assert intro.metadata["Header 1"] == "Foo"
    assert intro.page_content == "Foo \n Some intro text about Foo."

    assert bar.page_content == (
        "Bar main section \n Some intro text about Bar. \n "
        "Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
        "Bar subsection 2 \n Some text about the second subtopic of Bar."
    )
    assert bar.metadata["Header 2"] == "Bar main section"

    assert baz.page_content == (
        "Baz \n Some text about Baz \n \n \n Some concluding text about Foo"
    )
    assert baz.metadata["Header 2"] == "Baz"
@pytest.mark.requires("bs4")
@pytest.mark.requires("lxml")
def test_happy_path_splitting_based_on_header_with_font_size() -> None:
    """A large-font span is treated like an h1 header when sectioning."""
    html_string = """<!DOCTYPE html>
<html>
<body>
<div>
<span style="font-size: 22px">Foo</span>
<p>Some intro text about Foo.</p>
<div>
<h2>Bar main section</h2>
<p>Some intro text about Bar.</p>
<h3>Bar subsection 1</h3>
<p>Some text about the first subtopic of Bar.</p>
<h3>Bar subsection 2</h3>
<p>Some text about the second subtopic of Bar.</p>
</div>
<div>
<h2>Baz</h2>
<p>Some text about Baz</p>
</div>
<br>
<p>Some concluding text about Foo</p>
</div>
</body>
</html>"""

    section_splitter = HTMLSectionSplitter(
        headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
    )
    docs = section_splitter.split_text(html_string)

    assert len(docs) == 3
    intro, bar, baz = docs

    assert intro.page_content == "Foo \n Some intro text about Foo."
    assert intro.metadata["Header 1"] == "Foo"

    assert bar.page_content == (
        "Bar main section \n Some intro text about Bar. \n "
        "Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
        "Bar subsection 2 \n Some text about the second subtopic of Bar."
    )
    assert bar.metadata["Header 2"] == "Bar main section"

    assert baz.page_content == (
        "Baz \n Some text about Baz \n \n \n Some concluding text about Foo"
    )
    assert baz.metadata["Header 2"] == "Baz"
@pytest.mark.requires("bs4")
@pytest.mark.requires("lxml")
def test_happy_path_splitting_based_on_header_with_whitespace_chars() -> None:
    """Header text is stripped of surrounding whitespace before use as metadata."""
    html_string = """<!DOCTYPE html>
<html>
<body>
<div>
<span style="font-size: 22px">\nFoo </span>
<p>Some intro text about Foo.</p>
<div>
<h2>Bar main section</h2>
<p>Some intro text about Bar.</p>
<h3>Bar subsection 1</h3>
<p>Some text about the first subtopic of Bar.</p>
<h3>Bar subsection 2</h3>
<p>Some text about the second subtopic of Bar.</p>
</div>
<div>
<h2>Baz</h2>
<p>Some text about Baz</p>
</div>
<br>
<p>Some concluding text about Foo</p>
</div>
</body>
</html>"""

    section_splitter = HTMLSectionSplitter(
        headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
    )
    docs = section_splitter.split_text(html_string)

    assert len(docs) == 3
    intro, bar, baz = docs

    assert intro.page_content == "Foo \n Some intro text about Foo."
    assert intro.metadata["Header 1"] == "Foo"

    assert bar.page_content == (
        "Bar main section \n Some intro text about Bar. \n "
        "Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
        "Bar subsection 2 \n Some text about the second subtopic of Bar."
    )
    assert bar.metadata["Header 2"] == "Bar main section"

    assert baz.page_content == (
        "Baz \n Some text about Baz \n \n \n Some concluding text about Foo"
    )
    assert baz.metadata["Header 2"] == "Baz"
@pytest.mark.requires("bs4")
@pytest.mark.requires("lxml")
def test_happy_path_splitting_with_duplicate_header_tag() -> None:
    """Repeated header text ('Foo') still yields distinct sections."""
    html_string = """<!DOCTYPE html>
<html>
<body>
<div>
<h1>Foo</h1>
<p>Some intro text about Foo.</p>
<div>
<h2>Bar main section</h2>
<p>Some intro text about Bar.</p>
<h3>Bar subsection 1</h3>
<p>Some text about the first subtopic of Bar.</p>
<h3>Bar subsection 2</h3>
<p>Some text about the second subtopic of Bar.</p>
</div>
<div>
<h2>Foo</h2>
<p>Some text about Baz</p>
</div>
<h1>Foo</h1>
<br>
<p>Some concluding text about Foo</p>
</div>
</body>
</html>"""

    section_splitter = HTMLSectionSplitter(
        headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")]
    )
    docs = section_splitter.split_text(html_string)

    assert len(docs) == 4
    first, second, third, fourth = docs

    assert first.page_content == "Foo \n Some intro text about Foo."
    assert first.metadata["Header 1"] == "Foo"

    assert second.page_content == (
        "Bar main section \n Some intro text about Bar. \n "
        "Bar subsection 1 \n Some text about the first subtopic of Bar. \n "
        "Bar subsection 2 \n Some text about the second subtopic of Bar."
    )
    assert second.metadata["Header 2"] == "Bar main section"

    assert third.page_content == "Foo \n Some text about Baz"
    assert third.metadata["Header 2"] == "Foo"

    assert fourth.page_content == "Foo \n \n Some concluding text about Foo"
    assert fourth.metadata["Header 1"] == "Foo"
def test_split_json() -> None:
    """Test json text splitter.

    Every produced chunk must stay within ~105% of the configured max size.
    """
    max_chunk = 800
    splitter = RecursiveJsonSplitter(max_chunk_size=max_chunk)

    def random_val() -> str:
        return "".join(random.choices(string.ascii_letters, k=random.randint(4, 12)))

    test_data: Any = {
        "val0": random_val(),
        "val1": {f"val1{i}": random_val() for i in range(100)},
    }
    test_data["val1"]["val16"] = {f"val16{i}": random_val() for i in range(100)}

    # uses create_docs and split_text
    docs = splitter.create_documents(texts=[test_data])

    # Fix: previously built `output` and an `expected_output` list via a loop
    # whose variable was unused ([True for doc in docs]); assert directly.
    assert all(len(doc.page_content) < max_chunk * 1.05 for doc in docs)
def test_split_json_with_lists() -> None:
    """Test json text splitter with list conversion.

    Converting lists should never produce fewer chunks than the plain split.
    """
    max_chunk = 800
    json_splitter = RecursiveJsonSplitter(max_chunk_size=max_chunk)

    def random_val() -> str:
        return "".join(random.choices(string.ascii_letters, k=random.randint(4, 12)))

    nested_data: Any = {
        "val0": random_val(),
        "val1": {f"val1{i}": random_val() for i in range(100)},
    }
    nested_data["val1"]["val16"] = {f"val16{i}": random_val() for i in range(100)}
    data_with_list: Any = {"testPreprocessing": [nested_data]}

    # test text splitter
    plain_chunks = json_splitter.split_text(json_data=nested_data)
    list_chunks = json_splitter.split_text(
        json_data=data_with_list, convert_lists=True
    )

    assert len(list_chunks) >= len(plain_chunks)
def test_split_json_many_calls() -> None:
    """Repeated split_json calls must not mutate earlier results."""
    first_payload = {"a": 1, "b": 2}
    second_payload = {"c": 3, "d": 4}
    json_splitter = RecursiveJsonSplitter()

    first_chunks = json_splitter.split_json(first_payload)
    assert first_chunks == [{"a": 1, "b": 2}]

    second_chunks = json_splitter.split_json(second_payload)
    assert second_chunks == [{"c": 3, "d": 4}]

    # Regression check: producing second_chunks must not alter first_chunks.
    assert first_chunks == [{"a": 1, "b": 2}]
    assert second_chunks == [{"c": 3, "d": 4}]
def test_powershell_code_splitter_short_code() -> None:
    """A short PowerShell snippet splits along POWERSHELL separators."""
    ps_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.POWERSHELL, chunk_size=60, chunk_overlap=0
    )
    ps_code = """
# Check if a file exists
$filePath = "C:\\temp\\file.txt"
if (Test-Path $filePath) {
    # File exists
} else {
    # File does not exist
}
"""
    expected_chunks = [
        '# Check if a file exists\n$filePath = "C:\\temp\\file.txt"',
        "if (Test-Path $filePath) {\n    # File exists\n} else {",
        "# File does not exist\n}",
    ]
    assert ps_splitter.split_text(ps_code) == expected_chunks
def test_powershell_code_splitter_longer_code() -> None:
    """A longer PowerShell script splits along POWERSHELL separators."""
    ps_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.POWERSHELL, chunk_size=60, chunk_overlap=0
    )
    ps_code = """
# Get a list of all processes and export to CSV
$processes = Get-Process
$processes | Export-Csv -Path "C:\\temp\\processes.csv" -NoTypeInformation

# Read the CSV file and display its content
$csvContent = Import-Csv -Path "C:\\temp\\processes.csv"
$csvContent | ForEach-Object {
    $_.ProcessName
}

# End of script
"""
    expected_chunks = [
        "# Get a list of all processes and export to CSV",
        "$processes = Get-Process",
        '$processes | Export-Csv -Path "C:\\temp\\processes.csv"',
        "-NoTypeInformation",
        "# Read the CSV file and display its content",
        '$csvContent = Import-Csv -Path "C:\\temp\\processes.csv"',
        "$csvContent | ForEach-Object {\n    $_.ProcessName\n}",
        "# End of script",
    ]
    assert ps_splitter.split_text(ps_code) == expected_chunks
FAKE_VISUALBASIC6_TEXT = """
Option Explicit
Public Function SumTwoIntegers(ByVal a As Integer, ByVal b As Integer) As Integer
SumTwoIntegers = a + b
End Function
Public Sub Main()
Dim i As Integer
Dim limit As Integer
i = 0
limit = 50
While i < limit
i = SumTwoIntegers(i, 1)
If i = limit \\ 2 Then
MsgBox "Halfway there! i = " & i
End If
Wend
MsgBox "Done! Final value of i: " & i
End Sub
"""
def test_visualbasic6_code_splitter() -> None:
    """VB6 source is chunked along the VISUALBASIC6-specific separators."""
    vb6_splitter = RecursiveCharacterTextSplitter.from_language(
        Language.VISUALBASIC6,
        chunk_size=CHUNK_SIZE,
        chunk_overlap=0,
    )
    expected_chunks = [
        "Option Explicit",
        "Public Function",
        "SumTwoIntegers(",
        "ByVal",
        "a As Integer,",
        "ByVal b As",
        "Integer) As",
        "Integer",
        "SumTwoIntegers",
        "= a + b",
        "End Function",
        "Public Sub",
        "Main()",
        "Dim i As",
        "Integer",
        "Dim limit",
        "As Integer",
        "i = 0",
        "limit = 50",
        "While i <",
        "limit",
        "i =",
        "SumTwoIntegers(",
        "i,",
        "1)",
        "If i =",
        "limit \\ 2 Then",
        'MsgBox "Halfway',
        'there! i = " &',
        "i",
        "End If",
        "Wend",
        "MsgBox",
        '"Done! Final',
        'value of i: " &',
        "i",
        "End Sub",
    ]
    assert vb6_splitter.split_text(FAKE_VISUALBASIC6_TEXT) == expected_chunks
def custom_iframe_extractor(iframe_tag: Tag) -> str:
    """Render an <iframe> tag as a Markdown-style link to its src URL."""
    src = iframe_tag.get("src", "")
    return f"[iframe:{src}]({src})"
@pytest.mark.requires("bs4")
def test_html_splitter_with_custom_extractor() -> None:
    """A custom handler converts <iframe> tags into Markdown-style links."""
    html_content = """
<h1>Section 1</h1>
<p>This is an iframe:</p>
<iframe src="http://example.com"></iframe>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")],
            custom_handlers={"iframe": custom_iframe_extractor},
            max_chunk_size=1000,
        )
    result = chunker.split_text(html_content)

    assert result == [
        Document(
            page_content="This is an iframe: "
            "[iframe:http://example.com](http://example.com)",
            metadata={"Header 1": "Section 1"},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_href_links() -> None:
    """With preserve_links, <a href> anchors become Markdown-style links."""
    html_content = """
<h1>Section 1</h1>
<p>This is a link to <a href="http://example.com">example.com</a></p>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")],
            preserve_links=True,
            max_chunk_size=1000,
        )
    result = chunker.split_text(html_content)

    assert result == [
        Document(
            page_content="This is a link to [example.com](http://example.com)",
            metadata={"Header 1": "Section 1"},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_nested_elements() -> None:
    """Text inside nested <div>s is flattened into the header's chunk."""
    html_content = """
<h1>Main Section</h1>
<div>
<p>Some text here.</p>
<div>
<p>Nested content.</p>
</div>
</div>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")], max_chunk_size=1000
        )
    result = chunker.split_text(html_content)

    assert result == [
        Document(
            page_content="Some text here. Nested content.",
            metadata={"Header 1": "Main Section"},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_preserved_elements() -> None:
    """Preserved elements (<table>, <ul>) stay intact.

    The chunk size is deliberately smaller than the preserved content to prove
    that preservation wins over splitting.
    """
    html_content = """
<h1>Section 1</h1>
<table>
<tr><td>Row 1</td></tr>
<tr><td>Row 2</td></tr>
</table>
<ul>
<li>Item 1</li>
<li>Item 2</li>
</ul>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")],
            elements_to_preserve=["table", "ul"],
            max_chunk_size=50,  # Deliberately low to test preservation
        )
    result = chunker.split_text(html_content)

    # The table and list must not be split despite the tiny chunk size.
    assert result == [
        Document(
            page_content="Row 1 Row 2 Item 1 Item 2",
            metadata={"Header 1": "Section 1"},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_no_further_splits() -> None:
    """Content that fits the chunk size is split only at section headers."""
    html_content = """
<h1>Section 1</h1>
<p>Some content here.</p>
<h1>Section 2</h1>
<p>More content here.</p>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")], max_chunk_size=1000
        )
    result = chunker.split_text(html_content)

    # One document per section; nothing is split further.
    assert result == [
        Document(page_content="Some content here.", metadata={"Header 1": "Section 1"}),
        Document(page_content="More content here.", metadata={"Header 1": "Section 2"}),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_small_chunk_size() -> None:
    """A tiny chunk size forces splitting into many overlapping chunks."""
    html_content = """
<h1>Section 1</h1>
<p>This is some long text that should be split into multiple chunks due to the
small chunk size.</p>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")], max_chunk_size=20, chunk_overlap=5
        )
    result = chunker.split_text(html_content)

    section_meta = {"Header 1": "Section 1"}
    assert result == [
        Document(page_content="This is some long", metadata=section_meta),
        Document(page_content="long text that", metadata=section_meta),
        Document(page_content="that should be", metadata=section_meta),
        Document(page_content="be split into", metadata=section_meta),
        Document(page_content="into multiple", metadata=section_meta),
        Document(page_content="chunks due to the", metadata=section_meta),
        Document(page_content="the small chunk", metadata=section_meta),
        Document(page_content="size.", metadata=section_meta),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_denylist_tags() -> None:
    """Tags listed in denylist_tags are dropped from the output."""
    html_content = """
<h1>Section 1</h1>
<p>This paragraph should be kept.</p>
<span>This span should be removed.</span>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")],
            denylist_tags=["span"],
            max_chunk_size=1000,
        )
    result = chunker.split_text(html_content)

    assert result == [
        Document(
            page_content="This paragraph should be kept.",
            metadata={"Header 1": "Section 1"},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_external_metadata() -> None:
    """external_metadata entries are merged into every document's metadata."""
    html_content = """
<h1>Section 1</h1>
<p>This is some content.</p>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")],
            external_metadata={"source": "example.com"},
            max_chunk_size=1000,
        )
    result = chunker.split_text(html_content)

    assert result == [
        Document(
            page_content="This is some content.",
            metadata={"Header 1": "Section 1", "source": "example.com"},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_text_normalization() -> None:
    """normalize_text lowercases and strips punctuation from chunk text."""
    html_content = """
<h1>Section 1</h1>
<p>This is some TEXT that should be normalized!</p>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")],
            normalize_text=True,
            max_chunk_size=1000,
        )
    result = chunker.split_text(html_content)

    assert result == [
        Document(
            page_content="this is some text that should be normalized",
            metadata={"Header 1": "Section 1"},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_allowlist_tags() -> None:
    """Only tags listed in allowlist_tags contribute text to the output."""
    html_content = """
<h1>Section 1</h1>
<p>This paragraph should be kept.</p>
<span>This span should be kept.</span>
<div>This div should be removed.</div>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")],
            allowlist_tags=["p", "span"],
            max_chunk_size=1000,
        )
    result = chunker.split_text(html_content)

    assert result == [
        Document(
            page_content="This paragraph should be kept. This span should be kept.",
            metadata={"Header 1": "Section 1"},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_mixed_preserve_and_filter() -> None:
    """Preserved elements and denylisted tags interact correctly.

    The <table> is kept intact while <span>s are removed, even when a span is
    nested inside the preserved table.
    """
    html_content = """
<h1>Section 1</h1>
<table>
<tr>
<td>Keep this table</td>
<td>Cell contents kept, span removed
<span>This span should be removed.</span>
</td>
</tr>
</table>
<p>This paragraph should be kept.</p>
<span>This span should be removed.</span>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")],
            elements_to_preserve=["table"],
            denylist_tags=["span"],
            max_chunk_size=1000,
        )
    result = chunker.split_text(html_content)

    assert result == [
        Document(
            page_content="Keep this table Cell contents kept, span removed"
            " This paragraph should be kept.",
            metadata={"Header 1": "Section 1"},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_no_headers() -> None:
    """With no headers configured, all text forms one header-less document."""
    html_content = """
<p>This is content without any headers.</p>
<p>It should still produce a valid document.</p>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[],
            max_chunk_size=1000,
        )
    result = chunker.split_text(html_content)

    assert result == [
        Document(
            page_content="This is content without any headers. It should still produce"
            " a valid document.",
            metadata={},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_with_media_preservation() -> None:
    """Media elements are preserved as Markdown-like links.

    <img>, <video>, and <audio> tags each become a ![kind:src](src) marker in
    the chunk text.
    """
    html_content = """
<h1>Section 1</h1>
<p>This is an image:</p>
<img src="http://example.com/image.png" />
<p>This is a video:</p>
<video src="http://example.com/video.mp4"></video>
<p>This is audio:</p>
<audio src="http://example.com/audio.mp3"></audio>
"""
    with suppress_langchain_beta_warning():
        chunker = HTMLSemanticPreservingSplitter(
            headers_to_split_on=[("h1", "Header 1")],
            preserve_images=True,
            preserve_videos=True,
            preserve_audio=True,
            max_chunk_size=1000,
        )
    result = chunker.split_text(html_content)

    assert result == [
        Document(
            page_content="This is an image: ![image:http://example.com/image.png]"
            "(http://example.com/image.png) "
            "This is a video: ![video:http://example.com/video.mp4]"
            "(http://example.com/video.mp4) "
            "This is audio: ![audio:http://example.com/audio.mp3]"
            "(http://example.com/audio.mp3)",
            metadata={"Header 1": "Section 1"},
        ),
    ]
@pytest.mark.requires("bs4")
def test_html_splitter_keep_separator_true() -> None:
"""Test HTML splitting with keep_separator=True."""
html_content = """
<h1>Section 1</h1>
<p>This is some text. This is some other text.</p>
"""
with suppress_langchain_beta_warning():
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
max_chunk_size=10,
separators=[". "],
keep_separator=True,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some text",
metadata={"Header 1": "Section 1"},
),
Document(
page_content=". This is some other text.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_keep_separator_false() -> None:
"""Test HTML splitting with keep_separator=False."""
html_content = """
<h1>Section 1</h1>
<p>This is some text. This is some other text.</p>
"""
with suppress_langchain_beta_warning():
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
max_chunk_size=10,
separators=[". "],
keep_separator=False,
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some text",
metadata={"Header 1": "Section 1"},
),
Document(
page_content="This is some other text.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_keep_separator_start() -> None:
"""Test HTML splitting with keep_separator="start"."""
html_content = """
<h1>Section 1</h1>
<p>This is some text. This is some other text.</p>
"""
with suppress_langchain_beta_warning():
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
max_chunk_size=10,
separators=[". "],
keep_separator="start",
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some text",
metadata={"Header 1": "Section 1"},
),
Document(
page_content=". This is some other text.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_keep_separator_end() -> None:
"""Test HTML splitting with keep_separator="end"."""
html_content = """
<h1>Section 1</h1>
<p>This is some text. This is some other text.</p>
"""
with suppress_langchain_beta_warning():
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
max_chunk_size=10,
separators=[". "],
keep_separator="end",
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some text.",
metadata={"Header 1": "Section 1"},
),
Document(
page_content="This is some other text.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
@pytest.mark.requires("bs4")
def test_html_splitter_keep_separator_default() -> None:
"""Test HTML splitting with keep_separator not set."""
html_content = """
<h1>Section 1</h1>
<p>This is some text. This is some other text.</p>
"""
with suppress_langchain_beta_warning():
splitter = HTMLSemanticPreservingSplitter(
headers_to_split_on=[("h1", "Header 1")],
max_chunk_size=10,
separators=[". "],
)
documents = splitter.split_text(html_content)
expected = [
Document(
page_content="This is some text",
metadata={"Header 1": "Section 1"},
),
Document(
page_content=". This is some other text.",
metadata={"Header 1": "Section 1"},
),
]
assert documents == expected
def test_character_text_splitter_discard_regex_separator_on_merge() -> None:
"""Test that regex lookahead separator is not re-inserted when merging."""
text = "SCE191 First chunk. SCE103 Second chunk."
splitter = CharacterTextSplitter(
separator=r"(?=SCE\d{3})",
is_separator_regex=True,
chunk_size=200,
chunk_overlap=0,
keep_separator=False,
)
output = splitter.split_text(text)
assert output == ["SCE191 First chunk. SCE103 Second chunk."]
@pytest.mark.parametrize(
("separator", "is_regex", "text", "chunk_size", "expected"),
[
# 1) regex lookaround & split happens
# "abcmiddef" split by "(?<=mid)" → ["abcmid","def"], chunk_size=5 keeps both
(r"(?<=mid)", True, "abcmiddef", 5, ["abcmid", "def"]),
# 2) regex lookaround & no split
# chunk_size=100 merges back into ["abcmiddef"]
(r"(?<=mid)", True, "abcmiddef", 100, ["abcmiddef"]),
# 3) literal separator & split happens
# split on "mid" → ["abc","def"], chunk_size=3 keeps both
("mid", False, "abcmiddef", 3, ["abc", "def"]),
# 4) literal separator & no split
# chunk_size=100 merges back into ["abcmiddef"]
("mid", False, "abcmiddef", 100, ["abcmiddef"]),
],
)
def test_character_text_splitter_chunk_size_effect(
separator: str,
*,
is_regex: bool,
text: str,
chunk_size: int,
expected: list[str],
) -> None:
splitter = CharacterTextSplitter(
separator=separator,
is_separator_regex=is_regex,
chunk_size=chunk_size,
chunk_overlap=0,
keep_separator=False,
)
assert splitter.split_text(text) == expected
| Program |
python | doocs__leetcode | solution/1800-1899/1819.Number of Different Subsequences GCDs/Solution.py | {
"start": 0,
"end": 419
} | class ____:
def countDifferentSubsequenceGCDs(self, nums: List[int]) -> int:
mx = max(nums)
vis = set(nums)
ans = 0
for x in range(1, mx + 1):
g = 0
for y in range(x, mx + 1, x):
if y in vis:
g = gcd(g, y)
if g == x:
ans += 1
break
return ans
| Solution |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/pod_generator.py | {
"start": 5032,
"end": 21038
} | class ____:
"""
Contains Kubernetes Airflow Worker configuration logic.
Represents a kubernetes pod and manages execution of a single pod.
Any configuration that is container specific gets applied to
the first container in the list of containers.
:param pod: The fully specified pod. Mutually exclusive with `pod_template_file`
:param pod_template_file: Path to YAML file. Mutually exclusive with `pod`
:param extract_xcom: Whether to bring up a container for xcom
"""
def __init__(
self,
pod: k8s.V1Pod | None = None,
pod_template_file: str | None = None,
extract_xcom: bool = True,
):
if not pod_template_file and not pod:
raise AirflowConfigException(
"Podgenerator requires either a `pod` or a `pod_template_file` argument"
)
if pod_template_file and pod:
raise AirflowConfigException("Cannot pass both `pod` and `pod_template_file` arguments")
if pod_template_file:
self.ud_pod = self.deserialize_model_file(pod_template_file)
else:
self.ud_pod = pod
# Attach sidecar
self.extract_xcom = extract_xcom
@staticmethod
def from_obj(obj) -> dict | k8s.V1Pod | None:
"""Convert to pod from obj."""
if obj is None:
return None
k8s_legacy_object = obj.get("KubernetesExecutor", None)
k8s_object = obj.get("pod_override", None)
if k8s_legacy_object and k8s_object:
raise AirflowConfigException(
"Can not have both a legacy and new"
"executor_config object. Please delete the KubernetesExecutor"
"dict and only use the pod_override kubernetes.client.models.V1Pod"
"object."
)
if not k8s_object and not k8s_legacy_object:
return None
if isinstance(k8s_object, k8s.V1Pod):
return k8s_object
raise TypeError(
"Cannot convert a non-kubernetes.client.models.V1Pod object into a KubernetesExecutorConfig"
)
@staticmethod
def reconcile_pods(base_pod: k8s.V1Pod, client_pod: k8s.V1Pod | None) -> k8s.V1Pod:
"""
Merge Kubernetes Pod objects.
:param base_pod: has the base attributes which are overwritten if they exist
in the client pod and remain if they do not exist in the client_pod
:param client_pod: the pod that the client wants to create.
:return: the merged pods
This can't be done recursively as certain fields are overwritten and some are concatenated.
"""
if client_pod is None:
return base_pod
client_pod_cp = copy.deepcopy(client_pod)
client_pod_cp.spec = PodGenerator.reconcile_specs(base_pod.spec, client_pod_cp.spec)
client_pod_cp.metadata = PodGenerator.reconcile_metadata(base_pod.metadata, client_pod_cp.metadata)
client_pod_cp = merge_objects(base_pod, client_pod_cp)
return client_pod_cp
@staticmethod
def reconcile_metadata(base_meta, client_meta):
"""
Merge Kubernetes Metadata objects.
:param base_meta: has the base attributes which are overwritten if they exist
in the client_meta and remain if they do not exist in the client_meta
:param client_meta: the spec that the client wants to create.
:return: the merged specs
"""
if base_meta and not client_meta:
return base_meta
if not base_meta and client_meta:
return client_meta
if client_meta and base_meta:
client_meta.labels = merge_objects(base_meta.labels, client_meta.labels)
client_meta.annotations = merge_objects(base_meta.annotations, client_meta.annotations)
extend_object_field(base_meta, client_meta, "managed_fields")
extend_object_field(base_meta, client_meta, "finalizers")
extend_object_field(base_meta, client_meta, "owner_references")
return merge_objects(base_meta, client_meta)
return None
@staticmethod
def reconcile_specs(
base_spec: k8s.V1PodSpec | None, client_spec: k8s.V1PodSpec | None
) -> k8s.V1PodSpec | None:
"""
Merge Kubernetes PodSpec objects.
:param base_spec: has the base attributes which are overwritten if they exist
in the client_spec and remain if they do not exist in the client_spec
:param client_spec: the spec that the client wants to create.
:return: the merged specs
"""
if base_spec and not client_spec:
return base_spec
if not base_spec and client_spec:
return client_spec
if client_spec and base_spec:
client_spec.containers = PodGenerator.reconcile_containers(
base_spec.containers, client_spec.containers
)
merged_spec = extend_object_field(base_spec, client_spec, "init_containers")
merged_spec = extend_object_field(base_spec, merged_spec, "volumes")
return merge_objects(base_spec, merged_spec)
return None
@staticmethod
def reconcile_containers(
base_containers: list[k8s.V1Container], client_containers: list[k8s.V1Container]
) -> list[k8s.V1Container]:
"""
Merge Kubernetes Container objects.
:param base_containers: has the base attributes which are overwritten if they exist
in the client_containers and remain if they do not exist in the client_containers
:param client_containers: the containers that the client wants to create.
:return: the merged containers
The runs recursively over the list of containers.
"""
if not base_containers:
return client_containers
if not client_containers:
return base_containers
client_container = client_containers[0]
base_container = base_containers[0]
client_container = extend_object_field(base_container, client_container, "volume_mounts")
client_container = extend_object_field(base_container, client_container, "env")
client_container = extend_object_field(base_container, client_container, "env_from")
client_container = extend_object_field(base_container, client_container, "ports")
client_container = extend_object_field(base_container, client_container, "volume_devices")
client_container = merge_objects(base_container, client_container)
return [
client_container,
*PodGenerator.reconcile_containers(base_containers[1:], client_containers[1:]),
]
@classmethod
def construct_pod(
cls,
dag_id: str,
task_id: str,
pod_id: str,
try_number: int,
kube_image: str,
date: datetime.datetime | None,
args: list[str],
pod_override_object: k8s.V1Pod | None,
base_worker_pod: k8s.V1Pod,
namespace: str,
scheduler_job_id: str,
run_id: str | None = None,
map_index: int = -1,
*,
with_mutation_hook: bool = False,
) -> k8s.V1Pod:
"""
Create a Pod.
Construct a pod by gathering and consolidating the configuration from 3 places:
- airflow.cfg
- executor_config
- dynamic arguments
"""
if len(pod_id) > POD_NAME_MAX_LENGTH:
warnings.warn(
f"pod_id supplied is longer than {POD_NAME_MAX_LENGTH} characters; "
f"truncating and adding unique suffix.",
UserWarning,
stacklevel=2,
)
pod_id = add_unique_suffix(name=pod_id, max_len=POD_NAME_MAX_LENGTH)
try:
image = pod_override_object.spec.containers[0].image # type: ignore
if not image:
image = kube_image
except Exception:
image = kube_image
annotations = {
"dag_id": dag_id,
"task_id": task_id,
"try_number": str(try_number),
}
if map_index >= 0:
annotations["map_index"] = str(map_index)
if date:
annotations[get_logical_date_key()] = date.isoformat()
if run_id:
annotations["run_id"] = run_id
main_container = k8s.V1Container(
name="base",
args=args,
image=image,
env=[
k8s.V1EnvVar(name="AIRFLOW_IS_K8S_EXECUTOR_POD", value="True"),
],
)
dynamic_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(
namespace=namespace,
annotations=annotations,
name=pod_id,
labels=cls.build_labels_for_k8s_executor_pod(
dag_id=dag_id,
task_id=task_id,
try_number=try_number,
airflow_worker=scheduler_job_id,
map_index=map_index,
logical_date=date,
run_id=run_id,
),
),
)
podspec = k8s.V1PodSpec(
containers=[main_container],
)
dynamic_pod.spec = podspec
# Reconcile the pods starting with the first chronologically,
# Pod from the pod_template_File -> Pod from the K8s executor -> Pod from executor_config arg
pod_list = [base_worker_pod, dynamic_pod, pod_override_object]
try:
pod = reduce(PodGenerator.reconcile_pods, pod_list)
except Exception as e:
raise PodReconciliationError from e
if with_mutation_hook:
from airflow.settings import pod_mutation_hook
try:
pod_mutation_hook(pod)
except Exception as e:
raise PodMutationHookException from e
return pod
@classmethod
def build_selector_for_k8s_executor_pod(
cls,
*,
dag_id,
task_id,
try_number,
map_index=None,
logical_date=None,
run_id=None,
airflow_worker=None,
include_version=False,
):
"""
Generate selector for kubernetes executor pod.
:meta private:
"""
labels = cls.build_labels_for_k8s_executor_pod(
dag_id=dag_id,
task_id=task_id,
try_number=try_number,
map_index=map_index,
logical_date=logical_date,
run_id=run_id,
airflow_worker=airflow_worker,
include_version=include_version,
)
label_strings = [f"{label_id}={label}" for label_id, label in sorted(labels.items())]
selector = ",".join(label_strings)
if not airflow_worker: # this filters out KPO pods even when we don't know the scheduler job id
selector += ",airflow-worker"
return selector
@classmethod
def build_labels_for_k8s_executor_pod(
cls,
*,
dag_id,
task_id,
try_number,
airflow_worker=None,
map_index=None,
logical_date=None,
run_id=None,
include_version=True,
):
"""
Generate labels for kubernetes executor pod.
:meta private:
"""
labels = {
"dag_id": make_safe_label_value(dag_id),
"task_id": make_safe_label_value(task_id),
"try_number": str(try_number),
"kubernetes_executor": "True",
}
if include_version:
labels["airflow_version"] = airflow_version.replace("+", "-")
if airflow_worker is not None:
labels["airflow-worker"] = make_safe_label_value(str(airflow_worker))
if map_index is not None and map_index >= 0:
labels["map_index"] = str(map_index)
if logical_date:
labels[get_logical_date_key()] = datetime_to_label_safe_datestring(logical_date)
if run_id:
labels["run_id"] = make_safe_label_value(run_id)
return labels
@staticmethod
def serialize_pod(pod: k8s.V1Pod) -> dict:
"""
Convert a k8s.V1Pod into a json serializable dictionary.
:param pod: k8s.V1Pod object
:return: Serialized version of the pod returned as dict
"""
api_client = ApiClient()
return api_client.sanitize_for_serialization(pod)
@staticmethod
def deserialize_model_file(path: str) -> k8s.V1Pod:
"""
Generate a Pod from a file.
:param path: Path to the file
:return: a kubernetes.client.models.V1Pod
"""
if os.path.exists(path):
with open(path) as stream:
pod = yaml.safe_load(stream)
else:
pod = None
log.warning("Model file %s does not exist", path)
return PodGenerator.deserialize_model_dict(pod)
@staticmethod
def deserialize_model_dict(pod_dict: dict | None) -> k8s.V1Pod:
"""
Deserializes a Python dictionary to k8s.V1Pod.
Unfortunately we need access to the private method
``_ApiClient__deserialize_model`` from the kubernetes client.
This issue is tracked here; https://github.com/kubernetes-client/python/issues/977.
:param pod_dict: Serialized dict of k8s.V1Pod object
:return: De-serialized k8s.V1Pod
"""
api_client = ApiClient()
return api_client._ApiClient__deserialize_model(pod_dict, k8s.V1Pod)
def merge_objects(base_obj, client_obj):
"""
Merge objects.
:param base_obj: has the base attributes which are overwritten if they exist
in the client_obj and remain if they do not exist in the client_obj
:param client_obj: the object that the client wants to create.
:return: the merged objects
"""
if not base_obj:
return client_obj
if not client_obj:
return base_obj
client_obj_cp = copy.deepcopy(client_obj)
if isinstance(base_obj, dict) and isinstance(client_obj_cp, dict):
base_obj_cp = copy.deepcopy(base_obj)
base_obj_cp.update(client_obj_cp)
return base_obj_cp
for base_key in base_obj.to_dict():
base_val = getattr(base_obj, base_key, None)
if not getattr(client_obj, base_key, None) and base_val is not None:
if not isinstance(client_obj_cp, dict):
setattr(client_obj_cp, base_key, base_val)
else:
client_obj_cp[base_key] = base_val
return client_obj_cp
def extend_object_field(base_obj, client_obj, field_name):
"""
Add field values to existing objects.
:param base_obj: an object which has a property `field_name` that is a list
:param client_obj: an object which has a property `field_name` that is a list.
A copy of this object is returned with `field_name` modified
:param field_name: the name of the list field
:return: the client_obj with the property `field_name` being the two properties appended
"""
client_obj_cp = copy.deepcopy(client_obj)
base_obj_field = getattr(base_obj, field_name, None)
client_obj_field = getattr(client_obj, field_name, None)
if (not isinstance(base_obj_field, list) and base_obj_field is not None) or (
not isinstance(client_obj_field, list) and client_obj_field is not None
):
raise ValueError(
f"The chosen field must be a list. Got {type(base_obj_field)} base_object_field "
f"and {type(client_obj_field)} client_object_field."
)
if not base_obj_field:
return client_obj_cp
if not client_obj_field:
setattr(client_obj_cp, field_name, base_obj_field)
return client_obj_cp
appended_fields = base_obj_field + client_obj_field
setattr(client_obj_cp, field_name, appended_fields)
return client_obj_cp
| PodGenerator |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/biasadd_matmul_test.py | {
"start": 1173,
"end": 4722
} | class ____(trt_test.TfTrtIntegrationTestBase):
"""Testing conversion of BiasAdd MatMul in TF-TRT conversion."""
def _ConstOp(self, shape):
return constant_op.constant(np.random.randn(*shape), dtype=dtypes.float32)
def GraphFn(self, x):
input_matrix_rows = 4
input_matrix_columns = 144
b = self._ConstOp((input_matrix_columns, 4))
x1 = math_ops.matmul(x, b)
b = self._ConstOp((1, 4))
x1 = x1 + b
b = self._ConstOp((input_matrix_rows, 144))
x2 = self.trt_incompatible_op(x)
x2 = math_ops.matmul(x2, b, transpose_a=True)
x2 = gen_array_ops.reshape(x2, [4, -1])
x2 = self.trt_incompatible_op(x2)
b = self._ConstOp((4, input_matrix_columns))
x3 = math_ops.matmul(x, b, transpose_b=True)
b = self._ConstOp((16, input_matrix_rows))
x4 = self.trt_incompatible_op(x)
x4 = math_ops.matmul(x4, b, transpose_b=True, transpose_a=True)
x4 = gen_array_ops.reshape(x4, [4, -1])
x4 = self.trt_incompatible_op(x4)
# Note that tf.nn.bias_add supports up to 5 dimensions.
b = self._ConstOp((input_matrix_columns, 48))
x5 = math_ops.matmul(x, b)
b = self._ConstOp((48,))
x5 = nn.bias_add(x5, b)
# TODO(b/154672994): Put the reshape back when the bug is fixed.
# x5 = gen_array_ops.reshape(x5, [4, -1])
x6 = gen_array_ops.reshape(x, [4, 24, 6])
b = self._ConstOp((6,))
x6 = nn.bias_add(x6, b, data_format="NHWC")
x6 = gen_array_ops.reshape(x6, [4, -1])
x7 = gen_array_ops.reshape(x, [4, 12, 4, 3])
b = self._ConstOp((3,))
x7 = nn.bias_add(x7, b, data_format="NHWC")
x7 = gen_array_ops.reshape(x7, [4, -1])
x8 = gen_array_ops.reshape(x, [4, 4, 3, 2, 6])
b = self._ConstOp((6,))
x8 = nn.bias_add(x8, b, data_format="NHWC")
x8 = gen_array_ops.reshape(x8, [4, -1])
x9 = gen_array_ops.reshape(x, [4, 12, 3, 2, 2])
b = self._ConstOp((12,))
x9 = nn.bias_add(x9, b, data_format="NCHW")
x9 = gen_array_ops.reshape(x9, [4, -1])
x10 = gen_array_ops.reshape(x, [4, 3, 4, 12])
b = self._ConstOp((3,))
x10 = nn.bias_add(x10, b, data_format="NCHW")
x10 = gen_array_ops.reshape(x10, [4, -1])
x11 = gen_array_ops.reshape(x, [4, 6, 24])
b = self._ConstOp((6,))
x11 = nn.bias_add(x11, b, data_format="NCHW")
x11 = gen_array_ops.reshape(x11, [4, -1])
out = array_ops.concat([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11],
axis=-1)
return array_ops.squeeze(out, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[4, 144]],
[[4, 6680]])
def setUp(self):
super().setUp()
# Disable layout optimizer, since it will convert BiasAdd with NHWC
# format to NCHW format under four dimensional input.
self.DisableNonTrtOptimizers()
def GetMaxBatchSize(self, run_params):
"""Returns the max_batch_size that the converter should use for tests."""
if run_params.dynamic_engine:
return None
return 4
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
if run_params.dynamic_shape:
# Increased conversion rate in dynamic shape mode due to a few additional
# conversions for MatMul, Reshape and Concat ops. This increases the size
# of the candidate segments and results in two more TrtEngineOps.
return ["TRTEngineOp_000", "TRTEngineOp_001", "TRTEngineOp_002"]
else:
return ["TRTEngineOp_000"]
if __name__ == "__main__":
test.main()
| BiasaddMatMulTest |
python | Lightning-AI__lightning | src/lightning/pytorch/tuner/lr_finder.py | {
"start": 2513,
"end": 14301
} | class ____:
"""LR finder object. This object stores the results of lr_find().
Args:
mode: either `linear` or `exponential`, how to increase lr after each step
lr_min: lr to start search from
lr_max: lr to stop search
num_training: number of steps to take between lr_min and lr_max
"""
def __init__(self, mode: str, lr_min: float, lr_max: float, num_training: int) -> None:
assert mode in ("linear", "exponential"), "mode should be either `linear` or `exponential`"
self.mode = mode
self.lr_min = lr_min
self.lr_max = lr_max
self.num_training = num_training
self.results: dict[str, Any] = {}
self._total_batch_idx = 0 # for debug purpose
def _exchange_scheduler(self, trainer: "pl.Trainer") -> None:
# TODO: update docs here
"""Decorate `trainer.strategy.setup_optimizers` method such that it sets the user's originally specified
optimizer together with a new scheduler that takes care of the learning rate search."""
from lightning.pytorch.core.optimizer import _validate_optimizers_attached
optimizers = trainer.strategy.optimizers
if len(optimizers) != 1:
raise MisconfigurationException(
f"`model.configure_optimizers()` returned {len(optimizers)}, but"
" learning rate finder only works with single optimizer"
)
optimizer = optimizers[0]
new_lrs = [self.lr_min] * len(optimizer.param_groups)
for param_group, new_lr in zip(optimizer.param_groups, new_lrs):
param_group["lr"] = new_lr
param_group["initial_lr"] = new_lr
args = (optimizer, self.lr_max, self.num_training)
scheduler = _LinearLR(*args) if self.mode == "linear" else _ExponentialLR(*args)
trainer.strategy.optimizers = [optimizer]
trainer.strategy.lr_scheduler_configs = [LRSchedulerConfig(scheduler, interval="step")]
_validate_optimizers_attached(trainer.optimizers, trainer.lr_scheduler_configs)
def plot(
self, suggest: bool = False, show: bool = False, ax: Optional["Axes"] = None
) -> Optional[Union["plt.Figure", "plt.SubFigure"]]:
"""Plot results from lr_find run
Args:
suggest: if True, will mark suggested lr to use with a red point
show: if True, will show figure
ax: Axes object to which the plot is to be drawn. If not provided, a new figure is created.
"""
if not _MATPLOTLIB_AVAILABLE:
raise MisconfigurationException(
"To use the `plot` method, you must have Matplotlib installed."
" Install it by running `pip install -U matplotlib`."
)
import matplotlib.pyplot as plt
lrs = self.results["lr"]
losses = self.results["loss"]
fig: Optional[Union[plt.Figure, plt.SubFigure]]
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
# Plot loss as a function of the learning rate
ax.plot(lrs, losses)
if self.mode == "exponential":
ax.set_xscale("log")
ax.set_xlabel("Learning rate")
ax.set_ylabel("Loss")
if suggest:
_ = self.suggestion()
if self._optimal_idx:
ax.plot(lrs[self._optimal_idx], losses[self._optimal_idx], markersize=10, marker="o", color="red")
if show:
plt.show()
return fig
def suggestion(self, skip_begin: int = 10, skip_end: int = 1) -> Optional[float]:
"""This will propose a suggestion for an initial learning rate based on the point with the steepest negative
gradient.
Args:
skip_begin: how many samples to skip in the beginning; helps to avoid too naive estimates
skip_end: how many samples to skip in the end; helps to avoid too optimistic estimates
Returns:
The suggested initial learning rate to use, or `None` if a suggestion is not possible due to too few
loss samples.
"""
losses = torch.tensor(self.results["loss"][skip_begin:-skip_end])
lrs = torch.tensor(self.results["lr"][skip_begin:-skip_end])
is_finite = torch.isfinite(losses)
losses = losses[is_finite]
lrs = lrs[is_finite]
if len(losses) < 2:
# computing torch.gradient requires at least 2 points
log.error(
"Failed to compute suggestion for learning rate because there are not enough points. Increase the loop"
" iteration limits or the size of your dataset/dataloader."
)
self._optimal_idx = None
return None
gradients = torch.gradient(losses, spacing=[lrs])[0] # Compute the gradient of losses w.r.t. learning rates
min_grad = torch.argmin(gradients).item()
all_losses_idx = torch.arange(len(self.results["loss"]))
idx_non_skipped = all_losses_idx[skip_begin:-skip_end]
idx_finite = idx_non_skipped[is_finite]
self._optimal_idx = idx_finite[min_grad].item() # type: ignore
return self.results["lr"][self._optimal_idx]
def _lr_find(
trainer: "pl.Trainer",
model: "pl.LightningModule",
min_lr: float = 1e-8,
max_lr: float = 1,
num_training: int = 100,
mode: str = "exponential",
early_stop_threshold: Optional[float] = 4.0,
update_attr: bool = False,
attr_name: str = "",
) -> Optional[_LRFinder]:
"""Enables the user to do a range test of good initial learning rates, to reduce the amount of guesswork in picking
a good starting learning rate.
Args:
trainer: A Trainer instance.
model: Model to tune.
min_lr: minimum learning rate to investigate
max_lr: maximum learning rate to investigate
num_training: number of learning rates to test
mode: Search strategy to update learning rate after each batch:
- ``'exponential'``: Increases the learning rate exponentially.
- ``'linear'``: Increases the learning rate linearly.
early_stop_threshold: Threshold for stopping the search. If the
loss at any point is larger than early_stop_threshold*best_loss
then the search is stopped. To disable, set to None.
update_attr: Whether to update the learning rate attribute or not.
attr_name: Name of the attribute which stores the learning rate. The names 'learning_rate' or 'lr' get
automatically detected. Otherwise, set the name here.
"""
if trainer.fast_dev_run:
rank_zero_warn("Skipping learning rate finder since `fast_dev_run` is enabled.")
return None
# Determine lr attr
if update_attr:
attr_name = _determine_lr_attr_name(model, attr_name)
# Save initial model, that is loaded after learning rate is found
ckpt_path = os.path.join(trainer.default_root_dir, f".lr_find_{uuid.uuid4()}.ckpt")
ckpt_path = trainer.strategy.broadcast(ckpt_path)
trainer.save_checkpoint(ckpt_path)
start_steps = trainer.global_step
# Arguments we adjust during the lr finder, save for restoring
params = __lr_finder_dump_params(trainer)
# Set to values that are required by the algorithm
__lr_finder_reset_params(trainer, num_training, early_stop_threshold)
# Disable standard progress bar for fit
if trainer.progress_bar_callback:
trainer.progress_bar_callback.disable()
# Initialize lr finder object (stores results)
lr_finder = _LRFinder(mode, min_lr, max_lr, num_training)
lr_finder_finished = False
try:
# Configure optimizer and scheduler
lr_finder._exchange_scheduler(trainer)
# Fit, lr & loss logged in callback
_try_loop_run(trainer, params)
# Prompt if we stopped early
if trainer.global_step != num_training + start_steps:
log.info(f"LR finder stopped early after {trainer.global_step} steps due to diverging loss.")
# Transfer results from callback to lr finder object
lr_finder.results.update({"lr": trainer.callbacks[0].lrs, "loss": trainer.callbacks[0].losses})
lr_finder._total_batch_idx = trainer.fit_loop.total_batch_idx # for debug purpose
__lr_finder_restore_params(trainer, params)
if trainer.progress_bar_callback:
trainer.progress_bar_callback.enable()
# Update results across ranks
lr_finder.results = trainer.strategy.broadcast(lr_finder.results)
lr_finder_finished = True
except Exception as ex:
raise ex
finally:
# Restore initial state of model (this will also restore the original optimizer state)
trainer._checkpoint_connector.restore(ckpt_path)
trainer.strategy.remove_checkpoint(ckpt_path)
trainer.fit_loop.restarting = False # reset restarting flag as checkpoint restoring sets it to True
trainer.fit_loop.epoch_loop.restarting = False # reset restarting flag as checkpoint restoring sets it to True
trainer.fit_loop.epoch_loop.val_loop._combined_loader = None
trainer.fit_loop._combined_loader = None # reset data fetcher to avoid issues with the next fit
trainer.fit_loop.setup_data()
# Apply LR suggestion after restoring so it persists for the real training run
# When used as a callback, the suggestion would otherwise be lost due to checkpoint restore
if update_attr and lr_finder_finished:
lr = lr_finder.suggestion()
if lr is not None:
# update the attribute on the LightningModule (e.g., lr or learning_rate)
lightning_setattr(model, attr_name, lr)
# also update the currently active optimizer(s) so training continues with the suggested LR
for opt in trainer.optimizers or []:
for pg in opt.param_groups:
pg["lr"] = lr
log.info(f"Learning rate set to {lr}")
return lr_finder
def __lr_finder_dump_params(trainer: "pl.Trainer") -> dict[str, Any]:
return {
"optimizers": trainer.strategy.optimizers,
"lr_scheduler_configs": trainer.strategy.lr_scheduler_configs,
"callbacks": trainer.callbacks,
"loggers": trainer.loggers,
"max_steps": trainer.fit_loop.max_steps,
"limit_val_batches": trainer.limit_val_batches,
"loop_state_dict": deepcopy(trainer.fit_loop.state_dict()),
}
def __lr_finder_reset_params(trainer: "pl.Trainer", num_training: int, early_stop_threshold: Optional[float]) -> None:
from lightning.pytorch.loggers.logger import DummyLogger
trainer.strategy.lr_scheduler_configs = []
# Use special lr logger callback
trainer.callbacks = [_LRCallback(num_training, early_stop_threshold, progress_bar_refresh_rate=1)]
# No logging
trainer.logger = DummyLogger() if trainer.logger is not None else None
# Max step set to number of iterations starting at current number of iterations
trainer.fit_loop.epoch_loop.max_steps = num_training + trainer.global_step
trainer.limit_val_batches = num_training
def __lr_finder_restore_params(trainer: "pl.Trainer", params: dict[str, Any]) -> None:
trainer.strategy.optimizers = params["optimizers"]
trainer.strategy.lr_scheduler_configs = params["lr_scheduler_configs"]
trainer.callbacks = params["callbacks"]
trainer.loggers = params["loggers"]
loop = trainer.fit_loop
loop.epoch_loop.max_steps = params["max_steps"]
trainer.limit_val_batches = params["limit_val_batches"]
loop.load_state_dict(deepcopy(params["loop_state_dict"]))
loop.restarting = False
trainer.should_stop = False
| _LRFinder |
python | celery__celery | t/unit/utils/test_objects.py | {
"start": 41,
"end": 172
} | class ____:
def test(self):
x = Bunch(foo='foo', bar=2)
assert x.foo == 'foo'
assert x.bar == 2
| test_Bunch |
python | jazzband__django-waffle | waffle/middleware.py | {
"start": 187,
"end": 1199
} | class ____(MiddlewareMixin):
def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
secure = get_setting('SECURE')
max_age = get_setting('MAX_AGE')
if hasattr(request, 'waffles'):
for k in request.waffles:
name = smart_str(get_setting('COOKIE') % k)
active, rollout = request.waffles[k]
if rollout and not active:
# "Inactive" is a session cookie during rollout mode.
age = None
else:
age = max_age
response.set_cookie(name, value=active, max_age=age,
secure=secure)
if hasattr(request, 'waffle_tests'):
for k in request.waffle_tests:
name = smart_str(get_setting('TEST_COOKIE') % k)
value = request.waffle_tests[k]
response.set_cookie(name, value=value)
return response
| WaffleMiddleware |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datacatalog.py | {
"start": 13361,
"end": 15185
} | class ____:
@mock.patch(
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogHook",
**{"return_value.create_tag_template.return_value": TEST_TAG_TEMPLATE},
)
def test_assert_valid_hook_call(self, mock_hook) -> None:
with pytest.warns(AirflowProviderDeprecationWarning):
task = CloudDataCatalogCreateTagTemplateOperator(
task_id="task_id",
location=TEST_LOCATION,
tag_template_id=TEST_TAG_TEMPLATE_ID,
tag_template=TEST_TAG_TEMPLATE,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_ti = mock.MagicMock()
mock_context = {"ti": mock_ti}
if not AIRFLOW_V_3_0_PLUS:
mock_context["task"] = task # type: ignore[assignment]
result = task.execute(context=mock_context) # type: ignore[arg-type]
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_tag_template.assert_called_once_with(
location=TEST_LOCATION,
tag_template_id=TEST_TAG_TEMPLATE_ID,
tag_template=TEST_TAG_TEMPLATE,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_ti.xcom_push.assert_any_call(
key="tag_template_id",
value=TEST_TAG_TEMPLATE_ID,
)
assert result == TEST_TAG_TEMPLATE_DICT
| TestCloudDataCatalogCreateTagTemplateOperator |
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 854,
"end": 1220
} | class ____:
team_project_role_name = wtforms.SelectField(
"Select permissions",
choices=[("", "Select role"), ("Maintainer", "Maintainer"), ("Owner", "Owner")],
coerce=lambda string: TeamProjectRoleType(string) if string else None,
validators=[wtforms.validators.InputRequired(message="Select role")],
)
| TeamProjectRoleNameMixin |
python | jazzband__django-pipeline | pipeline/compressors/__init__.py | {
"start": 14088,
"end": 14306
} | class ____:
def __init__(self, verbose):
self.verbose = verbose
def filter_css(self, css):
raise NotImplementedError
def filter_js(self, js):
raise NotImplementedError
| CompressorBase |
python | cython__cython | Cython/Shadow.py | {
"start": 6382,
"end": 6791
} | class ____:
def __init__(self, arg0, arg1=None):
# It's ambiguous if this is being used as a decorator or context manager
# even with a callable arg.
self.arg0 = arg0
def __call__(self, *args, **kwds):
return self.arg0(*args, **kwds)
def __enter__(self):
pass
def __exit__(self, exc_class, exc, tb):
return False
# Emulated types
| critical_section |
python | gevent__gevent | src/gevent/tests/test__issue607.py | {
"start": 109,
"end": 203
} | class ____(greentest.ExpectedException):
pass
def f():
gevent.sleep(999)
| ExpectedError |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-postgresml/llama_index/indices/managed/postgresml/base.py | {
"start": 731,
"end": 977
} | class ____(IndexDict):
"""PostgresML Index Struct."""
@classmethod
def get_type(cls) -> IndexStructType:
"""Get index struct type."""
# return IndexStructType.POSTGRESML
return "POSTGRESML"
| PostgresMLIndexStruct |
python | django__django | tests/raw_query/models.py | {
"start": 1159,
"end": 1271
} | class ____(models.Model):
id = models.AutoField(primary_key=True, db_column="MiXeD_CaSe_Id")
| MixedCaseIDColumn |
python | milvus-io__pymilvus | pymilvus/client/asynch.py | {
"start": 5331,
"end": 5493
} | class ____(Future):
def on_response(self, response: Any):
check_status(response)
return Status(response.code, response.reason)
| CreateIndexFuture |
python | pytorch__pytorch | benchmarks/dynamo/genai_layers/utils.py | {
"start": 1196,
"end": 10684
} | class ____:
def __init__(self, script_args):
self.script_args = script_args
self.name = self.__class__.__name__
self.available_backends: list[str] = []
self.compile_mode: str = script_args.compile_mode
# mapping from backend to list of performance results
self.profiling_results: defaultdict[str, list[Performance]] = defaultdict(list)
def get_memory_bytes(self, args, kwargs) -> int:
# Get the necessary memory access in bytes for the kernelßß
raise NotImplementedError
def get_shapes(self) -> tuple[tuple[int, ...], ...]:
# Get a list of input shapes to benchmark the kernel
raise NotImplementedError
def eager(self, args, kwargs) -> Any:
raise NotImplementedError
def compiled(self, args, kwargs) -> Any:
raise NotImplementedError
def helion(self, args, kwargs) -> Any:
raise NotImplementedError
def quack(self, args, kwargs) -> Any:
raise NotImplementedError
def liger(self, args, kwargs) -> Any:
raise NotImplementedError
def triton(self, args, kwargs) -> Any:
raise NotImplementedError
def benchmark(self):
raise NotImplementedError
def clone_inputs(self, args, kwargs) -> Any:
args_ref = [
arg.clone().detach().requires_grad_(arg.requires_grad) for arg in args
]
kwargs_ref = (
{
k: (
v.clone().detach().requires_grad_(v.requires_grad)
if isinstance(v, torch.Tensor)
else v
)
for k, v in kwargs.items()
}
if kwargs
else kwargs
)
return args_ref, kwargs_ref
def check_accuracy(self, args, kwargs) -> None:
res = {}
for backend in self.available_backends:
args_ref, kwargs_ref = self.clone_inputs(args, kwargs)
res[backend] = getattr(self, backend)(args_ref, kwargs_ref)()
if (
"compiled" in self.available_backends
and self.script_args.custom_compile_options
):
torch._dynamo.reset() # cause recompile
with torch._inductor.config.patch(self.script_args.custom_compile_options):
args_ref, kwargs_ref = self.clone_inputs(args, kwargs)
res[self.script_args.custom_compile_name] = self.compiled(
args_ref, kwargs_ref
)()
gold = res["eager"]
tol = {}
if self.script_args.tolerance:
tol = {
"atol": self.script_args.tolerance,
"rtol": self.script_args.tolerance,
}
for backend in res:
if backend == "eager":
continue
try:
torch.testing.assert_close(res[backend], gold, **tol)
for t, gold_t in zip(res[backend], gold):
if t.requires_grad:
torch.testing.assert_close(t.grad, gold_t.grad, **tol)
print(
f"Accuracy check \033[92m✓ succeed\033[0m for {backend} backend on {self.name} kernel"
)
except Exception as e:
print(
f"Accuracy check \033[91m✗ failed\033[0m for {backend} backend on {self.name} kernel. Error {e}"
)
if self.script_args.exit_on_accuracy_failure:
print("Exit right away since --exit-on-accuracy-failure is set")
sys.exit(1)
def benchmark_single_shape_for_backend(
self, backend, args, kwargs, setting, fn=None
) -> bool:
if fn is None:
fn = getattr(self, backend)
args_ref, kwargs_ref = self.clone_inputs(args, kwargs)
try:
avg_time = benchmark_kernel_in_milliseconds(fn(args_ref, kwargs_ref))
except Exception as e:
print(
f"Failed to run {backend} backend on {self.name} kernel for {setting} due to {e}"
)
self.available_backends.remove(backend) # noqa: B909
return False
mem_bytes = self.get_memory_bytes(args_ref, kwargs_ref)
perf = Performance(setting, avg_time, mem_bytes)
print(f"{self.name} kernel on {backend} backend. {perf}")
self.profiling_results[backend].append(perf)
return True
def benchmark_single_shape(
self, args, kwargs=None, should_check_accuracy=True, setting: str = ""
):
for backend in self.available_backends:
self.benchmark_single_shape_for_backend(backend, args, kwargs, setting)
if (
"compiled" in self.available_backends
and self.script_args.custom_compile_options
):
torch._dynamo.reset() # cause recompile
with torch._inductor.config.patch(self.script_args.custom_compile_options):
status = self.benchmark_single_shape_for_backend(
self.script_args.custom_compile_name,
args,
kwargs,
setting,
fn=self.compiled,
)
if not status:
self.script_args.custom_compile_options = (
None # once fail, don't run again
)
if should_check_accuracy:
self.check_accuracy(args, kwargs)
def visualize(self) -> None:
device_name = torch.cuda.get_device_name(0)
visualize_comparison(
self.profiling_results,
title=f"{self.name} ({device_name})",
output_path=f"{self.name}_bench",
)
return
def report_geomean_speedup(self) -> None:
print(f"Geomean speedup for benchmark {self.name}")
eager_result = {
result.setting: result for result in self.profiling_results["eager"]
}
print(f" eager {len(eager_result)} data points")
for backend, backend_result in self.profiling_results.items():
if backend == "eager":
continue
speeduplist = []
for result in backend_result:
eager_latency = eager_result[result.setting].latency
backend_latency = result.latency
speeduplist.append(
eager_latency / backend_latency if backend_latency != 0 else 0.0
)
if len(speeduplist) > 0:
print(
f" {backend} {len(speeduplist)} data points, {gmean(speeduplist):.2f}x speedup"
)
def get_backend_colors() -> dict[str, str]:
"""Get consistent color scheme for different backends."""
return {
"eager": "#1f77b4", # blue
"compiled": "#ff7f0e", # orange
"quack": "#2ca02c", # green
"liger": "#d62728", # red
"helion": "#9467bd", # purple
"triton": "#8c564b", # brown
"cutlass": "#e377c2", # pink
"flash_attn": "#7f7f7f", # gray
"default": "#000000", # black
}
def visualize_comparison(
profiling_results: dict[str, list[Performance]],
title: Optional[str] = None,
output_path: Optional[str] = None,
) -> None:
"""
Create a single memory_bandwidth comparison plot from profiling results.
Args:
profiling_results: Dict mapping backend names to lists of Performance objects
output_path: Path to save the plot (optional)
"""
# Get backend colors
backend_colors = get_backend_colors()
# Extract settings from eager backend which runs all settings
all_settings = []
for perf in profiling_results["eager"]:
all_settings.append(perf.setting)
# Create single plot
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
for backend in profiling_results:
backend_perfs = profiling_results[backend]
perf_dict = {perf.setting: perf for perf in backend_perfs}
x_vals = []
y_vals = []
for i, setting in enumerate(all_settings):
if setting in perf_dict:
x_vals.append(i)
y_vals.append(perf_dict[setting].memory_bandwidth)
if x_vals: # Only plot if we have data
color = backend_colors.get(backend, backend_colors["default"])
ax.plot(
x_vals,
y_vals,
"o-",
label=backend,
color=color,
linewidth=2,
markersize=8,
alpha=0.8,
)
# Configure the plot
ax.set_title(title or "Memory Bandwidth Comparison", fontsize=16)
ax.set_xlabel("Shape", fontsize=12)
ax.set_ylabel("memory bandwidth (GB/s)", fontsize=12)
ax.set_xticks(range(len(all_settings)))
ax.set_xticklabels(
[
s.replace("shape: ", "").replace("[", "").replace("]", "")
for s in all_settings
],
rotation=45,
ha="right",
)
ax.legend(fontsize=10)
ax.grid(True, alpha=0.3)
plt.tight_layout()
# Save the plot if output path is provided
if output_path:
# Save as PNG
os.makedirs("pics", exist_ok=True)
full_path = os.path.join("pics", output_path + ".png")
plt.savefig(full_path, dpi=300, bbox_inches="tight", facecolor="white")
print(f"Chart saved to {full_path}")
plt.close()
| BenchmarkKernel |
python | pytorch__pytorch | test/dynamo/test_graph_deduplication.py | {
"start": 3868,
"end": 6340
} | class ____(torch.nn.Module):
def forward(self, primals_1: "f32[10, 10]", primals_2: "f32[10, 20]"):
sin: "f32[10, 20]" = torch.ops.aten.sin.default(primals_2)
partitioned_fw_subgraph_0_0 = self.partitioned_fw_subgraph_0_0
invoke_subgraph_5 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_0_0, 'partitioned_fw_subgraph_0_0', primals_1, sin); partitioned_fw_subgraph_0_0 = sin = None
getitem_1: "f32[]" = invoke_subgraph_5[0]; invoke_subgraph_5 = None
partitioned_fw_subgraph_0_1 = self.partitioned_fw_subgraph_0_0
invoke_subgraph_7 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_0_1, 'partitioned_fw_subgraph_0_0', primals_1, primals_2); partitioned_fw_subgraph_0_1 = primals_1 = None
getitem_2: "f32[]" = invoke_subgraph_7[0]; invoke_subgraph_7 = None
mul: "f32[]" = torch.ops.aten.mul.Tensor(getitem_2, getitem_2)
mul_1: "f32[]" = torch.ops.aten.mul.Tensor(getitem_1, mul); mul = None
return (mul_1, primals_2, getitem_1, getitem_2)
class partitioned_fw_subgraph_0_0(torch.nn.Module):
def forward(self, primals_0: "f32[10, 10]", primals_1: "f32[10, 20]"):
add: "f32[10, 10]" = torch.ops.aten.add.Tensor(primals_0, 1); primals_0 = None
add_1: "f32[10, 20]" = torch.ops.aten.add.Tensor(primals_1, 2); primals_1 = None
sum_1: "f32[]" = torch.ops.aten.sum.default(add); add = None
sum_2: "f32[]" = torch.ops.aten.sum.default(add_1); add_1 = None
add_2: "f32[]" = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
return (add_2,)
""",
)
def test_single_subgraph2(self):
def fn(x):
x0 = x + 2
o = inner_fn(x0)
o = torch.cos(o)
o = inner_fn(o)
return torch.sin(o)
def inner_fn(x):
o = x * 7
o += 1
o += 2
return o
x = torch.rand(10, 10, requires_grad=True)
x_clone = x.clone().requires_grad_(True)
ref_result = fn(x)
result, graphs, fw_graphs = self.run_and_return_graphs(fn, x_clone)
torch.allclose(ref_result, result)
ref_result.sum().backward()
result.sum().backward()
self.assertEqual(len(graphs), 1)
self.assertEqual(len(fw_graphs), 1)
self.assertExpectedInline(
graph_str(graphs[0]),
"""\
| GraphModule |
python | xlwings__xlwings | tests/test_app.py | {
"start": 5035,
"end": 5854
} | class ____(unittest.TestCase):
def test_properties_context_manager(self):
book = xw.Book()
app = book.app
self.assertTrue(app.display_alerts)
self.assertTrue(app.enable_events)
with app.properties(display_alerts=False):
self.assertFalse(app.display_alerts)
self.assertTrue(app.enable_events)
with app.properties(display_alerts=True, enable_events=False):
self.assertTrue(app.display_alerts)
self.assertFalse(app.enable_events)
self.assertFalse(app.display_alerts)
self.assertTrue(app.enable_events)
self.assertTrue(app.display_alerts)
self.assertTrue(app.enable_events)
book.close()
if __name__ == "__main__":
unittest.main()
| TestAppPropertiesContextManager |
python | getsentry__sentry | src/sentry/sentry_apps/installations.py | {
"start": 8702,
"end": 9752
} | class ____:
sentry_app_installation: SentryAppInstallation
status: str | None = None
def run(self) -> SentryAppInstallation:
with transaction.atomic(router.db_for_write(SentryAppInstallation)):
self._update_status()
self.record_analytics()
return self.sentry_app_installation
def _update_status(self) -> None:
# convert from string to integer
if self.status == SentryAppInstallationStatus.INSTALLED_STR:
for install in SentryAppInstallation.objects.filter(id=self.sentry_app_installation.id):
install.update(status=SentryAppInstallationStatus.INSTALLED)
def record_analytics(self) -> None:
analytics.record(
SentryAppInstallationUpdatedEvent(
sentry_app_installation_id=self.sentry_app_installation.id,
sentry_app_id=self.sentry_app_installation.sentry_app.id,
organization_id=self.sentry_app_installation.organization_id,
)
)
| SentryAppInstallationUpdater |
python | walkccc__LeetCode | solutions/1993. Operations on Tree/1993.py | {
"start": 93,
"end": 1434
} | class ____:
def __init__(self, parent: list[int]):
self.parent = parent
self.nodes = [Node() for _ in range(len(parent))]
for i in range(1, len(parent)):
self.nodes[parent[i]].children.append(i)
def lock(self, num: int, user: int) -> bool:
if self.nodes[num].lockedBy != -1:
return False
self.nodes[num].lockedBy = user
return True
def unlock(self, num: int, user: int) -> bool:
if self.nodes[num].lockedBy != user:
return False
self.nodes[num].lockedBy = -1
return True
def upgrade(self, num: int, user: int) -> bool:
if self.nodes[num].lockedBy != -1:
return False
if not self._anyLockedDescendant(num):
return False
# Walk up the hierarchy to ensure that there are no locked ancestors.
i = num
while i != -1:
if self.nodes[i].lockedBy != -1:
return False
i = self.parent[i]
self._unlockDescendants(num)
self.nodes[num].lockedBy = user
return True
def _anyLockedDescendant(self, i: int) -> bool:
return (self.nodes[i].lockedBy != -1 or
any(self._anyLockedDescendant(child)
for child in self.nodes[i].children))
def _unlockDescendants(self, i: int) -> None:
self.nodes[i].lockedBy = -1
for child in self.nodes[i].children:
self._unlockDescendants(child)
| LockingTree |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 115258,
"end": 122725
} | class ____:
def test_invalid_descriptions(self):
with pytest.raises(TypeError):
x509.SubjectInformationAccess(
["notanAccessDescription"] # type:ignore[list-item]
)
def test_iter_len(self):
sia = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca2.domain.com"),
),
]
)
assert len(sia) == 2
assert list(sia) == [
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca2.domain.com"),
),
]
def test_iter_input(self):
desc = [
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
)
]
sia = x509.SubjectInformationAccess(iter(desc))
assert list(sia) == desc
def test_repr(self):
sia = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
)
]
)
assert repr(sia) == (
"<SubjectInformationAccess([<AccessDescription(access_method"
"=<ObjectIdentifier(oid=1.3.6.1.5.5.7.48.5, name=caRepositor"
"y)>, access_location=<UniformResourceIdentifier(value='http"
"://ca.domain.com')>)>])>"
)
def test_eq(self):
sia = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca2.domain.com"),
),
]
)
sia2 = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca2.domain.com"),
),
]
)
assert sia == sia2
def test_ne(self):
sia = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca2.domain.com"),
),
]
)
sia2 = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
]
)
assert sia != sia2
assert sia != object()
def test_indexing(self):
sia = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca2.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca3.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca4.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca5.domain.com"),
),
]
)
assert sia[-1] == sia[4]
assert sia[2:6:2] == [sia[2], sia[4]]
def test_hash(self):
sia = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca2.domain.com"),
),
]
)
sia2 = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca2.domain.com"),
),
]
)
sia3 = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca3.domain.com"),
),
]
)
assert hash(sia) == hash(sia2)
assert hash(sia) != hash(sia3)
def test_public_bytes(self):
ext = x509.SubjectInformationAccess(
[
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca.domain.com"),
),
x509.AccessDescription(
SubjectInformationAccessOID.CA_REPOSITORY,
x509.UniformResourceIdentifier("http://ca3.domain.com"),
),
]
)
assert (
ext.public_bytes()
== b"0E0 \x06\x08+\x06\x01\x05\x05\x070\x05\x86\x14http://"
b"ca.domain.com0!\x06\x08+\x06\x01\x05\x05\x070\x05\x86\x15"
b"http://ca3.domain.com"
)
| TestSubjectInformationAccess |
python | sphinx-doc__sphinx | sphinx/ext/ifconfig.py | {
"start": 878,
"end": 2512
} | class ____(SphinxDirective):
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
def run(self) -> list[Node]:
node = ifconfig()
node.document = self.state.document
self.set_source_info(node)
node['expr'] = self.arguments[0]
node += self.parse_content_to_nodes(allow_section_headings=True)
return [node]
def process_ifconfig_nodes(app: Sphinx, doctree: nodes.document, docname: str) -> None:
ns = {confval.name: confval.value for confval in app.config}
ns.update(app.config.__dict__.copy())
ns['builder'] = app.builder.name
for node in list(doctree.findall(ifconfig)):
try:
res = eval(node['expr'], ns) # NoQA: S307
except Exception as err:
# handle exceptions in a clean fashion
from traceback import format_exception_only
msg = ''.join(format_exception_only(err.__class__, err))
newnode = doctree.reporter.error(
f'Exception occurred in ifconfig expression: \n{msg}', base_node=node
)
node.replace_self(newnode)
else:
if not res:
node.replace_self([])
else:
node.replace_self(node.children)
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_node(ifconfig)
app.add_directive('ifconfig', IfConfig)
app.connect('doctree-resolved', process_ifconfig_nodes)
return {
'version': sphinx.__display_version__,
'parallel_read_safe': True,
}
| IfConfig |
python | aimacode__aima-python | games.py | {
"start": 9525,
"end": 10408
} | class ____(Game):
"""The game represented in [Figure 5.2]. Serves as a simple test case."""
succs = dict(A=dict(a1='B', a2='C', a3='D'),
B=dict(b1='B1', b2='B2', b3='B3'),
C=dict(c1='C1', c2='C2', c3='C3'),
D=dict(d1='D1', d2='D2', d3='D3'))
utils = dict(B1=3, B2=12, B3=8, C1=2, C2=4, C3=6, D1=14, D2=5, D3=2)
initial = 'A'
def actions(self, state):
return list(self.succs.get(state, {}).keys())
def result(self, state, move):
return self.succs[state][move]
def utility(self, state, player):
if player == 'MAX':
return self.utils[state]
else:
return -self.utils[state]
def terminal_test(self, state):
return state not in ('A', 'B', 'C', 'D')
def to_move(self, state):
return 'MIN' if state in 'BCD' else 'MAX'
| Fig52Game |
python | huggingface__transformers | src/transformers/models/longcat_flash/modeling_longcat_flash.py | {
"start": 20871,
"end": 24013
} | class ____(GradientCheckpointingLayer):
"""
LongCat decoder layer with dual-sublayer + shortcut MoE architecture.
Each logical layer contains:
- 2 attention sublayers (with layer indices: layer_idx*2, layer_idx*2+1)
- 2 MLP sublayers
- 1 shortcut MoE connection
"""
def __init__(self, config, layer_idx: int):
super().__init__()
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.mlp = LongcatFlashMoE(config)
self.self_attn = nn.ModuleList([LongcatFlashMLA(config=config, layer_idx=layer_idx * 2 + i) for i in [0, 1]])
self.mlps = nn.ModuleList([LongcatFlashMLP(config) for _ in [0, 1]])
self.input_layernorm = nn.ModuleList(
[LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps) for _ in [0, 1]]
)
self.post_attention_layernorm = nn.ModuleList(
[LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps) for _ in [0, 1]]
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm[0](hidden_states)
hidden_states, _ = self.self_attn[0](
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm[0](hidden_states)
shortcut_mlp_output = self.mlp(hidden_states)
hidden_states = self.mlps[0](hidden_states)
hidden_states = residual + hidden_states
# shortcut connection after second sublayer
residual = hidden_states
hidden_states = self.input_layernorm[1](hidden_states)
hidden_states, _ = self.self_attn[1](
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm[1](hidden_states)
hidden_states = self.mlps[1](hidden_states)
hidden_states = residual + hidden_states + shortcut_mlp_output
return hidden_states
@auto_docstring
| LongcatFlashDecoderLayer |
python | joke2k__faker | tests/providers/test_job.py | {
"start": 1506,
"end": 1695
} | class ____:
"""Test az_AZ job provider"""
def test_job(self, faker, num_samples):
for _ in range(num_samples):
assert faker.job() in AzAzJobProvider.jobs
| TestAzAz |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-konko/llama_index/llms/konko/base.py | {
"start": 1373,
"end": 1458
} | class ____:
name: str
max_context_length: int
is_chat_model: bool
| ModelInfo |
python | jazzband__django-oauth-toolkit | tests/test_rest_framework.py | {
"start": 2985,
"end": 4066
} | class ____(MockView):
authentication_classes = [AuthenticationNone]
urlpatterns = [
path("oauth2/", include("oauth2_provider.urls")),
path("oauth2-test/", OAuth2View.as_view()),
path("oauth2-scoped-test/", ScopedView.as_view()),
path("oauth2-scoped-missing-auth/", TokenHasScopeViewWrongAuth.as_view()),
path("oauth2-read-write-test/", ReadWriteScopedView.as_view()),
path("oauth2-resource-scoped-test/", ResourceScopedView.as_view()),
path("oauth2-authenticated-or-scoped-test/", AuthenticatedOrScopedView.as_view()),
re_path(r"oauth2-method-scope-test/.*$", MethodScopeAltView.as_view()),
path("oauth2-method-scope-fail/", MethodScopeAltViewBad.as_view()),
path("oauth2-method-scope-missing-auth/", MethodScopeAltViewWrongAuth.as_view()),
path("oauth2-authentication-none/", AuthenticationNoneOAuth2View.as_view()),
]
@override_settings(ROOT_URLCONF=__name__)
@pytest.mark.nologinrequiredmiddleware
@pytest.mark.usefixtures("oauth2_settings")
@pytest.mark.oauth2_settings(presets.REST_FRAMEWORK_SCOPES)
| AuthenticationNoneOAuth2View |
python | tensorflow__tensorflow | tensorflow/python/training/saving/saveable_object_util.py | {
"start": 24346,
"end": 27593
} | class ____(saveable_object.SaveableObject):
"""Saves Python state in a checkpoint."""
def __init__(self, name, state_callback, restore_callback):
"""Configure saving.
Args:
name: The checkpoint key to write to.
state_callback: A function taking no arguments which returns a string.
This function is run every time a checkpoint is written.
restore_callback: A function taking a Python string, used to restore
state.
"""
def _state_callback_wrapper():
with ops.init_scope():
return state_callback()
self._state_callback = _state_callback_wrapper
self._restore_callback = restore_callback
with ops.device("/cpu:0"):
self._save_string = constant_op.constant("", dtype=dtypes.string)
spec = saveable_object.SaveSpec(
self._save_string, "", name, dtype=dtypes.string)
super(_PythonStringStateSaveable, self).__init__(self._save_string, [spec],
name)
def feed_dict_additions(self):
"""When running a graph, indicates fresh state to feed."""
return {self._save_string: self._state_callback()}
def freeze(self):
"""Create a frozen `SaveableObject` which saves the current state."""
def _constant_state():
return constant_op.constant(self._state_callback(), dtype=dtypes.string)
return trackable.NoRestoreSaveable(
tensor=_constant_state,
dtype=dtypes.string,
name=self.name,
device="cpu:0")
def trackable_has_serialize_to_tensor(obj):
"""Returns whether obj's class has `_serialize_to_tensors` defined."""
if obj is base_delegate.DelegatingTrackableMixin:
# DelegatingTrackableMixin always delegates "_serialize_to_tensors"
# to its inner `trackable`, so we check whether the inner trackable
# has `_serialize_to_tensor`.
return trackable_has_serialize_to_tensor(obj._trackable) # pylint: disable=protected-access
try:
if "_serialize_to_tensors" in obj.__dict__:
# In some cases (e.g. restored objects), the object may have
# `_serialize_to_tensors` even if the class does not.
return True
except (AttributeError, TypeError):
# Data structure proxy wrappers don't have __dict__.
pass
# Use MRO so that if a parent class has `_serialize_to_tensors`, but the
# object class has not yet been migrated, we'll continue to use the obj
# class's `_gather_saveables_for_checkpoint` method.
for t in type(obj).mro():
if t is base_delegate.DelegatingTrackableMixin:
# DelegatingTrackableMixin always delegates "_serialize_to_tensors"
# to its inner `trackable`, so we check whether the inner trackable
# has `_serialize_to_tensor`.
return trackable_has_serialize_to_tensor(obj._trackable) # pylint: disable=protected-access
if t is trackable.Trackable:
# Base case. Return False since _serialize_to_tensors will raise a
# NotImplemented Error.
return False
elif "_serialize_to_tensors" in t.__dict__:
return True
elif "_gather_saveables_for_checkpoint" in t.__dict__:
return False
return False
def _convert_to_string(x):
return compat.as_str(tensor_util.constant_value(x))
| _PythonStringStateSaveable |
python | pandas-dev__pandas | pandas/tests/io/excel/test_readers.py | {
"start": 52767,
"end": 63079
} | class ____:
def test_raises_bytes_input(self, engine, read_ext):
# GH 53830
msg = "Expected file path name or file-like object"
with pytest.raises(TypeError, match=msg):
with open("test1" + read_ext, "rb") as f:
pd.read_excel(f.read(), engine=engine)
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for ExcelFile objects.
"""
func = partial(pd.ExcelFile, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "ExcelFile", func)
def test_engine_used(self, read_ext, engine):
expected_defaults = {
"xlsx": "openpyxl",
"xlsm": "openpyxl",
"xlsb": "pyxlsb",
"xls": "xlrd",
"ods": "odf",
}
with pd.ExcelFile("test1" + read_ext) as excel:
result = excel.engine
if engine is not None:
expected = engine
else:
expected = expected_defaults[read_ext[1:]]
assert result == expected
def test_excel_passes_na(self, read_ext):
with pd.ExcelFile("test4" + read_ext) as excel:
parsed = pd.read_excel(
excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"]
)
expected = DataFrame(
[["NA"], [1], ["NA"], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
with pd.ExcelFile("test4" + read_ext) as excel:
parsed = pd.read_excel(
excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"]
)
expected = DataFrame(
[[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
# 13967
with pd.ExcelFile("test5" + read_ext) as excel:
parsed = pd.read_excel(
excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"]
)
expected = DataFrame(
[["1.#QNAN"], [1], ["nan"], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
with pd.ExcelFile("test5" + read_ext) as excel:
parsed = pd.read_excel(
excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"]
)
expected = DataFrame(
[[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
@pytest.mark.parametrize("na_filter", [None, True, False])
def test_excel_passes_na_filter(self, read_ext, na_filter):
# gh-25453
kwargs = {}
if na_filter is not None:
kwargs["na_filter"] = na_filter
with pd.ExcelFile("test5" + read_ext) as excel:
parsed = pd.read_excel(
excel,
sheet_name="Sheet1",
keep_default_na=True,
na_values=["apple"],
**kwargs,
)
if na_filter is False:
expected = [["1.#QNAN"], [1], ["nan"], ["apple"], ["rabbit"]]
else:
expected = [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]]
expected = DataFrame(expected, columns=["Test"])
tm.assert_frame_equal(parsed, expected)
def test_excel_table_sheet_by_index(self, request, engine, read_ext, df_ref):
xfail_datetimes_with_pyxlsb(engine, request)
expected = df_ref
adjust_expected(expected, read_ext, engine)
with pd.ExcelFile("test1" + read_ext) as excel:
df1 = pd.read_excel(excel, sheet_name=0, index_col=0)
df2 = pd.read_excel(excel, sheet_name=1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, expected)
tm.assert_frame_equal(df2, expected)
with pd.ExcelFile("test1" + read_ext) as excel:
df1 = excel.parse(0, index_col=0)
df2 = excel.parse(1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, expected)
tm.assert_frame_equal(df2, expected)
with pd.ExcelFile("test1" + read_ext) as excel:
df3 = pd.read_excel(excel, sheet_name=0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
with pd.ExcelFile("test1" + read_ext) as excel:
df3 = excel.parse(0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_sheet_name(self, request, engine, read_ext, df_ref):
xfail_datetimes_with_pyxlsb(engine, request)
expected = df_ref
adjust_expected(expected, read_ext, engine)
filename = "test1"
sheet_name = "Sheet1"
with pd.ExcelFile(filename + read_ext) as excel:
df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc
with pd.ExcelFile(filename + read_ext) as excel:
df2_parse = excel.parse(index_col=0, sheet_name=sheet_name)
tm.assert_frame_equal(df1_parse, expected)
tm.assert_frame_equal(df2_parse, expected)
@pytest.mark.parametrize(
"sheet_name",
[3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]],
)
def test_bad_sheetname_raises(self, read_ext, sheet_name):
# GH 39250
msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found"
with pytest.raises(ValueError, match=msg):
with pd.ExcelFile("blank" + read_ext) as excel:
excel.parse(sheet_name=sheet_name)
def test_excel_read_buffer(self, engine, read_ext):
pth = "test1" + read_ext
expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0, engine=engine)
with open(pth, "rb") as f:
with pd.ExcelFile(f) as xls:
actual = pd.read_excel(xls, sheet_name="Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_reader_closes_file(self, engine, read_ext):
with open("test1" + read_ext, "rb") as f:
with pd.ExcelFile(f) as xlsx:
# parses okay
pd.read_excel(xlsx, sheet_name="Sheet1", index_col=0, engine=engine)
assert f.closed
def test_conflicting_excel_engines(self, read_ext):
# GH 26566
msg = "Engine should not be specified when passing an ExcelFile"
with pd.ExcelFile("test1" + read_ext) as xl:
with pytest.raises(ValueError, match=msg):
pd.read_excel(xl, engine="foo")
def test_excel_read_binary(self, engine, read_ext):
# GH 15914
expected = pd.read_excel("test1" + read_ext, engine=engine)
with open("test1" + read_ext, "rb") as f:
data = f.read()
actual = pd.read_excel(BytesIO(data), engine=engine)
tm.assert_frame_equal(expected, actual)
def test_excel_read_binary_via_read_excel(self, read_ext, engine):
# GH 38424
with open("test1" + read_ext, "rb") as f:
result = pd.read_excel(f, engine=engine)
expected = pd.read_excel("test1" + read_ext, engine=engine)
tm.assert_frame_equal(result, expected)
def test_read_excel_header_index_out_of_range(self, engine):
# GH#43143
with open("df_header_oob.xlsx", "rb") as f:
with pytest.raises(ValueError, match="exceeds maximum"):
pd.read_excel(f, header=[0, 1])
@pytest.mark.parametrize("filename", ["df_empty.xlsx", "df_equals.xlsx"])
def test_header_with_index_col(self, filename):
# GH 33476
idx = Index(["Z"], name="I2")
cols = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"])
expected = DataFrame([[1, 3]], index=idx, columns=cols, dtype="int64")
result = pd.read_excel(
filename, sheet_name="Sheet1", index_col=0, header=[0, 1]
)
tm.assert_frame_equal(expected, result)
def test_read_datetime_multiindex(self, request, engine, read_ext):
# GH 34748
xfail_datetimes_with_pyxlsb(engine, request)
f = "test_datetime_mi" + read_ext
with pd.ExcelFile(f) as excel:
actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine)
unit = get_exp_unit(read_ext, engine)
dti = pd.DatetimeIndex(["2020-02-29", "2020-03-01"], dtype=f"M8[{unit}]")
expected_column_index = MultiIndex.from_arrays(
[dti[:1], dti[1:]],
names=[
dti[0].to_pydatetime(),
dti[1].to_pydatetime(),
],
)
expected = DataFrame([], index=[], columns=expected_column_index)
tm.assert_frame_equal(expected, actual)
def test_engine_invalid_option(self, read_ext):
# read_ext includes the '.' hence the weird formatting
with pytest.raises(ValueError, match="Value must be one of *"):
with pd.option_context(f"io.excel{read_ext}.reader", "abc"):
pass
def test_ignore_chartsheets(self, request, engine, read_ext):
# GH 41448
if read_ext == ".ods":
pytest.skip("chartsheets do not exist in the ODF format")
if engine == "pyxlsb":
request.applymarker(
pytest.mark.xfail(
reason="pyxlsb can't distinguish chartsheets from worksheets"
)
)
with pd.ExcelFile("chartsheet" + read_ext) as excel:
assert excel.sheet_names == ["Sheet1"]
def test_corrupt_files_closed(self, engine, tmp_excel):
# GH41778
errors = (BadZipFile,)
if engine is None:
pytest.skip(f"Invalid test for engine={engine}")
elif engine == "xlrd":
import xlrd
errors = (BadZipFile, xlrd.biffh.XLRDError)
elif engine == "calamine":
from python_calamine import CalamineError
errors = (CalamineError,)
Path(tmp_excel).write_text("corrupt", encoding="utf-8")
with tm.assert_produces_warning(False):
try:
pd.ExcelFile(tmp_excel, engine=engine)
except errors:
pass
| TestExcelFileRead |
python | tensorflow__tensorflow | tensorflow/tools/proto_splitter/testdata/many_field_gen.py | {
"start": 1344,
"end": 3112
} | class ____(split.ComposableSplitter):
"""Splitter for ManyField proto."""
def build_chunks(self):
self.add_chunk(
self._proto.field_one,
[
test_message_pb2.ManyFields.DESCRIPTOR.fields_by_name[
"field_one"
].number
],
)
self._proto.ClearField("field_one")
for map_key, map_value in self._proto.nested_map_bool.items():
self.add_chunk(
map_value,
[
test_message_pb2.ManyFields.DESCRIPTOR.fields_by_name[
"nested_map_bool"
].number,
map_key,
],
)
self._proto.ClearField("nested_map_bool")
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
proto = test_message_pb2.ManyFields(
field_one=test_message_pb2.ManyFields(
repeated_field=[
test_message_pb2.ManyFields(),
test_message_pb2.ManyFields(
string_field="inner_inner_string",
map_field_uint32={
324: "map_value_324",
543: "map_value_543",
},
),
]
),
map_field_int64={
-1345: "map_value_-1345",
},
nested_map_bool={
True: test_message_pb2.ManyFields(string_field="string_true"),
False: test_message_pb2.ManyFields(string_field="string_false"),
},
)
file_io.write_string_to_file(
os.path.join(SPLITTER_TESTDATA_PATH.value, "many-field.pbtxt"), str(proto)
)
ManyFieldSplitter(proto).write(
os.path.join(SPLITTER_TESTDATA_PATH.value, "many-field")
)
if __name__ == "__main__":
app.run(main)
| ManyFieldSplitter |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 1005668,
"end": 1015412
} | class ____(FieldChannelMixin, core.SecondaryFieldDef):
r"""
XError2 schema wrapper.
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "xError2"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> XError2: ...
@overload
def aggregate(
self, *, argmax: Optional[str | SchemaBase] = Undefined
) -> XError2: ...
@overload
def aggregate(
self, *, argmin: Optional[str | SchemaBase] = Undefined
) -> XError2: ...
@overload
def bandPosition(self, _: float, /) -> XError2: ...
@overload
def bin(self, _: None, /) -> XError2: ...
@overload
def field(self, _: str | RepeatRef, /) -> XError2: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> XError2: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> XError2: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> XError2: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> XError2: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
timeUnit=timeUnit,
title=title,
**kwds,
)
@with_property_setters
| XError2 |
python | doocs__leetcode | solution/0100-0199/0100.Same Tree/Solution.py | {
"start": 192,
"end": 494
} | class ____:
def isSameTree(self, p: Optional[TreeNode], q: Optional[TreeNode]) -> bool:
if p == q:
return True
if p is None or q is None or p.val != q.val:
return False
return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
| Solution |
python | arrow-py__arrow | arrow/locales.py | {
"start": 126374,
"end": 128039
} | class ____(Locale):
names = ["ta", "ta-in", "ta-lk"]
past = "{0} நேரத்திற்கு முன்பு"
future = "இல் {0}"
timeframes = {
"now": "இப்போது",
"second": "ஒரு இரண்டாவது",
"seconds": "{0} விநாடிகள்",
"minute": "ஒரு நிமிடம்",
"minutes": "{0} நிமிடங்கள்",
"hour": "ஒரு மணி",
"hours": "{0} மணிநேரம்",
"day": "ஒரு நாள்",
"days": "{0} நாட்கள்",
"week": "ஒரு வாரம்",
"weeks": "{0} வாரங்கள்",
"month": "ஒரு மாதம்",
"months": "{0} மாதங்கள்",
"year": "ஒரு ஆண்டு",
"years": "{0} ஆண்டுகள்",
}
month_names = [
"",
"சித்திரை",
"வைகாசி",
"ஆனி",
"ஆடி",
"ஆவணி",
"புரட்டாசி",
"ஐப்பசி",
"கார்த்திகை",
"மார்கழி",
"தை",
"மாசி",
"பங்குனி",
]
month_abbreviations = [
"",
"ஜன",
"பிப்",
"மார்",
"ஏப்",
"மே",
"ஜூன்",
"ஜூலை",
"ஆக",
"செப்",
"அக்",
"நவ",
"டிச",
]
day_names = [
"",
"திங்கட்கிழமை",
"செவ்வாய்க்கிழமை",
"புதன்கிழமை",
"வியாழக்கிழமை",
"வெள்ளிக்கிழமை",
"சனிக்கிழமை",
"ஞாயிற்றுக்கிழமை",
]
day_abbreviations = [
"",
"திங்கட்",
"செவ்வாய்",
"புதன்",
"வியாழன்",
"வெள்ளி",
"சனி",
"ஞாயிறு",
]
def _ordinal_number(self, n: int) -> str:
if n == 1:
return f"{n}வது"
elif n >= 0:
return f"{n}ஆம்"
else:
return ""
| TamilLocale |
python | jazzband__django-oauth-toolkit | oauth2_provider/generators.py | {
"start": 175,
"end": 348
} | class ____:
"""
All generators should extend this class overriding `.hash()` method.
"""
def hash(self):
raise NotImplementedError()
| BaseHashGenerator |
python | donnemartin__system-design-primer | solutions/system_design/pastebin/pastebin.py | {
"start": 55,
"end": 1055
} | class ____(MRJob):
def extract_url(self, line):
"""Extract the generated url from the log line."""
pass
def extract_year_month(self, line):
"""Return the year and month portions of the timestamp."""
pass
def mapper(self, _, line):
"""Parse each log line, extract and transform relevant lines.
Emit key value pairs of the form:
(2016-01, url0), 1
(2016-01, url0), 1
(2016-01, url1), 1
"""
url = self.extract_url(line)
period = self.extract_year_month(line)
yield (period, url), 1
def reducer(self, key, values):
"""Sum values for each key.
(2016-01, url0), 2
(2016-01, url1), 1
"""
yield key, sum(values)
def steps(self):
"""Run the map and reduce steps."""
return [
self.mr(mapper=self.mapper,
reducer=self.reducer)
]
if __name__ == '__main__':
HitCounts.run()
| HitCounts |
python | openai__gym | tests/wrappers/test_video_recorder.py | {
"start": 356,
"end": 2910
} | class ____(gym.Env):
metadata = {"render_modes": [None]}
def __init__(self, render_mode=None):
self.render_mode = render_mode
def render(self):
pass
def test_record_simple():
env = gym.make(
"CartPole-v1", render_mode="rgb_array_list", disable_env_checker=True
)
rec = VideoRecorder(env)
env.reset()
rec.capture_frame()
rec.close()
assert not rec.broken
assert os.path.exists(rec.path)
f = open(rec.path)
assert os.fstat(f.fileno()).st_size > 100
def test_autoclose():
def record():
env = gym.make(
"CartPole-v1", render_mode="rgb_array_list", disable_env_checker=True
)
rec = VideoRecorder(env)
env.reset()
rec.capture_frame()
rec_path = rec.path
# The function ends without an explicit `rec.close()` call
# The Python interpreter will implicitly do `del rec` on garbage cleaning
return rec_path
rec_path = record()
gc.collect() # do explicit garbage collection for test
time.sleep(5) # wait for subprocess exiting
assert os.path.exists(rec_path)
f = open(rec_path)
assert os.fstat(f.fileno()).st_size > 100
def test_no_frames():
env = BrokenRecordableEnv()
rec = VideoRecorder(env)
rec.close()
assert rec.functional
assert not os.path.exists(rec.path)
def test_record_unrecordable_method():
with pytest.warns(
UserWarning,
match=re.escape(
"\x1b[33mWARN: Disabling video recorder because environment <UnrecordableEnv instance> was not initialized with any compatible video mode between `rgb_array` and `rgb_array_list`\x1b[0m"
),
):
env = UnrecordableEnv()
rec = VideoRecorder(env)
assert not rec.enabled
rec.close()
def test_record_breaking_render_method():
with pytest.warns(
UserWarning,
match=re.escape(
"Env returned None on `render()`. Disabling further rendering for video recorder by marking as disabled:"
),
):
env = BrokenRecordableEnv()
rec = VideoRecorder(env)
rec.capture_frame()
rec.close()
assert rec.broken
assert not os.path.exists(rec.path)
def test_text_envs():
env = gym.make(
"FrozenLake-v1", render_mode="rgb_array_list", disable_env_checker=True
)
video = VideoRecorder(env)
try:
env.reset()
video.capture_frame()
video.close()
finally:
os.remove(video.path)
| UnrecordableEnv |
python | sanic-org__sanic | sanic/signals.py | {
"start": 2955,
"end": 3054
} | class ____(Route):
"""A `Route` that is used to dispatch signals to handlers"""
@dataclass
| Signal |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_crossing06.py | {
"start": 315,
"end": 1396
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_crossing06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [72794880, 72796416]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_y_axis({"crossing": "min"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | faif__python-patterns | patterns/behavioral/chain_of_responsibility.py | {
"start": 1381,
"end": 1697
} | class ____(Handler):
"""Each handler can be different.
Be simple and static...
"""
@staticmethod
def check_range(request: int) -> Optional[bool]:
if 0 <= request < 10:
print(f"request {request} handled in handler 0")
return True
return None
| ConcreteHandler0 |
python | sphinx-doc__sphinx | doc/development/tutorials/examples/helloworld.py | {
"start": 433,
"end": 937
} | class ____(SphinxDirective):
"""A directive to say hello!"""
required_arguments = 1
def run(self) -> list[nodes.Node]:
paragraph_node = nodes.paragraph(text=f'hello {self.arguments[0]}!')
return [paragraph_node]
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_role('hello', HelloRole())
app.add_directive('hello', HelloDirective)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| HelloDirective |
python | kamyu104__LeetCode-Solutions | Python/goal-parser-interpretation.py | {
"start": 29,
"end": 525
} | class ____(object):
def interpret(self, command):
"""
:type command: str
:rtype: str
"""
result, i = [], 0
while i < len(command):
if command[i] == 'G':
result += ["G"]
i += 1
elif command[i] == '(' and command[i+1] == ')':
result += ["o"]
i += 2
else:
result += ["al"]
i += 4
return "".join(result)
| Solution |
python | getsentry__sentry | src/sentry_plugins/splunk/client.py | {
"start": 46,
"end": 639
} | class ____(ApiClient):
plugin_name = "splunk"
allow_redirects = False
metrics_prefix = "integrations.splunk"
def __init__(self, endpoint, token):
self.endpoint = endpoint
self.token = token
super().__init__(verify_ssl=False)
def request(self, data):
headers = {"Authorization": f"Splunk {self.token}"}
return self._request(
path=self.endpoint,
method="post",
data=data,
headers=headers,
json=True,
timeout=5,
allow_text=True,
)
| SplunkApiClient |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/errors.py | {
"start": 9186,
"end": 9493
} | class ____(BaseException):
"""Raised when a test should stop running and return control to
the Hypothesis engine, which should then continue normally.
"""
def __init__(self, testcounter: int) -> None:
super().__init__(repr(testcounter))
self.testcounter = testcounter
| StopTest |
python | pandas-dev__pandas | asv_bench/benchmarks/inference.py | {
"start": 443,
"end": 916
} | class ____:
def setup(self):
N = 10000
self.float = Series(np.random.randn(N))
self.numstr = self.float.astype("str")
self.str = Series(Index([f"i-{i}" for i in range(N)], dtype=object))
def time_from_float(self):
to_numeric(self.float, errors="coerce")
def time_from_numeric_str(self):
to_numeric(self.numstr, errors="coerce")
def time_from_str(self):
to_numeric(self.str, errors="coerce")
| ToNumeric |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 45593,
"end": 47494
} | class ____(ASTExpression):
def __init__(
self, ifExpr: ASTExpression, thenExpr: ASTExpression, elseExpr: ASTExpression
) -> None:
self.ifExpr = ifExpr
self.thenExpr = thenExpr
self.elseExpr = elseExpr
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTConditionalExpr):
return NotImplemented
return (
self.ifExpr == other.ifExpr
and self.thenExpr == other.thenExpr
and self.elseExpr == other.elseExpr
)
def __hash__(self) -> int:
return hash((self.ifExpr, self.thenExpr, self.elseExpr))
def _stringify(self, transform: StringifyTransform) -> str:
res: list[str] = []
res.extend((
transform(self.ifExpr),
' ? ',
transform(self.thenExpr),
' : ',
transform(self.elseExpr),
))
return ''.join(res)
def get_id(self, version: int) -> str:
assert version >= 2
res: list[str] = []
res.extend((
_id_operator_v2['?'],
self.ifExpr.get_id(version),
self.thenExpr.get_id(version),
self.elseExpr.get_id(version),
))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
self.ifExpr.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_operator('?', '?')
signode += addnodes.desc_sig_space()
self.thenExpr.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_operator(':', ':')
signode += addnodes.desc_sig_space()
self.elseExpr.describe_signature(signode, mode, env, symbol)
| ASTConditionalExpr |
python | kamyu104__LeetCode-Solutions | Python/binary-search.py | {
"start": 32,
"end": 494
} | class ____(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
left, right = 0, len(nums)-1
while left <= right:
mid = left + (right-left)//2
if nums[mid] > target:
right = mid-1
elif nums[mid] < target:
left = mid+1
else:
return mid
return -1
| Solution |
python | rapidsai__cudf | python/cudf/cudf/core/join/join.py | {
"start": 25504,
"end": 26381
} | class ____(Merge):
@staticmethod
@acquire_spill_lock()
def _joiner( # type: ignore[override]
lhs: list[ColumnBase],
rhs: list[ColumnBase],
how: str,
) -> tuple[ColumnBase, None]:
if (
join_func := getattr(
plc.join, f"{how.replace('left', 'left_')}_join", None
)
) is None:
raise ValueError(f"Invalid join type {how}")
return ColumnBase.from_pylibcudf(
join_func(
plc.Table([col.to_pylibcudf(mode="read") for col in lhs]),
plc.Table([col.to_pylibcudf(mode="read") for col in rhs]),
plc.types.NullEquality.EQUAL,
)
), None
def _merge_results(self, lhs: DataFrame, rhs: DataFrame):
# semi-join result includes only lhs columns
return lhs._data, lhs.index
| MergeSemi |
python | lazyprogrammer__machine_learning_examples | rnn_class/batch_units.py | {
"start": 3726,
"end": 6828
} | class ____:
def __init__(self, Mi, Mo, activation):
self.Mi = Mi
self.Mo = Mo
self.f = activation
# numpy init
Wxi = init_weight(Mi, Mo)
Whi = init_weight(Mo, Mo)
Wci = init_weight(Mo, Mo)
bi = np.zeros(Mo)
Wxf = init_weight(Mi, Mo)
Whf = init_weight(Mo, Mo)
Wcf = init_weight(Mo, Mo)
bf = np.zeros(Mo)
Wxc = init_weight(Mi, Mo)
Whc = init_weight(Mo, Mo)
bc = np.zeros(Mo)
Wxo = init_weight(Mi, Mo)
Who = init_weight(Mo, Mo)
Wco = init_weight(Mo, Mo)
bo = np.zeros(Mo)
c0 = np.zeros(Mo)
h0 = np.zeros(Mo)
# theano vars
self.Wxi = theano.shared(Wxi)
self.Whi = theano.shared(Whi)
self.Wci = theano.shared(Wci)
self.bi = theano.shared(bi)
self.Wxf = theano.shared(Wxf)
self.Whf = theano.shared(Whf)
self.Wcf = theano.shared(Wcf)
self.bf = theano.shared(bf)
self.Wxc = theano.shared(Wxc)
self.Whc = theano.shared(Whc)
self.bc = theano.shared(bc)
self.Wxo = theano.shared(Wxo)
self.Who = theano.shared(Who)
self.Wco = theano.shared(Wco)
self.bo = theano.shared(bo)
self.c0 = theano.shared(c0)
self.h0 = theano.shared(h0)
self.params = [
self.Wxi,
self.Whi,
self.Wci,
self.bi,
self.Wxf,
self.Whf,
self.Wcf,
self.bf,
self.Wxc,
self.Whc,
self.bc,
self.Wxo,
self.Who,
self.Wco,
self.bo,
self.c0,
self.h0,
]
def get_ht_ct(self, xWxi_t, xWxf_t, xWxc_t, xWxo_t, h_t1, c_t1):
i_t = T.nnet.sigmoid(xWxi_t + h_t1.dot(self.Whi) + c_t1.dot(self.Wci) + self.bi)
f_t = T.nnet.sigmoid(xWxf_t + h_t1.dot(self.Whf) + c_t1.dot(self.Wcf) + self.bf)
c_t = f_t * c_t1 + i_t * T.tanh(xWxc_t + h_t1.dot(self.Whc) + self.bc)
o_t = T.nnet.sigmoid(xWxo_t + h_t1.dot(self.Who) + c_t.dot(self.Wco) + self.bo)
h_t = o_t * T.tanh(c_t)
return h_t, c_t
def recurrence(self, xWxi_t, xWxf_t, xWxc_t, xWxo_t, is_start, h_t1, c_t1, h0, c0):
h_t_c_t = T.switch(
T.eq(is_start, 1),
self.get_ht_ct(xWxi_t, xWxf_t, xWxc_t, xWxo_t, h0, c0),
self.get_ht_ct(xWxi_t, xWxf_t, xWxc_t, xWxo_t, h_t1, c_t1)
)
return h_t_c_t[0], h_t_c_t[1]
def output(self, Xflat, startPoints):
# Xflat should be (NT, D)
# calculate X after multiplying input weights
XWxi = Xflat.dot(self.Wxi)
XWxf = Xflat.dot(self.Wxf)
XWxc = Xflat.dot(self.Wxc)
XWxo = Xflat.dot(self.Wxo)
[h, c], _ = theano.scan(
fn=self.recurrence,
sequences=[XWxi, XWxf, XWxc, XWxo, startPoints],
outputs_info=[self.h0, self.c0],
non_sequences=[self.h0, self.c0],
n_steps=Xflat.shape[0],
)
return h
| LSTM |
python | mlflow__mlflow | mlflow/utils/thread_utils.py | {
"start": 52,
"end": 2208
} | class ____:
"""
Class for creating a thread local variable.
Args:
default_factory: A function used to create the default value
reset_in_subprocess: Indicating whether the variable is reset in subprocess.
"""
def __init__(self, default_factory, reset_in_subprocess=True):
self.reset_in_subprocess = reset_in_subprocess
self.default_factory = default_factory
self.thread_local = threading.local()
# The `__global_thread_values` attribute saves all thread-local values,
# the key is thread ID.
self.__global_thread_values: dict[int, Any] = {}
def get(self):
"""
Get the thread-local variable value.
If the thread-local variable is not set, return the provided `init_value` value.
If `get` is called in a forked subprocess and `reset_in_subprocess` is True,
return the provided `init_value` value
"""
if hasattr(self.thread_local, "value"):
value, pid = self.thread_local.value
if self.reset_in_subprocess and pid != os.getpid():
# `get` is called in a forked subprocess, reset it.
init_value = self.default_factory()
self.set(init_value)
return init_value
else:
return value
else:
init_value = self.default_factory()
self.set(init_value)
return init_value
def set(self, value):
"""
Set a value for the thread-local variable.
"""
self.thread_local.value = (value, os.getpid())
self.__global_thread_values[threading.get_ident()] = value
def get_all_thread_values(self) -> dict[int, Any]:
"""
Return all thread values as a dict, dict key is the thread ID.
"""
return self.__global_thread_values.copy()
def reset(self):
"""
Reset the thread-local variable.
Clear the global thread values and create a new thread local variable.
"""
self.__global_thread_values.clear()
self.thread_local = threading.local()
| ThreadLocalVariable |
python | joke2k__faker | faker/providers/person/yo_NG/__init__.py | {
"start": 44,
"end": 7239
} | class ____(PersonProvider):
"""
A Faker provider for generating fake Zulu names in South Africa.
"""
formats = (
"{{first_name_male}} {{last_name_male}}",
"{{first_name_male}} {{last_name_male}}",
"{{first_name_male}} {{last_name_male}}",
"{{first_name_male}} {{last_name_male}}",
"{{first_name_male}} {{last_name_male}} {{last_name_male}}",
"{{first_name_female}} {{last_name_female}}",
"{{first_name_female}} {{last_name_female}}",
"{{first_name_female}} {{last_name_female}}",
"{{first_name_female}} {{last_name_female}}",
"{{first_name_female}} {{last_name_female}} {{last_name_female}}",
"{{prefix_male}} {{first_name_male}} {{last_name_male}}",
"{{prefix_female}} {{first_name_female}} {{last_name_female}}",
"{{prefix_male}} {{first_name_male}} {{last_name_male}}",
"{{prefix_female}} {{first_name_female}} {{last_name_female}}",
)
# first names sourced from:
# 1. https://github.com/faker-js/faker/blob/next/src/locales/yo_NG/person/last_name.ts
# 2. https://github.com/faker-js/faker/blob/next/src/locales/yo_NG/person/male_first_name.ts
first_names_male = (
"Abayomi",
"Abiodun",
"Abiona",
"Adebiyi",
"Adebowale",
"Adedayo",
"Adedeji",
"Adekitan",
"Adekola",
"Adekunle",
"Adeleke",
"Adeniyi",
"Adeolu",
"Adeoti",
"Aderopo",
"Adeshina",
"Adesoji",
"Adetayo",
"Adeyi",
"Adigun",
"Afolarin",
"Ajala",
"Ajani",
"Akanmu",
"Akinkunmi",
"Akinlabi",
"Akinwale",
"Alade",
"Alamu",
"Anjolaoluwa",
"Ayinde",
"Ayodeji",
"Ayodele",
"Babasola",
"Babatunji",
"Babawale",
"Damife",
"Demilade",
"Durodola",
"Ekundayo",
"Esupofo",
"Folu",
"Gbadebo",
"Gbolahan",
"Gbowoade",
"Ibidapo",
"Ige",
"Ikeoluwa",
"Inioluwa",
"Iseoluwa",
"Ishola",
"Juwon",
"Keji",
"Kolawole",
"Korede",
"Leke",
"Lere",
"Niyilolawa",
"Oba",
"ObaniJesu",
"Ogooluwa",
"Oke",
"Oladare",
"Oladimeji",
"Olakunle",
"Olanrewaju",
"Olansile",
"Olumorotimi",
"Oluwafemi",
"Oluwagbemiga",
"Oluwamumibori",
"Oluwamuyiwa",
"Oluwasanmi",
"Oluwasegun",
"Oluwole",
"Omobobola",
"Omotayo",
"Osunleke",
"Seye",
"Shekoni",
"Sijuade",
"Tade",
"Temidayo",
"Toki",
"Tokunbo",
"Tomori",
)
first_names_female = (
"Aanuoluwapo",
"Abebi",
"Abeni",
"Abosede",
"Adebukola",
"Adenike",
"Adepeju",
"Adesewa",
"Adesua",
"Adetoke",
"Adetoun",
"Adunni",
"Ajoke",
"Amoke",
"Amope",
"Arike",
"Arinola",
"Asake",
"Atinuke",
"Awero",
"Ayinke",
"Ayoka",
"Bolatito",
"Boluwatife",
"Bunmi",
"Doyinsola",
"Eniola",
"Ewatomi",
"Fadekemi",
"Faderera",
"Fehintola",
"Fibikemi",
"Fikayomi",
"Folashade",
"Ibironke",
"Iretioluwa",
"Iyabode",
"Iyadunni",
"Kikelomo",
"Modupe",
"Mofifoluwa",
"Mojisola",
"Mojisoluwa",
"Moradeke",
"Morayo",
"Morenike",
"Morolake",
"Mosinmileoluwa",
"Mosunmola",
"Motunrayo",
"Moyosore",
"Ninioluwa",
"Olajumoke",
"Olasunmbo",
"Ololade",
"Olufunke",
"Olufunmilayo",
"Oluwakemi",
"Omobolanle",
"Omodunni",
"Omolabake",
"Omolara",
"Omosalewa",
"Omotara",
"Omotola",
"Omotoun",
"Omowumi",
"Oreofe",
"Oyenike",
"Oyindasola",
"Radeke",
"Ronke",
"Segilola",
"Similoluwa",
"Simisola",
"Sowande",
"Subomi",
"Titilayo",
"Tolulope",
"Toluwanimi",
"Wuraola",
"Yejide",
"Yetunde",
"Yewande",
)
first_names = first_names_male + first_names_female
# last names sourced from :
# 1. https://github.com/faker-js/faker/blob/next/src/locales/yo_NG/person/last_name.ts
last_names_male = (
"Adebisi",
"Adegbite",
"Adegoke",
"Adekunle",
"Adelakun",
"Adeleke",
"Adelusi",
"Ademiluyi",
"Aderibigbe",
"Aderogba",
"Adesiyan",
"Adeyemo",
"Adisa",
"Afolabi",
"Afolayan",
"Afonja",
"Ajao",
"Ajayi",
"Ajewole",
"Akinrinola",
"Alabi",
"Aloba",
"Awodiran",
"Awolowo",
"Ayandokun",
"Ayoola",
"Babtunde",
"Bakare",
"Balogun",
"Bamidele",
"Bamiloye",
"Edun",
"Fadipe",
"Fagunwa",
"Fajimi",
"Falabi",
"Faleti",
"Faloye",
"Fasasi",
"Ibikunle",
"Ilori",
"Ilupeju",
"Iyanda",
"Jaiyeola",
"Kolade",
"Kosoko",
"Koya",
"Makinde",
"Makinwa",
"Morawo",
"Ninalowo",
"Odetola",
"Odunsi",
"Ogindan",
"Oginni",
"Ogulana",
"Ogunbamigbe",
"Ogunbiyi",
"Ogunbo",
"Ogunde",
"Ogunwobi",
"Ogunyeye",
"Ojo",
"Ojua",
"Olabode",
"Oladipupo",
"Olaiya",
"Olasupo",
"Olowokeere",
"Oloyede",
"Olubode",
"Olugbayila",
"Olujimi",
"Olukotun",
"Olukunga",
"Olusanya",
"Oluwagbemi",
"Omidina",
"Omojola",
"Omotoso",
"Oparinde",
"Oshin",
"Osuntokun",
"Owokoniran",
"Owolabi",
"Owoyemi",
"Oyadiran",
"Oyaifo",
"Oyeniyi",
"Oyetoro",
"Oyeyemi",
"Oyinlola",
"Paimo",
"Salako",
"Salami",
"Shekoni",
"Sobowale",
"Soyinka",
)
# last names are not sex dependant
last_names_female = last_names_male
last_names = last_names_male + last_names_female
prefixes_female = (
"Mrs.",
"Ms.",
"Dr.",
"Alhaja",
"Mama",
"Iya",
"Madam",
"Chief",
"Lady",
"Erelu",
"Olori",
"Princess",
)
prefixes_male = (
"Mr.",
"Dr.",
"Alhaji",
"Baba",
"Ogbeni",
"Oloye",
"Chief",
"Prince",
"Oba",
"Kabiyesi",
)
| Provider |
python | pytest-dev__pluggy | src/pluggy/_result.py | {
"start": 404,
"end": 484
} | class ____(Exception):
"""Hook was called incorrectly."""
@final
| HookCallError |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataproc.py | {
"start": 108057,
"end": 122875
} | class ____(GoogleCloudBaseOperator):
"""
Create a batch workload.
:param project_id: Optional. The ID of the Google Cloud project that the cluster belongs to. (templated)
:param region: Required. The Cloud Dataproc region in which to handle the request. (templated)
:param batch: Required. The batch to create. (templated)
:param batch_id: Required. The ID to use for the batch, which will become the final component
of the batch's resource name.
This value must be 4-63 characters. Valid characters are /[a-z][0-9]-/. (templated)
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``CreateBatchRequest`` requests with the same id, then the second request will be ignored and
the first ``google.longrunning.Operation`` created and stored in the backend is returned.
:param num_retries_if_resource_is_not_ready: Optional. The number of retry for cluster creation request
when resource is not ready error appears.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param result_retry: Result retry object used to retry requests. Is used to decrease delay between
executing chained tasks in a DAG by specifying exact amount of seconds for executing.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag to return after creating batch to the Dataproc API.
This is useful for creating long-running batch and
waiting on them asynchronously using the DataprocBatchSensor
:param deferrable: Run operator in the deferrable mode.
:param polling_interval_seconds: Time (seconds) to wait between calls to check the run status.
"""
template_fields: Sequence[str] = (
"project_id",
"batch",
"batch_id",
"region",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataprocBatchLink(),)
def __init__(
self,
*,
region: str,
project_id: str = PROVIDE_PROJECT_ID,
batch: dict | Batch,
batch_id: str | None = None,
request_id: str | None = None,
num_retries_if_resource_is_not_ready: int = 0,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
result_retry: AsyncRetry | _MethodDefault | Retry = DEFAULT,
asynchronous: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 5,
openlineage_inject_parent_job_info: bool = conf.getboolean(
"openlineage", "spark_inject_parent_job_info", fallback=False
),
openlineage_inject_transport_info: bool = conf.getboolean(
"openlineage", "spark_inject_transport_info", fallback=False
),
**kwargs,
):
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.region = region
self.project_id = project_id
self.batch = batch
self.batch_id = batch_id
self.request_id = request_id
self.num_retries_if_resource_is_not_ready = num_retries_if_resource_is_not_ready
self.retry = retry
self.result_retry = result_retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.operation: operation.Operation | None = None
self.asynchronous = asynchronous
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
self.openlineage_inject_parent_job_info = openlineage_inject_parent_job_info
self.openlineage_inject_transport_info = openlineage_inject_transport_info
def execute(self, context: Context):
if self.asynchronous and self.deferrable:
raise AirflowException(
"Both asynchronous and deferrable parameters were passed. Please, provide only one."
)
batch_id: str = ""
if self.batch_id:
batch_id = self.batch_id
self.log.info("Starting batch %s", batch_id)
# Persist the link earlier so users can observe the progress
DataprocBatchLink.persist(
context=context,
project_id=self.project_id,
region=self.region,
batch_id=self.batch_id,
)
else:
self.log.info("Starting batch. The batch ID will be generated since it was not provided.")
if self.openlineage_inject_parent_job_info or self.openlineage_inject_transport_info:
self.log.info("Automatic injection of OpenLineage information into Spark properties is enabled.")
self._inject_openlineage_properties_into_dataproc_batch(context)
self.__update_batch_labels()
try:
self.operation = self.hook.create_batch(
region=self.region,
project_id=self.project_id,
batch=self.batch,
batch_id=self.batch_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info("Batch with given id already exists.")
self.log.info("Attaching to the job %s if it is still running.", batch_id)
else:
if self.operation and self.operation.metadata:
batch_id = self.operation.metadata.batch.split("/")[-1]
else:
raise AirflowException("Operation metadata is not available.")
self.log.info("The batch %s was created.", batch_id)
DataprocBatchLink.persist(
context=context,
project_id=self.project_id,
region=self.region,
batch_id=batch_id,
)
if self.asynchronous:
batch = self.hook.get_batch(
batch_id=batch_id,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("The batch %s was created asynchronously. Exiting.", batch_id)
return Batch.to_dict(batch)
if self.deferrable:
self.defer(
trigger=DataprocBatchTrigger(
batch_id=batch_id,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
),
method_name="execute_complete",
)
self.log.info("Waiting for the completion of batch job %s", batch_id)
batch = self.hook.wait_for_batch(
batch_id=batch_id,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
if self.num_retries_if_resource_is_not_ready and self.hook.check_error_for_resource_is_not_ready_msg(
batch.state_message
):
attempt = self.num_retries_if_resource_is_not_ready
while attempt > 0:
attempt -= 1
batch, batch_id = self.retry_batch_creation(batch_id)
if not self.hook.check_error_for_resource_is_not_ready_msg(batch.state_message):
break
self.handle_batch_status(context, batch.state.name, batch_id, batch.state_message)
return Batch.to_dict(batch)
@cached_property
def hook(self) -> DataprocHook:
return DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
def execute_complete(self, context, event=None) -> None:
"""
Act as a callback for when the trigger fires.
This returns immediately. It relies on trigger to throw an exception,
otherwise it assumes execution was successful.
"""
if event is None:
raise AirflowException("Batch failed.")
state = event["batch_state"]
batch_id = event["batch_id"]
self.handle_batch_status(context, state, batch_id, state_message=event["batch_state_message"])
def on_kill(self):
if self.operation:
self.operation.cancel()
def handle_batch_status(
self, context: Context, state: str, batch_id: str, state_message: str | None = None
) -> None:
# The existing batch may be a number of states other than 'SUCCEEDED'\
# wait_for_operation doesn't fail if the job is cancelled, so we will check for it here which also
# finds a cancelling|canceled|unspecified job from wait_for_batch or the deferred trigger
link = DATAPROC_BATCH_LINK.format(region=self.region, project_id=self.project_id, batch_id=batch_id)
if state == Batch.State.FAILED.name: # type: ignore
raise AirflowException(
f"Batch job {batch_id} failed with error: {state_message}.\nDriver logs: {link}"
)
if state in (Batch.State.CANCELLED.name, Batch.State.CANCELLING.name): # type: ignore
raise AirflowException(f"Batch job {batch_id} was cancelled.\nDriver logs: {link}")
if state == Batch.State.STATE_UNSPECIFIED.name: # type: ignore
raise AirflowException(f"Batch job {batch_id} unspecified.\nDriver logs: {link}")
self.log.info("Batch job %s completed.\nDriver logs: %s", batch_id, link)
def retry_batch_creation(
self,
previous_batch_id: str,
):
self.log.info("Retrying creation process for batch_id %s", self.batch_id)
self.log.info("Deleting previous failed Batch")
self.hook.delete_batch(
batch_id=previous_batch_id,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Starting a new creation for batch_id %s", self.batch_id)
try:
self.operation = self.hook.create_batch(
region=self.region,
project_id=self.project_id,
batch=self.batch,
batch_id=self.batch_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info("Batch with given id already exists.")
self.log.info("Attaching to the job %s if it is still running.", self.batch_id)
else:
if self.operation and self.operation.metadata:
batch_id = self.operation.metadata.batch.split("/")[-1]
self.log.info("The batch %s was created.", batch_id)
else:
raise AirflowException("Operation metadata is not available.")
self.log.info("Waiting for the completion of batch job %s", batch_id)
batch = self.hook.wait_for_batch(
batch_id=batch_id,
region=self.region,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return batch, batch_id
def _inject_openlineage_properties_into_dataproc_batch(self, context: Context) -> None:
try:
from airflow.providers.google.cloud.openlineage.utils import (
inject_openlineage_properties_into_dataproc_batch,
)
self.batch = inject_openlineage_properties_into_dataproc_batch(
batch=self.batch,
context=context,
inject_parent_job_info=self.openlineage_inject_parent_job_info,
inject_transport_info=self.openlineage_inject_transport_info,
)
except Exception as e:
self.log.warning(
"An error occurred while trying to inject OpenLineage information. "
"Dataproc batch has not been modified by OpenLineage.",
exc_info=e,
)
def __update_batch_labels(self):
dag_id = re.sub(r"[.\s]", "_", self.dag_id.lower())
task_id = re.sub(r"[.\s]", "_", self.task_id.lower())
labels_regex = re.compile(r"^[a-z][\w-]{0,62}$")
if not labels_regex.match(dag_id) or not labels_regex.match(task_id):
return
labels_limit = 32
new_labels = {"airflow-dag-id": dag_id, "airflow-task-id": task_id}
if self._dag:
dag_display_name = re.sub(r"[.\s]", "_", self._dag.dag_display_name.lower())
if labels_regex.match(dag_id):
new_labels["airflow-dag-display-name"] = dag_display_name
if isinstance(self.batch, Batch):
if len(self.batch.labels) + len(new_labels) <= labels_limit:
self.batch.labels.update(new_labels)
elif "labels" not in self.batch:
self.batch["labels"] = new_labels
elif isinstance(self.batch.get("labels"), dict):
if len(self.batch["labels"]) + len(new_labels) <= labels_limit:
self.batch["labels"].update(new_labels)
| DataprocCreateBatchOperator |
python | ray-project__ray | rllib/env/external/env_runner_server_for_external_inference.py | {
"start": 1196,
"end": 14138
} | class ____(EnvRunner, Checkpointable):
"""An EnvRunner communicating with an external env through a TCP socket.
This implementation assumes:
- Only one external client ever connects to this env runner.
- The external client owns the connector pipelines (env-to-module and module-to-env)
as well as the RLModule and thus performs inference locally. Samples are sent in
bulk as lists of RLlib episodes once a certain number of timesteps has been executed
on the client's side.
- A copy of the RLModule is kept at all times on this EnvRunner, but is never used
for inference, only as a weights container.
TODO (sven): The above might be inefficient as we have to store basically two
models, one in this EnvRunner, one in the env (as ONNX).
- As a consequence, there are no environment and no connectors on this env runner.
The external env is responsible for generating all the data to create episodes.
"""
@override(EnvRunner)
def __init__(self, *, config, **kwargs):
"""
Initializes an EnvRunnerServerForExternalInference instance.
Args:
config: The AlgorithmConfig to use for setup.
Keyword Args:
port: The base port number. The server socket is then actually bound to
`port` + self.worker_index.
"""
super().__init__(config=config, **kwargs)
self.worker_index: int = kwargs.get("worker_index", 0)
self._weights_seq_no = 0
# Build the module from its spec.
module_spec = self.config.get_rl_module_spec(
spaces=self.get_spaces(), inference_only=True
)
self.module = module_spec.build()
self.host = "localhost"
self.port = int(self.config.env_config.get("port", 5555)) + self.worker_index
self.server_socket = None
self.client_socket = None
self.address = None
self.metrics = MetricsLogger()
self._episode_chunks_to_return: Optional[List[SingleAgentEpisode]] = None
self._done_episodes_for_metrics: List[SingleAgentEpisode] = []
self._ongoing_episodes_for_metrics: DefaultDict[
EpisodeID, List[SingleAgentEpisode]
] = defaultdict(list)
self._sample_lock = threading.Lock()
self._on_policy_lock = threading.Lock()
self._blocked_on_state = False
# Start a background thread for client communication.
self.thread = threading.Thread(
target=self._client_message_listener, daemon=True
)
self.thread.start()
@override(EnvRunner)
def assert_healthy(self):
"""Checks that the server socket is open and listening."""
assert (
self.server_socket is not None
), "Server socket is None (not connected, not listening)."
@override(EnvRunner)
def sample(self, **kwargs):
"""Waits for the client to send episodes."""
while True:
with self._sample_lock:
if self._episode_chunks_to_return is not None:
num_env_steps = 0
num_episodes_completed = 0
for eps in self._episode_chunks_to_return:
if eps.is_done:
self._done_episodes_for_metrics.append(eps)
num_episodes_completed += 1
else:
self._ongoing_episodes_for_metrics[eps.id_].append(eps)
num_env_steps += len(eps)
ret = self._episode_chunks_to_return
self._episode_chunks_to_return = None
SingleAgentEnvRunner._increase_sampled_metrics(
self, num_env_steps, num_episodes_completed
)
return ret
time.sleep(0.01)
@override(EnvRunner)
def get_metrics(self):
# TODO (sven): We should probably make this a utility function to be called
# from within Single/MultiAgentEnvRunner and other EnvRunner subclasses, as
# needed.
# Compute per-episode metrics (only on already completed episodes).
for eps in self._done_episodes_for_metrics:
assert eps.is_done
episode_length = len(eps)
episode_return = eps.get_return()
episode_duration_s = eps.get_duration_s()
# Don't forget about the already returned chunks of this episode.
if eps.id_ in self._ongoing_episodes_for_metrics:
for eps2 in self._ongoing_episodes_for_metrics[eps.id_]:
episode_length += len(eps2)
episode_return += eps2.get_return()
episode_duration_s += eps2.get_duration_s()
del self._ongoing_episodes_for_metrics[eps.id_]
self._log_episode_metrics(
episode_length, episode_return, episode_duration_s
)
# Now that we have logged everything, clear cache of done episodes.
self._done_episodes_for_metrics.clear()
# Return reduced metrics.
return self.metrics.reduce()
def get_spaces(self):
return {
INPUT_ENV_SPACES: (self.config.observation_space, self.config.action_space),
DEFAULT_MODULE_ID: (
self.config.observation_space,
self.config.action_space,
),
}
@override(EnvRunner)
def stop(self):
"""Closes the client and server sockets."""
self._close_sockets_if_necessary()
@override(Checkpointable)
def get_ctor_args_and_kwargs(self):
return (
(), # *args
{"config": self.config}, # **kwargs
)
@override(Checkpointable)
def get_checkpointable_components(self):
return [
(COMPONENT_RL_MODULE, self.module),
]
@override(Checkpointable)
def get_state(
self,
components: Optional[Union[str, Collection[str]]] = None,
*,
not_components: Optional[Union[str, Collection[str]]] = None,
**kwargs,
) -> StateDict:
return {
COMPONENT_RL_MODULE: self.module.get_state(),
WEIGHTS_SEQ_NO: self._weights_seq_no,
}
@override(Checkpointable)
def set_state(self, state: StateDict) -> None:
# Update the RLModule state.
if COMPONENT_RL_MODULE in state:
# A missing value for WEIGHTS_SEQ_NO or a value of 0 means: Force the
# update.
weights_seq_no = state.get(WEIGHTS_SEQ_NO, 0)
# Only update the weigths, if this is the first synchronization or
# if the weights of this `EnvRunner` lacks behind the actual ones.
if weights_seq_no == 0 or self._weights_seq_no < weights_seq_no:
rl_module_state = state[COMPONENT_RL_MODULE]
if (
isinstance(rl_module_state, dict)
and DEFAULT_MODULE_ID in rl_module_state
):
rl_module_state = rl_module_state[DEFAULT_MODULE_ID]
self.module.set_state(rl_module_state)
# Update our weights_seq_no, if the new one is > 0.
if weights_seq_no > 0:
self._weights_seq_no = weights_seq_no
if self._blocked_on_state is True:
self._send_set_state_message()
self._blocked_on_state = False
def _client_message_listener(self):
"""Entry point for the listener thread."""
# Set up the server socket and bind to the specified host and port.
self._recycle_sockets()
# Enter an endless message receival- and processing loop.
while True:
# As long as we are blocked on a new state, sleep a bit and continue.
# Do NOT process any incoming messages (until we send out the new state
# back to the client).
if self._blocked_on_state is True:
time.sleep(0.01)
continue
try:
# Blocking call to get next message.
msg_type, msg_body = get_rllink_message(self.client_socket)
# Process the message received based on its type.
# Initial handshake.
if msg_type == RLlink.PING:
self._send_pong_message()
# Episode data from the client.
elif msg_type in [
RLlink.EPISODES,
RLlink.EPISODES_AND_GET_STATE,
]:
self._process_episodes_message(msg_type, msg_body)
# Client requests the state (model weights).
elif msg_type == RLlink.GET_STATE:
self._send_set_state_message()
# Clients requests config information.
elif msg_type == RLlink.GET_CONFIG:
self._send_set_config_message()
except ConnectionError as e:
print(f"Messaging/connection error {e}! Recycling sockets ...")
self._recycle_sockets(5.0)
continue
def _recycle_sockets(self, sleep: float = 0.0):
# Close all old sockets, if they exist.
self._close_sockets_if_necessary()
time.sleep(sleep)
# Start listening on the configured port.
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow reuse of the address.
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.bind((self.host, self.port))
# Listen for a single connection.
self.server_socket.listen(1)
print(f"Waiting for client to connect to port {self.port}...")
self.client_socket, self.address = self.server_socket.accept()
print(f"Connected to client at {self.address}")
def _close_sockets_if_necessary(self):
if self.client_socket:
self.client_socket.close()
if self.server_socket:
self.server_socket.close()
def _send_pong_message(self):
send_rllink_message(self.client_socket, {"type": RLlink.PONG.name})
def _process_episodes_message(self, msg_type, msg_body):
# On-policy training -> we have to block until we get a new `set_state` call
# (b/c the learning step is done and we can send new weights back to all
# clients).
if msg_type == RLlink.EPISODES_AND_GET_STATE:
self._blocked_on_state = True
episodes = []
for episode_state in msg_body["episodes"]:
episode = SingleAgentEpisode.from_state(episode_state)
episodes.append(episode.to_numpy())
# Push episodes into the to-be-returned list (for `sample()` requests).
with self._sample_lock:
if isinstance(self._episode_chunks_to_return, list):
self._episode_chunks_to_return.extend(episodes)
else:
self._episode_chunks_to_return = episodes
def _send_set_state_message(self):
send_rllink_message(
self.client_socket,
{
"type": RLlink.SET_STATE.name,
"state": self.get_state(inference_only=True),
},
)
def _send_set_config_message(self):
send_rllink_message(
self.client_socket,
{
"type": RLlink.SET_CONFIG.name,
# TODO (sven): We need AlgorithmConfig to be a `Checkpointable` with a
# msgpack'able state.
"config": pickle.dumps(self.config),
},
)
def _log_episode_metrics(self, length, ret, sec):
# Log general episode metrics.
# To mimic the old API stack behavior, we'll use `window` here for
# these particular stats (instead of the default EMA).
win = self.config.metrics_num_episodes_for_smoothing
self.metrics.log_value(EPISODE_LEN_MEAN, length, window=win)
self.metrics.log_value(EPISODE_RETURN_MEAN, ret, window=win)
self.metrics.log_value(EPISODE_DURATION_SEC_MEAN, sec, window=win)
# Per-agent returns.
self.metrics.log_value(
("agent_episode_returns_mean", DEFAULT_AGENT_ID), ret, window=win
)
# Per-RLModule returns.
self.metrics.log_value(
("module_episode_returns_mean", DEFAULT_MODULE_ID), ret, window=win
)
# For some metrics, log min/max as well.
self.metrics.log_value(EPISODE_LEN_MIN, length, reduce="min", window=win)
self.metrics.log_value(EPISODE_RETURN_MIN, ret, reduce="min", window=win)
self.metrics.log_value(EPISODE_LEN_MAX, length, reduce="max", window=win)
self.metrics.log_value(EPISODE_RETURN_MAX, ret, reduce="max", window=win)
| EnvRunnerServerForExternalInference |
python | astropy__astropy | astropy/utils/masked/core.py | {
"start": 12451,
"end": 14087
} | class ____(ShapedLikeNDArray):
"""Like ShapedLikeNDArray, but for classes that can work with masked data.
Defines default unmasked property as well as a filled method, and inherits
private class methods that help deal with masked inputs.
Any class using this must provide a masked property, which tells whether
the underlying data are Masked, as well as a mask property, which
generally should provide a read-only copy of the underlying mask.
"""
@property
@abc.abstractmethod
def masked(self):
"""Whether or not the instance uses masked values."""
@property
@abc.abstractmethod
def mask(self):
"""The mask."""
@property
def unmasked(self):
"""Get an instance without the mask.
Note that while one gets a new instance, the underlying data will be shared.
See Also
--------
filled : get a copy of the underlying data, with masked values filled in.
"""
return self._apply(lambda x: getattr(x, "unmasked", x))
def filled(self, fill_value):
"""Get a copy of the underlying data, with masked values filled in.
Parameters
----------
fill_value : object
Value to replace masked values with.
Returns
-------
filled : instance
Copy of ``self`` with masked items replaced by ``fill_value``.
See Also
--------
unmasked : get an instance without the mask.
"""
unmasked = self.unmasked.copy()
unmasked[self.mask] = fill_value
return unmasked
| MaskableShapedLikeNDArray |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1580053,
"end": 1583630
} | class ____(sgqlc.types.Type, Node, UniformResourceLocatable):
"""A workflow run."""
__schema__ = github_schema
__field_names__ = (
"check_suite",
"created_at",
"database_id",
"deployment_reviews",
"event",
"file",
"pending_deployment_requests",
"run_number",
"updated_at",
"workflow",
)
check_suite = sgqlc.types.Field(sgqlc.types.non_null(CheckSuite), graphql_name="checkSuite")
"""The check suite this workflow run belongs to."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
deployment_reviews = sgqlc.types.Field(
sgqlc.types.non_null(DeploymentReviewConnection),
graphql_name="deploymentReviews",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""The log of deployment reviews
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
event = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="event")
"""The event that triggered the workflow run"""
file = sgqlc.types.Field("WorkflowRunFile", graphql_name="file")
"""The workflow file"""
pending_deployment_requests = sgqlc.types.Field(
sgqlc.types.non_null(DeploymentRequestConnection),
graphql_name="pendingDeploymentRequests",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""The pending deployment requests of all check runs in this workflow
run
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
run_number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="runNumber")
"""A number that uniquely identifies this workflow run in its parent
workflow.
"""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
workflow = sgqlc.types.Field(sgqlc.types.non_null(Workflow), graphql_name="workflow")
"""The workflow executed in this workflow run."""
| WorkflowRun |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_rename_table_app/migrations/0002_rename_table.py | {
"start": 145,
"end": 399
} | class ____(CheckedMigration):
dependencies = [
("bad_flow_rename_table_app", "0001_initial"),
]
operations = [
migrations.RenameModel(
old_name="TestTable",
new_name="NewTable",
),
]
| Migration |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 177696,
"end": 178182
} | class ____(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
| TCPCloserTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.