language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_wrap_model_call.py | {
"start": 827,
"end": 2944
} | class ____:
"""Test basic wrap_model_call functionality."""
def test_passthrough_middleware(self) -> None:
"""Test middleware that simply passes through without modification."""
class PassthroughMiddleware(AgentMiddleware):
def wrap_model_call(self, request, handler):
return handler(request)
model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
agent = create_agent(model=model, middleware=[PassthroughMiddleware()])
result = agent.invoke({"messages": [HumanMessage("Hi")]})
assert len(result["messages"]) == 2
assert result["messages"][1].content == "Hello"
def test_logging_middleware(self) -> None:
"""Test middleware that logs calls without modification."""
call_log = []
class LoggingMiddleware(AgentMiddleware):
def wrap_model_call(self, request, handler):
call_log.append("before")
result = handler(request)
call_log.append("after")
return result
model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
agent = create_agent(model=model, middleware=[LoggingMiddleware()])
result = agent.invoke({"messages": [HumanMessage("Test")]})
assert call_log == ["before", "after"]
assert result["messages"][1].content == "Response"
def test_counting_middleware(self) -> None:
"""Test middleware that counts model calls."""
class CountingMiddleware(AgentMiddleware):
def __init__(self):
super().__init__()
self.call_count = 0
def wrap_model_call(self, request, handler):
self.call_count += 1
return handler(request)
counter = CountingMiddleware()
model = GenericFakeChatModel(messages=iter([AIMessage(content="Reply")]))
agent = create_agent(model=model, middleware=[counter])
agent.invoke({"messages": [HumanMessage("Test")]})
assert counter.call_count == 1
| TestBasicWrapModelCall |
python | getsentry__sentry | tests/sentry/rules/processing/test_delayed_processing.py | {
"start": 28646,
"end": 61118
} | class ____(ProcessDelayedAlertConditionsTestBase):
def test_get_condition_groups(self) -> None:
self._push_base_events()
project_three = self.create_project(organization=self.organization)
env3 = self.create_environment(project=project_three)
rule_1 = self.create_project_rule(
project=project_three,
condition_data=[self.event_frequency_condition],
filter_match=[self.tag_filter],
environment_id=env3.id,
)
rule_2 = self.create_project_rule(
project=project_three,
condition_data=[self.event_frequency_condition2],
environment_id=env3.id,
)
rules_to_groups = {rule_1.id: {1, 2, 3}, rule_2.id: {3, 4, 5}}
orig_rules_to_groups = deepcopy(rules_to_groups)
get_condition_query_groups([rule_1, rule_2], rules_to_groups) # type: ignore[arg-type]
assert orig_rules_to_groups == rules_to_groups
@patch("sentry.rules.processing.delayed_processing.logger")
def test_apply_delayed_nonexistent_project(self, mock_logger: MagicMock) -> None:
self.push_to_hash(self.project.id, self.rule1.id, self.group1.id, self.event1.event_id)
project_id = self.project.id
self.project.delete()
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
assert RuleFireHistory.objects.count() == 0
mock_logger.info.assert_called_once_with(
"delayed_processing.project_does_not_exist",
extra={"project_id": project_id},
)
@patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1)
def test_apply_delayed_rules_to_fire(self) -> None:
"""
Test that rules of various event frequency conditions, projects,
environments, etc. are properly fired.
"""
self._push_base_events()
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[self.rule1, self.rule2],
group__in=[self.group1, self.group2],
event_id__in=[self.event1.event_id, self.event2.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 2
assert (self.rule1.id, self.group1.id) in rule_fire_histories
assert (self.rule2.id, self.group2.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
apply_delayed(project_ids[1][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[self.rule3, self.rule4],
group__in=[self.group3, self.group4],
event_id__in=[self.event3.event_id, self.event4.event_id],
project=self.project_two,
).values_list("rule", "group")
assert len(rule_fire_histories) == 2
assert (self.rule3.id, self.group3.id) in rule_fire_histories
assert (self.rule4.id, self.group4.id) in rule_fire_histories
rule_group_data = buffer.backend.get_hash(Project, {"project_id": self.project_two.id})
assert rule_group_data == {}
def test_apply_delayed_issue_platform_event(self) -> None:
"""
Test that we fire rules triggered from issue platform events
"""
self._push_base_events()
rule5 = self.create_project_rule(
project=self.project,
condition_data=[self.event_frequency_condition2],
)
tags = [["foo", "guux"], ["sentry:release", "releaseme"]]
contexts = {"trace": {"trace_id": "b" * 32, "span_id": "c" * 16, "op": ""}}
for i in range(3):
event5 = self.create_performance_issue(
tags=tags,
fingerprint="group-5",
contexts=contexts,
)
group5 = event5.group
assert group5
assert isinstance(event5, GroupEvent)
self.push_to_hash(
self.project.id,
rule5.id,
group5.id,
event5.event_id,
occurrence_id=event5.occurrence_id,
)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[self.rule1, rule5],
group__in=[self.group1, group5],
event_id__in=[self.event1.event_id, event5.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 2
assert (self.rule1.id, self.group1.id) in rule_fire_histories
assert (rule5.id, group5.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
def test_apply_delayed_snoozed_rule(self) -> None:
"""
Test that we do not fire a rule that's been snoozed (aka muted)
"""
self._push_base_events()
rule5 = self.create_project_rule(
project=self.project,
condition_data=[self.event_frequency_condition2],
environment_id=self.environment.id,
)
self.snooze_rule(owner_id=self.user.id, rule=rule5)
event5 = self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
assert event5.group
group5 = event5.group
self.push_to_hash(self.project.id, rule5.id, group5.id, event5.event_id)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[rule5],
group__in=[self.group1, group5],
event_id__in=[self.event1.event_id, event5.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 0
self.assert_buffer_cleared(project_id=self.project.id)
def test_apply_delayed_same_condition_diff_value(self) -> None:
"""
Test that two rules with the same condition and interval but a
different value are both fired.
"""
self._push_base_events()
rule5 = self.create_project_rule(
project=self.project,
condition_data=[self.event_frequency_condition2],
environment_id=self.environment.id,
)
event5 = self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
assert event5.group
group5 = event5.group
self.push_to_hash(self.project.id, rule5.id, group5.id, event5.event_id)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[self.rule1, rule5],
group__in=[self.group1, group5],
event_id__in=[self.event1.event_id, event5.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 2
assert (self.rule1.id, self.group1.id) in rule_fire_histories
assert (rule5.id, group5.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
def test_apply_delayed_same_condition_diff_interval(self) -> None:
"""
Test that two rules with the same condition and value but a
different interval are both fired.
"""
self._push_base_events()
diff_interval_rule = self.create_project_rule(
project=self.project,
condition_data=[self.event_frequency_condition3],
environment_id=self.environment.id,
)
event5 = self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
assert event5.group
self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
group5 = event5.group
self.push_to_hash(self.project.id, diff_interval_rule.id, group5.id, event5.event_id)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[self.rule1, diff_interval_rule],
group__in=[self.group1, group5],
event_id__in=[self.event1.event_id, event5.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 2
assert (self.rule1.id, self.group1.id) in rule_fire_histories
assert (diff_interval_rule.id, group5.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
def test_apply_delayed_same_condition_diff_env(self) -> None:
"""
Test that two rules with the same condition, value, and interval
but different environment are both fired.
"""
self._push_base_events()
environment3 = self.create_environment(project=self.project)
diff_env_rule = self.create_project_rule(
project=self.project,
condition_data=[self.event_frequency_condition],
environment_id=environment3.id,
)
event5 = self.create_event(self.project.id, FROZEN_TIME, "group-5", environment3.name)
assert event5.group
self.create_event(self.project.id, FROZEN_TIME, "group-5", environment3.name)
group5 = event5.group
self.push_to_hash(self.project.id, diff_env_rule.id, group5.id, event5.event_id)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[self.rule1, diff_env_rule],
group__in=[self.group1, group5],
event_id__in=[self.event1.event_id, event5.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 2
assert (self.rule1.id, self.group1.id) in rule_fire_histories
assert (diff_env_rule.id, group5.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
def test_apply_delayed_two_rules_one_fires(self) -> None:
"""
Test that with two rules in one project where one rule hasn't met
the trigger threshold, only one is fired
"""
self._push_base_events()
high_event_frequency_condition = {
"interval": "1d",
"id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
"value": 100,
"name": "The issue is seen more than 100 times in 1d",
}
no_fire_rule = self.create_project_rule(
project=self.project,
condition_data=[high_event_frequency_condition],
environment_id=self.environment.id,
)
event5 = self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
assert event5.group
self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
group5 = event5.group
self.push_to_hash(self.project.id, no_fire_rule.id, group5.id, event5.event_id)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[self.rule1, no_fire_rule],
group__in=[self.group1, group5],
event_id__in=[self.event1.event_id, event5.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 1
assert (self.rule1.id, self.group1.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
def test_apply_delayed_action_match_all(self) -> None:
"""
Test that a rule with multiple conditions and an action match of
'all' is fired.
"""
self._push_base_events()
two_conditions_match_all_rule = self.create_project_rule(
project=self.project,
condition_data=[self.event_frequency_condition, self.user_frequency_condition],
environment_id=self.environment.id,
)
event5 = self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
assert event5.group
self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
group5 = event5.group
self.create_event(
self.project.id,
FROZEN_TIME,
"group-6",
self.environment.name,
)
self.create_event(
self.project.id,
FROZEN_TIME,
"group-5",
self.environment.name,
)
condition_wont_pass_rule = self.create_project_rule(
project=self.project,
condition_data=[self.create_event_frequency_condition(value=100)],
environment_id=self.environment.id,
)
self.push_to_hash(
self.project.id, two_conditions_match_all_rule.id, group5.id, event5.event_id
)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[self.rule1, two_conditions_match_all_rule, condition_wont_pass_rule],
group__in=[self.group1, group5],
event_id__in=[self.event1.event_id, event5.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 2
assert (self.rule1.id, self.group1.id) in rule_fire_histories
assert (two_conditions_match_all_rule.id, group5.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
assert (two_conditions_match_all_rule.id, group5.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
@with_feature("organizations:event-unique-user-frequency-condition-with-conditions")
def test_special_event_frequency_condition(self) -> None:
Rule.objects.all().delete()
event_frequency_special_condition = Rule.objects.create(
label="Event Frequency Special Condition",
project=self.project,
environment_id=self.environment.id,
data={
"filter_match": "all",
"action_match": "all",
"actions": [
{"id": "sentry.rules.actions.notify_event.NotifyEventAction"},
{
"id": "sentry.rules.actions.notify_event_service.NotifyEventServiceAction",
"service": "mail",
},
],
"conditions": [
{
"id": "sentry.rules.conditions.event_frequency.EventUniqueUserFrequencyConditionWithConditions",
"value": 2,
"comparisonType": "count",
"interval": "1m",
},
{
"match": "eq",
"id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
"key": "region",
"value": "EU",
},
],
},
)
self.create_event(
self.project.id, FROZEN_TIME, "group-1", self.environment.name, tags=[["region", "US"]]
)
self.create_event(
self.project.id, FROZEN_TIME, "group-1", self.environment.name, tags=[["region", "US"]]
)
evaluated_event = self.create_event(
self.project.id, FROZEN_TIME, "group-1", self.environment.name, tags=[["region", "EU"]]
)
assert evaluated_event.group
group1 = evaluated_event.group
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
rp = RuleProcessor(
evaluated_event.for_group(evaluated_event.group),
is_new=False,
is_regression=False,
is_new_group_environment=False,
has_reappeared=False,
)
rp.apply()
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[event_frequency_special_condition],
group__in=[group1],
event_id__in=[evaluated_event.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 0
self.assert_buffer_cleared(project_id=self.project.id)
@with_feature("organizations:event-unique-user-frequency-condition-with-conditions")
def test_special_event_frequency_condition_passes(self) -> None:
Rule.objects.all().delete()
event_frequency_special_condition = Rule.objects.create(
label="Event Frequency Special Condition",
project=self.project,
environment_id=self.environment.id,
data={
"filter_match": "all",
"action_match": "all",
"actions": [
{"id": "sentry.rules.actions.notify_event.NotifyEventAction"},
{
"id": "sentry.rules.actions.notify_event_service.NotifyEventServiceAction",
"service": "mail",
},
],
"conditions": [
{
"id": "sentry.rules.conditions.event_frequency.EventUniqueUserFrequencyConditionWithConditions",
"value": 2,
"comparisonType": "count",
"interval": "1m",
},
{
"match": "eq",
"id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
"key": "region",
"value": "EU",
},
],
},
)
self.create_event(
self.project.id, FROZEN_TIME, "group-1", self.environment.name, tags=[["region", "EU"]]
)
self.create_event(
self.project.id, FROZEN_TIME, "group-1", self.environment.name, tags=[["region", "EU"]]
)
evaluated_event = self.create_event(
self.project.id, FROZEN_TIME, "group-1", self.environment.name, tags=[["region", "EU"]]
)
assert evaluated_event.group
group1 = evaluated_event.group
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
rp = RuleProcessor(
evaluated_event.for_group(evaluated_event.group),
is_new=False,
is_regression=False,
is_new_group_environment=False,
has_reappeared=False,
)
rp.apply()
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[event_frequency_special_condition],
group__in=[group1],
event_id__in=[evaluated_event.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 1
assert (event_frequency_special_condition.id, group1.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
def test_apply_delayed_shared_condition_diff_filter(self) -> None:
self._push_base_events()
project_three = self.create_project(organization=self.organization)
env3 = self.create_environment(project=project_three)
buffer.backend.push_to_sorted_set(key=PROJECT_ID_BUFFER_LIST_KEY, value=project_three.id)
rule_1 = self.create_project_rule(
project=project_three,
condition_data=[self.event_frequency_condition],
filter_match=[self.tag_filter],
environment_id=env3.id,
)
rule_2 = self.create_project_rule(
project=project_three,
condition_data=[self.event_frequency_condition],
environment_id=env3.id,
)
event1 = self.create_event(
project_three.id, FROZEN_TIME, "group-5", env3.name, tags=[["foo", "bar"]]
)
assert event1.group
self.create_event(
project_three.id, FROZEN_TIME, "group-5", env3.name, tags=[["foo", "bar"]]
)
group1 = event1.group
event2 = self.create_event(project_three.id, FROZEN_TIME, "group-6", env3.name)
assert event2.group
self.create_event(project_three.id, FROZEN_TIME, "group-6", env3.name)
group2 = event2.group
self.push_to_hash(project_three.id, rule_1.id, group1.id, event1.event_id)
self.push_to_hash(project_three.id, rule_2.id, group2.id, event2.event_id)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
assert project_three.id == project_ids[2][0]
apply_delayed(project_ids[2][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[rule_1, rule_2],
group__in=[group1, group2],
event_id__in=[event1.event_id, event2.event_id],
project=project_three,
).values_list("rule", "group")
assert len(rule_fire_histories) == 2
assert (rule_1.id, group1.id) in rule_fire_histories
assert (rule_2.id, group2.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=project_three.id)
def test_apply_delayed_percent_comparison_condition_interval(self) -> None:
"""
Test that a rule with a percent condition is querying backwards against
the correct comparison interval, e.g. # events is ... compared to 1 hr ago
"""
percent_condition = self.create_event_frequency_condition(
interval="1h",
value=50,
comparison_type=ComparisonType.PERCENT,
comparison_interval="15m",
)
percent_comparison_rule = self.create_project_rule(
project=self.project,
condition_data=[percent_condition],
)
incorrect_interval_time = FROZEN_TIME - timedelta(hours=1, minutes=30)
correct_interval_time = FROZEN_TIME - timedelta(hours=1, minutes=10)
event5 = self.create_event(self.project.id, FROZEN_TIME, "group-5")
assert event5.group
self.create_event(self.project.id, FROZEN_TIME, "group-5")
# Create events for the incorrect interval that will not trigger the rule
self.create_event(self.project.id, incorrect_interval_time, "group-5")
self.create_event(self.project.id, incorrect_interval_time, "group-5")
# Create an event for the correct interval that will trigger the rule
self.create_event(self.project.id, correct_interval_time, "group-5")
group5 = event5.group
self.push_to_hash(self.project.id, percent_comparison_rule.id, group5.id, event5.event_id)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[percent_comparison_rule],
group__in=[group5],
event_id__in=[event5.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 1
assert (percent_comparison_rule.id, group5.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
def test_apply_delayed_event_frequency_percent_condition_fires_on_small_value(self) -> None:
event_frequency_percent_condition_2 = self.create_event_frequency_condition(
interval="1h", id="EventFrequencyPercentCondition", value=0.1
)
self.project_three = self.create_project(organization=self.organization)
# 1 event / 600 sessions ~= 0.17%
self._make_sessions(600, project=self.project_three)
percent_comparison_rule = self.create_project_rule(
project=self.project_three,
condition_data=[event_frequency_percent_condition_2],
)
event5 = self.create_event(self.project_three.id, FROZEN_TIME, "group-6")
assert event5.group
buffer.backend.push_to_sorted_set(
key=PROJECT_ID_BUFFER_LIST_KEY, value=self.project_three.id
)
self.push_to_hash(
self.project_three.id, percent_comparison_rule.id, event5.group.id, event5.event_id
)
apply_delayed(self.project_three.id)
assert RuleFireHistory.objects.filter(
rule__in=[percent_comparison_rule],
project=self.project_three,
).exists()
self.assert_buffer_cleared(project_id=self.project_three.id)
def test_apply_delayed_event_frequency_percent_comparison_interval(self) -> None:
"""
Test that the event frequency percent condition with a percent
comparison is using the COMPARISON_INTERVALS for it's
comparison_interval and does not fail with a KeyError.
"""
percent_condition = self.create_event_frequency_condition(
id="EventFrequencyPercentCondition",
interval="1h",
value=50,
comparison_type=ComparisonType.PERCENT,
comparison_interval="1d",
)
percent_comparison_rule = self.create_project_rule(
project=self.project,
condition_data=[percent_condition],
)
event5 = self.create_event(self.project.id, FROZEN_TIME, "group-5")
assert event5.group
self.push_to_hash(
self.project.id, percent_comparison_rule.id, event5.group.id, event5.event_id
)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
apply_delayed(project_ids[0][0])
assert not RuleFireHistory.objects.filter(
rule__in=[percent_comparison_rule],
project=self.project,
).exists()
self.assert_buffer_cleared(project_id=self.project.id)
def _setup_count_percent_test(self) -> int:
fires_percent_condition = self.create_event_frequency_condition(
interval="1h",
value=50,
comparison_type=ComparisonType.PERCENT,
comparison_interval="15m",
)
self.fires_percent_rule = self.create_project_rule(
project=self.project,
condition_data=[fires_percent_condition],
environment_id=self.environment.id,
)
fires_count_condition = self.create_event_frequency_condition(
interval="1h",
value=1,
)
self.fires_count_rule = self.create_project_rule(
project=self.project,
condition_data=[fires_count_condition],
environment_id=self.environment.id,
)
skips_count_condition = self.create_event_frequency_condition(
interval="1h",
value=75,
)
self.skips_count_rule = self.create_project_rule(
project=self.project,
condition_data=[skips_count_condition],
environment_id=self.environment.id,
)
# Create events to trigger the fires count condition.
self.event5 = self.create_event(
self.project.id, FROZEN_TIME, "group-5", self.environment.name
)
assert self.event5.group
self.create_event(self.project.id, FROZEN_TIME, "group-5", self.environment.name)
self.group5 = self.event5.group
# Create a past event to trigger the fires percent condition.
self.create_event(
self.project.id,
FROZEN_TIME - timedelta(hours=1, minutes=10),
"group-5",
self.environment.name,
)
for rule in [self.fires_percent_rule, self.fires_count_rule, self.skips_count_rule]:
self.push_to_hash(self.project.id, rule.id, self.group5.id, self.event5.event_id)
project_ids = buffer.backend.get_sorted_set(
PROJECT_ID_BUFFER_LIST_KEY, 0, self.buffer_timestamp
)
return project_ids[0][0]
def _assert_count_percent_results(self, safe_execute_callthrough: Mock) -> None:
rule_fire_histories = RuleFireHistory.objects.filter(
rule__in=[self.fires_percent_rule, self.fires_count_rule, self.skips_count_rule],
group__in=[self.group5],
event_id__in=[self.event5.event_id],
project=self.project,
).values_list("rule", "group")
assert len(rule_fire_histories) == 2
assert (self.fires_percent_rule.id, self.group5.id) in rule_fire_histories
assert (self.fires_count_rule.id, self.group5.id) in rule_fire_histories
self.assert_buffer_cleared(project_id=self.project.id)
# Ensure we're only making two queries. The count query and first
# percent query of both percent conditions can share one query, and
# the second query of both percent conditions share the other query.
assert safe_execute_callthrough.call_count == 2
@patch("sentry.rules.processing.delayed_processing.safe_execute", side_effect=safe_execute)
def test_apply_delayed_process_percent_then_count(
self, safe_execute_callthrough: MagicMock
) -> None:
"""
Test that having both count and percent comparison type conditions do
not affect each other and that processing the percent condition first
does not matter.
"""
# Have the percent condition be processed first. The calculated percent
# value is 100, but the skips_count_rule with a threshold of 75 should
# not be triggered.
project_id = self._setup_count_percent_test()
with patch(
"sentry.rules.processing.delayed_processing.get_condition_query_groups",
side_effect=mock_get_condition_group(descending=False),
):
apply_delayed(project_id)
self._assert_count_percent_results(safe_execute_callthrough)
@patch("sentry.rules.processing.delayed_processing.safe_execute", side_effect=safe_execute)
def test_apply_delayed_process_count_then_percent(
self, safe_execute_callthrough: MagicMock
) -> None:
"""
Test that having both count and percent comparison type conditions do
not affect each other and that processing the count condition first
does not matter.
"""
# Have a count condition be processed first. It's calculated value is 2,
# but the fires_percent_rule with a 50 threshold should still be triggered.
project_id = self._setup_count_percent_test()
with patch(
"sentry.rules.processing.delayed_processing.get_condition_query_groups",
side_effect=mock_get_condition_group(descending=True),
):
apply_delayed(project_id)
self._assert_count_percent_results(safe_execute_callthrough)
| ApplyDelayedTest |
python | ansible__ansible | test/lib/ansible_test/_internal/provider/__init__.py | {
"start": 1858,
"end": 2240
} | class ____(metaclass=abc.ABCMeta):
"""Base class for provider plugins that are path based."""
sequence = 500
priority = 500
def __init__(self, root: str) -> None:
self.root = root
@staticmethod
@abc.abstractmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
| PathProvider |
python | ipython__ipython | IPython/utils/text.py | {
"start": 2475,
"end": 11644
} | class ____(list[Any]):
"""List derivative with a special access attributes.
These are normal lists, but with the special attributes:
* .l (or .list) : value as list (the list itself).
* .n (or .nlstr): value as a string, joined on newlines.
* .s (or .spstr): value as a string, joined on spaces.
* .p (or .paths): list of path objects (requires path.py package)
Any values which require transformations are computed only once and
cached."""
__spstr: str
__nlstr: str
__paths: List[Path]
def get_list(self) -> Self:
return self
l = list = property(get_list)
def get_spstr(self) -> str:
try:
return self.__spstr
except AttributeError:
self.__spstr = ' '.join(self)
return self.__spstr
s = spstr = property(get_spstr)
def get_nlstr(self) -> str:
try:
return self.__nlstr
except AttributeError:
self.__nlstr = '\n'.join(self)
return self.__nlstr
n = nlstr = property(get_nlstr)
def get_paths(self) -> List[Path]:
try:
return self.__paths
except AttributeError:
self.__paths = [Path(p) for p in self if os.path.exists(p)]
return self.__paths
p = paths = property(get_paths)
def grep(
self,
pattern: Union[str, Callable[[Any], re.Match[str] | None]],
prune: bool = False,
field: Optional[int] = None,
) -> Self:
"""Return all strings matching 'pattern' (a regex or callable)
This is case-insensitive. If prune is true, return all items
NOT matching the pattern.
If field is specified, the match must occur in the specified
whitespace-separated field.
Examples::
a.grep( lambda x: x.startswith('C') )
a.grep('Cha.*log', prune=1)
a.grep('chm', field=-1)
"""
def match_target(s: str) -> str:
if field is None:
return s
parts = s.split()
try:
tgt = parts[field]
return tgt
except IndexError:
return ""
if isinstance(pattern, str):
pred = lambda x : re.search(pattern, x, re.IGNORECASE)
else:
pred = pattern
if not prune:
return type(self)([el for el in self if pred(match_target(el))]) # type: ignore [no-untyped-call]
else:
return type(self)([el for el in self if not pred(match_target(el))]) # type: ignore [no-untyped-call]
def fields(self, *fields: List[str]) -> List[List[str]]:
"""Collect whitespace-separated fields from string list
Allows quick awk-like usage of string lists.
Example data (in var a, created by 'a = !ls -l')::
-rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog
drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython
* ``a.fields(0)`` is ``['-rwxrwxrwx', 'drwxrwxrwx+']``
* ``a.fields(1,0)`` is ``['1 -rwxrwxrwx', '6 drwxrwxrwx+']``
(note the joining by space).
* ``a.fields(-1)`` is ``['ChangeLog', 'IPython']``
IndexErrors are ignored.
Without args, fields() just split()'s the strings.
"""
if len(fields) == 0:
return [el.split() for el in self]
res = SList()
for el in [f.split() for f in self]:
lineparts = []
for fd in fields:
try:
lineparts.append(el[fd])
except IndexError:
pass
if lineparts:
res.append(" ".join(lineparts))
return res
def sort( # type:ignore[override]
self,
field: Optional[List[str]] = None,
nums: bool = False,
) -> Self:
"""sort by specified fields (see fields())
Example::
a.sort(1, nums = True)
Sorts a by second field, in numerical order (so that 21 > 3)
"""
#decorate, sort, undecorate
if field is not None:
dsu = [[SList([line]).fields(field), line] for line in self]
else:
dsu = [[line, line] for line in self]
if nums:
for i in range(len(dsu)):
numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()])
try:
n = int(numstr)
except ValueError:
n = 0
dsu[i][0] = n
dsu.sort()
return type(self)([t[1] for t in dsu])
def indent(instr: str, nspaces: int = 4, ntabs: int = 0, flatten: bool = False) -> str:
    """Indent a string a given number of spaces or tabstops.

    indent(str, nspaces=4, ntabs=0) -> indent str by ntabs+nspaces.

    Parameters
    ----------
    instr : basestring
        The string to be indented.
    nspaces : int (default: 4)
        The number of spaces to be indented.
    ntabs : int (default: 0)
        The number of tabs to be indented.
    flatten : bool (default: False)
        Whether to scrub existing indentation.  If True, all lines will be
        aligned to the same indentation.  If False, existing indentation will
        be strictly increased.

    Returns
    -------
    str : string indented by ntabs and nspaces.
    """
    ind = "\t" * ntabs + " " * nspaces
    if not ind:
        # Nothing to prepend.  This also guards the outstr[:-len(ind)] slice
        # below: with len(ind) == 0 it would be outstr[:-0] == "", silently
        # destroying any input that ends in a newline.
        return instr
    if flatten:
        # Replace existing leading whitespace so all lines align.
        pat = re.compile(r'^\s*', re.MULTILINE)
    else:
        # Prepend to whatever indentation is already there.
        pat = re.compile(r'^', re.MULTILINE)
    outstr = re.sub(pat, ind, instr)
    # MULTILINE '^' also matches after a trailing line separator, which
    # leaves a dangling indent at the very end of the result; trim it off.
    if outstr.endswith(os.linesep + ind):
        return outstr[:-len(ind)]
    return outstr
def list_strings(arg: Union[str, List[str]]) -> List[str]:
    """Always return a list of strings, given a string or list of strings
    as input.

    Examples
    --------
    ::

        In [7]: list_strings('A single string')
        Out[7]: ['A single string']

        In [8]: list_strings(['A single string in a list'])
        Out[8]: ['A single string in a list']

        In [9]: list_strings(['A','list','of','strings'])
        Out[9]: ['A', 'list', 'of', 'strings']
    """
    # A bare string gets wrapped; anything else is passed through untouched.
    return [arg] if isinstance(arg, str) else arg
def marquee(txt: str = "", width: int = 78, mark: str = "*") -> str:
    """Return the input string centered in a 'marquee'.

    Examples
    --------
    ::

        In [16]: marquee('A test',40)
        Out[16]: '**************** A test ****************'

        In [17]: marquee('A test',40,'-')
        Out[17]: '---------------- A test ----------------'

        In [18]: marquee('A test',40,' ')
        Out[18]: '                 A test                 '
    """
    if not txt:
        # No text: fill the whole width with the mark pattern.
        return (mark * width)[:width]
    # Repetitions of `mark` per side, leaving room for the text and one
    # space of padding on either side; never negative.
    side = (width - len(txt) - 2) // len(mark) // 2
    border = mark * max(side, 0)
    return "%s %s %s" % (border, txt, border)
def format_screen(strng: str) -> str:
    """Format a string for screen printing.

    This removes some latex-type format codes."""
    # Strip backslashes used as paragraph-continuation markers at
    # end-of-line (latex style).
    return re.sub(r'\\$', '', strng, flags=re.MULTILINE)
def dedent(text: str) -> str:
    """Equivalent of textwrap.dedent that ignores unindented first line.

    This means it will still dedent strings like:
    '''foo
    is a bar
    '''

    For use in wrap_paragraphs.
    """
    if text.startswith('\n'):
        # Text opens with a blank line, so the first line carries no
        # special indentation: plain textwrap.dedent applies.
        return textwrap.dedent(text)
    first, sep, rest = text.partition('\n')
    if not sep:
        # Single line only.
        return textwrap.dedent(text)
    # Dedent everything except the (possibly unindented) first line.
    return first + '\n' + textwrap.dedent(rest)
def strip_email_quotes(text: str) -> str:
    """Strip leading email quotation characters ('>').

    Removes any combination of leading '>' interspersed with whitespace that
    appears *identically* in all lines of the input text.

    Parameters
    ----------
    text : str

    Examples
    --------

    Simple uses::

        In [2]: strip_email_quotes('> > text')
        Out[2]: 'text'

        In [3]: strip_email_quotes('> > text\\n> > more')
        Out[3]: 'text\\nmore'

    Note how only the common prefix that appears in all lines is stripped::

        In [4]: strip_email_quotes('> > text\\n> > more\\n> more...')
        Out[4]: '> text\\n> more\\nmore...'

    So if any line has no quote marks ('>'), then none are stripped from any
    of them ::

        In [5]: strip_email_quotes('> > text\\n> > more\\nlast different')
        Out[5]: '> > text\\n> > more\\nlast different'
    """
    lines = text.splitlines()
    common = 0
    # Walk the character columns shared by every line; the strippable prefix
    # ends at the first column where lines disagree or that holds a
    # non-quote, non-whitespace character.
    for column in zip(*lines):
        ch = column[0]
        if any(other != ch for other in column):
            break
        if ch == ">" or ch in string.whitespace:
            common += 1
        else:
            break
    return "\n".join(line[common:] for line in lines)
| SList |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 57205,
"end": 58874
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("sk_SK")
Faker.seed(0)
def test_name_male(self):
male_name = self.fake.name_male()
name_parts = male_name.split(" ")
first_name, last_name = "", ""
if len(name_parts) == 2:
first_name = name_parts[0]
last_name = name_parts[1]
elif len(name_parts) == 4:
first_name = name_parts[1]
last_name = name_parts[2]
elif len(name_parts) == 3:
if name_parts[-1] in SkSKProvider.suffixes:
first_name = name_parts[0]
last_name = name_parts[1]
else:
first_name = name_parts[1]
last_name = name_parts[2]
assert first_name in SkSKProvider.first_names_male
assert last_name in SkSKProvider.last_names_male
def test_name_female(self):
female_name = self.fake.name_female()
name_parts = female_name.split(" ")
first_name, last_name = "", ""
if len(name_parts) == 2:
first_name = name_parts[0]
last_name = name_parts[1]
elif len(name_parts) == 4:
first_name = name_parts[1]
last_name = name_parts[2]
elif len(name_parts) == 3:
if name_parts[-1] in SkSKProvider.suffixes:
first_name = name_parts[0]
last_name = name_parts[1]
else:
first_name = name_parts[1]
last_name = name_parts[2]
assert first_name in SkSKProvider.first_names_female
assert last_name in SkSKProvider.last_names_female
| TestSkSK |
python | getsentry__sentry | src/sentry/utils/locking/__init__.py | {
"start": 0,
"end": 97
} | class ____(Exception):
"""Exception raised when a lock cannot be acquired."""
| UnableToAcquireLock |
python | doocs__leetcode | solution/0700-0799/0793.Preimage Size of Factorial Zeroes Function/Solution.py | {
"start": 0,
"end": 272
} | class ____:
def preimageSizeFZF(self, k: int) -> int:
def f(x):
if x == 0:
return 0
return x // 5 + f(x // 5)
def g(k):
return bisect_left(range(5 * k), k, key=f)
return g(k + 1) - g(k)
| Solution |
python | mlflow__mlflow | mlflow/openai/_agent_tracer.py | {
"start": 2082,
"end": 9988
} | class ____(oai.TracingProcessor):
def __init__(
self,
project_name: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self._span_id_to_mlflow_span: dict[str, SpanWithToken] = {}
def on_trace_start(self, trace: oai.Trace) -> None:
if (active_span := get_current_active_span()) and active_span.name == _AGENT_RUN_SPAN_NAME:
# The root span is already started by the _patched_agent_run
mlflow_span = active_span
token = None
else:
# Users create a trace using `agents.trace` in OpenAI Agent SDK
# Ref: ...
# We need to create a corresponding MLflow span to track the trace
mlflow_span = start_span_no_context(
name=trace.name,
span_type=SpanType.AGENT,
# TODO: Trace object doesn't contain input/output. Can we get it somehow?
inputs="",
attributes=trace.metadata,
)
token = set_span_in_context(mlflow_span)
# NB: Trace ID has different prefix as span ID so will not conflict
self._span_id_to_mlflow_span[trace.trace_id] = SpanWithToken(mlflow_span, token)
if trace.group_id:
# Group ID is used for grouping multiple agent executions together
mlflow_span.set_tag("group_id", trace.group_id)
def on_trace_end(self, trace: oai.Trace) -> None:
try:
st = self._span_id_to_mlflow_span.pop(trace.trace_id, None)
if st and st.token:
detach_span_from_context(st.token)
st.span.end(status=st.span.status, outputs="")
except Exception:
_logger.debug("Failed to end MLflow trace", exc_info=True)
def on_span_start(self, span: oai.Span[Any]) -> None:
try:
parent_st: SpanWithToken | None = self._span_id_to_mlflow_span.get(span.parent_id, None)
# Parent might be a trace
if not parent_st:
parent_st = self._span_id_to_mlflow_span.get(span.trace_id, None)
inputs, _, attributes = _parse_span_data(span.span_data)
span_type = _SPAN_TYPE_MAP.get(span.span_data.type, SpanType.CHAIN)
mlflow_span = start_span_no_context(
name=_get_span_name(span.span_data),
span_type=span_type,
parent_span=parent_st.span if parent_st else None,
inputs=inputs,
attributes=attributes,
)
token = set_span_in_context(mlflow_span)
if span_type == SpanType.CHAT_MODEL:
mlflow_span.set_attribute(SpanAttributeKey.MESSAGE_FORMAT, "openai-agent")
self._span_id_to_mlflow_span[span.span_id] = SpanWithToken(mlflow_span, token)
except Exception:
_logger.debug("Failed to start MLflow span", exc_info=True)
def on_span_end(self, span: oai.Span[Any]) -> None:
try:
# parsed_span_data = parse_spandata(span.span_data)
st: SpanWithToken | None = self._span_id_to_mlflow_span.pop(span.span_id, None)
detach_span_from_context(st.token)
mlflow_span = st.span
inputs, outputs, attributes = _parse_span_data(span.span_data)
mlflow_span.set_inputs(inputs)
mlflow_span.set_outputs(outputs)
mlflow_span.set_attributes(attributes)
if span.error:
status = SpanStatus(
status_code=SpanStatusCode.ERROR,
description=span.error["message"],
)
mlflow_span.add_event(
SpanEvent(
name="exception",
attributes={
"exception.message": span.error["message"],
"exception.type": "",
"exception.stacktrace": json.dumps(span.error["data"]),
},
)
)
else:
status = SpanStatusCode.OK
mlflow_span.end(status=status)
except Exception:
_logger.debug("Failed to end MLflow span", exc_info=True)
def force_flush(self) -> None:
# MLflow doesn't need flush but this method is required by the interface
pass
def shutdown(self) -> None:
self.force_flush()
def _get_span_name(span_data: oai.SpanData) -> str:
if hasattr(span_data, "name"):
return span_data.name
elif isinstance(span_data, oai.GenerationSpanData):
return "Generation"
elif isinstance(span_data, oai.ResponseSpanData):
return "Response"
elif isinstance(span_data, oai.HandoffSpanData):
return "Handoff"
else:
return "Unknown"
def _parse_span_data(span_data: oai.SpanData) -> tuple[Any, Any, dict[str, Any]]:
inputs = None
outputs = None
attributes = {}
if span_data.type == OpenAISpanType.AGENT:
attributes = {
"handoffs": span_data.handoffs,
"tools": span_data.tools,
"output_type": span_data.output_type,
}
outputs = {"output_type": span_data.output_type}
elif span_data.type == OpenAISpanType.FUNCTION:
try:
inputs = json.loads(span_data.input)
except Exception:
inputs = span_data.input
outputs = span_data.output
elif span_data.type == OpenAISpanType.GENERATION:
inputs = span_data.input
outputs = span_data.output
attributes = {
"model": span_data.model,
"model_config": span_data.model_config,
"usage": span_data.usage,
}
elif span_data.type == OpenAISpanType.RESPONSE:
inputs, outputs, attributes = _parse_response_span_data(span_data)
elif span_data.type == OpenAISpanType.HANDOFF:
inputs = {"from_agent": span_data.from_agent}
outputs = {"to_agent": span_data.to_agent}
elif span_data.type == OpenAISpanType.CUSTOM:
outputs = span_data.data
elif span_data.type == OpenAISpanType.GUARDRAIL:
outputs = {"triggered": span_data.triggered}
return inputs, outputs, attributes
def _parse_response_span_data(span_data: oai.ResponseSpanData) -> tuple[Any, Any, dict[str, Any]]:
inputs = span_data.input
response = span_data.response
response_dict = response.model_dump() if response else {}
outputs = response_dict.get("output")
attributes = {k: v for k, v in response_dict.items() if k != "output"}
# Extract chat tools
chat_tools = []
for tool in response_dict.get("tools", []):
try:
tool = ChatTool(
type="function",
function=FunctionToolDefinition(
name=tool["name"],
description=tool.get("description"),
parameters=tool.get("parameters"),
strict=tool.get("strict"),
),
)
chat_tools.append(tool)
except Exception as e:
_logger.debug(f"Failed to parse chat tool: {tool}. Error: {e}")
if chat_tools:
attributes[SpanAttributeKey.CHAT_TOOLS] = chat_tools
return inputs, outputs, attributes
async def _patched_agent_run(original, self, *args, **kwargs):
inputs = construct_full_inputs(original, self, *args, **kwargs)
attributes = {k: v for k, v in inputs.items() if k not in ("starting_agent", "input")}
with start_span(
name=_AGENT_RUN_SPAN_NAME,
span_type=SpanType.AGENT,
attributes=attributes,
) as span:
span.set_inputs(inputs.get("input"))
result = await original(self, *args, **kwargs)
span.set_outputs(result.final_output)
return result
| MlflowOpenAgentTracingProcessor |
python | openai__openai-python | src/openai/types/responses/response_completed_event.py | {
"start": 230,
"end": 517
} | class ____(BaseModel):
response: Response
"""Properties of the completed response."""
sequence_number: int
"""The sequence number for this event."""
type: Literal["response.completed"]
"""The type of the event. Always `response.completed`."""
| ResponseCompletedEvent |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-instatus/components.py | {
"start": 1698,
"end": 4219
} | class ____(SubstreamPartitionRouter):
"""
UpdatesSubstreamPartitionRouter iterates over the list of id to create a correct stream slices.
In case we need to make request from parent stream with list of object by their ids we need to use
a ListAddFields transformer class -> put oll object ids in custom list field -> UpdatesSubstreamPartitionRouter puts every
id from that list to slices.
"""
parent_stream_configs: List[ParentStreamConfig]
parameters: InitVar[Mapping[str, Any]]
def stream_slices(self) -> Iterable[StreamSlice]:
if not self.parent_stream_configs:
yield from []
else:
for parent_stream_config in self.parent_stream_configs:
parent_stream = parent_stream_config.stream
parent_field = parent_stream_config.parent_key.eval(self.config)
partition_field = parent_stream_config.partition_field.eval(self.config)
for parent_stream_slice in parent_stream.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=None, stream_state=None
):
empty_parent_slice = True
parent_slice = parent_stream_slice
for parent_record in parent_stream.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=None, stream_slice=parent_stream_slice, stream_state=None
):
# Skip non-records (eg AirbyteLogMessage)
if isinstance(parent_record, AirbyteMessage):
if parent_record.type == Type.RECORD:
parent_record = parent_record.record.data
else:
continue
empty_parent_slice = False
stream_state_values = parent_record.get(parent_field)
updates_object_id = parent_record.get("id")
for stream_state_value in stream_state_values:
yield StreamSlice(
partition={partition_field: stream_state_value, "parent_slice": parent_slice},
cursor_slice={"updates_object_id": updates_object_id},
)
# If the parent slice contains no records,
if empty_parent_slice:
yield from []
| UpdatesSubstreamPartitionRouter |
python | pytorch__pytorch | torch/_dynamo/guards.py | {
"start": 143477,
"end": 188040
} | class ____:
def __init__(
self,
f_code: types.CodeType,
output_graph: OutputGraphCommon,
cache_entry: Optional[CacheEntry] = None,
guard_fail_fn: Optional[Callable[[GuardFail], None]] = None,
guard_filter_fn: Optional[
Callable[[list[GuardFilterEntry]], list[bool]]
] = None,
shape_code_parts: Optional[ShapeCodeParts] = None,
runtime_global_scope: Optional[dict[str, Any]] = None,
save_guards: bool = False,
strict_error: bool = False,
source_get_cache: Optional[dict[str, Any]] = None,
):
guards = output_graph.guards if output_graph else None
self._weakrefs: dict[int, ReferenceType[object]] = {}
existing_diff_guard_sources = (
update_diff_guard_managers_for_existing_cache_entries(cache_entry)
)
self.output_graph: Optional[OutputGraphCommon] = output_graph
assert self.output_graph is not None
# Only used for serialization.
self.shape_code_parts = shape_code_parts
# NB: Until we trace device contexts, we need to use the stack recorded at the beginning of tracing
# in case a set default device call was made in the graph.
self.torch_function_mode_stack = (
output_graph.torch_function_mode_stack if output_graph else None
)
self.used_builtin_vars: OrderedSet[str] = OrderedSet()
self.additional_used_local_vars: OrderedSet[str] = OrderedSet()
self.additional_used_global_vars: OrderedSet[str] = OrderedSet()
self.runtime_global_scope = runtime_global_scope
self.global_state: Optional[torch._C._dynamo.guards.GlobalStateGuard] = None
self.torch_function_mode_stack_check_fn: Optional[Callable[[], bool]] = None
if not justknobs_check("pytorch/compiler:guard_nn_modules"):
log.warning("guard_nn_modules is turned off using justknobs killswitch")
# TODO Be more explicit about the behavior for the users.
if torch._dynamo.config.caching_precompile:
_guard_filter_fn = guard_filter_fn or (lambda gs: [True for g in gs])
def guard_filter_fn(guards: list[GuardFilterEntry]) -> list[bool]:
ret = []
for keep, g in zip(_guard_filter_fn(guards), guards):
if not keep:
ret.append(False)
elif (
g.guard_type
in (
"ID_MATCH",
"CLOSURE_MATCH",
"WEAKREF_ALIVE",
"DICT_VERSION",
)
or "ID_MATCH" in g.derived_guard_types
or "DICT_VERSION" in g.derived_guard_types
):
log.warning(
"%s guard on %s is dropped with caching_precompile=True.",
g.guard_type,
g.orig_guard.name,
)
ret.append(False)
else:
ret.append(True)
return ret
sorted_guards = sorted(guards or (), key=Guard.sort_key)
if guard_filter_fn:
# If we're filtering guards, we need to build it an extra time first
# because filtering depends on the builder/guard_manager results
builder, guard_manager = self.build_guards(
sorted_guards,
existing_diff_guard_sources,
f_code,
output_graph,
False,
source_get_cache=source_get_cache,
)
def make_guard_filter_entry(guard: Guard) -> GuardFilterEntry:
MISSING = object()
name = strip_local_scope(guard.name)
if name == "":
has_value = False
value = MISSING
else:
try:
# Guard evaluation is expected to fail when we guard on
# things like "not hasattr(x, 'foo')". In cases like this,
# we don't have a well defined value because such thing
# doesn't exist.
value = builder.get(guard.name)
has_value = True
except: # noqa: B001,E722
value = MISSING
has_value = False
is_global = get_global_source_name(guard.originating_source) is not None
return GuardFilterEntry(
name=name,
has_value=has_value,
value=value,
guard_type=guard.create_fn_name(),
derived_guard_types=(
tuple(guard.guard_types) if guard.guard_types else ()
),
is_global=is_global,
orig_guard=guard,
)
filter_results = guard_filter_fn(
[make_guard_filter_entry(guard) for guard in sorted_guards]
)
assert len(filter_results) == len(sorted_guards)
assert all(type(x) is bool for x in filter_results)
sorted_guards = [
guard for i, guard in enumerate(sorted_guards) if filter_results[i]
]
# Redo the guards because filtering relies on the results from the last guard builder.
builder, guard_manager = self.build_guards(
sorted_guards,
existing_diff_guard_sources,
f_code,
output_graph,
save_guards,
source_get_cache=source_get_cache,
)
self.guard_manager = guard_manager
self.compile_check_fn(builder, sorted_guards, guard_fail_fn)
# Keep track of weak references of objects with ID_MATCH guard. This
# info is stored alongside optimized_code and guard_manager and is used to
# limit the number of cache entries with same ID_MATCH'd object.
# TODO(anijain2305) - Currently this information is stored as an attr on
# the guard_manager itself to avoid changing CacheEntry data structure in
# eval_frame.c. In future, we should probably replace guard_manager with a
# queryable data structure such that this information is already present
# in some form.
self.guard_manager.id_matched_objs = builder.id_matched_objs
guards_log.debug("%s", self.guard_manager)
self.guard_manager.id_matched_objs = builder.id_matched_objs
# Check that the guard returns True. False means that we will always
# recompile.
# TODO(anijain2305, ydwu4) - Skipping export because of following test
# python -s test/dynamo/test_export.py -k test_export_with_symbool_inputs
latency = 0.0
if not output_graph.skip_guards_check and not output_graph.export:
if not self.guard_manager.check(output_graph.local_scope):
reasons = get_guard_fail_reason_helper(
self.guard_manager,
output_graph.local_scope,
CompileContext.current_compile_id(),
backend=None, # no need to set this because we are trying to find the offending guard entry
)
raise AssertionError(
"Guard failed on the same frame it was created. This is a bug - please create an issue."
f"Guard fail reason: {reasons}"
)
if guard_manager_testing_hook_fn is not None:
guard_manager_testing_hook_fn(
self.guard_manager, output_graph.local_scope, builder
)
# NB for developers: n_iters is chosen to be 1 to prevent excessive
# increase in compile time. We first do a cache flush to measure the
# guard latency more accurately. This cache flush is expensive.
# Note - If you are working on a guard optimization, it might be a
# good idea to increase this number for more stability during
# development.
latency = profile_guard_manager(
self.guard_manager.root, output_graph.local_scope, 1
)
guards_log.debug("Guard eval latency = %s us", f"{latency:.2f}")
# Note: We use `increment_toplevel` instead of `compilation_metric`
# here. This is because, in scenarios where `torch._dynamo.reset`
# is invoked, the same frame ID and compile ID may be reused during
# a new compilation cycle. This behavior causes issues with
# `compilation_metric`, as it expects the metric field to be empty.
# Ideally, we would overwrite the existing entry in such cases, but
# we currently lack an API to support overwriting metrics. However,
# since these situations are rare and typically impractical to
# account for, we simply increment at the toplevel instead.
CompileEventLogger.increment_toplevel("guard_latency_us", int(latency))
self.guards_state: Optional[bytes] = None
if save_guards:
from torch._dynamo.output_graph import OutputGraphCommon
assert isinstance(self.output_graph, OutputGraphCommon)
try:
self.guards_state = self.serialize_guards(
builder, sorted_guards, self.output_graph
)
except exc.PackageError as e:
if torch._dynamo.config.strict_precompile or strict_error:
raise e
self.output_graph.bypass_package(
f"Guard evaluation failed: {str(e)}",
traceback=traceback.format_exc().split("\n"),
)
# TODO: don't do the string rep, do something more structured here
torch._logging.trace_structured(
"dynamo_cpp_guards_str",
payload_fn=lambda: f"{self.guard_manager}\nGuard latency = {latency:.2f} us",
)
# NB - We have to very careful of cleaning up here. Because of the
# invalidate function, we can create a weakref finalizer that keeps
# `self` alive for very long. Sometimes by mistake, we can run
# invalidate for a type/object (check id_ref method) that Python can
# leak by design, preventing us from calling the finalizer. In that
# case, the `self` will be alive even though the cache entry will be
# deleted (check invalidate method), which can cause a memory leak,
# e.g., not setting output_graph = None can keep hold of nn_modules.
self._weakrefs.clear()
self.output_graph = None
UNSUPPORTED_SERIALIZATION_GUARD_TYPES: tuple[LiteralString, ...] = (
"DICT_VERSION",
"NN_MODULE",
"ID_MATCH",
"FUNCTION_MATCH",
"CLASS_MATCH",
"MODULE_MATCH",
"CLOSURE_MATCH",
"WEAKREF_ALIVE",
)
def serialize_guards(
self,
builder: GuardBuilder,
sorted_guards: list[Guard],
output_graph: OutputGraphCommon,
) -> bytes:
# We check whether our list of guards are serializable here
for guard in sorted_guards:
guard_type = guard.create_fn_name()
derived_guard_types = tuple(guard.guard_types) if guard.guard_types else ()
# BUILTIN_MATCH calls TYPE_MATCH sometimes, so we need to check both for
# a chance that the guard is unserializable
if guard_type in ("TYPE_MATCH", "BUILTIN_MATCH"):
if guard._unserializable:
# Only call builder.get again if we know we're going to throw
obj = builder.get(guard.name)
raise_local_type_error(obj)
elif (
guard_type in CheckFunctionManager.UNSUPPORTED_SERIALIZATION_GUARD_TYPES
):
raise torch._dynamo.exc.PackageError(
f"{guard_type} guard cannot be serialized."
)
elif failed := next(
(
i
for i in derived_guard_types
if i in CheckFunctionManager.UNSUPPORTED_SERIALIZATION_GUARD_TYPES
),
None,
):
# Just raise the first failed guard name
raise torch._dynamo.exc.PackageError(
f"{failed} guard cannot be serialized."
)
builtins_dict_name = output_graph.name_of_builtins_dict_key_in_fglobals or ""
used_global_vars = set()
used_local_vars = set()
def prune_variable(source: Source) -> None:
if name := get_global_source_name(source):
assert isinstance(name, str)
# Leave out the builtins dict key, as we will special handle
# it later because the guarded code rarely use the entire
# builtin dict in the common case.
if name != builtins_dict_name:
used_global_vars.add(name)
elif name := get_local_source_name(source):
assert isinstance(name, str)
used_local_vars.add(name)
output_graph_guards_state = output_graph.dump_guards_state()
# Only serialize the global variables that are actually used in guards.
for guard in sorted_guards:
if isinstance(guard.originating_source, ShapeEnvSource):
assert self.shape_code_parts
for source in self.shape_code_parts.shape_env_sources:
prune_variable(source)
else:
prune_variable(guard.originating_source)
for source in output_graph.guard_on_key_order:
prune_variable(source)
def normalize_create_fn(x: Callable[..., None]) -> Callable[..., None]:
if isinstance(x, functools.partial):
def _ref(x: Any) -> Any:
if isinstance(x, (TensorWeakRef, weakref.ref)):
return x()
return x
new_args = tuple(_ref(a) for a in x.args)
new_keywords = {k: _ref(v) for k, v in x.keywords.items()}
return functools.partial(x.func, *new_args, **new_keywords)
return x
global_scope_state = {
k: v
for k, v in output_graph_guards_state.global_scope.items()
if k in used_global_vars or k in self.additional_used_global_vars
}
global_scope_state[builtins_dict_name] = {
k: v
for k, v in output_graph_guards_state.global_scope[
builtins_dict_name
].items() # type: ignore[attr-defined]
if k in self.used_builtin_vars
}
output_graph_guards_state = dataclasses.replace(
output_graph_guards_state,
local_scope={
k: v
for k, v in output_graph_guards_state.local_scope.items()
if k in used_local_vars or k in self.additional_used_local_vars
},
global_scope=global_scope_state,
_guards=torch._guards.GuardsSet(
{
dataclasses.replace(
guard,
obj_weakref=None,
guarded_class_weakref=None,
create_fn=normalize_create_fn(guard.create_fn),
)
for guard in sorted_guards
}
),
input_source_to_sizes_strides=pytree.tree_map(
convert_int_to_concrete_values,
output_graph_guards_state.input_source_to_sizes_strides,
),
skip_guards_check=True,
)
guards_state = GuardsState(
output_graph=output_graph_guards_state,
shape_code_parts=self.shape_code_parts,
source_get_cache=builder.source_get_cache,
)
return pickle_guards_state(guards_state, builder.guard_tree_values)
def build_guards(
self,
sorted_guards: list[Guard],
existing_diff_guard_sources: OrderedSet[str],
f_code: types.CodeType,
output_graph: OutputGraphGuardsState,
save_guards: bool,
source_get_cache: Optional[dict[str, Any]] = None,
) -> tuple[GuardBuilder, GuardManagerWrapper]:
guard_manager = GuardManagerWrapper()
guard_manager.diff_guard_sources = existing_diff_guard_sources
w_builder = None
def source_ref(source: Source) -> str:
guard_source = source.guard_source()
if guard_source is GuardSource.CONSTANT:
# No need to track constants
return source.name()
assert w_builder
r_builder = w_builder()
assert r_builder is not None
return r_builder.arg_ref(source.name())
builder = GuardBuilder(
f_code,
self.id_ref,
source_ref,
self.lookup_weakrefs,
output_graph.local_scope,
output_graph.global_scope,
guard_manager,
self,
save_guards,
runtime_global_scope=self.runtime_global_scope,
source_get_cache=source_get_cache,
)
# Break retain cycle. See test_release_scope_memory
def cleanup_builder(weak_b: weakref.ref[GuardBuilder]) -> None:
b = weak_b()
if b:
b.scope = None # type: ignore[assignment]
# Break retain cycle. See test_release_input_memory
w_builder = weakref.ref(builder, cleanup_builder)
guard_on_nn_modules = config.guard_nn_modules and justknobs_check(
"pytorch/compiler:guard_nn_modules"
)
for guard in sorted_guards:
if (
not guard_on_nn_modules
and guard.is_specialized_nn_module()
# Default func args must be guarded on.
# TODO: we could make use of 'DefaultsSource' and offer a .guard.is_defaults() API
and "__defaults__" not in guard.name
and "__kwdefaults__" not in guard.name
and (config.skip_nnmodule_hook_guards or "hooks" not in guard.name)
):
continue
guard.create(builder)
return builder, guard_manager
def compile_check_fn(
self,
builder: GuardBuilder,
guards_out: list[Guard],
guard_fail_fn: Optional[Callable[[GuardFail], None]],
) -> None:
# see parallel handling of ".0" / "___implicit0" in _eval_frame.c
largs = builder.argnames
largs += ["**___kwargs_ignored"]
guards_log.debug("GUARDS:")
code_parts = []
verbose_code_parts = []
structured_guard_fns: list[Callable[[], dict[str, Any]]] = []
# Add compile id info in the guard manager for debugging purpose
self.guard_manager.root.attach_compile_id(
str(CompileContext.current_compile_id())
)
# Clear references to torch_function modes held in the list
self.torch_function_mode_stack = None
def add_code_part(
code_part: str, guard: Optional[Guard], log_only: bool = False
) -> None:
verbose_code_part = get_verbose_code_part(code_part, guard)
guards_log.debug("%s", verbose_code_part)
structured_guard_fns.append(
lambda: {
"code": code_part,
"stack": (
structured.from_traceback(guard.stack.summary())
if guard and guard.stack
else None
),
"user_stack": (
structured.from_traceback(guard.user_stack)
if guard and guard.user_stack
else None
),
}
)
if verbose_guards_log.isEnabledFor(logging.DEBUG):
maybe_stack = ""
maybe_user_stack = ""
if guard is not None:
if guard.stack:
maybe_stack = f"\nStack:\n{''.join(guard.stack.format())}"
if guard.user_stack:
maybe_user_stack = (
f"\nUser stack:\n{''.join(guard.user_stack.format())}"
)
verbose_guards_log.debug(
"Guard: %s%s%s",
code_part,
maybe_stack,
maybe_user_stack,
)
if not log_only:
code_parts.append(code_part)
verbose_code_parts.append(verbose_code_part)
seen = set()
for gcl in builder.code:
for code in gcl.code_list:
if code not in seen:
# If Cpp guard manager is enabled, we don't need to add to
# code_parts.
add_code_part(code, gcl.guard, True)
seen.add(code)
no_tensor_aliasing_names = builder.no_tensor_aliasing_names
check_tensors_fn = None
check_tensors_verbose_fn = None
if len(no_tensor_aliasing_names) > 1:
# Install tensor aliasing guard. TENSOR_MATCH guards are already
# installed for cpp guard manager.
install_no_tensor_aliasing_guard(
builder.no_tensor_aliasing_guard_managers,
no_tensor_aliasing_names,
["check_no_aliasing(" + ", ".join(no_tensor_aliasing_names) + ")"],
)
# Note - On Lambda guarding of object aliasing
# We previously installed object-aliasing guards as relational guards,
# but that undermined the recursive-dict guard optimization: placing the
# aliasing guard at a leaf prevented the parent dict node from
# qualifying as a recursive-dict guard root. Because aliasing guards are
# rare, we now emit them as epilogue guards via a small Python lambda.
# This repeats the access in Python—adding a bit of work—but the
# overhead is outweighed by the gains from enabling recursive-dict guard
# optimization.
if (
config.use_lamba_guard_for_object_aliasing
and builder.object_aliasing_guard_codes
):
aliasing_code_parts, aliasing_verbose_code_parts = map(
list, zip(*builder.object_aliasing_guard_codes)
)
builder.add_python_lambda_leaf_guard_to_root(
aliasing_code_parts, aliasing_verbose_code_parts
)
aotautograd_guards: list[GuardEnvExpr] = (
self.output_graph.aotautograd_guards if self.output_graph else []
)
# TODO(anijain2305) - There is a duplicate logic in Dynamo to find
# aliased input tensors. So most probably we don't need this here.
# Revisit.
for guard in aotautograd_guards:
if isinstance(guard, DuplicateInputs):
source_a = guard.input_source_a
source_b = guard.input_source_b
code_part = f"{source_a.name()} is {source_b.name()}"
install_object_aliasing_guard(
builder.get_guard_manager_from_source(source_a),
builder.get_guard_manager_from_source(source_b),
[code_part],
)
add_code_part(code_part, None, True)
elif isinstance(guard, StorageOverlap):
overlapping_guard_managers = [
builder.get_guard_manager_from_source(s)
for s in guard.overlapping_sources
]
non_overlapping_guard_managers = [
builder.get_guard_manager_from_source(s)
for s in guard.non_overlapping_sources
]
code_part = (
"""check_overlapping("""
f"""overlapping=[{", ".join(s.name() for s in guard.overlapping_sources)}], """
f"""non_overlapping=[{", ".join(s.name() for s in guard.non_overlapping_sources)}])"""
)
install_storage_overlapping_guard(
overlapping_guard_managers,
non_overlapping_guard_managers,
[code_part],
)
add_code_part(code_part, None, True)
else:
raise RuntimeError(f"Unknown GuardEnvExpr: {guard}")
# TODO: the "guard" here is actually just the top level SHAPE_ENV
# which is useless. Get ShapeEnv to pass in more provenance.
for gcl in builder.shape_env_code:
for code in gcl.code_list:
# Shape env guards are already added for CPP guard manager in
# SHAPE_ENV implementation.
add_code_part(code, gcl.guard, True)
# OK, all done generating guards
if structured_guard_fns:
torch._logging.trace_structured(
"dynamo_guards", payload_fn=lambda: [f() for f in structured_guard_fns]
)
if convert_frame.initial_global_state is None:
# we should only hit this case in NopTests()
check_global_state = convert_frame.GlobalStateGuard().check
else:
check_global_state = getattr(self.global_state, "check", None)
closure_vars = {
"___check_tensors": check_tensors_fn,
"___check_tensors_verbose": check_tensors_verbose_fn,
"___check_global_state": check_global_state,
"___check_torch_function_mode_stack": self.torch_function_mode_stack_check_fn,
**SYMPY_INTERP,
**_get_closure_vars(),
}
self.guard_manager.finalize()
globals_for_guard_fn = {"G": builder.scope["G"]}
# Guard manager construction is complete. Ensure we did not miss to
# insert a guard in cpp guard manager.
assert len(code_parts) == 0
self.guard_manager.closure_vars = closure_vars
self.guard_manager.args = largs
self.guard_manager.populate_code_parts_for_debugging()
self.guard_manager.verbose_code_parts = verbose_code_parts
# Grab only G, but preserve "G" because guards access it as "G"
self.guard_manager.global_scope = globals_for_guard_fn
self.guard_manager.guard_fail_fn = guard_fail_fn
# will be populated by a non-owning reference to CacheEntry/ExtraState
# when the CacheEntry is constructed
self.guard_manager.cache_entry = None
self.guard_manager.extra_state = None
self.guard_manager.no_tensor_aliasing_sources = no_tensor_aliasing_names
def invalidate(self, obj_str: str) -> None:
    """Invalidate this manager's cache line because `obj_str` got deallocated.

    Some tests reveal that CheckFunctionManager has no attribute
    guard_manager; that case is not a concern (and doesn't seem easy to
    repro), so we simply do nothing then.
    """
    if not hasattr(self, "guard_manager"):
        return
    if isinstance(self.guard_manager, DeletedGuardManagerWrapper):
        # Already invalidated earlier; nothing left to do.
        return
    cache_entry = self.guard_manager.cache_entry
    extra_state = self.guard_manager.extra_state
    if cache_entry is None or extra_state is None:
        return
    assert isinstance(cache_entry, CacheEntry)
    assert isinstance(extra_state, ExtraState)
    reason = f"Cache line invalidated because {obj_str} got deallocated"
    deleted_guard_manager = DeletedGuardManagerWrapper(reason)
    extra_state.invalidate(cache_entry, deleted_guard_manager)
    self.guard_manager = deleted_guard_manager
def id_ref(self, obj: object, obj_str: str) -> int:
    """Register a weakref for `obj` (if possible) and return its id."""
    obj_id = id(obj)
    try:
        if obj_id not in self._weakrefs:
            # We will clear the _weakrefs dict at the end of __init__
            # function, which will delete the callbacks as well. Therefore,
            # we are using a finalizer which is kept alive.
            self._weakrefs[obj_id] = weakref.ref(obj)
            weakref.finalize(
                obj, functools.partial(self.invalidate, obj_str=obj_str)
            )
    except TypeError:
        # Not every object supports weak references (e.g. bool); in that
        # case we still return the id but register nothing.
        pass
    return obj_id
def lookup_weakrefs(self, obj: object) -> Optional[weakref.ref[object]]:
    """Lookup the _weakrefs created in id_ref function for ID_MATCH'd objects"""
    # Missing entries yield None, matching the explicit membership check.
    return self._weakrefs.get(id(obj))
def build_guard_function(code_parts: list[str], closure_args: str) -> tuple[str, str]:
    """Generate Python source for a guard function from `code_parts`.

    Returns a tuple of (guard function body, source of ___make_guard_fn — a
    factory taking `closure_args` that returns the guard function).
    """
    from torch._inductor.utils import IndentedBuffer

    cse = PyExprCSEPass()
    try:
        cse.count(code_parts)
        replace = cse.replace
    except RecursionError:
        # CSE analysis can blow the recursion limit on extremely complex
        # guard expressions; fall back to a no-op substitution.
        def replace(expr: str) -> tuple[list[str], str]:
            return [], expr

    # Inner body of the guard function: an if-chain that bails out on the
    # first failing guard expression.
    body = IndentedBuffer()
    for part in code_parts:
        preface, simplified = replace(part)
        body.writelines(preface)
        body.writeline(f"if not ({simplified}):")
        with body.indent():
            body.writeline("return False")

    # Wrap the inner body into the actual guard function.
    guard_fn = IndentedBuffer()
    guard_fn.writeline("def guard(L):")
    with guard_fn.indent():
        guard_fn.splice(body)
        guard_fn.writeline("return True")

    # Wrap the whole guard function into a factory that binds the closure
    # variables.
    maker = IndentedBuffer()
    maker.writeline(f"def ___make_guard_fn({closure_args}):")
    with maker.indent():
        maker.splice(guard_fn)
        maker.writeline("return guard")

    return body.getvalue(), maker.getvalue()
def is_recompiles_enabled() -> bool:
return torch._logging._internal.log_state.is_artifact_enabled("recompiles")
def is_recompiles_verbose_enabled() -> bool:
return torch._logging._internal.log_state.is_artifact_enabled("recompiles_verbose")
# this will only be used if cpp guards are disabled
def make_torch_function_mode_stack_guard(
    initial_stack: list[torch.overrides.TorchFunctionMode],
) -> Callable[[], bool]:
    """Build a closure that checks the current torch-function mode stack
    still matches `initial_stack`: same length, same mode types, in order."""
    expected_types = [type(mode) for mode in initial_stack]

    def check_torch_function_mode_stack() -> bool:
        current = get_torch_function_mode_stack()
        if len(current) != len(expected_types):
            return False
        # Identity comparison on types: subclasses do not match.
        return all(
            type(mode) is expected
            for expected, mode in zip(expected_types, current)
        )

    return check_torch_function_mode_stack
# A variable-lookup mapping used when (re-)evaluating guard code, e.g. a
# frame's f_locals bound to "L" or the guard globals bound to "G".
Scope = TypeAliasType("Scope", dict[str, object])
def recompilation_reason_for_no_tensor_aliasing_guard(
    guard_manager: GuardManagerWrapper, scope: Scope
) -> list[str]:
    """Build a recompilation reason naming the tensor sources that alias.

    Evaluates each no-tensor-aliasing source in `scope`, groups sources by
    the identity of the tensor they resolve to, and reports every group
    containing more than one source.
    """
    assert guard_manager.global_scope is not None
    global_scope = dict(guard_manager.global_scope)

    # Group each guarded source expression by the id() of the object it
    # evaluates to; aliasing sources share an id.
    ids_to_source: dict[int, list[str]] = collections.defaultdict(list)
    for tensor_source in guard_manager.no_tensor_aliasing_sources:
        # __compile_source__ lets error reporting attribute the eval'd string.
        global_scope["__compile_source__"] = tensor_source
        resolved_id = id(eval(tensor_source, global_scope, scope))
        ids_to_source[resolved_id].append(tensor_source)

    duplicate_tensors = [
        f"{sources}" for sources in ids_to_source.values() if len(sources) > 1
    ]
    reason = ", ".join(duplicate_tensors)
    return [f"Duplicate tensors found: {reason}"]
def strip_local_scope(s: str) -> str:
    """
    Replace occurrences of L[...] with just the inner content.
    Handles both single and double quotes.
    This is to generate user friendly recompilation messages.
    """
    import re

    # Match L['name'] or L["name"], allowing whitespace inside the brackets,
    # and keep only the quoted name.
    local_ref = r"L\[\s*['\"](.*?)['\"]\s*\]"
    return re.sub(local_ref, r"\1", s)
def get_guard_fail_reason_helper(
    guard_manager: GuardManagerWrapper,
    f_locals: dict[str, object],
    compile_id: Optional[CompileId],
    backend: Optional[Callable],
) -> str:
    """
    Return the reason why `guard_manager` failed.
    Updates `guard_failures` with the generated reason.
    Only the first failed check of guard_manager is reported.
    """
    assert guard_manager.global_scope is not None
    assert guard_manager.closure_vars is not None
    # Evaluation scope for re-running guard code parts: frame locals under
    # "L", guard globals under "G", plus the closure helpers captured when
    # the guards were built (___check_tensors and friends).
    scope = {"L": f_locals, "G": guard_manager.global_scope["G"]}
    scope.update(guard_manager.closure_vars)
    reasons: list[str] = []
    cache_entry_backend = None
    if guard_manager.cache_entry:
        cache_entry_backend = guard_manager.cache_entry.backend
    no_tensor_aliasing_check_failed = False
    verbose_code_parts: list[str] = []
    # Re-run the guards in verbose mode to learn which check failed.
    guard_debug_info = guard_manager.check_verbose(f_locals)
    # For test_export_with_map_cond, the check_verbose fail even without the
    # C++ guard manager. We need to fix the issue to remove the comment.
    # assert not guard_debug_info.result
    if not guard_debug_info.result:
        verbose_code_parts = guard_debug_info.verbose_code_parts
        # verbose_code_parts is either the actual reason (e.g. in case of
        # TENSOR_MATCH) or it could be a list of verbose_code_part that we
        # passed to the leaf guard at construction time. If its a list, we
        # walk through this list and find the guard that failed. This is
        # very important for symbolic shape guards which are currently
        # installed as a lambda guard and can encompass a long list of code_parts.
        if len(verbose_code_parts) == 1:
            if "Duplicate tensor found" in verbose_code_parts[0]:
                # Tensor-aliasing failures get a dedicated reason computed below.
                no_tensor_aliasing_check_failed = True
            else:
                # A single part is already the final, human-readable reason.
                reasons = verbose_code_parts
                verbose_code_parts = []
    elif cache_entry_backend != backend:
        # None of the guard entries failed - a backend match issue
        reason = (
            "BACKEND_MATCH failure: torch.compile detected different backend callables."
            " If this is unexpected, wrap your backend in functools.partial (or reuse the"
            " same cached backend) to avoid creating a new backend function each time."
            " More details: https://github.com/pytorch/pytorch/issues/168373"
        )
        reasons.append(reason)
    else:
        # Unexpected recompilation - points to a bug
        reason = (
            "Unexpected recompilation: runtime guards failed even though they passed"
            " during recompilation-reason analysis."
            " Please open an issue with a minimal repro:"
            " https://github.com/pytorch/pytorch"
        )
        reasons.append(reason)
    if no_tensor_aliasing_check_failed:
        reasons = recompilation_reason_for_no_tensor_aliasing_guard(
            guard_manager, scope
        )
    else:
        # Re-evaluate each verbose code part individually to pinpoint the
        # first failing guard and, where available, a rich failure message.
        for part in verbose_code_parts:
            global_scope = dict(guard_manager.global_scope)
            # __compile_source__ lets error reporting attribute the eval'd
            # string back to this guard source.
            global_scope["__compile_source__"] = part
            with report_compile_source_on_error():
                try:
                    fail_reason = eval(part, global_scope, scope)
                except Exception:
                    # In verbose mode, keep scanning the remaining parts for
                    # more failures instead of aborting on the first error.
                    if is_recompiles_verbose_enabled():
                        continue
                    else:
                        raise
            # Only ___check_tensors knows how to return a fancy fail reason;
            # for everything else we just report the code that failed
            if isinstance(fail_reason, bool) and not fail_reason:
                fail_reason = part
            if isinstance(fail_reason, str):
                reasons.append(fail_reason)
                # Report only the first failure unless verbose logging asks
                # for all of them.
                if not is_recompiles_verbose_enabled():
                    break
    reason_str = f"{compile_id}: " + "; ".join(reasons)
    return strip_local_scope(reason_str)
def get_guard_fail_reason(
    guard_manager: GuardManagerWrapper,
    code: types.CodeType,
    f_locals: dict[str, object],
    compile_id: CompileId,
    backend: Callable,
    skip_logging: bool = False,
) -> str:
    """Return the failure reason for `guard_manager` on `f_locals`, recording
    it in `guard_failures` and invoking the user guard-fail callback unless
    `skip_logging` is set."""
    if isinstance(guard_manager, DeletedGuardManagerWrapper):
        # The cache line was invalidated; report the stored reason directly.
        return f"{compile_id}: {guard_manager.invalidation_reason}"

    reason_str = get_guard_fail_reason_helper(
        guard_manager, f_locals, compile_id, backend
    )
    if skip_logging:
        return reason_str

    guard_failures[orig_code_map[code]].append(reason_str)

    try:
        fail_fn = guard_manager.guard_fail_fn
        if fail_fn is not None:
            fail_fn(GuardFail(reason_str or "unknown reason", orig_code_map[code]))
    except Exception:
        # Swallow callback errors: raising here would cause a NULL Error on
        # guard eval in the caller.
        log.exception(
            "Failure in guard_fail_fn callback - raising here will cause a NULL Error on guard eval",
        )

    return reason_str
def get_and_maybe_log_recompilation_reasons(
    cache_entry: Optional[CacheEntry],
    frame: DynamoFrameType,
    backend: Callable,
    skip_logging: bool = False,
) -> list[str]:
    """
    Return the list of guard failure reasons using cache_entry.
    Logs the recompilation reason if `recompiles` logging is enabled.
    Raises a RecompileError if `config.error_on_recompile` is enabled.
    """
    # Walk the linked list of cache entries; each contributes the reason its
    # guards failed against this frame's locals.
    reasons = []
    entry = cache_entry
    while entry is not None:
        reason = get_guard_fail_reason(
            entry.guard_manager,
            entry.code,
            frame.f_locals,
            entry.compile_id,
            backend,
            skip_logging,
        )
        if reason:
            reasons.append(reason)
        entry = entry.next

    code = frame.f_code

    if skip_logging:
        return reasons

    # at least one of "recompiles" or "recompiles_verbose" is enabled
    do_recompiles_log = is_recompiles_enabled() or is_recompiles_verbose_enabled()

    if do_recompiles_log or config.error_on_recompile:
        if is_recompiles_verbose_enabled():
            failures = "\n\n".join(
                f"guard {i} failures:\n" + textwrap.indent(reason, "- ")
                for i, reason in enumerate(reasons)
            )
        else:
            failures = textwrap.indent("\n".join(reasons), "- ")
        guard_failure_details = (
            f"triggered by the following guard failure(s):\n{failures}"
        )
        message = (
            f"Recompiling function {code.co_name} in {code.co_filename}:{code.co_firstlineno}\n"
            f"{textwrap.indent(guard_failure_details, ' ')}"
        )
        if do_recompiles_log:
            if is_recompiles_verbose_enabled():
                recompiles_verbose_log.debug(message)
            else:
                recompiles_log.debug(message)
        if config.error_on_recompile:
            raise exc.RecompileError(message)

    # Always emit the structured trace artifact, regardless of log settings.
    torch._logging.trace_structured(
        "artifact",
        metadata_fn=lambda: {
            "name": "recompile_reasons",
            "encoding": "json",
        },
        payload_fn=lambda: reasons,
    )

    return reasons
def update_diff_guard_managers_for_existing_cache_entries(
    cache_entry: Optional[CacheEntry],
) -> OrderedSet[str]:
    """Union the diff guard sources across all existing cache lines, push the
    union back into each line, and return it for the new cache line."""
    # Materialize the linked list once so we can make two passes over it.
    entries = []
    entry = cache_entry
    while entry is not None:
        entries.append(entry)
        entry = entry.next  # type: ignore[assignment]

    # Pass 1: accumulate diff guard sources across every cache line.
    # Different guard managers can fail with different sources, so we must
    # collect all of them first.
    acc_diff_guard_sources: OrderedSet[str] = OrderedSet()
    for entry in entries:
        acc_diff_guard_sources.update(
            entry.guard_manager.collect_diff_guard_sources()
        )

    # Pass 2: set the accumulated value on each cache line and re-populate
    # its diff guard manager.
    for entry in entries:
        entry.guard_manager.diff_guard_sources = acc_diff_guard_sources
        entry.guard_manager.populate_diff_guard_manager()

    # Return the accumulated sources to set up the new cache line.
    return acc_diff_guard_sources
def guard_error_hook(
    guard_manager: GuardFn,
    code: types.CodeType,
    f_locals: dict[str, object],
    index: int,
    last: bool,
) -> None:
    """Debug hook invoked when evaluating guards raised an error: dump the
    guard state, then re-evaluate each code part to pinpoint the bad one."""
    header = f"ERROR RUNNING GUARDS {code.co_name} {code.co_filename}:{code.co_firstlineno}"
    print(header)
    print("lambda " + ", ".join(guard_manager.args) + ":")
    print(" ", " and\n ".join(guard_manager.code_parts))
    print(guard_manager)

    local_scope = {"L": f_locals, **guard_manager.closure_vars}
    for code_part in guard_manager.code_parts:
        try:
            eval(code_part, guard_manager.global_scope, local_scope)
        except: # noqa: B001,E722
            # Intentionally bare: any failure here means the guard source
            # itself is malformed, and we want to report every such part.
            print(f"Malformed guard:\n{code_part}")
# Register the debugging hook above so guard-evaluation errors are reported
# through guard_error_hook.
set_guard_error_hook(guard_error_hook)
def unique(seq: Sequence[T]) -> Generator[T, None, None]:
    """Lazily yield the elements of `seq` in order, skipping duplicates."""
    seen: set = set()
    for item in seq:
        if item in seen:
            continue
        seen.add(item)
        yield item
def make_dupe_guard(
    obj_source: Source, dupe_source: Source
) -> Optional[functools.partial[Any]]:
    """Return a partial that installs a DUPLICATE_INPUT guard between the two
    sources, or None when no guard is needed (or possible).

    We may end up in a situation where we invoke something like
        def fn(x, y)
    with fn(x, x). Prior to the addition of tracking to all relevant objects,
    we would handle this just fine by eagerly re-entering VB and rewrapping
    inputs, correctly creating graphargs and placeholders. However, with
    tracking on inputs, duplicate inputs or aliased relationships may end up
    getting erased here - the fn(x, x) call above would look like a graph
    with a single input. In order to ensure that we do not reuse fn(x, x)
    for fn(x, y), we create a duplicate input guard.
    """
    # We may not have a source; that is fine - it just means we had an object
    # that is safe to leave unsourced, like a local list created and
    # discharged entirely within a local scope.
    if not dupe_source or dupe_source == obj_source:
        return None

    if is_from_flatten_script_object_source(
        dupe_source
    ) or is_from_flatten_script_object_source(obj_source):
        raise exc.UnsafeScriptObjectError(
            f"{obj_source.name()} is aliasing {dupe_source.name()}. This is not supported."
            f" Please do a clone for corresponding input."
        )

    # Note - both must be local, or global, or we will run afoul of a lack of merging in how we currently
    # reconcile guards builder scopes in compile_check_fn. This technically means we miss a guard here,
    # so maybe we should do this refactor before we land this...
    # TODO(voz): Combine local and global guard builders.
    if is_from_local_source(dupe_source) != is_from_local_source(obj_source):
        return None

    # Note - this is a little aggressive - these being duplicate input does not always matter.
    # However, this should always be a sound guard to add here.
    return functools.partial(GuardBuilder.DUPLICATE_INPUT, source_b=dupe_source)
def install_guard(*guards: Guard, skip: int = 0) -> None:
    """
    Add dynamo guards to the current tracing context.

    Args:
        guards: guard(s) to add
        skip: number of stack frames to ignore for debug stack trace
    """
    from torch._guards import TracingContext

    # Capturing the debug stack is expensive; only do it when one of the
    # guard loggers is actually at DEBUG level.
    collect_debug_stack = any(
        lg.isEnabledFor(logging.DEBUG) for lg in (guards_log, verbose_guards_log)
    )
    add = TracingContext.get().guards_context.dynamo_guards.add
    for g in guards:
        assert isinstance(g, Guard)
        if is_from_skip_guard_source(g.originating_source):
            continue
        # skip + 1 accounts for this frame itself in the debug stack trace.
        add(g, collect_debug_stack=collect_debug_stack, skip=skip + 1)
| CheckFunctionManager |
python | getsentry__sentry | src/sentry/api/serializers/models/dashboard.py | {
"start": 2609,
"end": 2751
} | class ____(TypedDict):
isEditableByEveryone: bool
teamsWithEditAccess: list[int]
@register(DashboardWidget)
| DashboardPermissionsResponse |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 154922,
"end": 157169
} | class ____(Operation):
def __init__(self, source, destination, *, name=None):
super().__init__(name=name)
if isinstance(source, int):
self.source = [source]
else:
self.source = source
if isinstance(destination, int):
self.destination = [destination]
else:
self.destination = destination
if len(self.source) != len(self.destination):
raise ValueError(
"`source` and `destination` arguments must have the same "
f"number of elements, but received `source={source}` and "
f"`destination={destination}`."
)
def call(self, x):
return backend.numpy.moveaxis(x, self.source, self.destination)
def compute_output_spec(self, x):
x_shape = list(x.shape)
output_shape = [-1 for _ in range(len(x.shape))]
for sc, dst in zip(self.source, self.destination):
output_shape[dst] = x_shape[sc]
x_shape[sc] = -1
i, j = 0, 0
while i < len(output_shape):
while i < len(output_shape) and output_shape[i] != -1:
# Find the first dim unset.
i += 1
while j < len(output_shape) and x_shape[j] == -1:
# Find the first dim not being passed.
j += 1
if i == len(output_shape):
break
output_shape[i] = x_shape[j]
i += 1
j += 1
return KerasTensor(output_shape, dtype=x.dtype)
@keras_export(["keras.ops.moveaxis", "keras.ops.numpy.moveaxis"])
def moveaxis(x, source, destination):
"""Move axes of a tensor to new positions.
Other axes remain in their original order.
Args:
x: Tensor whose axes should be reordered.
source: Original positions of the axes to move. These must be unique.
destination: Destinations positions for each of the original axes.
These must also be unique.
Returns:
Tensor with moved axes.
"""
if any_symbolic_tensors((x,)):
return Moveaxis(source, destination).symbolic_call(x)
return backend.numpy.moveaxis(x, source=source, destination=destination)
| Moveaxis |
python | walkccc__LeetCode | solutions/1003. Check If Word Is Valid After Substitutions/1003.py | {
"start": 0,
"end": 292
} | class ____:
def isValid(self, s: str) -> bool:
stack = []
for c in s:
if c == 'c':
if len(stack) < 2 or stack[-2] != 'a' or stack[-1] != 'b':
return False
stack.pop()
stack.pop()
else:
stack.append(c)
return not stack
| Solution |
python | scikit-learn__scikit-learn | sklearn/externals/_arff.py | {
"start": 14290,
"end": 14924
} | class ____(ArffException):
'''Error raised when the object representing the ARFF file has something
wrong.'''
def __init__(self, msg='Invalid object.'):
self.msg = msg
def __str__(self):
return '%s' % self.msg
# =============================================================================
# INTERNAL ====================================================================
def _unescape_sub_callback(match):
return _UNESCAPE_SUB_MAP[match.group()]
def encode_string(s):
if _RE_QUOTE_CHARS.search(s):
return "'%s'" % _RE_ESCAPE_CHARS.sub(_unescape_sub_callback, s)
return s
| BadObject |
python | uqfoundation__dill | dill/logger.py | {
"start": 3772,
"end": 6465
} | class ____(logging.LoggerAdapter):
"""
Tracks object tree depth and calculates pickled object size.
A single instance of this wraps the module's logger, as the logging API
doesn't allow setting it directly with a custom Logger subclass. The added
'trace()' method receives a pickle instance as the first argument and
creates extra values to be added in the LogRecord from it, then calls
'info()'.
Usage of logger with 'trace()' method:
>>> from dill.logger import adapter as logger #NOTE: not dill.logger.logger
>>> ...
>>> def save_atype(pickler, obj):
>>> logger.trace(pickler, "Message with %s and %r etc. placeholders", 'text', obj)
>>> ...
"""
def __init__(self, logger):
self.logger = logger
def addHandler(self, handler):
formatter = TraceFormatter("%(prefix)s%(message)s%(suffix)s", handler=handler)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
def removeHandler(self, handler):
self.logger.removeHandler(handler)
def process(self, msg, kwargs):
# A no-op override, as we don't have self.extra.
return msg, kwargs
def trace_setup(self, pickler):
# Called by Pickler.dump().
if not dill._dill.is_dill(pickler, child=False):
return
if self.isEnabledFor(logging.INFO):
pickler._trace_depth = 1
pickler._size_stack = []
else:
pickler._trace_depth = None
def trace(self, pickler, msg, *args, **kwargs):
if not hasattr(pickler, '_trace_depth'):
logger.info(msg, *args, **kwargs)
return
if pickler._trace_depth is None:
return
extra = kwargs.get('extra', {})
pushed_obj = msg.startswith('#')
size = None
try:
# Streams are not required to be tellable.
size = pickler._file.tell()
frame = pickler.framer.current_frame
try:
size += frame.tell()
except AttributeError:
# PyPy may use a BytesBuilder as frame
size += len(frame)
except (AttributeError, TypeError):
pass
if size is not None:
if not pushed_obj:
pickler._size_stack.append(size)
else:
size -= pickler._size_stack.pop()
extra['size'] = size
if pushed_obj:
pickler._trace_depth -= 1
extra['depth'] = pickler._trace_depth
kwargs['extra'] = extra
self.info(msg, *args, **kwargs)
if not pushed_obj:
pickler._trace_depth += 1
| TraceAdapter |
python | readthedocs__readthedocs.org | readthedocs/projects/forms.py | {
"start": 16772,
"end": 23003
} | class ____(
ProjectTriggerBuildMixin,
ProjectForm,
ProjectPRBuildsMixin,
):
"""Main project settings form."""
class Meta:
model = Project
fields = (
# Basics and repo settings
"name",
"repo",
"remote_repository",
"language",
"default_version",
"privacy_level",
"versioning_scheme",
"default_branch",
"readthedocs_yaml_path",
"search_indexing_enabled",
# Meta data
"programming_language",
"project_url",
"description",
"tags",
# Booleans
"external_builds_privacy_level",
"external_builds_enabled",
"show_version_warning",
)
# Make description smaller, only a CharField
description = forms.CharField(
required=False,
max_length=150,
help_text=_("Short description of this project"),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.had_search_disabled = not self.instance.search_indexing_enabled
# Remove empty choice from options.
self.fields["versioning_scheme"].choices = [
(key, value) for key, value in self.fields["versioning_scheme"].choices if key
]
if self.instance.main_language_project:
link = reverse(
"projects_advanced",
args=[self.instance.main_language_project.slug],
)
self.fields["versioning_scheme"].help_text = _(
f'This setting is inherited from the <a href="{link}">parent translation</a>.',
)
self.fields["versioning_scheme"].disabled = True
# Only show this field if search is disabled for the project.
# We allow enabling it from the form, but not disabling it.
if self.instance.search_indexing_enabled:
self.fields.pop("search_indexing_enabled")
# NOTE: we are deprecating this feature.
# However, we will keep it available for projects that already using it.
# Old projects not using it already or new projects won't be able to enable.
if not self.instance.has_feature(Feature.ALLOW_VERSION_WARNING_BANNER):
self.fields.pop("show_version_warning")
if not settings.ALLOW_PRIVATE_REPOS:
for field in ["privacy_level", "external_builds_privacy_level"]:
self.fields.pop(field)
default_choice = (None, "-" * 9)
versions_choices = (
self.instance.versions(manager=INTERNAL)
.filter(machine=False)
.values_list("verbose_name", flat=True)
)
self.fields["default_branch"].widget = forms.Select(
choices=[default_choice] + list(zip(versions_choices, versions_choices)),
)
active_versions = self.get_all_active_versions()
if active_versions:
self.fields["default_version"].widget = forms.Select(
choices=active_versions,
)
else:
self.fields["default_version"].widget.attrs["readonly"] = True
self.setup_external_builds_option()
def clean_readthedocs_yaml_path(self):
"""
Validate user input to help user.
We also validate this path during the build process, so this validation step is
only considered as helpful to a user, not a security measure.
"""
filename = self.cleaned_data.get("readthedocs_yaml_path")
filename = (filename or "").strip()
return filename
def get_all_active_versions(self):
"""
Returns all active versions.
Returns a smartly sorted list of tuples.
First item of each tuple is the version's slug,
and the second item is version's verbose_name.
"""
version_qs = self.instance.all_active_versions()
if version_qs.exists():
version_qs = sort_version_aware(version_qs)
all_versions = [(version.slug, version.verbose_name) for version in version_qs]
return all_versions
return None
def clean_language(self):
"""Ensure that language isn't already active."""
language = self.cleaned_data["language"]
project = self.instance
if project:
msg = _(
'There is already a "{lang}" translation for the {proj} project.',
)
if project.translations.filter(language=language).exists():
raise forms.ValidationError(
msg.format(lang=language, proj=project.slug),
)
main_project = project.main_language_project
if main_project:
if main_project.language == language:
raise forms.ValidationError(
msg.format(lang=language, proj=main_project.slug),
)
siblings = (
main_project.translations.filter(language=language)
.exclude(pk=project.pk)
.exists()
)
if siblings:
raise forms.ValidationError(
msg.format(lang=language, proj=main_project.slug),
)
return language
def clean_tags(self):
tags = self.cleaned_data.get("tags", [])
for tag in tags:
if len(tag) > 100:
raise forms.ValidationError(
_(
"Length of each tag must be less than or equal to 100 characters.",
),
)
return tags
def save(self, commit=True):
instance = super().save(commit)
# Trigger a reindex when enabling search from the form.
if self.had_search_disabled and instance.search_indexing_enabled:
index_project.delay(project_slug=instance.slug)
Notification.objects.cancel(
message_id=MESSAGE_PROJECT_SEARCH_INDEXING_DISABLED,
attached_to=instance,
)
return instance
| UpdateProjectForm |
python | ray-project__ray | rllib/connectors/common/frame_stacking.py | {
"start": 453,
"end": 5971
} | class ____(ConnectorV2):
"""A connector piece that stacks the previous n observations into one."""
@override(ConnectorV2)
def recompute_output_observation_space(
self,
input_observation_space: gym.Space,
input_action_space: gym.Space,
) -> gym.Space:
# Change our observation space according to the given stacking settings.
if self._multi_agent:
ret = {}
for agent_id, obs_space in input_observation_space.spaces.items():
ret[agent_id] = self._convert_individual_space(obs_space)
return gym.spaces.Dict(ret)
else:
return self._convert_individual_space(input_observation_space)
def __init__(
self,
input_observation_space: Optional[gym.Space] = None,
input_action_space: Optional[gym.Space] = None,
*,
num_frames: int = 1,
multi_agent: bool = False,
as_learner_connector: bool = False,
**kwargs,
):
"""Initializes a FrameStackingConnector instance.
Args:
num_frames: The number of observation frames to stack up (into a single
observation) for the RLModule's forward pass.
multi_agent: Whether this is a connector operating on a multi-agent
observation space mapping AgentIDs to individual agents' observations.
as_learner_connector: Whether this connector is part of a Learner connector
pipeline, as opposed to an env-to-module pipeline.
"""
super().__init__(
input_observation_space=input_observation_space,
input_action_space=input_action_space,
**kwargs,
)
self._multi_agent = multi_agent
self.num_frames = num_frames
self._as_learner_connector = as_learner_connector
@override(ConnectorV2)
def __call__(
self,
*,
rl_module: RLModule,
batch: Dict[str, Any],
episodes: List[EpisodeType],
explore: Optional[bool] = None,
shared_data: Optional[dict] = None,
**kwargs,
) -> Any:
# Learner connector pipeline. Episodes have been numpy'ized.
if self._as_learner_connector:
for sa_episode in self.single_agent_episode_iterator(
episodes, agents_that_stepped_only=False
):
def _map_fn(s, _sa_episode=sa_episode):
# Squeeze out last dim.
s = np.squeeze(s, axis=-1)
# Calculate new shape and strides
new_shape = (len(_sa_episode), self.num_frames) + s.shape[1:]
new_strides = (s.strides[0],) + s.strides
# Create a strided view of the array.
# But return a copy to avoid non-contiguous memory in the object
# store (which is very expensive to deserialize).
return np.transpose(
np.lib.stride_tricks.as_strided(
s, shape=new_shape, strides=new_strides
),
axes=[0, 2, 3, 1],
).copy()
# Get all observations from the episode in one np array (except for
# the very last one, which is the final observation not needed for
# learning).
self.add_n_batch_items(
batch=batch,
column=Columns.OBS,
items_to_add=tree.map_structure(
_map_fn,
sa_episode.get_observations(
indices=slice(-self.num_frames + 1, len(sa_episode)),
neg_index_as_lookback=True,
fill=0.0,
),
),
num_items=len(sa_episode),
single_agent_episode=sa_episode,
)
# Env-to-module pipeline. Episodes still operate on lists.
else:
for sa_episode in self.single_agent_episode_iterator(episodes):
assert not sa_episode.is_numpy
# Get the list of observations to stack.
obs_stack = sa_episode.get_observations(
indices=slice(-self.num_frames, None),
fill=0.0,
)
# Observation components are (w, h, 1)
# -> concatenate along axis=-1 to (w, h, [num_frames]).
stacked_obs = tree.map_structure(
lambda *s: np.concatenate(s, axis=2),
*obs_stack,
)
self.add_batch_item(
batch=batch,
column=Columns.OBS,
item_to_add=stacked_obs,
single_agent_episode=sa_episode,
)
return batch
def _convert_individual_space(self, obs_space):
# Some assumptions: Space is box AND last dim (the stacking one) is 1.
assert isinstance(obs_space, gym.spaces.Box), obs_space
assert obs_space.shape[-1] == 1, obs_space
return gym.spaces.Box(
low=np.repeat(obs_space.low, repeats=self.num_frames, axis=-1),
high=np.repeat(obs_space.high, repeats=self.num_frames, axis=-1),
shape=list(obs_space.shape)[:-1] + [self.num_frames],
dtype=obs_space.dtype,
)
| FrameStacking |
python | numpy__numpy | numpy/distutils/tests/test_system_info.py | {
"start": 3582,
"end": 3668
} | class ____(_system_info):
"""For testing purposes"""
section = 'temp1'
| Temp1Info |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/brillig/package.py | {
"start": 218,
"end": 604
} | class ____(Package):
"""Mock package to test the spack versions command."""
homepage = "https://www.example.com"
url = "https://github.com/vvolkl/brillig/archive/v2.0.0.tar.gz"
version("2.0.0", sha256="d4bb8f1737d5a7c0321e1675cceccb59dbcb66a94f3a9dd66a37f58bc6df7f15")
version("1.0.0", sha256="fcef53f45e82b881af9a6f0530b2732cdaf8c5c60e49b27671594ea658bfe315")
| Brillig |
python | PrefectHQ__prefect | tests/server/utilities/test_text_search_parser.py | {
"start": 17381,
"end": 18206
} | class ____:
"""Test that original case is preserved in parsed terms"""
def test_preserves_case_in_include_terms(self):
result = parse_text_search_query("Error WARNING Timeout")
assert result == TextSearchQuery(
include=["Error", "WARNING", "Timeout"], exclude=[], required=[]
)
def test_preserves_case_in_exclude_terms(self):
result = parse_text_search_query("-DEBUG !TestMode")
assert result == TextSearchQuery(
include=[], exclude=["DEBUG", "TestMode"], required=[]
)
def test_preserves_case_in_quoted_phrases(self):
result = parse_text_search_query('"Connection Timeout Error"')
assert result == TextSearchQuery(
include=["Connection Timeout Error"], exclude=[], required=[]
)
| TestCasePreservation |
python | dagster-io__dagster | python_modules/dagster/dagster/components/resolved/core_models.py | {
"start": 4228,
"end": 4861
} | class ____(Resolvable, Model):
type: Literal["multi_run"] = "multi_run"
max_partitions_per_run: int = 1
def resolve_backfill_policy(
context: ResolutionContext,
backfill_policy,
) -> Optional[BackfillPolicy]:
if backfill_policy is None:
return None
if backfill_policy.type == "single_run":
return BackfillPolicy.single_run()
elif backfill_policy.type == "multi_run":
return BackfillPolicy.multi_run(
max_partitions_per_run=backfill_policy.max_partitions_per_run
)
raise ValueError(f"Invalid backfill policy: {backfill_policy}")
| MultiRunBackfillPolicyModel |
python | ray-project__ray | python/ray/_private/metrics_agent.py | {
"start": 10012,
"end": 25468
} | class ____:
def __init__(self, namespace: str, component_timeout_s: int = 60):
"""Prometheus collector implementation for opencensus proxy export.
Prometheus collector requires to implement `collect` which is
invoked whenever Prometheus queries the endpoint.
The class is thread-safe.
Args:
namespace: Prometheus namespace.
"""
# -- Protect `self._components` --
self._components_lock = threading.Lock()
# -- Timeout until the component is marked as stale --
# Once the component is considered as stale,
# the metrics from that worker won't be exported.
self._component_timeout_s = component_timeout_s
# -- Prometheus namespace --
self._namespace = namespace
# -- Component that requests to proxy export metrics --
# Component means core worker, raylet, and GCS.
# component_id -> Components
# For workers, they contain worker ids.
# For other components (raylet, GCS),
# they contain the global key `GLOBAL_COMPONENT_KEY`.
self._components = {}
# Whether we want to export counter as gauge.
# This is for bug compatibility.
# See https://github.com/ray-project/ray/pull/43795.
self._export_counter_as_gauge = env_bool("RAY_EXPORT_COUNTER_AS_GAUGE", True)
def record(self, metrics: List[Metric], worker_id_hex: str = None):
"""Record the metrics reported from the component that reports it.
Args:
metrics: A list of opencensus protobuf to proxy export metrics.
worker_id_hex: A worker id that reports these metrics.
If None, it means they are reported from Raylet or GCS.
"""
key = GLOBAL_COMPONENT_KEY if not worker_id_hex else worker_id_hex
with self._components_lock:
if key not in self._components:
self._components[key] = Component(key)
self._components[key].record(metrics)
def clean_stale_components(self):
"""Clean up stale components.
Stale means the component is dead or unresponsive.
Stale components won't be reported to Prometheus anymore.
"""
with self._components_lock:
stale_components = []
stale_component_ids = []
for id, component in self._components.items():
elapsed = time.monotonic() - component.last_reported_time
if elapsed > self._component_timeout_s:
stale_component_ids.append(id)
logger.info(
"Metrics from a worker ({}) is cleaned up due to "
"timeout. Time since last report {}s".format(id, elapsed)
)
for id in stale_component_ids:
stale_components.append(self._components.pop(id))
return stale_components
# TODO(sang): add start and end timestamp
def to_prometheus_metrics(
self,
metric_name: str,
metric_description: str,
label_keys: List[str],
metric_units: str,
label_values: Tuple[tag_value_module.TagValue],
agg_data: Any,
metrics_map: Dict[str, List[PrometheusMetric]],
) -> None:
"""to_metric translate the data that OpenCensus create
to Prometheus format, using Prometheus Metric object.
This method is from Opencensus Prometheus Exporter.
Args:
metric_name: Name of the metric.
metric_description: Description of the metric.
label_keys: The fixed label keys of the metric.
metric_units: Units of the metric.
label_values: The values of `label_keys`.
agg_data: `opencensus.stats.aggregation_data.AggregationData` object.
Aggregated data that needs to be converted as Prometheus samples
metrics_map: The converted metric is added to this map.
"""
assert self._components_lock.locked()
metric_name = f"{self._namespace}_{metric_name}"
assert len(label_values) == len(label_keys), (label_values, label_keys)
# Prometheus requires that all tag values be strings hence
# the need to cast none to the empty string before exporting. See
# https://github.com/census-instrumentation/opencensus-python/issues/480
label_values = [tv if tv else "" for tv in label_values]
if isinstance(agg_data, CountAggregationData):
metrics = metrics_map.get(metric_name)
if not metrics:
metric = CounterMetricFamily(
name=metric_name,
documentation=metric_description,
unit=metric_units,
labels=label_keys,
)
metrics = [metric]
metrics_map[metric_name] = metrics
metrics[0].add_metric(labels=label_values, value=agg_data.count_data)
return
if isinstance(agg_data, SumAggregationData):
# This should be emitted as prometheus counter
# but we used to emit it as prometheus gauge.
# To keep the backward compatibility
# (changing from counter to gauge changes the metric name
# since prometheus client will add "_total" suffix to counter
# per OpenMetrics specification),
# we now emit both counter and gauge and in the
# next major Ray release (3.0) we can stop emitting gauge.
# This leaves people enough time to migrate their dashboards.
# See https://github.com/ray-project/ray/pull/43795.
metrics = metrics_map.get(metric_name)
if not metrics:
metric = CounterMetricFamily(
name=metric_name,
documentation=metric_description,
labels=label_keys,
)
metrics = [metric]
metrics_map[metric_name] = metrics
metrics[0].add_metric(labels=label_values, value=agg_data.sum_data)
if not self._export_counter_as_gauge:
pass
elif metric_name.endswith("_total"):
# In this case, we only need to emit prometheus counter
# since for metric name already ends with _total suffix
# prometheus client won't change it
# so there is no backward compatibility issue.
# See https://prometheus.github.io/client_python/instrumenting/counter/
pass
else:
if len(metrics) == 1:
metric = GaugeMetricFamily(
name=metric_name,
documentation=(
f"(DEPRECATED, use {metric_name}_total metric instead) "
f"{metric_description}"
),
labels=label_keys,
)
metrics.append(metric)
assert len(metrics) == 2
metrics[1].add_metric(labels=label_values, value=agg_data.sum_data)
return
elif isinstance(agg_data, DistributionAggregationData):
assert agg_data.bounds == sorted(agg_data.bounds)
# buckets are a list of buckets. Each bucket is another list with
# a pair of bucket name and value, or a triple of bucket name,
# value, and exemplar. buckets need to be in order.
buckets = []
cum_count = 0 # Prometheus buckets expect cumulative count.
for ii, bound in enumerate(agg_data.bounds):
cum_count += agg_data.counts_per_bucket[ii]
bucket = [str(bound), cum_count]
buckets.append(bucket)
# Prometheus requires buckets to be sorted, and +Inf present.
# In OpenCensus we don't have +Inf in the bucket bonds so need to
# append it here.
buckets.append(["+Inf", agg_data.count_data])
metrics = metrics_map.get(metric_name)
if not metrics:
metric = HistogramMetricFamily(
name=metric_name,
documentation=metric_description,
labels=label_keys,
)
metrics = [metric]
metrics_map[metric_name] = metrics
metrics[0].add_metric(
labels=label_values,
buckets=buckets,
sum_value=agg_data.sum,
)
return
elif isinstance(agg_data, LastValueAggregationData):
metrics = metrics_map.get(metric_name)
if not metrics:
metric = GaugeMetricFamily(
name=metric_name,
documentation=metric_description,
labels=label_keys,
)
metrics = [metric]
metrics_map[metric_name] = metrics
metrics[0].add_metric(labels=label_values, value=agg_data.value)
return
else:
raise ValueError(f"unsupported aggregation type {type(agg_data)}")
def _aggregate_metric_data(
self,
datas: List[
Union[LastValueAggregationData, CountAggregationData, SumAggregationData]
],
) -> Union[LastValueAggregationData, CountAggregationData, SumAggregationData]:
assert len(datas) > 0
sample = datas[0]
if isinstance(sample, LastValueAggregationData):
return LastValueAggregationData(
ValueDouble, sum([data.value for data in datas])
)
if isinstance(sample, CountAggregationData):
return CountAggregationData(sum([data.count_data for data in datas]))
if isinstance(sample, SumAggregationData):
return SumAggregationData(
ValueDouble, sum([data.sum_data for data in datas])
)
raise ValueError(
f"Unsupported aggregation type {type(sample)}. "
"Supported types are "
f"{CountAggregationData}, {LastValueAggregationData}, {SumAggregationData}."
f"Got {datas}."
)
def _aggregate_with_recommended_cardinality(
self,
per_worker_metrics: List[OpencensusProxyMetric],
) -> List[OpencensusProxyMetric]:
"""Collect per-worker metrics, aggregate them into per-node metrics and convert
them to Prometheus format.
Args:
per_worker_metrics: A list of per-worker metrics for the same metric name.
Returns:
A list of per-node metrics for the same metric name, with the high
cardinality labels removed and the values aggregated.
"""
metric = next(iter(per_worker_metrics), None)
if not metric or WORKER_ID_TAG_KEY not in metric.label_keys:
# No high cardinality labels, return the original metrics.
return per_worker_metrics
worker_id_label_index = metric.label_keys.index(WORKER_ID_TAG_KEY)
# map from the tuple of label values without worker_id to the list of per worker
# task metrics
label_value_to_data: Dict[
Tuple,
List[
Union[
LastValueAggregationData,
CountAggregationData,
SumAggregationData,
]
],
] = defaultdict(list)
for metric in per_worker_metrics:
for label_values, data in metric.data.items():
# remove the worker_id from the label values
label_value_to_data[
label_values[:worker_id_label_index]
+ label_values[worker_id_label_index + 1 :]
].append(data)
aggregated_metric = OpencensusProxyMetric(
name=metric.name,
desc=metric.desc,
unit=metric.unit,
# remove the worker_id from the label keys
label_keys=metric.label_keys[:worker_id_label_index]
+ metric.label_keys[worker_id_label_index + 1 :],
)
for label_values, datas in label_value_to_data.items():
aggregated_metric.add_data(
label_values,
self._aggregate_metric_data(datas),
)
return [aggregated_metric]
def collect(self): # pragma: NO COVER
"""Collect fetches the statistics from OpenCensus
and delivers them as Prometheus Metrics.
Collect is invoked every time a prometheus.Gatherer is run
for example when the HTTP endpoint is invoked by Prometheus.
This method is required as a Prometheus Collector.
"""
with self._components_lock:
# First construct the list of opencensus metrics to be converted to
# prometheus metrics. For LEGACY cardinality level, this comprises all
# metrics from all components. For RECOMMENDED cardinality level, we need
# to remove the high cardinality labels and aggreate the component metrics.
open_cencus_metrics: List[OpencensusProxyMetric] = []
# The metrics that need to be aggregated with recommended cardinality. Key
# is the metric name and value is the list of per-worker metrics.
to_lower_cardinality: Dict[str, List[OpencensusProxyMetric]] = defaultdict(
list
)
cardinality_level = MetricCardinality.get_cardinality_level()
for component in self._components.values():
for metric in component.metrics.values():
if (
cardinality_level == MetricCardinality.RECOMMENDED
and not metric.is_distribution_aggregation_data()
):
# We reduce the cardinality for all metrics except for histogram
# metrics. The aggregation of histogram metrics from worker
# level to node level is not well defined. In addition, we
# currently have very few histogram metrics in Ray
# (metric_defs.cc) so the impact of them is negligible.
to_lower_cardinality[metric.name].append(metric)
else:
open_cencus_metrics.append(metric)
for per_worker_metrics in to_lower_cardinality.values():
open_cencus_metrics.extend(
self._aggregate_with_recommended_cardinality(
per_worker_metrics,
)
)
prometheus_metrics_map = {}
for metric in open_cencus_metrics:
for label_values, data in metric.data.items():
self.to_prometheus_metrics(
metric.name,
metric.desc,
metric.label_keys,
metric.unit,
label_values,
data,
prometheus_metrics_map,
)
for metrics in prometheus_metrics_map.values():
for metric in metrics:
yield metric
| OpenCensusProxyCollector |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/s3.py | {
"start": 15679,
"end": 19674
} | class ____(AwsBaseOperator[S3Hook]):
"""
Creates a new object from `data` as string or bytes.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3CreateObjectOperator`
:param s3_bucket: Name of the S3 bucket where to save the object. (templated)
It should be omitted when ``s3_key`` is provided as a full s3:// url.
:param s3_key: The key of the object to be created. (templated)
It can be either full s3:// style url or relative path from root level.
When it's specified as a full s3:// url, please omit ``s3_bucket``.
:param data: string or bytes to save as content.
:param replace: If True, it will overwrite the key if it already exists
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
:param encoding: The string to byte encoding.
It should be specified only when `data` is provided as string.
:param compression: Type of compression to use, currently only gzip is supported.
It can be specified only when `data` is provided as string.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
template_fields: Sequence[str] = aws_template_fields("s3_bucket", "s3_key", "data")
aws_hook_class = S3Hook
def __init__(
self,
*,
s3_bucket: str | None = None,
s3_key: str,
data: str | bytes,
replace: bool = False,
encrypt: bool = False,
acl_policy: str | None = None,
encoding: str | None = None,
compression: str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.data = data
self.replace = replace
self.encrypt = encrypt
self.acl_policy = acl_policy
self.encoding = encoding
self.compression = compression
def execute(self, context: Context):
s3_bucket, s3_key = self.hook.get_s3_bucket_key(
self.s3_bucket, self.s3_key, "dest_bucket", "dest_key"
)
if isinstance(self.data, str):
self.hook.load_string(
self.data,
s3_key,
s3_bucket,
self.replace,
self.encrypt,
self.encoding,
self.acl_policy,
self.compression,
)
else:
self.hook.load_bytes(self.data, s3_key, s3_bucket, self.replace, self.encrypt, self.acl_policy)
def get_openlineage_facets_on_start(self):
from airflow.providers.common.compat.openlineage.facet import Dataset
from airflow.providers.openlineage.extractors import OperatorLineage
bucket, key = self.hook.get_s3_bucket_key(self.s3_bucket, self.s3_key, "dest_bucket", "dest_key")
output_dataset = Dataset(
namespace=f"s3://{bucket}",
name=key,
)
return OperatorLineage(
outputs=[output_dataset],
)
| S3CreateObjectOperator |
python | wandb__wandb | wandb/automations/events.py | {
"start": 10426,
"end": 11988
} | class ____(_BaseRunEventInput):
"""A run metric satisfies a user-defined condition.
Examples:
Define an event that triggers for any run in project "my-project" when
the average of the last 5 values of metric "my-metric" exceeds 123.45:
```python
from wandb import Api
from wandb.automations import OnRunMetric, RunEvent
api = Api()
project = api.project(name="my-project")
event = OnRunMetric(
scope=project,
filter=RunEvent.metric("my-metric").avg(5).gt(123.45),
)
```
"""
event_type: Literal[
EventType.RUN_METRIC_THRESHOLD,
EventType.RUN_METRIC_CHANGE,
EventType.RUN_METRIC_ZSCORE,
]
filter: JsonEncoded[RunMetricFilter]
"""Run and/or metric condition(s) that must be satisfied for this event to trigger."""
@model_validator(mode="before")
@classmethod
def _infer_event_type(cls, data: Any) -> Any:
"""Infer the event type from the inner filter during validation.
This supports both "threshold" and "change" metric filters, which can
only be determined after parsing and validating the inner JSON data.
"""
if isinstance(data, dict) and (raw_filter := data.get("filter")):
# At this point, `raw_filter` may or may not be JSON-serialized
parsed_filter = RunMetricFilter.model_validate_json(ensure_json(raw_filter))
return {**data, "event_type": parsed_filter.metric.event_type}
return data
| OnRunMetric |
python | tensorflow__tensorflow | tensorflow/python/util/decorator_utils_test.py | {
"start": 3429,
"end": 4083
} | class ____(test.TestCase):
def test_function(self):
decorator_utils.validate_callable(_test_function, "test")
def test_method(self):
decorator_utils.validate_callable(self.test_method, "test")
def test_callable(self):
class TestClass(object):
def __call__(self):
pass
decorator_utils.validate_callable(TestClass(), "test")
def test_partial(self):
partial = functools.partial(_test_function, unused_arg=7)
decorator_utils.validate_callable(partial, "test")
def test_fail_non_callable(self):
x = 0
self.assertRaises(ValueError, decorator_utils.validate_callable, x, "test")
| ValidateCallableTest |
python | neetcode-gh__leetcode | python/2709-greatest-common-divisor-traversal.py | {
"start": 603,
"end": 1317
} | class ____:
def canTraverseAllPairs(self, nums: List[int]) -> bool:
uf = UnionFind(len(nums))
factor_index = {}
for i, n in enumerate(nums):
f = 2
while f * f <= n:
if n % f == 0:
if f in factor_index:
uf.union(i, factor_index[f])
else:
factor_index[f] = i
while n % f == 0:
n = n // f
f += 1
if n > 1:
if n in factor_index:
uf.union(i, factor_index[n])
else:
factor_index[n] = i
return uf.count == 1
| Solution |
python | jina-ai__jina | jina/types/request/__init__.py | {
"start": 170,
"end": 1631
} | class ____(ProtoTypeMixin):
"""
:class:`Request` is one of the primitive data types in Jina, and serves as a base for
:class:`~data.DataRequest` and :class:`~data.Response`.
It offers a Pythonic interface to allow users access and manipulate
:class:`jina.jina_pb2.RequestProto` object without working with Protobuf itself.
It serves as a container for serialized :class:`jina_pb2.RequestProto` that only triggers deserialization
and decompression when receives the first read access to its member.
It overrides :meth:`__getattr__` to provide the same get/set interface as an
:class:`jina_pb2.RequestProto` object.
"""
def __getattr__(self, name: str):
return getattr(self.proto, name)
def add_exception(
self, ex: Optional['Exception'] = None, executor: 'BaseExecutor' = None
) -> None:
"""Add exception to the last route in the envelope
:param ex: Exception to be added
:param executor: Executor related to the exception
"""
d = self.header.status
d.code = jina_pb2.StatusProto.ERROR
d.description = repr(ex)
if executor:
d.exception.executor = executor.__class__.__name__
d.exception.name = ex.__class__.__name__
d.exception.args.extend([str(v) for v in ex.args])
d.exception.stacks.extend(
traceback.format_exception(type(ex), value=ex, tb=ex.__traceback__)
)
| Request |
python | doocs__leetcode | solution/2300-2399/2344.Minimum Deletions to Make Array Divisible/Solution2.py | {
"start": 0,
"end": 212
} | class ____:
def minOperations(self, nums: List[int], numsDivide: List[int]) -> int:
x = gcd(*numsDivide)
nums.sort()
return next((i for i, v in enumerate(nums) if x % v == 0), -1)
| Solution |
python | huggingface__transformers | tests/quantization/hqq/test_hqq.py | {
"start": 5902,
"end": 7694
} | class ____(unittest.TestCase):
def tearDown(self):
cleanup()
def test_fp16_quantized_model(self):
"""
Simple LLM model testing fp16 with bias
"""
quant_config = HqqConfig(nbits=8, group_size=64)
hqq_runner = HQQLLMRunner(
model_id="facebook/opt-125m", quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
check_hqqlayer(self, hqq_runner.model.model.decoder.layers[0].self_attn.v_proj)
check_forward(self, hqq_runner.model)
@require_deterministic_for_xpu
def test_save_and_load_quantized_model(self):
"""
Test saving and loading a quantized model with bias
"""
import tempfile
quant_config = HqqConfig(nbits=8, group_size=64)
hqq_runner = HQQLLMRunner(
model_id="facebook/opt-125m", quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
)
input_tensor = torch.zeros((1, 8), dtype=torch.int32, device=torch_device)
# Get reference logits
with torch.no_grad():
logits_ref = hqq_runner.model.forward(input_tensor).logits
with tempfile.TemporaryDirectory() as tmpdirname:
hqq_runner.model.save_pretrained(tmpdirname)
del hqq_runner.model
backend_empty_cache(torch_device)
model_loaded = AutoModelForCausalLM.from_pretrained(
tmpdirname, dtype=torch.float16, device_map=torch_device
)
with torch.no_grad():
logits_loaded = model_loaded.forward(input_tensor).logits
self.assertEqual((logits_loaded - logits_ref).abs().mean().item(), 0)
@slow
@require_torch_accelerator
@require_accelerate
@require_hqq
| HQQTestBias |
python | ansible__ansible | lib/ansible/module_utils/common/arg_spec.py | {
"start": 1113,
"end": 2787
} | class ____:
"""Result of argument spec validation.
This is the object returned by :func:`ArgumentSpecValidator.validate()
<ansible.module_utils.common.arg_spec.ArgumentSpecValidator.validate()>`
containing the validated parameters and any errors.
"""
def __init__(self, parameters):
"""
:arg parameters: Terms to be validated and coerced to the correct type.
:type parameters: dict
"""
self._no_log_values = set()
""":class:`set` of values marked as ``no_log`` in the argument spec. This
is a temporary holding place for these values and may move in the future.
"""
self._unsupported_parameters = set()
self._supported_parameters = dict()
self._validated_parameters = deepcopy(parameters)
self._deprecations = []
self._warnings = []
self._aliases = {}
self.errors = AnsibleValidationErrorMultiple()
"""
:class:`~ansible.module_utils.errors.AnsibleValidationErrorMultiple` containing all
:class:`~ansible.module_utils.errors.AnsibleValidationError` objects if there were
any failures during validation.
"""
@property
def validated_parameters(self):
"""Validated and coerced parameters."""
return self._validated_parameters
@property
def unsupported_parameters(self):
""":class:`set` of unsupported parameter names."""
return self._unsupported_parameters
@property
def error_messages(self):
""":class:`list` of all error messages from each exception in :attr:`errors`."""
return self.errors.messages
| ValidationResult |
python | walkccc__LeetCode | solutions/1855. Maximum Distance Between a Pair of Values/1855-2.py | {
"start": 0,
"end": 247
} | class ____:
def maxDistance(self, nums1: list[int], nums2: list[int]) -> int:
i = 0
j = 0
while i < len(nums1) and j < len(nums2):
if nums1[i] > nums2[j]:
i += 1
j += 1
return 0 if i == j else j - i - 1
| Solution |
python | bokeh__bokeh | tests/unit/bokeh/document/test_document.py | {
"start": 3846,
"end": 36794
} | class ____:
def test_basic(self) -> None:
d = document.Document()
assert not d.roots
assert d.template_variables == {}
assert d.session_context is None
def test_session_context(self) -> None:
d = document.Document()
assert d.session_context is None
sc = BokehSessionContext(None, None, d)
d._session_context = weakref.ref(sc)
assert d.session_context is sc
def test_add_roots(self) -> None:
d = document.Document()
assert not d.roots
d.add_root(AnotherModelInTestDocument())
assert len(d.roots) == 1
assert next(iter(d.roots)).document == d
def test_roots_preserves_insertion_order(self) -> None:
d = document.Document()
assert not d.roots
roots = [
AnotherModelInTestDocument(),
AnotherModelInTestDocument(),
AnotherModelInTestDocument(),
]
for r in roots:
d.add_root(r)
assert len(d.roots) == 3
assert isinstance(d.roots, list)
roots_iter = iter(d.roots)
assert next(roots_iter) is roots[0]
assert next(roots_iter) is roots[1]
assert next(roots_iter) is roots[2]
def test_title(self) -> None:
d = document.Document()
assert d.title == document.DEFAULT_TITLE
d.title = "Foo"
assert d.title == "Foo"
def test_all_models(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
m = SomeModelInTestDocument()
m2 = AnotherModelInTestDocument()
m.child = m2
d.add_root(m)
assert len(d.roots) == 1
assert len(d.models) == 2
m.child = None
assert len(d.models) == 1
m.child = m2
assert len(d.models) == 2
d.remove_root(m)
assert len(d.models) == 0
def test_get_model_by_id(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
m = SomeModelInTestDocument()
m2 = AnotherModelInTestDocument()
m.child = m2
d.add_root(m)
assert len(d.roots) == 1
assert len(d.models) == 2
assert d.get_model_by_id(m.id) == m
assert d.get_model_by_id(m2.id) == m2
assert d.get_model_by_id("not a valid ID") is None
def test_get_model_by_name(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
m = SomeModelInTestDocument(name="foo")
m2 = AnotherModelInTestDocument(name="bar")
m.child = m2
d.add_root(m)
assert len(d.roots) == 1
assert len(d.models) == 2
assert d.get_model_by_name(m.name) == m
assert d.get_model_by_name(m2.name) == m2
assert d.get_model_by_name("not a valid name") is None
def test_get_model_by_changed_name(self) -> None:
d = document.Document()
m = SomeModelInTestDocument(name="foo")
d.add_root(m)
assert d.get_model_by_name("foo") == m
m.name = "bar"
assert d.get_model_by_name("foo") is None
assert d.get_model_by_name("bar") == m
def test_get_model_by_changed_from_none_name(self) -> None:
d = document.Document()
m = SomeModelInTestDocument(name=None)
d.add_root(m)
assert d.get_model_by_name("bar") is None
m.name = "bar"
assert d.get_model_by_name("bar") == m
def test_get_model_by_changed_to_none_name(self) -> None:
d = document.Document()
m = SomeModelInTestDocument(name="bar")
d.add_root(m)
assert d.get_model_by_name("bar") == m
m.name = None
assert d.get_model_by_name("bar") is None
def test_can_get_name_overriding_model_by_name(self) -> None:
d = document.Document()
m = ModelThatOverridesName(name="foo")
d.add_root(m)
assert d.get_model_by_name("foo") == m
m.name = "bar"
assert d.get_model_by_name("bar") == m
def test_cannot_get_model_with_duplicate_name(self) -> None:
d = document.Document()
m = SomeModelInTestDocument(name="foo")
m2 = SomeModelInTestDocument(name="foo")
d.add_root(m)
d.add_root(m2)
got_error = False
try:
d.get_model_by_name("foo")
except ValueError as e:
got_error = True
assert 'Found more than one' in repr(e)
assert got_error
d.remove_root(m)
assert d.get_model_by_name("foo") == m2
def test_select(self) -> None:
# we aren't trying to replace test_query here, only test
# our wrappers around it, so no need to try every kind of
# query
d = document.Document()
root1 = SomeModelInTestDocument(foo=42, name='a')
child1 = SomeModelInTestDocument(foo=43, name='b')
root2 = SomeModelInTestDocument(foo=44, name='c')
root3 = SomeModelInTestDocument(foo=44, name='d')
child3 = SomeModelInTestDocument(foo=45, name='c')
root4 = AnotherModelInTestDocument(bar=20, name='A')
root1.child = child1
root3.child = child3
d.add_root(root1)
d.add_root(root2)
d.add_root(root3)
d.add_root(root4)
# select()
assert {root1} == set(d.select(dict(foo=42)))
assert {root1} == set(d.select(dict(name="a")))
assert {root2, child3} == set(d.select(dict(name="c")))
assert set() == set(d.select(dict(name="nope")))
# select() on object
assert set() == set(root3.select(dict(name="a")))
assert {child3} == set(root3.select(dict(name="c")))
# select_one()
assert root3 == d.select_one(dict(name='d'))
assert d.select_one(dict(name='nope')) is None
got_error = False
try:
d.select_one(dict(name='c'))
except ValueError as e:
got_error = True
assert 'Found more than one' in repr(e)
assert got_error
# select_one() on object
assert root3.select_one(dict(name='a')) is None
assert child3 == root3.select_one(dict(name='c'))
# set_select()
d.set_select(dict(foo=44), dict(name="c"))
assert {root2, child3, root3} == set(d.select(dict(name="c")))
# set_select() on object
root3.set_select(dict(name='c'), dict(foo=57))
assert {child3, root3} == set(d.select(dict(foo=57)))
assert {child3, root3} == set(root3.select(dict(foo=57)))
# set_select() on class
d.set_select(SomeModelInTestDocument, dict(name='new_name'))
assert len(d.select(dict(name='new_name'))) == 5
# set_select() on different class
assert len(d.select(dict(name="A"))) == 1
d.set_select(AnotherModelInTestDocument, dict(name="B"))
assert {root4} == set(d.select(dict(name="B")))
def test_all_models_with_multiple_references(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
root1 = SomeModelInTestDocument()
root2 = SomeModelInTestDocument()
child1 = AnotherModelInTestDocument()
root1.child = child1
root2.child = child1
d.add_root(root1)
d.add_root(root2)
assert len(d.roots) == 2
assert len(d.models) == 3
root1.child = None
assert len(d.models) == 3
root2.child = None
assert len(d.models) == 2
root1.child = child1
assert len(d.models) == 3
root2.child = child1
assert len(d.models) == 3
d.remove_root(root1)
assert len(d.models) == 2
d.remove_root(root2)
assert len(d.models) == 0
def test_all_models_with_cycles(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
root1 = SomeModelInTestDocument()
root2 = SomeModelInTestDocument()
child1 = SomeModelInTestDocument()
root1.child = child1
root2.child = child1
child1.child = root1
print("adding root1")
d.add_root(root1)
print("adding root2")
d.add_root(root2)
assert len(d.roots) == 2
assert len(d.models) == 3
print("clearing child of root1")
root1.child = None
assert len(d.models) == 3
print("clearing child of root2")
root2.child = None
assert len(d.models) == 2
print("putting child1 back in root1")
root1.child = child1
assert len(d.models) == 3
print("Removing root1")
d.remove_root(root1)
assert len(d.models) == 1
print("Removing root2")
d.remove_root(root2)
assert len(d.models) == 0
def test_change_notification(self) -> None:
d = document.Document()
assert not d.roots
m = AnotherModelInTestDocument()
d.add_root(m)
assert len(d.roots) == 1
assert m.bar == 1
assert curdoc() is not d
events = []
curdoc_from_listener = []
def listener(event):
curdoc_from_listener.append(curdoc())
events.append(event)
d.on_change(listener)
m.bar = 42
assert events
event = events[0]
assert isinstance(event, ModelChangedEvent)
assert event.document == d
assert event.model == m
assert event.attr == 'bar'
assert event.new == 42
assert len(curdoc_from_listener) == 1
assert curdoc_from_listener[0] is d
def test_stream_notification(self) -> None:
d = document.Document()
assert not d.roots
m = ColumnDataSource(data=dict(a=[10], b=[20]))
d.add_root(m)
assert len(d.roots) == 1
assert curdoc() is not d
events = []
curdoc_from_listener = []
def listener(event):
curdoc_from_listener.append(curdoc())
events.append(event)
d.on_change(listener)
m.stream(dict(a=[11, 12], b=[21, 22]), 200)
assert events
event = events[0]
assert isinstance(event, ColumnsStreamedEvent)
assert event.document == d
assert event.model == m
assert event.attr == "data"
assert event.data == dict(a=[11, 12], b=[21, 22])
assert event.rollover == 200
assert len(curdoc_from_listener) == 1
assert curdoc_from_listener[0] is d
def test_patch_notification(self) -> None:
d = document.Document()
assert not d.roots
m = ColumnDataSource(data=dict(a=[10,11], b=[20,21]))
d.add_root(m)
assert len(d.roots) == 1
assert curdoc() is not d
events = []
curdoc_from_listener = []
def listener(event):
curdoc_from_listener.append(curdoc())
events.append(event)
d.on_change(listener)
m.patch(dict(a=[(0, 1)], b=[(0,0), (1,1)]))
assert events
event = events[0]
assert isinstance(event, ColumnsPatchedEvent)
assert event.document == d
assert event.model == m
assert event.attr == "data"
assert event.patches == dict(a=[(0, 1)], b=[(0,0), (1,1)])
assert len(curdoc_from_listener) == 1
assert curdoc_from_listener[0] is d
def test_change_notification_removal(self) -> None:
d = document.Document()
assert not d.roots
m = AnotherModelInTestDocument()
d.add_root(m)
assert len(d.roots) == 1
assert m.bar == 1
events = []
def listener(event):
events.append(event)
d.on_change(listener)
m.bar = 42
assert len(events) == 1
assert events[0].new == 42
d.remove_on_change(listener)
m.bar = 43
assert len(events) == 1
def test_notification_of_roots(self) -> None:
d = document.Document()
assert not d.roots
events = []
def listener(event):
events.append(event)
d.on_change(listener)
m = AnotherModelInTestDocument(bar=1)
d.add_root(m)
assert len(d.roots) == 1
assert len(events) == 1
assert isinstance(events[0], RootAddedEvent)
assert events[0].model == m
m2 = AnotherModelInTestDocument(bar=2)
d.add_root(m2)
assert len(d.roots) == 2
assert len(events) == 2
assert isinstance(events[1], RootAddedEvent)
assert events[1].model == m2
d.remove_root(m)
assert len(d.roots) == 1
assert len(events) == 3
assert isinstance(events[2], RootRemovedEvent)
assert events[2].model == m
d.remove_root(m2)
assert len(d.roots) == 0
assert len(events) == 4
assert isinstance(events[3], RootRemovedEvent)
assert events[3].model == m2
def test_notification_of_title(self) -> None:
d = document.Document()
assert not d.roots
assert d.title == document.DEFAULT_TITLE
events = []
def listener(event):
events.append(event)
d.on_change(listener)
d.title = "Foo"
assert d.title == "Foo"
assert len(events) == 1
assert isinstance(events[0], TitleChangedEvent)
assert events[0].document is d
assert events[0].title == "Foo"
def test_add_remove_periodic_callback(self) -> None:
d = document.Document()
events = []
def listener(event):
events.append(event)
d.on_change(listener)
assert len(d.session_callbacks) == 0
assert not events
def cb(): pass
callback_obj = d.add_periodic_callback(cb, 1)
assert len(d.session_callbacks) == len(events) == 1
assert isinstance(events[0], SessionCallbackAdded)
assert callback_obj == d.session_callbacks[0] == events[0].callback
assert callback_obj.period == 1
d.remove_periodic_callback(callback_obj)
assert len(d.session_callbacks) == 0
assert len(events) == 2
assert isinstance(events[0], SessionCallbackAdded)
assert isinstance(events[1], SessionCallbackRemoved)
def test_add_remove_timeout_callback(self) -> None:
d = document.Document()
events = []
def listener(event):
events.append(event)
d.on_change(listener)
assert len(d.session_callbacks) == 0
assert not events
def cb(): pass
callback_obj = d.add_timeout_callback(cb, 1)
assert len(d.session_callbacks) == len(events) == 1
assert isinstance(events[0], SessionCallbackAdded)
assert callback_obj == d.session_callbacks[0] == events[0].callback
assert callback_obj.timeout == 1
d.remove_timeout_callback(callback_obj)
assert len(d.session_callbacks) == 0
assert len(events) == 2
assert isinstance(events[0], SessionCallbackAdded)
assert isinstance(events[1], SessionCallbackRemoved)
def test_add_partial_callback(self) -> None:
from functools import partial
d = document.Document()
events = []
def listener(event):
events.append(event)
d.on_change(listener)
assert len(d.session_callbacks) == 0
assert not events
def _cb(): pass
cb = partial(_cb)
callback_obj = d.add_timeout_callback(cb, 1)
assert len(d.session_callbacks) == len(events) == 1
assert isinstance(events[0], SessionCallbackAdded)
assert callback_obj == d.session_callbacks[0] == events[0].callback
assert callback_obj.timeout == 1
def test_add_remove_next_tick_callback(self) -> None:
d = document.Document()
events = []
def listener(event):
events.append(event)
d.on_change(listener)
assert len(d.session_callbacks) == 0
assert not events
def cb(): pass
callback_obj = d.add_next_tick_callback(cb)
assert len(d.session_callbacks) == len(events) == 1
assert isinstance(events[0], SessionCallbackAdded)
assert callback_obj == d.session_callbacks[0] == events[0].callback
d.remove_next_tick_callback(callback_obj)
assert len(d.session_callbacks) == 0
assert len(events) == 2
assert isinstance(events[0], SessionCallbackAdded)
assert isinstance(events[1], SessionCallbackRemoved)
def test_periodic_callback_gets_curdoc(self) -> None:
d = document.Document()
assert curdoc() is not d
curdoc_from_cb = []
def cb():
curdoc_from_cb.append(curdoc())
callback_obj = d.add_periodic_callback(cb, 1)
callback_obj.callback()
assert len(curdoc_from_cb) == 1
assert curdoc_from_cb[0] is d
def test_timeout_callback_gets_curdoc(self) -> None:
d = document.Document()
assert curdoc() is not d
curdoc_from_cb = []
def cb():
curdoc_from_cb.append(curdoc())
callback_obj = d.add_timeout_callback(cb, 1)
callback_obj.callback()
assert len(curdoc_from_cb) == 1
assert curdoc_from_cb[0] is d
def test_next_tick_callback_gets_curdoc(self) -> None:
d = document.Document()
assert curdoc() is not d
curdoc_from_cb = []
def cb():
curdoc_from_cb.append(curdoc())
callback_obj = d.add_next_tick_callback(cb)
callback_obj.callback()
assert len(curdoc_from_cb) == 1
assert curdoc_from_cb[0] is d
def test_model_callback_gets_curdoc(self) -> None:
d = document.Document()
m = AnotherModelInTestDocument(bar=42)
d.add_root(m)
assert curdoc() is not d
curdoc_from_cb = []
def cb(attr, old, new):
curdoc_from_cb.append(curdoc())
m.on_change('bar', cb)
m.bar = 43
assert len(curdoc_from_cb) == 1
assert curdoc_from_cb[0] is d
def test_clear(self) -> None:
d = document.Document()
assert not d.roots
assert d.title == document.DEFAULT_TITLE
d.add_root(AnotherModelInTestDocument())
d.add_root(AnotherModelInTestDocument())
d.title = "Foo"
assert len(d.roots) == 2
assert d.title == "Foo"
d.clear()
assert not d.roots
assert len(d.models) == 0
assert d.title == "Foo" # do not reset title
def test_serialization_one_model(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
root1 = SomeModelInTestDocument()
d.add_root(root1)
d.title = "Foo"
json = d.to_json()
copy = document.Document.from_json(json)
assert len(copy.roots) == 1
assert copy.title == "Foo"
def test_serialization_more_models(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
root1 = SomeModelInTestDocument(foo=42)
root2 = SomeModelInTestDocument(foo=43)
child1 = SomeModelInTestDocument(foo=44)
root1.child = child1
root2.child = child1
d.add_root(root1)
d.add_root(root2)
assert len(d.roots) == 2
json = d.to_json()
copy = document.Document.from_json(json)
assert len(copy.roots) == 2
foos = []
for r in copy.roots:
foos.append(r.foo)
foos.sort()
assert [42,43] == foos
some_root = next(iter(copy.roots))
assert some_root.child.foo == 44
def test_serialization_data_models(self) -> None:
#obj0 = SomeDataModel()
#obj1 = DerivedDataModel(prop6=obj0)
#obj2 = CDSDerivedDataModel()
#obj3 = CDSDerivedDerivedDataModel()
doc = document.Document()
#doc.add_root(obj0)
#doc.add_root(obj1)
#doc.add_root(obj2)
#doc.add_root(obj3)
json = doc.to_json()
assert json["defs"] == [
ModelDef(
type="model",
name="test_document.SomeDataModel",
properties=[
PropertyDef(name="prop0", kind="Any", default=0),
PropertyDef(name="prop1", kind="Any", default=111),
PropertyDef(name="prop2", kind="Any", default=[1, 2, 3]),
],
),
ModelDef(
type="model",
name="test_document.DerivedDataModel",
extends=Ref(id=ID("test_document.SomeDataModel")),
properties=[
PropertyDef(name="prop3", kind="Any", default=0),
PropertyDef(name="prop4", kind="Any", default=112),
PropertyDef(name="prop5", kind="Any", default=[1, 2, 3, 4]),
PropertyDef(name="prop6", kind="Any"),
PropertyDef(name="prop7", kind="Any", default=None),
],
overrides=[
OverrideDef(name="prop2", default=[4, 5, 6]),
],
),
ModelDef(
type="model",
name="test_document.CDSDerivedDataModel",
extends=Ref(id=ID("ColumnDataSource")),
properties=[
PropertyDef(name="prop0", kind="Any", default=0),
PropertyDef(name="prop1", kind="Any", default=111),
PropertyDef(name="prop2", kind="Any", default=[1, 2, 3]),
],
overrides=[
OverrideDef(name="data", default=MapRep(type="map", entries=[("default_column", [4, 5, 6])])),
],
),
ModelDef(
type="model",
name="test_document.CDSDerivedDerivedDataModel",
extends=Ref(id=ID("test_document.CDSDerivedDataModel")),
properties=[
PropertyDef(
name="prop3",
kind="Any",
default=ObjectRefRep(
type="object",
name="test_document.SomeDataModel",
id=CDSDerivedDerivedDataModel.prop3.property._default.ref["id"],
attributes=dict(prop0=-1),
),
),
],
overrides=[
OverrideDef(name="data", default=MapRep(type="map", entries=[("default_column", [7, 8, 9])])),
],
),
]
# TODO: assert json["roots"]["references"] == ...
def test_serialization_has_version(self) -> None:
from bokeh import __version__
d = document.Document()
json = d.to_json()
assert json['version'] == __version__
def test_patch_integer_property(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
root1 = SomeModelInTestDocument(foo=42)
root2 = SomeModelInTestDocument(foo=43)
child1 = SomeModelInTestDocument(foo=44)
root1.child = child1
root2.child = child1
d.add_root(root1)
d.add_root(root2)
assert len(d.roots) == 2
event1 = ModelChangedEvent(d, root1, 'foo', 57)
patch1 = patch_doc.create([event1]).content
d.apply_json_patch(patch1)
assert root1.foo == 57
event2 = ModelChangedEvent(d, child1, 'foo', 67)
patch2 = patch_doc.create([event2]).content
d.apply_json_patch(patch2)
assert child1.foo == 67
def test_patch_spec_property(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
root1 = ModelWithSpecInTestDocument(foo=42)
d.add_root(root1)
assert len(d.roots) == 1
def patch_test(new_value: Any):
event1 = ModelChangedEvent(d, root1, 'foo', new_value)
patch1 = patch_doc.create([event1]).content
d.apply_json_patch(patch1)
if isinstance(new_value, dict):
return root1.lookup('foo').get_value(root1)
else:
return root1.foo
assert patch_test(57) == 57
assert 'data' == root1.foo_units
assert patch_test(dict(value=58)) == Value(58)
assert 'data' == root1.foo_units
assert patch_test(dict(value=58, units='screen')) == Value(58, units='screen')
assert 'screen' == root1.foo_units
assert patch_test(dict(value=59, units='screen')) == Value(59, units='screen')
assert 'screen' == root1.foo_units
assert patch_test(dict(value=59, units='data')) == Value(59)
assert 'data' == root1.foo_units
assert patch_test(dict(value=60, units='data')) == Value(60)
assert 'data' == root1.foo_units
assert patch_test(dict(value=60, units='data')) == Value(60)
assert 'data' == root1.foo_units
assert patch_test(61) == 61
assert 'data' == root1.foo_units
root1.foo = "a_string" # so "woot" gets set as a string
assert patch_test("woot") == "woot"
assert 'data' == root1.foo_units
assert patch_test(dict(field="woot2")) == Field("woot2")
assert 'data' == root1.foo_units
assert patch_test(dict(field="woot2", units='screen')) == Field("woot2", units='screen')
assert 'screen' == root1.foo_units
assert patch_test(dict(field="woot3")) == Field("woot3", units="screen")
assert 'screen' == root1.foo_units
assert patch_test(dict(value=70)) == Value(70, units="screen")
assert 'screen' == root1.foo_units
root1.foo = 123 # so 71 gets set as a number
assert patch_test(71) == 71
assert 'screen' == root1.foo_units
def test_patch_reference_property(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
root1 = SomeModelInTestDocument(foo=42)
root2 = SomeModelInTestDocument(foo=43)
child1 = SomeModelInTestDocument(foo=44)
child2 = SomeModelInTestDocument(foo=45)
child3 = SomeModelInTestDocument(foo=46, child=child2)
root1.child = child1
root2.child = child1
d.add_root(root1)
d.add_root(root2)
assert len(d.roots) == 2
assert child1.id in d.models
assert child2.id not in d.models
assert child3.id not in d.models
assert d.models._new_models == {root1, root2, child1}
d.to_json() # clear new model queue
assert d.models._new_models == set()
event1 = ModelChangedEvent(d, root1, 'child', child3)
patch1 = patch_doc.create([event1]).content
d.apply_json_patch(patch1)
assert d.models._new_models == set()
assert root1.child.id == child3.id
assert root1.child.child.id == child2.id
assert child1.id in d.models
assert child2.id in d.models
assert child3.id in d.models
# put it back how it was before
event2 = ModelChangedEvent(d, root1, 'child', child1)
patch2 = patch_doc.create([event2]).content
d.apply_json_patch(patch2)
assert d.models._new_models == set()
assert root1.child.id == child1.id
assert root1.child.child is None
assert child1.id in d.models
assert child2.id not in d.models
assert child3.id not in d.models
def test_patch_two_properties_at_once(self) -> None:
d = document.Document()
assert not d.roots
assert len(d.models) == 0
root1 = SomeModelInTestDocument(foo=42)
child1 = SomeModelInTestDocument(foo=43)
root1.child = child1
d.add_root(root1)
assert len(d.roots) == 1
assert root1.child == child1
assert root1.foo == 42
assert root1.child.foo == 43
child2 = SomeModelInTestDocument(foo=44)
event1 = ModelChangedEvent(d, root1, 'foo', 57)
event2 = ModelChangedEvent(d, root1, 'child', child2)
patch1 = patch_doc.create([event1, event2]).content
d.apply_json_patch(patch1)
assert root1.foo == 57
assert root1.child.foo == 44
def test_patch_a_reference_with_implicit_reference_set(self) -> None:
m0 = SomeModelInTestDocument(foo=0, child=None)
m1 = SomeModelInTestDocument(foo=1, child=m0)
m2 = SomeModelInTestDocument(foo=2, child=m1)
m3 = SomeModelInTestDocument(foo=3, child=m2)
doc = document.Document()
doc.add_root(m3)
patch = PatchJson(
events=[
ModelChanged(
kind="ModelChanged",
model=m2.ref,
attr="child",
new=m0.ref,
),
],
references=[], # known models are not included by bokehjs to improve performance (e.g. reduce payload size)
)
assert m2.child == m1
doc.apply_json_patch(patch)
assert m2.child == m0
def test_patch_a_previously_known_reference(self, caplog: pytest.LogCaptureFixture) -> None:
m0 = SomeModelInTestDocument(foo=0)
m1 = SomeModelInTestDocument(foo=1, child=m0)
doc = document.Document()
doc.add_root(m1)
m1.child = None
patch = PatchJson(
events=[
ModelChanged(
kind="ModelChanged",
model=m0.ref,
attr="foo",
new=10,
),
],
references=[],
)
with caplog.at_level(logging.DEBUG):
assert len(caplog.records) == 0
doc.apply_json_patch(patch)
assert len(caplog.records) == 1
[msg0] = caplog.messages
assert m0.ref["id"] in msg0
assert m0.foo == 0
def test_patch_an_unknown_reference(self) -> None:
m0 = SomeModelInTestDocument(foo=0)
m1 = SomeModelInTestDocument(foo=1, child=None)
doc = document.Document()
doc.add_root(m1)
m1.child = None
patch = PatchJson(
events=[
ModelChanged(
kind="ModelChanged",
model=m0.ref,
attr="foo",
new=10,
),
],
references=[],
)
with pytest.raises(UnknownReferenceError):
doc.apply_json_patch(patch)
assert m0.foo == 0
# a more realistic set of models instead of fake models
def test_scatter(self) -> None:
import numpy as np
from bokeh.io.doc import set_curdoc
from bokeh.plotting import figure
d = document.Document()
set_curdoc(d)
assert not d.roots
assert len(d.models) == 0
p1 = figure(tools=[])
N = 10
x = np.linspace(0, 4 * np.pi, N)
y = np.sin(x)
p1.scatter(x, y, color="#FF00FF", nonselection_fill_color="#FFFF00", nonselection_fill_alpha=1)
# figure does not automatically add itself to the document
d.add_root(p1)
assert len(d.roots) == 1
def test_event_handles_new_callbacks_in_event_callback(self) -> None:
from bokeh.models import Button
d = document.Document()
button1 = Button(label="1")
button2 = Button(label="2")
def clicked_1():
button2.on_event('button_click', clicked_2)
d.add_root(button2)
def clicked_2():
pass
button1.on_event('button_click', clicked_1)
d.add_root(button1)
decoder = Deserializer(references=[button1])
event = decoder.decode(dict(
type="event",
name="button_click",
values=dict(model=dict(id=button1.id)),
))
try:
d.callbacks.trigger_event(event)
except RuntimeError:
pytest.fail("trigger_event probably did not copy models before modifying")
# TODO test serialize/deserialize with list-and-dict-valued properties
# TODO test replace_with_json
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# needed for caplog tests to function
basicConfig()
| TestDocument |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 2599,
"end": 14805
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(6320571663)
@pytest.mark.parametrize('k', [0.1, 1, 101])
@pytest.mark.parametrize('x', [0, 1, np.pi, 10, 100])
def test_vonmises_periodic(self, k, x):
def check_vonmises_pdf_periodic(k, L, s, x):
vm = stats.vonmises(k, loc=L, scale=s)
assert_almost_equal(vm.pdf(x), vm.pdf(x % (2 * np.pi * s)))
def check_vonmises_cdf_periodic(k, L, s, x):
vm = stats.vonmises(k, loc=L, scale=s)
assert_almost_equal(vm.cdf(x) % 1,
vm.cdf(x % (2 * np.pi * s)) % 1)
check_vonmises_pdf_periodic(k, 0, 1, x)
check_vonmises_pdf_periodic(k, 1, 1, x)
check_vonmises_pdf_periodic(k, 0, 10, x)
check_vonmises_cdf_periodic(k, 0, 1, x)
check_vonmises_cdf_periodic(k, 1, 1, x)
check_vonmises_cdf_periodic(k, 0, 10, x)
def test_vonmises_line_support(self):
assert_equal(stats.vonmises_line.a, -np.pi)
assert_equal(stats.vonmises_line.b, np.pi)
def test_vonmises_numerical(self):
vm = stats.vonmises(800)
assert_almost_equal(vm.cdf(0), 0.5)
# Expected values of the vonmises PDF were computed using
# mpmath with 50 digits of precision:
#
# def vmpdf_mp(x, kappa):
# x = mpmath.mpf(x)
# kappa = mpmath.mpf(kappa)
# num = mpmath.exp(kappa*mpmath.cos(x))
# den = 2 * mpmath.pi * mpmath.besseli(0, kappa)
# return num/den
@pytest.mark.parametrize('x, kappa, expected_pdf',
[(0.1, 0.01, 0.16074242744907072),
(0.1, 25.0, 1.7515464099118245),
(0.1, 800, 0.2073272544458798),
(2.0, 0.01, 0.15849003875385817),
(2.0, 25.0, 8.356882934278192e-16),
(2.0, 800, 0.0)])
def test_vonmises_pdf(self, x, kappa, expected_pdf):
pdf = stats.vonmises.pdf(x, kappa)
assert_allclose(pdf, expected_pdf, rtol=1e-15)
# Expected values of the vonmises entropy were computed using
# mpmath with 50 digits of precision:
#
# def vonmises_entropy(kappa):
# kappa = mpmath.mpf(kappa)
# return (-kappa * mpmath.besseli(1, kappa) /
# mpmath.besseli(0, kappa) + mpmath.log(2 * mpmath.pi *
# mpmath.besseli(0, kappa)))
# >>> float(vonmises_entropy(kappa))
@pytest.mark.parametrize('kappa, expected_entropy',
[(1, 1.6274014590199897),
(5, 0.6756431570114528),
(100, -0.8811275441649473),
(1000, -2.03468891852547),
(2000, -2.3813876496587847)])
def test_vonmises_entropy(self, kappa, expected_entropy):
entropy = stats.vonmises.entropy(kappa)
assert_allclose(entropy, expected_entropy, rtol=1e-13)
def test_vonmises_rvs_gh4598(self):
# check that random variates wrap around as discussed in gh-4598
seed = 30899520
rng1 = np.random.default_rng(seed)
rng2 = np.random.default_rng(seed)
rng3 = np.random.default_rng(seed)
rvs1 = stats.vonmises(1, loc=0, scale=1).rvs(random_state=rng1)
rvs2 = stats.vonmises(1, loc=2*np.pi, scale=1).rvs(random_state=rng2)
rvs3 = stats.vonmises(1, loc=0,
scale=(2*np.pi/abs(rvs1)+1)).rvs(random_state=rng3)
assert_allclose(rvs1, rvs2, atol=1e-15)
assert_allclose(rvs1, rvs3, atol=1e-15)
# Expected values of the vonmises LOGPDF were computed
# using wolfram alpha:
# kappa * cos(x) - log(2*pi*I0(kappa))
@pytest.mark.parametrize('x, kappa, expected_logpdf',
[(0.1, 0.01, -1.8279520246003170),
(0.1, 25.0, 0.5604990605420549),
(0.1, 800, -1.5734567947337514),
(2.0, 0.01, -1.8420635346185686),
(2.0, 25.0, -34.7182759850871489),
(2.0, 800, -1130.4942582548682739)])
def test_vonmises_logpdf(self, x, kappa, expected_logpdf):
logpdf = stats.vonmises.logpdf(x, kappa)
assert_allclose(logpdf, expected_logpdf, rtol=1e-15)
def test_vonmises_expect(self):
"""
Test that the vonmises expectation values are
computed correctly. This test checks that the
numeric integration estimates the correct normalization
(1) and mean angle (loc). These expectations are
independent of the chosen 2pi interval.
"""
rng = np.random.default_rng(6762668991392531563)
loc, kappa, lb = rng.random(3) * 10
res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: 1)
assert_allclose(res, 1)
assert np.issubdtype(res.dtype, np.floating)
bounds = lb, lb + 2 * np.pi
res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: 1, *bounds)
assert_allclose(res, 1)
assert np.issubdtype(res.dtype, np.floating)
bounds = lb, lb + 2 * np.pi
res = stats.vonmises(loc=loc, kappa=kappa).expect(lambda x: np.exp(1j*x),
*bounds, complex_func=1)
assert_allclose(np.angle(res), loc % (2*np.pi))
assert np.issubdtype(res.dtype, np.complexfloating)
@pytest.mark.xslow
@pytest.mark.parametrize("rvs_loc", [0, 2])
@pytest.mark.parametrize("rvs_shape", [1, 100, 1e8])
@pytest.mark.parametrize('fix_loc', [True, False])
@pytest.mark.parametrize('fix_shape', [True, False])
def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_shape,
fix_loc, fix_shape):
if fix_shape and fix_loc:
pytest.skip("Nothing to fit.")
rng = np.random.default_rng(6762668991392531563)
data = stats.vonmises.rvs(rvs_shape, size=1000, loc=rvs_loc,
random_state=rng)
kwds = {'fscale': 1}
if fix_loc:
kwds['floc'] = rvs_loc
if fix_shape:
kwds['f0'] = rvs_shape
_assert_less_or_close_loglike(stats.vonmises, data,
stats.vonmises.nnlf, **kwds)
@pytest.mark.slow
def test_vonmises_fit_bad_floc(self):
data = [-0.92923506, -0.32498224, 0.13054989, -0.97252014, 2.79658071,
-0.89110948, 1.22520295, 1.44398065, 2.49163859, 1.50315096,
3.05437696, -2.73126329, -3.06272048, 1.64647173, 1.94509247,
-1.14328023, 0.8499056, 2.36714682, -1.6823179, -0.88359996]
data = np.asarray(data)
loc = -0.5 * np.pi
kappa_fit, loc_fit, scale_fit = stats.vonmises.fit(data, floc=loc)
assert kappa_fit == np.finfo(float).tiny
_assert_less_or_close_loglike(stats.vonmises, data,
stats.vonmises.nnlf, fscale=1, floc=loc)
@pytest.mark.parametrize('sign', [-1, 1])
def test_vonmises_fit_unwrapped_data(self, sign):
rng = np.random.default_rng(6762668991392531563)
data = stats.vonmises(loc=sign*0.5*np.pi, kappa=10).rvs(100000,
random_state=rng)
shifted_data = data + 4*np.pi
kappa_fit, loc_fit, scale_fit = stats.vonmises.fit(data)
kappa_fit_shifted, loc_fit_shifted, _ = stats.vonmises.fit(shifted_data)
assert_allclose(loc_fit, loc_fit_shifted)
assert_allclose(kappa_fit, kappa_fit_shifted)
assert scale_fit == 1
assert -np.pi < loc_fit < np.pi
def test_vonmises_kappa_0_gh18166(self):
# Check that kappa = 0 is supported.
dist = stats.vonmises(0)
assert_allclose(dist.pdf(0), 1 / (2 * np.pi), rtol=1e-15)
assert_allclose(dist.cdf(np.pi/2), 0.75, rtol=1e-15)
assert_allclose(dist.sf(-np.pi/2), 0.75, rtol=1e-15)
assert_allclose(dist.ppf(0.9), np.pi*0.8, rtol=1e-15)
assert_allclose(dist.mean(), 0, atol=1e-15)
assert_allclose(dist.expect(), 0, atol=1e-15)
assert np.all(np.abs(dist.rvs(size=10, random_state=self.rng)) <= np.pi)
def test_vonmises_fit_equal_data(self):
# When all data are equal, expect kappa = 1e16.
kappa, loc, scale = stats.vonmises.fit([0])
assert kappa == 1e16 and loc == 0 and scale == 1
def test_vonmises_fit_bounds(self):
# For certain input data, the root bracket is violated numerically.
# Test that this situation is handled. The input data below are
# crafted to trigger the bound violation for the current choice of
# bounds and the specific way the bounds and the objective function
# are computed.
# Test that no exception is raised when the lower bound is violated.
scipy.stats.vonmises.fit([0, 3.7e-08], floc=0)
# Test that no exception is raised when the upper bound is violated.
scipy.stats.vonmises.fit([np.pi/2*(1-4.86e-9)], floc=0)
def _assert_less_or_close_loglike(dist, data, func=None, maybe_identical=False,
**kwds):
"""
This utility function checks that the negative log-likelihood function
(or `func`) of the result computed using dist.fit() is less than or equal
to the result computed using the generic fit method. Because of
normal numerical imprecision, the "equality" check is made using
`np.allclose` with a relative tolerance of 1e-15.
"""
if func is None:
func = dist.nnlf
mle_analytical = dist.fit(data, **kwds)
numerical_opt = super(type(dist), dist).fit(data, **kwds)
# Sanity check that the analytical MLE is actually executed.
# Due to floating point arithmetic, the generic MLE is unlikely
# to produce the exact same result as the analytical MLE.
if not maybe_identical:
assert np.any(mle_analytical != numerical_opt)
ll_mle_analytical = func(mle_analytical, data)
ll_numerical_opt = func(numerical_opt, data)
assert (ll_mle_analytical <= ll_numerical_opt or
np.allclose(ll_mle_analytical, ll_numerical_opt, rtol=1e-15))
# Ideally we'd check that shapes are correctly fixed, too, but that is
# complicated by the many ways of fixing them (e.g. f0, fix_a, fa).
if 'floc' in kwds:
assert mle_analytical[-2] == kwds['floc']
if 'fscale' in kwds:
assert mle_analytical[-1] == kwds['fscale']
def assert_fit_warnings(dist):
param = ['floc', 'fscale']
if dist.shapes:
nshapes = len(dist.shapes.split(","))
param += ['f0', 'f1', 'f2'][:nshapes]
all_fixed = dict(zip(param, np.arange(len(param))))
data = [1, 2, 3]
with pytest.raises(RuntimeError,
match="All parameters fixed. There is nothing "
"to optimize."):
dist.fit(data, **all_fixed)
with pytest.raises(ValueError,
match="The data contains non-finite values"):
dist.fit([np.nan])
with pytest.raises(ValueError,
match="The data contains non-finite values"):
dist.fit([np.inf])
with pytest.raises(TypeError, match="Unknown keyword arguments:"):
dist.fit(data, extra_keyword=2)
with pytest.raises(TypeError, match="Too many positional arguments."):
dist.fit(data, *[1]*(len(param) - 1))
@pytest.mark.parametrize('dist',
['alpha', 'betaprime',
'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gibrat',
'powerlognorm', 'rayleigh', 'wald'])
def test_support(dist):
"""gh-6235"""
dct = dict(distcont)
args = dct[dist]
dist = getattr(stats, dist)
assert_almost_equal(dist.pdf(dist.a, *args), 0)
assert_equal(dist.logpdf(dist.a, *args), -np.inf)
assert_almost_equal(dist.pdf(dist.b, *args), 0)
assert_equal(dist.logpdf(dist.b, *args), -np.inf)
| TestVonMises |
python | django__django | tests/custom_pk/fields.py | {
"start": 491,
"end": 1618
} | class ____(models.CharField):
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 10
super().__init__(*args, **kwargs)
def pre_save(self, instance, add):
value = getattr(instance, self.attname, None)
if not value:
value = MyWrapper("".join(random.sample(string.ascii_lowercase, 10)))
setattr(instance, self.attname, value)
return value
def to_python(self, value):
if not value:
return
if not isinstance(value, MyWrapper):
value = MyWrapper(value)
return value
def from_db_value(self, value, expression, connection):
if not value:
return
return MyWrapper(value)
def get_db_prep_save(self, value, connection):
if not value:
return
if isinstance(value, MyWrapper):
return str(value)
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not value:
return
if isinstance(value, MyWrapper):
return str(value)
return value
| MyWrapperField |
python | oauthlib__oauthlib | tests/openid/connect/core/grant_types/test_dispatchers.py | {
"start": 4101,
"end": 4780
} | class ____(DispatcherTest):
def setUp(self):
super().setUp()
self.request_validator.get_authorization_code_scopes.return_value = ('hello', 'world')
self.dispatcher = AuthorizationTokenGrantDispatcher(
self.request_validator,
default_grant=self.auth_grant,
oidc_grant=self.openid_connect_auth
)
def test_create_token_response_oauth(self):
handler = self.dispatcher._handler_for_request(self.request)
self.assertIsInstance(handler, OAuth2AuthorizationCodeGrant)
self.assertTrue(self.dispatcher.request_validator.get_authorization_code_scopes.called)
| AuthTokenGrantDispatcherOAuthTest |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 6720,
"end": 6823
} | class ____(SecurityWarning):
"""Warned when system time is suspected to be wrong"""
| SystemTimeWarning |
python | sphinx-doc__sphinx | sphinx/domains/python/__init__.py | {
"start": 9834,
"end": 11462
} | class ____(PyObject):
"""Description of an attribute."""
option_spec: ClassVar[OptionSpec] = PyObject.option_spec.copy()
option_spec.update({
'type': directives.unchanged,
'value': directives.unchanged,
})
def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:
fullname, prefix = super().handle_signature(sig, signode)
typ = self.options.get('type')
if typ:
annotations = _parse_annotation(typ, self.env)
signode += addnodes.desc_annotation(
typ,
'',
addnodes.desc_sig_punctuation('', ':'),
addnodes.desc_sig_space(),
*annotations,
)
value = self.options.get('value')
if value:
signode += addnodes.desc_annotation(
value,
'',
addnodes.desc_sig_space(),
addnodes.desc_sig_punctuation('', '='),
addnodes.desc_sig_space(),
nodes.Text(value),
)
return fullname, prefix
def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str:
name, _cls = name_cls
try:
clsname, attrname = name.rsplit('.', 1)
if modname and self.config.add_module_names:
clsname = f'{modname}.{clsname}'
except ValueError:
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return name
return _('%s (%s attribute)') % (attrname, clsname)
| PyAttribute |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/utils/__init__.py | {
"start": 1615,
"end": 4470
} | class ____(threading.Thread):
""" Convenience class for creating stoppable threads. """
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
self._stopped_event = Event()
@property
def stopped_event(self):
return self._stopped_event
def should_keep_running(self):
"""Determines whether the thread should continue running."""
return not self._stopped_event.is_set()
def on_thread_stop(self):
"""Override this method instead of :meth:`stop()`.
:meth:`stop()` calls this method.
This method is called immediately after the thread is signaled to stop.
"""
pass
def stop(self):
"""Signals the thread to stop."""
self._stopped_event.set()
self.on_thread_stop()
def on_thread_start(self):
"""Override this method instead of :meth:`start()`. :meth:`start()`
calls this method.
This method is called right before this thread is started and this
object’s run() method is invoked.
"""
pass
def start(self):
self.on_thread_start()
threading.Thread.start(self)
def load_module(module_name):
"""Imports a module given its name and returns a handle to it."""
try:
__import__(module_name)
except ImportError:
raise ImportError('No module named %s' % module_name)
return sys.modules[module_name]
def load_class(dotted_path):
"""Loads and returns a class definition provided a dotted path
specification the last part of the dotted path is the class name
and there is at least one module name preceding the class name.
Notes:
You will need to ensure that the module you are trying to load
exists in the Python path.
Examples:
- module.name.ClassName # Provided module.name is in the Python path.
- module.ClassName # Provided module is in the Python path.
What won't work:
- ClassName
- modle.name.ClassName # Typo in module name.
- module.name.ClasNam # Typo in classname.
"""
dotted_path_split = dotted_path.split('.')
if len(dotted_path_split) > 1:
klass_name = dotted_path_split[-1]
module_name = '.'.join(dotted_path_split[:-1])
module = load_module(module_name)
if has_attribute(module, klass_name):
klass = getattr(module, klass_name)
return klass
# Finally create and return an instance of the class
# return klass(*args, **kwargs)
else:
raise AttributeError('Module %s does not have class attribute %s' % (
module_name, klass_name))
else:
raise ValueError(
'Dotted module path %s must contain a module name and a classname' % dotted_path)
| BaseThread |
python | simplejson__simplejson | simplejson/tests/test_for_json.py | {
"start": 447,
"end": 2767
} | class ____(unittest.TestCase):
def assertRoundTrip(self, obj, other, for_json=True):
if for_json is None:
# None will use the default
s = json.dumps(obj)
else:
s = json.dumps(obj, for_json=for_json)
self.assertEqual(
json.loads(s),
other)
def test_for_json_encodes_stand_alone_object(self):
self.assertRoundTrip(
ForJson(),
ForJson().for_json())
def test_for_json_encodes_object_nested_in_dict(self):
self.assertRoundTrip(
{'hooray': ForJson()},
{'hooray': ForJson().for_json()})
def test_for_json_encodes_object_nested_in_list_within_dict(self):
self.assertRoundTrip(
{'list': [0, ForJson(), 2, 3]},
{'list': [0, ForJson().for_json(), 2, 3]})
def test_for_json_encodes_object_nested_within_object(self):
self.assertRoundTrip(
NestedForJson(),
{'nested': {'for_json': 1}})
def test_for_json_encodes_list(self):
self.assertRoundTrip(
ForJsonList(),
ForJsonList().for_json())
def test_for_json_encodes_list_within_object(self):
self.assertRoundTrip(
{'nested': ForJsonList()},
{'nested': ForJsonList().for_json()})
def test_for_json_encodes_dict_subclass(self):
self.assertRoundTrip(
DictForJson(a=1),
DictForJson(a=1).for_json())
def test_for_json_encodes_list_subclass(self):
self.assertRoundTrip(
ListForJson(['l']),
ListForJson(['l']).for_json())
def test_for_json_ignored_if_not_true_with_dict_subclass(self):
for for_json in (None, False):
self.assertRoundTrip(
DictForJson(a=1),
{'a': 1},
for_json=for_json)
def test_for_json_ignored_if_not_true_with_list_subclass(self):
for for_json in (None, False):
self.assertRoundTrip(
ListForJson(['l']),
['l'],
for_json=for_json)
def test_raises_typeerror_if_for_json_not_true_with_object(self):
self.assertRaises(TypeError, json.dumps, ForJson())
self.assertRaises(TypeError, json.dumps, ForJson(), for_json=False)
| TestForJson |
python | pytorch__pytorch | benchmarks/dynamo/common.py | {
"start": 47832,
"end": 53940
} | class ____:
cache: dict[weakref.ref, Any] = {}
@classmethod
def load(cls, model, example_inputs):
key = weakref.ref(model)
if key not in cls.cache:
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
if example_args:
jit_traced_module = torch.jit.trace(
model, example_inputs=example_args, strict=False
)
else:
jit_traced_module = torch.jit.trace(
model, example_kwarg_inputs=example_kwargs, strict=False
)
cls.cache[key] = jit_traced_module
return cls.cache[key]
def export(model, example_inputs):
from torch.export.dynamic_shapes import _combine_args, _tree_map_with_path
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
example_outputs = model(*example_args, **example_kwargs)
_register_dataclass_output_as_pytree(example_outputs)
combined_args = _combine_args(model, example_args, example_kwargs)
dynamic_shapes = _tree_map_with_path(
_produce_dynamic_shapes_for_export, combined_args
)
# NOTE: if args.export is ever enabled for --performance mode (rather than solely
# --accuracy), we'll need to clone the model and subtract out extra memory usage, as
# done in AOTInductorModelCache.
ep = torch.export.export(
model, example_args, example_kwargs, dynamic_shapes=dynamic_shapes, strict=True
)
def opt_export(_, example_inputs):
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
return ep.module()(*example_args, **example_kwargs)
return opt_export
def aot_precompile(model, example_inputs):
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
with tempfile.NamedTemporaryFile(suffix=".pt", delete=False) as f:
save_path = f.name
with fresh_cache(), torch._dynamo.config.patch("enable_aot_compile", True):
compiled_fn = torch.compile(
model,
fullgraph=True,
options={"guard_filter_fn": lambda guards: [False for _ in guards]},
).forward.aot_compile((example_args, example_kwargs))
compiled_fn.save_compiled_function(save_path)
torch._dynamo.reset()
with open(save_path, "rb") as f:
load_start_time = time.perf_counter()
loaded_fn = torch.compiler.load_compiled_function(f)
load_end_time = time.perf_counter()
print(
f"AOT Precompile loading time: {load_end_time - load_start_time} seconds"
)
def opt_aot_precompile(_, example_inputs, collect_outputs=False):
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
return loaded_fn(model, *example_args, **example_kwargs)
return opt_aot_precompile
def export_nativert(model, example_inputs):
optimized = NativeRTCache.load(model, example_inputs)
def opt_nativert(_, example_inputs, collect_outputs=False):
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
return optimized.run(*example_args, **example_kwargs)
return opt_nativert
def export_aot_inductor(model, example_inputs, mode):
optimized = AOTInductorModelCache.load(model, example_inputs, mode)
def opt_aot_inductor(_, example_inputs, collect_outputs=False):
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
return optimized(*example_args, **example_kwargs)
return opt_aot_inductor
def torchscript_jit_trace(model, example_inputs):
optimized = JitTracedCache.load(model, example_inputs)
def opt_jit_trace(_, example_inputs, collect_outputs=False):
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
return optimized(*example_args, **example_kwargs)
return opt_jit_trace
def download_retry_decorator(download_fn):
"""
Decorator function for applying retry logic to a download function.
The wrapped function will be called up to 5 times and raises an exception if the function fails each time.
After each unsuccessful attempt, there is a delay before the next attempt, which is increased linearly with the number of tries.
Usage:
@download_retry_decorator
def download_function(model_name: str):
# download logic goes here
"""
@functools.wraps(download_fn)
def wrapper(self, *args, **kwargs) -> Any:
tries = 0
total_allowed_tries = MAX_DOWNLOAD_ATTEMPTS
while tries <= total_allowed_tries:
try:
model = download_fn(self, *args, **kwargs)
return model
except Exception as e:
tries += 1
if tries <= total_allowed_tries:
wait = tries * 30
print(
f"Failed to load model: {e}. Trying again ({tries}/{total_allowed_tries}) after {wait}s"
)
time.sleep(wait)
else:
raise RuntimeError( # noqa: B904
f"Failed to load model '{args}' with following error(s): {str(e)}."
)
return wrapper
def read_batch_size_from_file(args, filename, model_name):
batch_size = None
if os.path.exists("benchmarks"):
filename = os.path.join("benchmarks", filename)
assert os.path.exists(filename), filename
with open(filename) as f:
lines = f.readlines()
lines = [i.split(",") for i in lines if len(i.strip()) > 0]
for val in lines:
cur_name, b = val
if model_name == cur_name:
batch_size = int(b)
if batch_size is None:
log.warning("Could not find batch size for %s", model_name)
elif batch_size == -1:
raise RuntimeError(
f"Batch size is unset for {model_name} in {args.batch_size_file}"
)
print(f"batch size: {batch_size}")
return batch_size
| JitTracedCache |
python | redis__redis-py | tests/test_credentials.py | {
"start": 9577,
"end": 19731
} | class ____:
@pytest.mark.parametrize(
"credential_provider",
[
{
"cred_provider_class": EntraIdCredentialsProvider,
"cred_provider_kwargs": {"expiration_refresh_ratio": 0.00005},
"mock_idp": True,
}
],
indirect=True,
)
def test_re_auth_all_connections(self, credential_provider):
mock_connection = Mock(spec=ConnectionInterface)
mock_connection.retry = Retry(NoBackoff(), 0)
mock_another_connection = Mock(spec=ConnectionInterface)
mock_pool = Mock(spec=ConnectionPool)
mock_pool.connection_kwargs = {
"credential_provider": credential_provider,
}
mock_pool.get_connection.return_value = mock_connection
mock_pool._available_connections = [mock_connection, mock_another_connection]
mock_pool._lock = threading.RLock()
auth_token = None
def re_auth_callback(token):
nonlocal auth_token
auth_token = token
with mock_pool._lock:
for conn in mock_pool._available_connections:
conn.send_command("AUTH", token.try_get("oid"), token.get_value())
conn.read_response()
mock_pool.re_auth_callback = re_auth_callback
Redis(
connection_pool=mock_pool,
credential_provider=credential_provider,
)
credential_provider.get_credentials()
sleep(0.5)
mock_connection.send_command.assert_has_calls(
[call("AUTH", auth_token.try_get("oid"), auth_token.get_value())]
)
mock_another_connection.send_command.assert_has_calls(
[call("AUTH", auth_token.try_get("oid"), auth_token.get_value())]
)
@pytest.mark.parametrize(
"credential_provider",
[
{
"cred_provider_class": EntraIdCredentialsProvider,
"cred_provider_kwargs": {"expiration_refresh_ratio": 0.00005},
"mock_idp": True,
}
],
indirect=True,
)
def test_re_auth_partial_connections(self, credential_provider):
mock_connection = Mock(spec=ConnectionInterface)
mock_connection.retry = Retry(NoBackoff(), 3)
mock_another_connection = Mock(spec=ConnectionInterface)
mock_another_connection.retry = Retry(NoBackoff(), 3)
mock_failed_connection = Mock(spec=ConnectionInterface)
mock_failed_connection.read_response.side_effect = ConnectionError(
"Failed auth"
)
mock_failed_connection.retry = Retry(NoBackoff(), 3)
mock_pool = Mock(spec=ConnectionPool)
mock_pool.connection_kwargs = {
"credential_provider": credential_provider,
}
mock_pool.get_connection.return_value = mock_connection
mock_pool._available_connections = [
mock_connection,
mock_another_connection,
mock_failed_connection,
]
mock_pool._lock = threading.RLock()
def _raise(error: RedisError):
pass
def re_auth_callback(token):
with mock_pool._lock:
for conn in mock_pool._available_connections:
conn.retry.call_with_retry(
lambda: conn.send_command(
"AUTH", token.try_get("oid"), token.get_value()
),
lambda error: _raise(error),
)
conn.retry.call_with_retry(
lambda: conn.read_response(), lambda error: _raise(error)
)
mock_pool.re_auth_callback = re_auth_callback
Redis(
connection_pool=mock_pool,
credential_provider=credential_provider,
)
credential_provider.get_credentials()
sleep(0.5)
mock_connection.read_response.assert_has_calls([call()])
mock_another_connection.read_response.assert_has_calls([call()])
mock_failed_connection.read_response.assert_has_calls([call(), call(), call()])
@pytest.mark.parametrize(
"credential_provider",
[
{
"cred_provider_class": EntraIdCredentialsProvider,
"cred_provider_kwargs": {"expiration_refresh_ratio": 0.00005},
"mock_idp": True,
}
],
indirect=True,
)
def test_re_auth_pub_sub_in_resp3(self, credential_provider):
mock_pubsub_connection = Mock(spec=ConnectionInterface)
mock_pubsub_connection.get_protocol.return_value = 3
mock_pubsub_connection.should_reconnect = Mock(return_value=False)
mock_pubsub_connection.credential_provider = credential_provider
mock_pubsub_connection.retry = Retry(NoBackoff(), 3)
mock_another_connection = Mock(spec=ConnectionInterface)
mock_another_connection.retry = Retry(NoBackoff(), 3)
mock_pool = Mock(spec=ConnectionPool)
mock_pool.connection_kwargs = {
"credential_provider": credential_provider,
}
mock_pool.get_connection.side_effect = [
mock_pubsub_connection,
mock_another_connection,
]
mock_pool._available_connections = [mock_another_connection]
mock_pool._lock = threading.RLock()
auth_token = None
def re_auth_callback(token):
nonlocal auth_token
auth_token = token
with mock_pool._lock:
for conn in mock_pool._available_connections:
conn.send_command("AUTH", token.try_get("oid"), token.get_value())
conn.read_response()
mock_pool.re_auth_callback = re_auth_callback
r = Redis(
connection_pool=mock_pool,
credential_provider=credential_provider,
)
p = r.pubsub()
p.subscribe("test")
credential_provider.get_credentials()
sleep(0.5)
mock_pubsub_connection.send_command.assert_has_calls(
[
call("SUBSCRIBE", "test", check_health=True),
call("AUTH", auth_token.try_get("oid"), auth_token.get_value()),
]
)
mock_another_connection.send_command.assert_has_calls(
[call("AUTH", auth_token.try_get("oid"), auth_token.get_value())]
)
@pytest.mark.parametrize(
"credential_provider",
[
{
"cred_provider_class": EntraIdCredentialsProvider,
"cred_provider_kwargs": {"expiration_refresh_ratio": 0.00005},
"mock_idp": True,
}
],
indirect=True,
)
def test_do_not_re_auth_pub_sub_in_resp2(self, credential_provider):
mock_pubsub_connection = Mock(spec=ConnectionInterface)
mock_pubsub_connection.get_protocol.return_value = 2
mock_pubsub_connection.should_reconnect = Mock(return_value=False)
mock_pubsub_connection.credential_provider = credential_provider
mock_pubsub_connection.retry = Retry(NoBackoff(), 3)
mock_another_connection = Mock(spec=ConnectionInterface)
mock_another_connection.retry = Retry(NoBackoff(), 3)
mock_pool = Mock(spec=ConnectionPool)
mock_pool.connection_kwargs = {
"credential_provider": credential_provider,
}
mock_pool.get_connection.side_effect = [
mock_pubsub_connection,
mock_another_connection,
]
mock_pool._available_connections = [mock_another_connection]
mock_pool._lock = threading.RLock()
auth_token = None
def re_auth_callback(token):
nonlocal auth_token
auth_token = token
with mock_pool._lock:
for conn in mock_pool._available_connections:
conn.send_command("AUTH", token.try_get("oid"), token.get_value())
conn.read_response()
mock_pool.re_auth_callback = re_auth_callback
r = Redis(
connection_pool=mock_pool,
credential_provider=credential_provider,
)
p = r.pubsub()
p.subscribe("test")
credential_provider.get_credentials()
sleep(0.5)
mock_pubsub_connection.send_command.assert_has_calls(
[
call("SUBSCRIBE", "test", check_health=True),
]
)
mock_another_connection.send_command.assert_has_calls(
[call("AUTH", auth_token.try_get("oid"), auth_token.get_value())]
)
@pytest.mark.parametrize(
"credential_provider",
[
{
"cred_provider_class": EntraIdCredentialsProvider,
"cred_provider_kwargs": {"expiration_refresh_ratio": 0.00005},
"mock_idp": True,
}
],
indirect=True,
)
def test_fails_on_token_renewal(self, credential_provider):
credential_provider._token_mgr._idp.request_token.side_effect = [
RequestTokenErr,
RequestTokenErr,
RequestTokenErr,
RequestTokenErr,
]
mock_connection = Mock(spec=ConnectionInterface)
mock_connection.retry = Retry(NoBackoff(), 0)
mock_another_connection = Mock(spec=ConnectionInterface)
mock_pool = Mock(spec=ConnectionPool)
mock_pool.connection_kwargs = {
"credential_provider": credential_provider,
}
mock_pool.get_connection.return_value = mock_connection
mock_pool._available_connections = [mock_connection, mock_another_connection]
mock_pool._lock = threading.RLock()
Redis(
connection_pool=mock_pool,
credential_provider=credential_provider,
)
with pytest.raises(RequestTokenErr):
credential_provider.get_credentials()
@pytest.mark.onlynoncluster
@pytest.mark.cp_integration
@pytest.mark.skipif(not EntraIdCredentialsProvider, reason="requires redis-entraid")
| TestStreamingCredentialProvider |
python | numba__numba | numba/core/types/misc.py | {
"start": 3255,
"end": 3455
} | class ____(Dummy):
def __init__(self, pymod):
self.pymod = pymod
super(Module, self).__init__("Module(%s)" % pymod)
@property
def key(self):
return self.pymod
| Module |
python | python__mypy | mypy/nodes.py | {
"start": 86809,
"end": 87401
} | class ____(Expression):
"""Conditional expression (e.g. x if y else z)"""
__slots__ = ("cond", "if_expr", "else_expr")
__match_args__ = ("if_expr", "cond", "else_expr")
cond: Expression
if_expr: Expression
else_expr: Expression
def __init__(self, cond: Expression, if_expr: Expression, else_expr: Expression) -> None:
super().__init__()
self.cond = cond
self.if_expr = if_expr
self.else_expr = else_expr
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_conditional_expr(self)
| ConditionalExpr |
python | ansible__ansible | test/integration/targets/inventory/inventory_plugins/constructed_with_hostvars.py | {
"start": 685,
"end": 2039
} | class ____(BaseInventoryPlugin, Constructable):
NAME = 'constructed_with_hostvars'
def verify_file(self, path):
return super(InventoryModule, self).verify_file(path) and path.endswith(('constructed.yml', 'constructed.yaml'))
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path, cache)
config = self._read_config_data(path)
with _testing.hard_fail_context("ensure config defaults are trusted and runnable as expressions") as ctx:
ctx.check(self._compose(self.get_option('plugin_expression'), variables={}) == 2)
ctx.check(self._compose(self.get_option('fragment_expression'), variables={}) == 4)
strict = self.get_option('strict')
try:
for host in inventory.hosts:
hostvars = {}
# constructed groups based on conditionals
self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host, strict=strict, fetch_hostvars=True)
# constructed groups based variable values
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict, fetch_hostvars=True)
except Exception as ex:
raise AnsibleParserError(f"Failed to parse {path}.") from ex
| InventoryModule |
python | pytorch__pytorch | test/inductor/test_aot_inductor.py | {
"start": 280734,
"end": 288075
} | class ____(TestCase):
def test_no_compile_standalone(self):
with config.patch({"aot_inductor_mode.compile_standalone": False}):
result = maybe_aoti_standalone_config({})
self.assertEqual(result, {})
def test_compile_standalone_sets_package_cpp(self):
result = maybe_aoti_standalone_config(
{"aot_inductor_mode.compile_standalone": True}
)
self.assertEqual(result["aot_inductor.package_cpp_only"], True)
self.assertEqual(result["aot_inductor_mode.compile_standalone"], True)
self.assertEqual(result["aot_inductor.embed_kernel_binary"], True)
self.assertEqual(
result["aot_inductor.emit_multi_arch_kernel"], not torch.version.hip
)
self.assertEqual(
result["aot_inductor.model_name_for_generated_files"], "aoti_model"
)
self.assertEqual(result["aot_inductor.dynamic_linkage"], False)
def test_compile_standalone_explicit_set(self):
patches = {
"aot_inductor_mode.compile_standalone": True,
"aot_inductor.package_cpp_only": True,
"aot_inductor.embed_kernel_binary": True,
"aot_inductor.dynamic_linkage": False,
"aot_inductor.link_libtorch": False,
"aot_inductor.emit_multi_arch_kernel": not torch.version.hip,
"aot_inductor.model_name_for_generated_files": "aoti_model",
}
result = maybe_aoti_standalone_config(patches)
self.assertEqual(result, patches)
def test_compile_standalone_package_cpp_false_raises(self):
patches = {
"aot_inductor_mode.compile_standalone": True,
"aot_inductor.package_cpp_only": False,
}
with self.assertRaises(RuntimeError):
maybe_aoti_standalone_config(patches)
with config.patch({"aot_inductor.package_cpp_only": False}):
patches = {
"aot_inductor_mode.compile_standalone": True,
}
with self.assertRaises(RuntimeError):
maybe_aoti_standalone_config(patches)
def test_compile_standalone_cross_compile_windows_package_format(self):
patches = {
"aot_inductor.cross_target_platform": "windows",
"aot_inductor.package_constants_in_so": True,
}
with self.assertRaises(RuntimeError):
maybe_aoti_standalone_config(patches)
common_utils.instantiate_parametrized_tests(AOTInductorTestsTemplate)
def fail_cpu(is_skip=False):
return TestFailure(
("cpu",),
is_skip=is_skip,
)
def fail_mps(is_skip=False):
return TestFailure(
("mps",),
is_skip=is_skip,
)
def fail_gpu(suffixes: tuple[str, ...], is_skip=False):
return TestFailure(
suffixes,
is_skip=is_skip,
)
# test_failures, xfail by default, set is_skip=True to skip
CPU_TEST_FAILURES = {
# TODO: failed internally
"test_multiple_output_alias": fail_cpu(is_skip=True),
}
# test_failures, xfail by default, set is_skip=True to skip
GPU_TEST_FAILURES = {
# quantized unsupported for GPU
"test_quantized_linear": fail_gpu(("cuda", "xpu")),
"test_quanatized_int8_linear": fail_gpu(("cuda", "xpu")),
"test_quantized_linear_bias_none": fail_gpu(("cuda", "xpu")),
# No scaled_dot_product_efficient_attention implementation for XPU yet.
"test_scaled_dot_product_efficient_attention": fail_gpu(("xpu",)),
}
MPS_TEST_FAILURES = {
# aten::_scaled_dot_product_efficient_attention is not currently implemented for the MPS device.
"test_scaled_dot_product_efficient_attention": fail_mps(),
# aten::_int_mm is not implemented for MPS backend
"test__int_mm": fail_mps(),
# MPS doesn't support float64
"test_while_loop_with_conv_dynamic_True": fail_mps(),
"test_while_loop_with_conv_dynamic_False": fail_mps(),
# MPS doesn't support float8
"test_fp8": fail_mps(),
"test_fp8_view_of_param": fail_mps(),
# cannot initialize a parameter of type 'double' with an rvalue of type 'std::nullptr_t'
"test_fallback_kernel_with_symexpr_output": fail_mps(),
# correctness issue
"test_index_put_with_none_index": fail_mps(),
# Error device may not be nil
"test_zero_size_weight": fail_mps(is_skip=True),
# MPSGraph does not support tensor dims > INT_MAX
"test_upper_bound_i64": fail_mps(is_skip=True),
# MPS doesn't support triton
"test_autotuning_args_reuse": fail_mps(),
"test_triton_autotuning": fail_mps(),
"test_triton_dynamic_launcher_grid": fail_mps(),
"test_triton_dynamic_launcher_grid_infer_from_tensor": fail_mps(),
"test_triton_kernel_on_device_tma_dynamic_False_tma_version_new": fail_mps(),
"test_triton_kernel_on_device_tma_dynamic_False_tma_version_old": fail_mps(),
"test_triton_kernel_on_device_tma_dynamic_True_tma_version_new": fail_mps(),
"test_triton_kernel_on_device_tma_dynamic_True_tma_version_old": fail_mps(),
"test_size_with_unbacked_add_expr_transitive": fail_mps(),
"test_size_with_unbacked_add_and_mul_expr": fail_mps(),
"test_triton_next_power_of_2": fail_mps(),
"test_sympy_cpp_printer_min_max_minmax0": fail_mps(),
"test_sympy_cpp_printer_min_max_minmax1": fail_mps(),
"test_triton_kernel_dynamic_shape_with_div": fail_mps(),
"test_triton_kernel_reinterpret_view": fail_mps(),
"test_triton_kernel_tma_descriptor_1d_dynamic_False_tma_version_new_mps": fail_mps(),
"test_triton_kernel_tma_descriptor_1d_dynamic_False_tma_version_old_mps": fail_mps(),
"test_triton_kernel_tma_descriptor_1d_dynamic_True_tma_version_new_mps": fail_mps(),
"test_triton_kernel_tma_descriptor_1d_dynamic_True_tma_version_old_mps": fail_mps(),
"test_triton_kernel_tma_descriptor_2d_dynamic_False_tma_version_new_mps": fail_mps(),
"test_triton_kernel_tma_descriptor_2d_dynamic_False_tma_version_old_mps": fail_mps(),
"test_triton_kernel_tma_descriptor_2d_dynamic_True_tma_version_new_mps": fail_mps(),
"test_triton_kernel_tma_descriptor_2d_dynamic_True_tma_version_old_mps": fail_mps(),
"test_triton_kernel_sympy_expr_arg": fail_mps(),
"test_triton_kernel_sympy_fn_like_arg": fail_mps(),
"test_triton_kernel_with_none_input": fail_mps(),
"test_triton_kernel_equal_to_1_arg": fail_mps(),
"test_triton_kernel_with_none_inputs_and_equal_to_1_arg": fail_mps(),
"test_triton_kernel_equal_to_1_float_arg_dynamic_True": fail_mps(),
"test_triton_kernel_equal_to_1_float_arg_dynamic_False": fail_mps(),
"test_triton_kernel_weird_param_order": fail_mps(),
"test_triton_kernel_dynamic_grid": fail_mps(),
"test_repeated_user_defined_triton_kernel_embed_kernel_binary_False": fail_mps(),
"test_repeated_user_defined_triton_kernel_embed_kernel_binary_True": fail_mps(),
"test_triton_kernel_extern_kernel_arg": fail_mps(),
"test_triton_kernel_multi_output_arg": fail_mps(),
"test_triton_kernel_reinterpret_view_mem_leak": fail_mps(),
"test_triton_mutated_autotuning": fail_mps(),
"test_sym_i64_input_codegen": fail_mps(),
"test_none_args_aot_codegen": fail_mps(),
"test_aoti_debug_printer_sym_inputs": fail_mps(),
"test_aoti_debug_printer_user_defined_triton_kernel": fail_mps(),
"test_autotune_int64_user_defined_triton_kernel": fail_mps(),
}
| TestAOTInductorConfig |
python | huggingface__transformers | tests/models/mbart50/test_tokenization_mbart50.py | {
"start": 2992,
"end": 8857
} | class ____(unittest.TestCase):
checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def setUpClass(cls):
cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
)
cls.pad_token_id = 1
return cls
def check_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
def test_tokenizer_batch_encode_plus(self):
ids = self.tokenizer(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, ids)
def test_tokenizer_decode_ignores_language_codes(self):
self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
self.assertEqual(result, expected_romanian)
self.assertNotIn(self.tokenizer.eos_token, result)
def test_tokenizer_truncation(self):
src_text = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0], str)
desired_max_length = 10
ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
self.assertEqual(ids[0], EN_CODE)
self.assertEqual(ids[-1], 2)
self.assertEqual(len(ids), desired_max_length)
def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
def test_special_tokens_unaffacted_by_save_load(self):
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname)
new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def test_batch_fairseq_parity(self):
batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def test_tokenizer_prepare_batch(self):
batch = self.tokenizer(
self.src_text,
text_target=self.tgt_text,
padding=True,
truncation=True,
max_length=len(self.expected_src_tokens),
return_tensors="pt",
)
batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual((2, 14), batch.input_ids.shape)
self.assertEqual((2, 14), batch.attention_mask.shape)
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2, batch.decoder_input_ids[0, 0]) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
def test_seq2seq_max_target_length(self):
batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
targets = self.tokenizer(
text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
)
labels = targets["input_ids"]
batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def test_tokenizer_translation(self):
inputs = self.tokenizer._build_translation_inputs(
"A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
)
self.assertEqual(
nested_simplify(inputs),
{
# en_XX, A, test, EOS
"input_ids": [[250004, 62, 3034, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
},
)
| MBart50OneToManyIntegrationTest |
python | doocs__leetcode | solution/0900-0999/0971.Flip Binary Tree To Match Preorder Traversal/Solution.py | {
"start": 192,
"end": 864
} | class ____:
def flipMatchVoyage(self, root: Optional[TreeNode], voyage: List[int]) -> List[int]:
def dfs(root):
nonlocal i, ok
if root is None or not ok:
return
if root.val != voyage[i]:
ok = False
return
i += 1
if root.left is None or root.left.val == voyage[i]:
dfs(root.left)
dfs(root.right)
else:
ans.append(root.val)
dfs(root.right)
dfs(root.left)
ans = []
i = 0
ok = True
dfs(root)
return ans if ok else [-1]
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_object_position05.py | {
"start": 315,
"end": 872
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox("E9", "This is some text", {"object_position": 2})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | nedbat__coveragepy | tests/test_sqlitedb.py | {
"start": 634,
"end": 4852
} | class ____(CoverageTest):
"""Tests of tricky parts of SqliteDb."""
def test_error_reporting(self) -> None:
msg = "Couldn't use data file 'test.db': no such table: bar"
with SqliteDb("test.db", DebugControlString(options=["sql"])) as db:
with pytest.raises(DataError, match=msg):
with db.execute("select foo from bar"):
# Entering the context manager raises the error, this line doesn't run:
pass # pragma: not covered
def test_retry_execute(self) -> None:
with SqliteDb("test.db", DebugControlString(options=["sql"])) as db:
db.executescript(DB_INIT)
proxy = FailingProxy(db.con, "execute", [Exception("WUT")])
with mock.patch.object(db, "con", proxy):
with db.execute("select first from name order by 1") as cur:
assert list(cur) == [("pablo",)]
def test_retry_execute_failure(self) -> None:
with SqliteDb("test.db", DebugControlString(options=["sql"])) as db:
db.executescript(DB_INIT)
proxy = FailingProxy(db.con, "execute", [Exception("WUT"), RuntimeError("Fake")])
with mock.patch.object(db, "con", proxy):
with pytest.raises(RuntimeError, match="Fake"):
with db.execute("select first from name order by 1"):
# Entering the context manager raises the error, this line doesn't run:
pass # pragma: not covered
def test_retry_executemany_void(self) -> None:
with SqliteDb("test.db", DebugControlString(options=["sql"])) as db:
db.executescript(DB_INIT)
proxy = FailingProxy(db.con, "executemany", [Exception("WUT")])
with mock.patch.object(db, "con", proxy):
db.executemany_void(
"insert into name (first, last) values (?, ?)",
[("vincent", "van gogh")],
)
with db.execute("select first from name order by 1") as cur:
assert list(cur) == [("pablo",), ("vincent",)]
def test_retry_executemany_void_failure(self) -> None:
with SqliteDb("test.db", DebugControlString(options=["sql"])) as db:
db.executescript(DB_INIT)
proxy = FailingProxy(db.con, "executemany", [Exception("WUT"), RuntimeError("Fake")])
with mock.patch.object(db, "con", proxy):
with pytest.raises(RuntimeError, match="Fake"):
db.executemany_void(
"insert into name (first, last) values (?, ?)",
[("vincent", "van gogh")],
)
def test_open_fails_on_bad_db(self) -> None:
self.make_file("bad.db", "boogers")
def fake_failing_open(filename: str, mode: str) -> NoReturn:
assert (filename, mode) == ("bad.db", "rb")
raise RuntimeError("No you can't!")
with mock.patch.object(coverage.sqlitedb, "open", fake_failing_open):
msg = "Couldn't use data file 'bad.db': file is not a database"
with pytest.raises(DataError, match=msg):
with SqliteDb("bad.db", DebugControlString(options=["sql"])):
pass # pragma: not covered
def test_execute_void_can_allow_failure(self) -> None:
with SqliteDb("fail.db", DebugControlString(options=["sql"])) as db:
db.executescript(DB_INIT)
proxy = FailingProxy(db.con, "execute", [Exception("WUT")])
with mock.patch.object(db, "con", proxy):
db.execute_void("select x from nosuchtable", fail_ok=True)
def test_execute_void_can_refuse_failure(self) -> None:
with SqliteDb("fail.db", DebugControlString(options=["sql"])) as db:
db.executescript(DB_INIT)
proxy = FailingProxy(db.con, "execute", [Exception("WUT")])
with mock.patch.object(db, "con", proxy):
msg = "Couldn't use data file 'fail.db': no such table: nosuchtable"
with pytest.raises(DataError, match=msg):
db.execute_void("select x from nosuchtable", fail_ok=False)
| SqliteDbTest |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/exported_python_args.py | {
"start": 932,
"end": 1368
} | class ____(tf.Module):
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def some_function(self, x):
return self.callee(x)
# CHECK: While importing SavedModel function 'callee': in input signature:
# CHECK-SAME: Unhandled structured value kind {{.*}} at index path: <value>.1.foo
@tf.function
def callee(self, x, n={'foo': 42}):
return x
if __name__ == '__main__':
common.do_test(TestModule)
| TestModule |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/dms.py | {
"start": 14052,
"end": 17076
} | class ____(AwsBaseOperator[DmsHook]):
"""
Creates an AWS DMS Serverless replication configuration.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsCreateReplicationConfigOperator`
:param replication_config_id: Unique identifier used to create a ReplicationConfigArn.
:param source_endpoint_arn: ARN of the source endpoint
:param target_endpoint_arn: ARN of the target endpoint
:param compute_config: Parameters for provisioning an DMS Serverless replication.
:param replication_type: type of DMS Serverless replication
:param table_mappings: JSON table mappings
:param tags: Key-value tag pairs
:param additional_config_kwargs: Additional configuration parameters for DMS Serverless replication. Passed directly to the API
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
"""
aws_hook_class = DmsHook
template_fields: Sequence[str] = aws_template_fields(
"replication_config_id",
"source_endpoint_arn",
"target_endpoint_arn",
"compute_config",
"replication_type",
"table_mappings",
)
template_fields_renderers = {"compute_config": "json", "tableMappings": "json"}
def __init__(
self,
*,
replication_config_id: str,
source_endpoint_arn: str,
target_endpoint_arn: str,
compute_config: dict[str, Any],
replication_type: str,
table_mappings: str,
additional_config_kwargs: dict | None = None,
aws_conn_id: str | None = "aws_default",
**kwargs,
):
super().__init__(
aws_conn_id=aws_conn_id,
**kwargs,
)
self.replication_config_id = replication_config_id
self.source_endpoint_arn = source_endpoint_arn
self.target_endpoint_arn = target_endpoint_arn
self.compute_config = compute_config
self.replication_type = replication_type
self.table_mappings = table_mappings
self.additional_config_kwargs = additional_config_kwargs or {}
def execute(self, context: Context) -> str:
resp = self.hook.create_replication_config(
replication_config_id=self.replication_config_id,
source_endpoint_arn=self.source_endpoint_arn,
target_endpoint_arn=self.target_endpoint_arn,
compute_config=self.compute_config,
replication_type=self.replication_type,
table_mappings=self.table_mappings,
additional_config_kwargs=self.additional_config_kwargs,
)
self.log.info("DMS replication config(%s) has been created.", self.replication_config_id)
return resp
| DmsCreateReplicationConfigOperator |
python | getsentry__sentry | tests/sentry/deletions/test_sentry_app.py | {
"start": 752,
"end": 3632
} | class ____(TestCase):
def setUp(self) -> None:
self.user = self.create_user()
self.org = self.create_organization()
self.sentry_app = self.create_sentry_app(
name="blah", organization=self.org, scopes=("project:read",)
)
def test_deletes_app_installations(self) -> None:
install = self.create_sentry_app_installation(
organization=self.org, slug=self.sentry_app.slug, user=self.user
)
deletions.exec_sync(self.sentry_app)
assert not SentryAppInstallation.objects.filter(pk=install.id).exists()
def test_deletes_api_application(self) -> None:
application = self.sentry_app.application
deletions.exec_sync(self.sentry_app)
assert not ApiApplication.objects.filter(pk=application.id).exists()
def test_deletes_proxy_user(self) -> None:
proxy_user = self.sentry_app.proxy_user
deletions.exec_sync(self.sentry_app)
assert not User.objects.filter(pk=proxy_user.id).exists()
def test_soft_deletes_sentry_app(self) -> None:
deletions.exec_sync(self.sentry_app)
with pytest.raises(SentryApp.DoesNotExist):
SentryApp.objects.get(pk=self.sentry_app.id)
# The QuerySet will automatically NOT include deleted installs, so we
# use a raw sql query to ensure it still exists.
c = connections[router.db_for_write(SentryApp)].cursor()
c.execute(
"SELECT count(1) "
"FROM sentry_sentryapp "
"WHERE id = %s AND date_deleted IS NOT NULL",
[self.sentry_app.id],
)
assert c.fetchone()[0] == 1
def test_disables_actions(self) -> None:
action = self.create_action(
type=Action.Type.SENTRY_APP,
config={
"target_identifier": str(self.sentry_app.id),
"sentry_app_identifier": SentryAppIdentifier.SENTRY_APP_ID,
"target_type": ActionTarget.SENTRY_APP,
},
)
webhook_action = self.create_action(
type=Action.Type.WEBHOOK,
config={
"target_identifier": self.sentry_app.slug,
},
)
other_action = self.create_action(
type=Action.Type.SENTRY_APP,
config={
"target_identifier": "1212121212",
"sentry_app_identifier": SentryAppIdentifier.SENTRY_APP_ID,
"target_type": ActionTarget.SENTRY_APP,
},
)
deletions.exec_sync(self.sentry_app)
action.refresh_from_db()
assert action.status == ObjectStatus.DISABLED
webhook_action.refresh_from_db()
assert webhook_action.status == ObjectStatus.DISABLED
other_action.refresh_from_db()
assert other_action.status == ObjectStatus.ACTIVE
| TestSentryAppDeletionTask |
python | getsentry__sentry | src/sentry/sentry_apps/metrics.py | {
"start": 1798,
"end": 2107
} | class ____(StrEnum):
"""Reasons why sentry app webhooks can halt"""
GOT_CLIENT_ERROR = "got_client_error"
INTEGRATOR_ERROR = "integrator_error"
MISSING_INSTALLATION = "missing_installation"
RESTRICTED_IP = "restricted_ip"
CONNECTION_RESET = "connection_reset"
| SentryAppWebhookHaltReason |
python | scikit-learn__scikit-learn | sklearn/cluster/_birch.py | {
"start": 13018,
"end": 26062
} | class ____(
ClassNamePrefixFeaturesOutMixin, ClusterMixin, TransformerMixin, BaseEstimator
):
"""Implements the BIRCH clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
.. versionadded:: 0.16
Parameters
----------
threshold : float, default=0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default=50
Maximum number of CF subclusters in each node. If a new samples enters
such that the number of subclusters exceed the branching_factor then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model or None, default=3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- :mod:`sklearn.cluster` Estimator : If a model is provided, the model
is fit treating the subclusters as new samples and the initial data
is mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default=True
Whether or not to compute labels for each fit.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray of shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
MiniBatchKMeans : Alternative implementation that does incremental updates
of the centers' positions using mini-batches.
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
See :ref:`sphx_glr_auto_examples_cluster_plot_birch_vs_minibatchkmeans.py` for a
comparison with :class:`~sklearn.cluster.MiniBatchKMeans`.
References
----------
* Tian Zhang, Raghu Ramakrishnan, Maron Livny
BIRCH: An efficient data clustering method for large databases.
https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(n_clusters=None)
>>> brc.fit(X)
Birch(n_clusters=None)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
For a comparison of the BIRCH clustering algorithm with other clustering algorithms,
see :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`
"""
_parameter_constraints: dict = {
"threshold": [Interval(Real, 0.0, None, closed="neither")],
"branching_factor": [Interval(Integral, 1, None, closed="neither")],
"n_clusters": [None, ClusterMixin, Interval(Integral, 1, None, closed="left")],
"compute_labels": ["boolean"],
}
def __init__(
self,
*,
threshold=0.5,
branching_factor=50,
n_clusters=3,
compute_labels=True,
):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
return self._fit(X, partial=False)
def _fit(self, X, partial):
has_root = getattr(self, "root_", None)
first_call = not (partial and has_root)
X = validate_data(
self,
X,
accept_sparse="csr",
reset=first_call,
dtype=[np.float64, np.float32],
)
threshold = self.threshold
branching_factor = self.branching_factor
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
if first_call:
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(
threshold=threshold,
branching_factor=branching_factor,
is_leaf=True,
n_features=n_features,
dtype=X.dtype,
)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(
threshold=threshold,
branching_factor=branching_factor,
is_leaf=True,
n_features=n_features,
dtype=X.dtype,
)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor
)
del self.root_
self.root_ = _CFNode(
threshold=threshold,
branching_factor=branching_factor,
is_leaf=False,
n_features=n_features,
dtype=X.dtype,
)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._n_features_out = self.subcluster_centers_.shape[0]
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : list of shape (n_leaves,)
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), \
default=None
Input data. If X is not provided, only the global clustering
step is done.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
return self._fit(X, partial=True)
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray of shape(n_samples,)
Labelled data.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
return self._predict(X)
def _predict(self, X):
"""Predict data using the ``centroids_`` of subclusters."""
kwargs = {"Y_norm_squared": self._subcluster_norms}
with config_context(assume_finite=True):
argmin = pairwise_distances_argmin(
X, self.subcluster_centers_, metric_kwargs=kwargs
)
return self.subcluster_labels_[argmin]
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
with config_context(assume_finite=True):
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, Integral):
clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by BIRCH is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters),
ConvergenceWarning,
)
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)
if compute_labels:
self.labels_ = self._predict(X)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
tags.input_tags.sparse = True
return tags
| Birch |
python | getsentry__sentry | tests/sentry/api/test_base.py | {
"start": 21571,
"end": 21987
} | class ____(APITestCase):
def test_resolve_region(self) -> None:
def request_with_subdomain(subdomain):
request = self.make_request(method="GET")
request.subdomain = subdomain
return subdomain_is_region(request)
assert request_with_subdomain("us")
assert request_with_subdomain("eu")
assert not request_with_subdomain("sentry")
| CustomerDomainTest |
python | pytorch__pytorch | test/test_overrides.py | {
"start": 52844,
"end": 68260
} | class ____(TestCase):
def test_basic(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -1
# NB: factory functions get overridden too!
x = torch.randn(1)
with A():
self.assertEqual(torch.randn(3), -1)
self.assertEqual(torch.add(x, x), -1)
self.assertEqual(torch.split(None, [2]), -1) # python side
self.assertEqual(bar(x), -1)
def test_factory_override(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -1
with A():
self.assertEqual(torch.tensor([1]), -1)
self.assertEqual(torch.sparse_coo_tensor(1, 1, 1), -1)
self.assertEqual(torch.sparse_csr_tensor(1, 1, 1), -1)
self.assertEqual(torch.sparse_coo_tensor(1, 1, (1, 1), check_invariants=False), -1)
self.assertEqual(torch.sparse_csr_tensor(1, 1, 1, (1, 1), check_invariants=False), -1)
self.assertEqual(torch.as_tensor([1]), -1)
def test_modes_handle_first(self):
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return -40
x = SubTensor()
with A():
self.assertEqual(torch.neg(x), -40)
self.assertEqual(torch.mean(x), -40)
self.assertEqual(torch.mm(x, x), -40)
self.assertEqual(bar(x), -40)
def test_modes_return_notimplemented(self):
class MyMode(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
return NotImplemented
x = SubTensor()
with MyMode():
self.assertEqual(torch.mean(x), 0)
self.assertEqual(torch.mm(x, x), -1)
self.assertEqual(bar(x), 1)
self.assertRaisesRegex(
TypeError, r'SubTensor',
lambda: self.assertEqual(torch.max(x, x)))
def test_with_mode(self):
class ErrorA(RuntimeError):
pass
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
raise ErrorA
with self.assertRaises(ErrorA):
with A():
torch.empty([])
def test_with_mode_created_separately(self):
class ErrorA(RuntimeError):
pass
class A(TorchFunctionMode):
def __torch_function__(self, *args, **kwargs):
raise ErrorA
x = A()
with self.assertRaises(ErrorA):
with x:
torch.empty([])
def test_with_nested_modes(self):
out = []
class A(TorchFunctionMode):
def __init__(self, msg):
self.msg = msg
def __torch_function__(self, func, _, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
out.append(self.msg)
return func(*args, **kwargs)
with A("layer1"):
with A("layer2"):
torch.empty([])
self.assertEqual(out, ["layer2", "layer1"])
def test_nested_same_mode(self):
out = []
class A(TorchFunctionMode):
def __init__(self, msg):
self.msg = msg
def __torch_function__(self, func, _, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
out.append(self.msg)
return func(*args, **kwargs)
with A("layer1") as a:
with a:
torch.empty([])
self.assertEqual(out, ["layer1", "layer1"])
def test_error_using_class_method_on_mode(self):
class A(TorchFunctionMode):
@classmethod
def __torch_function__(cls, func, _, args=(), kwargs=None):
return func(args, kwargs)
x = torch.tensor(5.)
with self.assertRaisesRegex(RuntimeError, "classmethod is not supported, please make it a plain method"):
with A():
x + x
def test_restacking_with_ancestor(self):
class A(TorchFunctionMode):
pass
with A():
with A() as x:
pass
with x:
pass
def test_get_cur_mode(self):
class A(TorchFunctionMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
pass
with A() as mode1:
self.assertEqual(_get_current_function_mode(), mode1)
with mode1:
with A() as mode2:
self.assertEqual(_get_current_function_mode(), mode2)
def test_get_mode_stack(self):
class A(TorchFunctionMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
pass
self.assertEqual(_get_current_function_mode_stack(), [])
with A() as mode1:
self.assertEqual(_get_current_function_mode_stack(), [mode1])
with mode1:
with A() as mode2:
self.assertEqual(_get_current_function_mode_stack(), [mode1, mode2])
def test_all_same_mode(self):
class A(TorchFunctionMode):
pass
x = A()
y = A()
self.assertTrue(all_same_mode([x, x, x]))
self.assertFalse(all_same_mode([x, None]))
self.assertFalse(all_same_mode([x, y]))
def test_nested_modes_with_python_has_torch_function(self):
called = []
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
called.append("A")
kwargs = {} if kwargs is None else kwargs
return func(*args, **kwargs)
class B(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
called.append("B")
kwargs = {} if kwargs is None else kwargs
return func(*args, **kwargs)
x = torch.randn(3, 4)
with A():
with B():
y = bar(x)
self.assertEqual(y, x)
self.assertEqual(called, ["B", "A"])
def test_reentrant_mode_idiom(self):
log = []
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
log.append(func)
if func is torch.sub:
with self:
input, other = args
assert not kwargs
return torch.add(input, other, alpha=-1)
return func(*args, **kwargs)
x = torch.randn(1)
y = torch.randn(1)
with A():
torch.sub(x, y)
# add hits the torch function again!
self.assertEqual(log, [torch.sub, torch.add])
def test_nn_parse_to(self):
# This failed because the parser thinks the function is called to()
# but it's actually called _parse_to()
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
with A():
torch._C._nn._parse_to('cpu')
self.assertTrue(called)
def test_getitem_call(self):
# This failed because the parser thinks the function is called to()
# but it's actually called _parse_to()
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
a = torch.zeros(5)
b = torch.tensor(0)
with A():
a[b]
self.assertTrue(called)
def test_distributions_bernoulli(self):
# This failed because improper use of has_torch_function when
# is_tensor_like should have been used instead, inside the
# broadcasting logic called by distributions (Bernoulli doesn't
# matter per se)
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
with A():
torch.distributions.Bernoulli(0.3)
self.assertTrue(called)
def test_mode_notimplemented_loop(self):
# Default tensor subclass implementation disables torch function;
# when we redispatch to mode we must not treat the objects as
# eligible
called = 0
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called += 1
# The first time we call, the mode sees an active type that
# it doesn't know how to deal with. The second time, we're
# instructed to treat it "as if it were a tensor", and so
# we keep going. I'm not entirely clear if the subclasses
# disappearing from types is the correct way to do it.
if any(t is not torch.Tensor for t in types):
return NotImplemented
else:
return func(*args, **kwargs)
class B(torch.Tensor):
pass
b = B()
with A():
r = torch.neg(b)
self.assertIs(type(r), B)
self.assertEqual(called, 2)
called = 0
with A():
r = bar(b)
self.assertIs(type(r), B)
self.assertEqual(called, 2)
def test_disable_subclass_not_mode(self):
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
class B(torch.Tensor):
pass
x = B(torch.randn(5))
with A():
with torch._C.DisableTorchFunctionSubclass():
self.assertNotIsInstance(torch.sum(x), B)
self.assertTrue(called)
def test_disable_subclass_mode(self):
called = False
class A(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
nonlocal called
if kwargs is None:
kwargs = {}
called = True
return func(*args, **kwargs)
class B(torch.Tensor):
pass
x = B(torch.randn(5))
with A():
with torch._C.DisableTorchFunction():
self.assertNotIsInstance(torch.sum(x), B)
self.assertFalse(called)
def test_disable_enable_subclass(self):
class A(torch.Tensor):
pass
x = A(torch.randn(5))
with torch._C.DisableTorchFunctionSubclass():
g = torch._C._EnableTorchFunction()
try:
self.assertIsInstance(torch.sum(x), A)
finally:
del g
def test_disable_enable_torch_function_ctx(self):
class A(torch.Tensor):
pass
x = A(torch.randn(5))
with torch._C.DisableTorchFunction():
with torch.overrides._enable_torch_function():
self.assertIsInstance(torch.sum(x), A)
def test_torch_function_all_disabled_api(self):
from torch._C import _is_torch_function_all_disabled
state = _is_torch_function_all_disabled()
self.assertFalse(state)
with torch._C.DisableTorchFunction():
state = _is_torch_function_all_disabled()
self.assertTrue(state)
state = _is_torch_function_all_disabled()
self.assertFalse(state)
with torch._C.DisableTorchFunctionSubclass():
state = _is_torch_function_all_disabled()
self.assertFalse(state)
def test_subclass_hash(self):
class DiagTensor(torch.Tensor):
def __init__(self, diag):
self._diag = diag
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
def get_full_matrices(t):
if isinstance(t, DiagTensor):
return torch.diag_embed(t._diag)
else:
return t
return func(*tree_map(get_full_matrices, args), **tree_map(get_full_matrices, kwargs))
d = torch.rand(2)
a = DiagTensor(d)
self.assertEqual((a + 1), torch.diag_embed(d) + 1)
# If the hash function was returning the same value, this would
# fail inside `Tensor.__eq__`.
# If __hash__ was going through torch_function, the implementation above would
# be wrong as it would compute the hash on a temporary Tensor thus not ensuring
# the uniqueness of the hash that we rely on for Tensors.
s = set()
s.add(a)
s.add(DiagTensor(d))
def test_custom_device_type(self):
class CustomDeviceContext(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
if func == torch.device:
if args and isinstance(args[0], int):
args = ("xla", args[0])
elif isinstance(kwargs.get('device'), int):
kwargs['device'] = f"xla:{kwargs.get('device')}"
return func(*args, **kwargs)
with CustomDeviceContext():
d_args = torch.device(0)
self.assertEqual(d_args.type, "xla")
self.assertEqual(d_args.index, 0)
d_kwargs = torch.device(device=0)
self.assertEqual(d_kwargs.type, "xla")
self.assertEqual(d_kwargs.index, 0)
def test_device_context_semantics(self):
from torch._C import _len_torch_function_stack
from torch.utils._device import DeviceContext
try:
torch.set_default_device("cuda")
def get_stack():
return [torch._C._get_function_stack_at(i) for i in range(_len_torch_function_stack())]
base_mode = BaseTorchFunctionMode()
with base_mode:
torch.set_default_device("cpu")
stack = get_stack()
self.assertIsInstance(stack[0], DeviceContext)
self.assertEqual(stack[0].device, torch.device("cpu"))
stack = get_stack()
self.assertIsInstance(stack[0], DeviceContext)
self.assertEqual(stack[0].device, torch.device("cpu"))
finally:
torch.set_default_device(None)
if __name__ == '__main__':
run_tests()
| TestTorchFunctionMode |
python | celery__celery | t/unit/utils/test_collections.py | {
"start": 4688,
"end": 9587
} | class ____:
def test_add(self):
s = LimitedSet(maxlen=2)
s.add('foo')
s.add('bar')
for n in 'foo', 'bar':
assert n in s
s.add('baz')
for n in 'bar', 'baz':
assert n in s
assert 'foo' not in s
s = LimitedSet(maxlen=10)
for i in range(150):
s.add(i)
assert len(s) <= 10
# make sure heap is not leaking:
assert len(s._heap) < len(s) * (
100. + s.max_heap_percent_overload) / 100
def test_purge(self):
# purge now enforces rules
# can't purge(1) now. but .purge(now=...) still works
s = LimitedSet(maxlen=10)
[s.add(i) for i in range(10)]
s.maxlen = 2
s.purge()
assert len(s) == 2
# expired
s = LimitedSet(maxlen=10, expires=1)
[s.add(i) for i in range(10)]
s.maxlen = 2
s.purge(now=monotonic() + 100)
assert len(s) == 0
# not expired
s = LimitedSet(maxlen=None, expires=1)
[s.add(i) for i in range(10)]
s.maxlen = 2
s.purge(now=lambda: monotonic() - 100)
assert len(s) == 2
# expired -> minsize
s = LimitedSet(maxlen=10, minlen=10, expires=1)
[s.add(i) for i in range(20)]
s.minlen = 3
s.purge(now=monotonic() + 3)
assert s.minlen == len(s)
assert len(s._heap) <= s.maxlen * (
100. + s.max_heap_percent_overload) / 100
def test_pickleable(self):
s = LimitedSet(maxlen=2)
s.add('foo')
s.add('bar')
assert pickle.loads(pickle.dumps(s)) == s
def test_iter(self):
s = LimitedSet(maxlen=3)
items = ['foo', 'bar', 'baz', 'xaz']
for item in items:
s.add(item)
l = list(iter(s))
for item in items[1:]:
assert item in l
assert 'foo' not in l
assert l == items[1:], 'order by insertion time'
def test_repr(self):
s = LimitedSet(maxlen=2)
items = 'foo', 'bar'
for item in items:
s.add(item)
assert 'LimitedSet(' in repr(s)
def test_discard(self):
s = LimitedSet(maxlen=2)
s.add('foo')
s.discard('foo')
assert 'foo' not in s
assert len(s._data) == 0
s.discard('foo')
def test_clear(self):
s = LimitedSet(maxlen=2)
s.add('foo')
s.add('bar')
assert len(s) == 2
s.clear()
assert not s
def test_update(self):
s1 = LimitedSet(maxlen=2)
s1.add('foo')
s1.add('bar')
s2 = LimitedSet(maxlen=2)
s2.update(s1)
assert sorted(list(s2)) == ['bar', 'foo']
s2.update(['bla'])
assert sorted(list(s2)) == ['bar', 'bla']
s2.update(['do', 're'])
assert sorted(list(s2)) == ['do', 're']
s1 = LimitedSet(maxlen=10, expires=None)
s2 = LimitedSet(maxlen=10, expires=None)
s3 = LimitedSet(maxlen=10, expires=None)
s4 = LimitedSet(maxlen=10, expires=None)
s5 = LimitedSet(maxlen=10, expires=None)
for i in range(12):
s1.add(i)
s2.add(i * i)
s3.update(s1)
s3.update(s2)
s4.update(s1.as_dict())
s4.update(s2.as_dict())
s5.update(s1._data) # revoke is using this
s5.update(s2._data)
assert s3 == s4
assert s3 == s5
s2.update(s4)
s4.update(s2)
assert s2 == s4
def test_iterable_and_ordering(self):
s = LimitedSet(maxlen=35, expires=None)
clock = count(1)
for i in reversed(range(15)):
s.add(i, now=next(clock))
j = 40
for i in s:
assert i < j # each item is smaller and smaller
j = i
assert i == 0 # last item is zero
def test_pop_and_ordering_again(self):
s = LimitedSet(maxlen=5)
for i in range(10):
s.add(i)
j = -1
for _ in range(5):
i = s.pop()
assert j < i
i = s.pop()
assert i is None
def test_as_dict(self):
s = LimitedSet(maxlen=2)
s.add('foo')
assert isinstance(s.as_dict(), Mapping)
def test_add_removes_duplicate_from_small_heap(self):
s = LimitedSet(maxlen=2)
s.add('foo')
s.add('foo')
s.add('foo')
assert len(s) == 1
assert len(s._data) == 1
assert len(s._heap) == 1
def test_add_removes_duplicate_from_big_heap(self):
s = LimitedSet(maxlen=1000)
[s.add(i) for i in range(2000)]
assert len(s) == 1000
[s.add('foo') for i in range(1000)]
# heap is refreshed when 15% larger than _data
assert len(s._heap) < 1150
[s.add('foo') for i in range(1000)]
assert len(s._heap) < 1150
| test_LimitedSet |
python | huggingface__transformers | src/transformers/models/gpt_oss/modeling_gpt_oss.py | {
"start": 21189,
"end": 28162
} | class ____(GptOssPreTrainedModel):
_no_split_modules = ["GptOssDecoderLayer"]
def __init__(self, config: GptOssConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[GptOssDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = GptOssRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = GptOssRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
}
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
"sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
}
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def load_balancing_loss_func(
gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
num_experts: Optional[int] = None,
top_k=2,
attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
r"""
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
experts is too unbalanced.
Args:
gate_logits:
Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
shape [batch_size X sequence_length, num_experts].
num_experts:
Number of experts
top_k:
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter.
attention_mask (`torch.Tensor`, *optional*):
The attention_mask used in forward function
shape [batch_size X sequence_length] if not None.
Returns:
The auxiliary loss.
"""
if gate_logits is None or not isinstance(gate_logits, tuple):
return 0
if isinstance(gate_logits, tuple):
compute_device = gate_logits[0].device
concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
if attention_mask is None:
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.mean(routing_weights, dim=0)
else:
batch_size, sequence_length = attention_mask.shape
num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
expert_attention_mask = (
attention_mask[None, :, :, None, None]
.expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
.reshape(-1, top_k, num_experts)
.to(compute_device)
)
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
expert_attention_mask, dim=0
)
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
router_per_expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, num_experts))
.reshape(-1, num_experts)
.to(compute_device)
)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
router_per_expert_attention_mask, dim=0
)
overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
return overall_loss * num_experts
@auto_docstring
| GptOssModel |
python | Textualize__textual | docs/examples/guide/widgets/hello03.py | {
"start": 363,
"end": 701
} | class ____(Static):
"""Display a greeting."""
def on_mount(self) -> None:
self.next_word()
def on_click(self) -> None:
self.next_word()
def next_word(self) -> None:
"""Get a new hello and update the content area."""
hello = next(hellos)
self.update(f"{hello}, [b]World[/b]!")
| Hello |
python | pyparsing__pyparsing | tests/test_simple_unit.py | {
"start": 5120,
"end": 5609
} | class ____(PyparsingExpressionTestCase):
tests = [
PyparsingTest(
desc="Match colors, converting to consistent case",
expr=(
pp.CaselessLiteral("RED")
| pp.CaselessLiteral("GREEN")
| pp.CaselessLiteral("BLUE")
)[...],
text="red Green BluE blue GREEN green rEd",
expected_list=["RED", "GREEN", "BLUE", "BLUE", "GREEN", "GREEN", "RED"],
),
]
| TestCaselessLiteral |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/enums.py | {
"start": 3414,
"end": 3572
} | class ____(enum.Enum):
@classmethod
def _missing_(cls, value):
"""inherited"""
return super()._missing_(value)
| _SunderMissingInEnumMixin |
python | keon__algorithms | tests/test_greedy.py | {
"start": 88,
"end": 637
} | class ____(unittest.TestCase):
def test_max_contiguous_subsequence_sum(self):
arr1 = [-2, 3, 8, -1, 4]
arr2 = [-1, 1, 0]
arr3 = [-1, -3, -4]
arr4 = [-2, 3, 8, -12, 8, 4]
self.assertEqual(max_contiguous_subsequence_sum(arr1), 14)
self.assertEqual(max_contiguous_subsequence_sum(arr2), 1)
self.assertEqual(max_contiguous_subsequence_sum(arr3), -1)
self.assertEqual(max_contiguous_subsequence_sum(arr4), 12)
if __name__ == '__main__':
unittest.main() | TestMaxContiguousSubsequenceSum |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_threading.py | {
"start": 4464,
"end": 7752
} | class ____(_ThreadTest, fixtures.MappedTest):
run_dispose_bind = "once"
__requires__ = ("multithreading_support",)
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("thread_id", String(50)),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
def test_sessionmaker_thread_safe(self, num_threads_engine):
"""Test that sessionmaker factory is thread-safe."""
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
# Single sessionmaker shared across threads
SessionFactory = sessionmaker(num_threads_engine)
def worker(results, thread_name):
thread_id = thread_name
for _ in range(ITERATIONS):
with SessionFactory() as session:
for i in range(3):
user = User(
name=f"user_{thread_id}_{i}", thread_id=thread_id
)
session.add(user)
session.commit()
count = (
session.query(User)
.filter_by(thread_id=thread_id)
.count()
)
results.append(count)
results, errors = self.run_threaded(worker)
eq_(errors, [])
eq_(
results,
[
tuple(range(3, 3 * ITERATIONS + 3, 3))
for _ in range(NUM_THREADS)
],
)
def test_scoped_session_thread_local(self, num_threads_engine):
"""Test that scoped_session provides thread-local sessions."""
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
# Create scoped session
Session = scoped_session(sessionmaker(num_threads_engine))
session_ids = {}
def worker(results, thread_name):
thread_id = thread_name
session = Session()
session_ids[thread_id] = id(session)
session.close()
for _ in range(ITERATIONS):
user = User(
name=f"scoped_user_{thread_id}", thread_id=thread_id
)
Session.add(user)
Session.commit()
session2 = Session()
assert id(session2) == session_ids[thread_id]
session2.close()
count = (
Session.query(User).filter_by(thread_id=thread_id).count()
)
results.append(count)
Session.remove()
results, errors = self.run_threaded(worker)
eq_(errors, [])
unique_sessions = set(session_ids.values())
eq_(len(unique_sessions), NUM_THREADS)
eq_(
results,
[tuple(range(1, ITERATIONS + 1)) for _ in range(NUM_THREADS)],
)
@testing.add_to_marker.timing_intensive
| SessionThreadingTest |
python | django-extensions__django-extensions | django_extensions/collision_resolvers.py | {
"start": 6575,
"end": 6868
} | class ____(FullPathCR, InstalledAppsOrderCR):
"""
Collision resolver which is mixin of FullPathCR and InstalledAppsOrderCR.
In case of collisions he sets aliases like FullPathCR, but sets default model using InstalledAppsOrderCR.
""" # noqa: E501
pass
| FullPathCustomOrderCR |
python | spyder-ide__spyder | spyder/plugins/editor/extensions/docstring.py | {
"start": 3881,
"end": 29586
} | class ____(object):
"""Class for insert docstring template automatically."""
def __init__(self, code_editor):
"""Initialize and Add code_editor to the variable."""
self.code_editor = code_editor
self.quote3 = '"""'
self.quote3_other = "'''"
self.line_number_cursor = None
@staticmethod
def is_beginning_triple_quotes(text):
"""Return True if there are only triple quotes in text."""
docstring_triggers = ['"""', 'r"""', "'''", "r'''"]
if text.lstrip() in docstring_triggers:
return True
return False
def is_end_of_function_definition(self, text, line_number):
"""Return True if text is the end of the function definition."""
text_without_whitespace = "".join(text.split())
if (
text_without_whitespace.endswith("):") or
text_without_whitespace.endswith("]:") or
(text_without_whitespace.endswith(":") and
"->" in text_without_whitespace)
):
return True
elif text_without_whitespace.endswith(":") and line_number > 1:
complete_text = text_without_whitespace
document = self.code_editor.document()
cursor = QTextCursor(
document.findBlockByNumber(line_number - 2)) # previous line
for i in range(line_number - 2, -1, -1):
txt = "".join(str(cursor.block().text()).split())
if txt.endswith("\\") or is_in_scope_backward(complete_text):
if txt.endswith("\\"):
txt = txt[:-1]
complete_text = txt + complete_text
else:
break
if i != 0:
cursor.movePosition(QTextCursor.PreviousBlock)
if is_start_of_function(complete_text):
return (
complete_text.endswith("):") or
complete_text.endswith("]:") or
(complete_text.endswith(":") and
"->" in complete_text)
)
else:
return False
else:
return False
def get_function_definition_from_first_line(self):
"""Get func def when the cursor is located on the first def line."""
document = self.code_editor.document()
cursor = QTextCursor(
document.findBlockByNumber(self.line_number_cursor - 1))
func_text = ''
func_indent = ''
is_first_line = True
line_number = cursor.blockNumber() + 1
number_of_lines = self.code_editor.blockCount()
remain_lines = number_of_lines - line_number + 1
number_of_lines_of_function = 0
for __ in range(min(remain_lines, 20)):
cur_text = str(cursor.block().text()).rstrip()
cur_text = remove_comments(cur_text)
if is_first_line:
if not is_start_of_function(cur_text):
return None
func_indent = get_indent(cur_text)
is_first_line = False
else:
cur_indent = get_indent(cur_text)
if cur_indent <= func_indent and cur_text.strip() != '':
return None
if is_start_of_function(cur_text):
return None
if (cur_text.strip() == '' and
not is_in_scope_forward(func_text)):
return None
if len(cur_text) > 0 and cur_text[-1] == '\\':
cur_text = cur_text[:-1]
func_text += cur_text
number_of_lines_of_function += 1
if self.is_end_of_function_definition(
cur_text, line_number + number_of_lines_of_function - 1):
return func_text, number_of_lines_of_function
cursor.movePosition(QTextCursor.NextBlock)
return None
def get_function_definition_from_below_last_line(self):
"""Get func def when the cursor is located below the last def line."""
cursor = self.code_editor.textCursor()
func_text = ''
is_first_line = True
line_number = cursor.blockNumber() + 1
number_of_lines_of_function = 0
for __ in range(min(line_number, 20)):
if cursor.block().blockNumber() == 0:
return None
cursor.movePosition(QTextCursor.PreviousBlock)
prev_text = str(cursor.block().text()).rstrip()
prev_text = remove_comments(prev_text)
if is_first_line:
if not self.is_end_of_function_definition(
prev_text, line_number - 1):
return None
is_first_line = False
elif self.is_end_of_function_definition(
prev_text, line_number - number_of_lines_of_function - 1):
return None
if len(prev_text) > 0 and prev_text[-1] == '\\':
prev_text = prev_text[:-1]
func_text = prev_text + func_text
number_of_lines_of_function += 1
if is_start_of_function(prev_text):
return func_text, number_of_lines_of_function
return None
def get_function_body(self, func_indent):
"""Get the function body text."""
cursor = self.code_editor.textCursor()
line_number = cursor.blockNumber() + 1
number_of_lines = self.code_editor.blockCount()
body_list = []
for __ in range(number_of_lines - line_number + 1):
text = str(cursor.block().text())
text_indent = get_indent(text)
if text.strip() == '':
pass
elif len(text_indent) <= len(func_indent):
break
body_list.append(text)
cursor.movePosition(QTextCursor.NextBlock)
return '\n'.join(body_list)
def write_docstring(self):
"""Write docstring to editor."""
line_to_cursor = self.code_editor.get_text('sol', 'cursor')
if self.is_beginning_triple_quotes(line_to_cursor):
cursor = self.code_editor.textCursor()
prev_pos = cursor.position()
quote = line_to_cursor[-1]
docstring_type = CONF.get('editor', 'docstring_type')
docstring = self._generate_docstring(docstring_type, quote)
if docstring:
self.code_editor.insert_text(docstring)
cursor = self.code_editor.textCursor()
cursor.setPosition(prev_pos, QTextCursor.KeepAnchor)
cursor.movePosition(QTextCursor.NextBlock)
cursor.movePosition(QTextCursor.EndOfLine,
QTextCursor.KeepAnchor)
cursor.clearSelection()
self.code_editor.setTextCursor(cursor)
return True
return False
def write_docstring_at_first_line_of_function(self):
"""Write docstring to editor at mouse position."""
result = self.get_function_definition_from_first_line()
editor = self.code_editor
if result:
func_text, number_of_line_func = result
line_number_function = (self.line_number_cursor +
number_of_line_func - 1)
cursor = editor.textCursor()
line_number_cursor = cursor.blockNumber() + 1
offset = line_number_function - line_number_cursor
if offset > 0:
for __ in range(offset):
cursor.movePosition(QTextCursor.NextBlock)
else:
for __ in range(abs(offset)):
cursor.movePosition(QTextCursor.PreviousBlock)
cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.MoveAnchor)
editor.setTextCursor(cursor)
indent = get_indent(func_text)
editor.insert_text('\n{}{}"""'.format(indent, editor.indent_chars))
self.write_docstring()
def write_docstring_for_shortcut(self):
"""Write docstring to editor by shortcut of code editor."""
# cursor placed below function definition
result = self.get_function_definition_from_below_last_line()
if result is not None:
__, number_of_lines_of_function = result
cursor = self.code_editor.textCursor()
for __ in range(number_of_lines_of_function):
cursor.movePosition(QTextCursor.PreviousBlock)
self.code_editor.setTextCursor(cursor)
cursor = self.code_editor.textCursor()
self.line_number_cursor = cursor.blockNumber() + 1
self.write_docstring_at_first_line_of_function()
def _generate_docstring(self, doc_type, quote):
"""Generate docstring."""
docstring = None
self.quote3 = quote * 3
if quote == '"':
self.quote3_other = "'''"
else:
self.quote3_other = '"""'
result = self.get_function_definition_from_below_last_line()
if result:
func_def, __ = result
func_info = FunctionInfo()
func_info.parse_def(func_def)
if func_info.has_info:
func_body = self.get_function_body(func_info.func_indent)
if func_body:
func_info.parse_body(func_body)
if doc_type == 'Numpydoc':
docstring = self._generate_numpy_doc(func_info)
elif doc_type == 'Googledoc':
docstring = self._generate_google_doc(func_info)
elif doc_type == "Sphinxdoc":
docstring = self._generate_sphinx_doc(func_info)
return docstring
def _generate_numpy_doc(self, func_info):
"""Generate a docstring of numpy type."""
numpy_doc = ''
arg_names = func_info.arg_name_list
arg_types = func_info.arg_type_list
arg_values = func_info.arg_value_list
if len(arg_names) > 0 and arg_names[0] in ('self', 'cls'):
del arg_names[0]
del arg_types[0]
del arg_values[0]
indent1 = func_info.func_indent + self.code_editor.indent_chars
indent2 = func_info.func_indent + self.code_editor.indent_chars * 2
numpy_doc += '\n{}\n'.format(indent1)
if len(arg_names) > 0:
numpy_doc += '\n{}Parameters'.format(indent1)
numpy_doc += '\n{}----------\n'.format(indent1)
arg_text = ''
for arg_name, arg_type, arg_value in zip(arg_names, arg_types,
arg_values):
arg_text += '{}{} : '.format(indent1, arg_name)
if arg_type:
arg_text += '{}'.format(arg_type)
else:
arg_text += 'TYPE'
if arg_value:
arg_text += ', optional'
arg_text += '\n{}DESCRIPTION.'.format(indent2)
if arg_value:
arg_value = arg_value.replace(self.quote3, self.quote3_other)
arg_text += ' The default is {}.'.format(arg_value)
arg_text += '\n'
numpy_doc += arg_text
if func_info.raise_list:
numpy_doc += '\n{}Raises'.format(indent1)
numpy_doc += '\n{}------'.format(indent1)
for raise_type in func_info.raise_list:
numpy_doc += '\n{}{}'.format(indent1, raise_type)
numpy_doc += '\n{}DESCRIPTION.'.format(indent2)
numpy_doc += '\n'
numpy_doc += '\n'
if func_info.has_yield:
header = '{0}Yields\n{0}------\n'.format(indent1)
else:
header = '{0}Returns\n{0}-------\n'.format(indent1)
return_type_annotated = func_info.return_type_annotated
if return_type_annotated:
return_section = '{}{}{}'.format(header, indent1,
return_type_annotated)
return_section += '\n{}DESCRIPTION.'.format(indent2)
else:
return_element_type = indent1 + '{return_type}\n' + indent2 + \
'DESCRIPTION.'
placeholder = return_element_type.format(return_type='TYPE')
return_element_name = indent1 + '{return_name} : ' + \
placeholder.lstrip()
try:
return_section = self._generate_docstring_return_section(
func_info.return_value_in_body, header,
return_element_name, return_element_type, placeholder,
indent1)
except (ValueError, IndexError):
return_section = '{}{}None.'.format(header, indent1)
numpy_doc += return_section
numpy_doc += '\n\n{}{}'.format(indent1, self.quote3)
return numpy_doc
def _generate_google_doc(self, func_info):
"""Generate a docstring of google type."""
google_doc = ''
arg_names = func_info.arg_name_list
arg_types = func_info.arg_type_list
arg_values = func_info.arg_value_list
if len(arg_names) > 0 and arg_names[0] in ('self', 'cls'):
del arg_names[0]
del arg_types[0]
del arg_values[0]
indent1 = func_info.func_indent + self.code_editor.indent_chars
indent2 = func_info.func_indent + self.code_editor.indent_chars * 2
google_doc += '\n{}\n'.format(indent1)
if len(arg_names) > 0:
google_doc += '\n{0}Args:\n'.format(indent1)
arg_text = ''
for arg_name, arg_type, arg_value in zip(arg_names, arg_types,
arg_values):
arg_text += '{}{} '.format(indent2, arg_name)
arg_text += '('
if arg_type:
arg_text += '{}'.format(arg_type)
else:
arg_text += 'TYPE'
if arg_value:
arg_text += ', optional'
arg_text += '):'
arg_text += ' DESCRIPTION.'
if arg_value:
arg_value = arg_value.replace(self.quote3, self.quote3_other)
arg_text += ' Defaults to {}.\n'.format(arg_value)
else:
arg_text += '\n'
google_doc += arg_text
if func_info.raise_list:
google_doc += '\n{0}Raises:'.format(indent1)
for raise_type in func_info.raise_list:
google_doc += '\n{}{}'.format(indent2, raise_type)
google_doc += ': DESCRIPTION.'
google_doc += '\n'
google_doc += '\n'
if func_info.has_yield:
header = '{}Yields:\n'.format(indent1)
else:
header = '{}Returns:\n'.format(indent1)
return_type_annotated = func_info.return_type_annotated
if return_type_annotated:
return_section = '{}{}{}: DESCRIPTION.'.format(
header, indent2, return_type_annotated)
else:
return_element_type = indent2 + '{return_type}: DESCRIPTION.'
placeholder = return_element_type.format(return_type='TYPE')
return_element_name = indent2 + '{return_name} ' + \
'(TYPE): DESCRIPTION.'
try:
return_section = self._generate_docstring_return_section(
func_info.return_value_in_body, header,
return_element_name, return_element_type, placeholder,
indent2)
except (ValueError, IndexError):
return_section = '{}{}None.'.format(header, indent2)
google_doc += return_section
google_doc += '\n\n{}{}'.format(indent1, self.quote3)
return google_doc
def _generate_sphinx_doc(self, func_info):
"""Generate a docstring of sphinx type."""
sphinx_doc = ''
arg_names = func_info.arg_name_list
arg_types = func_info.arg_type_list
arg_values = func_info.arg_value_list
if len(arg_names) > 0 and arg_names[0] in ('self', 'cls'):
del arg_names[0]
del arg_types[0]
del arg_values[0]
indent1 = func_info.func_indent + self.code_editor.indent_chars
sphinx_doc += '\n{}\n'.format(indent1)
arg_text = ''
for arg_name, arg_type, arg_value in zip(arg_names, arg_types,
arg_values):
arg_text += '{}:param {}: DESCRIPTION'.format(indent1, arg_name)
if arg_value:
arg_value = arg_value.replace(self.quote3, self.quote3_other)
arg_text += ', defaults to {}\n'.format(arg_value)
else:
arg_text += '\n'
arg_text += '{}:type {}: '.format(indent1, arg_name)
if arg_type:
arg_text += '{}'.format(arg_type)
else:
arg_text += 'TYPE'
if arg_value:
arg_text += ', optional'
arg_text += '\n'
sphinx_doc += arg_text
if func_info.raise_list:
for raise_type in func_info.raise_list:
sphinx_doc += '{}:raises {}: DESCRIPTION\n'.format(indent1,
raise_type)
if func_info.has_yield:
header = '{}:yield:'.format(indent1)
else:
header = '{}:return:'.format(indent1)
return_type_annotated = func_info.return_type_annotated
if return_type_annotated:
return_section = '{} DESCRIPTION\n'.format(header)
return_section += '{}:rtype: {}'.format(indent1,
return_type_annotated)
else:
return_section = '{} DESCRIPTION\n'.format(header)
return_section += '{}:rtype: TYPE'.format(indent1)
sphinx_doc += return_section
sphinx_doc += '\n\n{}{}'.format(indent1, self.quote3)
return sphinx_doc
@staticmethod
def find_top_level_bracket_locations(string_toparse):
"""Get the locations of top-level brackets in a string."""
bracket_stack = []
replace_args_list = []
bracket_type = None
literal_type = ''
brackets = {'(': ')', '[': ']', '{': '}'}
for idx, character in enumerate(string_toparse):
if (not bracket_stack and character in brackets.keys()
or character == bracket_type):
bracket_stack.append(idx)
bracket_type = character
elif bracket_type and character == brackets[bracket_type]:
begin_idx = bracket_stack.pop()
if not bracket_stack:
if not literal_type:
if bracket_type == '(':
literal_type = '(None)'
elif bracket_type == '[':
literal_type = '[list]'
elif bracket_type == '{':
if idx - begin_idx <= 1:
literal_type = '{dict}'
else:
literal_type = '{set}'
replace_args_list.append(
(string_toparse[begin_idx:idx + 1],
literal_type, 1))
bracket_type = None
literal_type = ''
elif len(bracket_stack) == 1:
if bracket_type == '(' and character == ',':
literal_type = '(tuple)'
elif bracket_type == '{' and character == ':':
literal_type = '{dict}'
elif bracket_type == '(' and character == ':':
literal_type = '[slice]'
if bracket_stack:
raise IndexError('Bracket mismatch')
for replace_args in replace_args_list:
string_toparse = string_toparse.replace(*replace_args)
return string_toparse
@staticmethod
def parse_return_elements(return_vals_group, return_element_name,
return_element_type, placeholder):
"""Return the appropriate text for a group of return elements."""
all_eq = (return_vals_group.count(return_vals_group[0])
== len(return_vals_group))
if all([{'[list]', '(tuple)', '{dict}', '{set}'}.issuperset(
return_vals_group)]) and all_eq:
return return_element_type.format(
return_type=return_vals_group[0][1:-1])
# Output placeholder if special Python chars present in name
py_chars = {' ', '+', '-', '*', '/', '%', '@', '<', '>', '&', '|', '^',
'~', '=', ',', ':', ';', '#', '(', '[', '{', '}', ']',
')', }
if any([any([py_char in return_val for py_char in py_chars])
for return_val in return_vals_group]):
return placeholder
# Output str type and no name if only string literals
if all(['"' in return_val or '\'' in return_val
for return_val in return_vals_group]):
return return_element_type.format(return_type='str')
# Output bool type and no name if only bool literals
if {'True', 'False'}.issuperset(return_vals_group):
return return_element_type.format(return_type='bool')
# Output numeric types and no name if only numeric literals
try:
[float(return_val) for return_val in return_vals_group]
num_not_int = 0
for return_val in return_vals_group:
try:
int(return_val)
except ValueError: # If not an integer (EAFP)
num_not_int = num_not_int + 1
if num_not_int == 0:
return return_element_type.format(return_type='int')
elif num_not_int == len(return_vals_group):
return return_element_type.format(return_type='float')
else:
return return_element_type.format(return_type='numeric')
except ValueError: # Not a numeric if float conversion didn't work
pass
# If names are not equal, don't contain "." or are a builtin
if ({'self', 'cls', 'None'}.isdisjoint(return_vals_group) and all_eq
and all(['.' not in return_val
for return_val in return_vals_group])):
return return_element_name.format(return_name=return_vals_group[0])
return placeholder
def _generate_docstring_return_section(self, return_vals, header,
return_element_name,
return_element_type,
placeholder, indent):
"""Generate the Returns section of a function/method docstring."""
# If all return values are None, return none
non_none_vals = [return_val for return_val in return_vals
if return_val and return_val != 'None']
if not non_none_vals:
return header + indent + 'None.'
# Get only values with matching brackets that can be cleaned up
non_none_vals = [return_val.strip(' ()\t\n').rstrip(',')
for return_val in non_none_vals]
non_none_vals = [re.sub('([\"\'])(?:(?=(\\\\?))\\2.)*?\\1',
'"string"', return_val)
for return_val in non_none_vals]
unambiguous_vals = []
for return_val in non_none_vals:
try:
cleaned_val = self.find_top_level_bracket_locations(return_val)
except IndexError:
continue
unambiguous_vals.append(cleaned_val)
if not unambiguous_vals:
return header + placeholder
# If remaining are a mix of tuples and not, return single placeholder
single_vals, tuple_vals = [], []
for return_val in unambiguous_vals:
(tuple_vals.append(return_val) if ',' in return_val
else single_vals.append(return_val))
if single_vals and tuple_vals:
return header + placeholder
# If return values are tuples of different length, return a placeholder
if tuple_vals:
num_elements = [return_val.count(',') + 1
for return_val in tuple_vals]
if num_elements.count(num_elements[0]) != len(num_elements):
return header + placeholder
num_elements = num_elements[0]
else:
num_elements = 1
# If all have the same len but some ambiguous return that placeholders
if len(unambiguous_vals) != len(non_none_vals):
return header + '\n'.join(
[placeholder for __ in range(num_elements)])
# Handle tuple (or single) values position by position
return_vals_grouped = zip(*[
[return_element.strip() for return_element in
return_val.split(',')]
for return_val in unambiguous_vals])
return_elements_out = []
for return_vals_group in return_vals_grouped:
return_elements_out.append(
self.parse_return_elements(return_vals_group,
return_element_name,
return_element_type,
placeholder))
return header + '\n'.join(return_elements_out)
| DocstringWriterExtension |
python | pypa__twine | twine/exceptions.py | {
"start": 2821,
"end": 4366
} | class ____(TwineException):
"""An upload attempt was detected using features not supported by a repository.
The features specified either in configuration or on the command-line.
"""
class Builder:
"""Build the parameters for an UnsupportedConfiguration exception.
In the event we add additional features we are not allowing with
something other than PyPI or TestPyPI, we can use a builder to
accumulate them all instead of requiring someone to run multiple times
to discover all unsupported configuration options.
"""
repository_url: str
features: t.List[str]
def __init__(self) -> None:
self.repository_url = ""
self.features = []
def with_repository_url(
self, repository_url: str
) -> "UnsupportedConfiguration.Builder":
self.repository_url = repository_url
return self
def with_feature(self, feature: str) -> "UnsupportedConfiguration.Builder":
self.features.append(feature)
return self
def finalize(self) -> "UnsupportedConfiguration":
return UnsupportedConfiguration(
f"The configured repository {self.repository_url!r} does not "
"have support for the following features: "
f"{', '.join(self.features)} and is an unsupported "
"configuration",
self.repository_url,
*self.features,
)
| UnsupportedConfiguration |
python | run-llama__llama_index | llama-index-core/llama_index/core/callbacks/pythonically_printing_base_handler.py | {
"start": 191,
"end": 1435
} | class ____(BaseCallbackHandler):
"""
Callback handler that prints logs in a Pythonic way. That is, not using `print` at all; use the logger instead.
See https://stackoverflow.com/a/6918596/1147061 for why you should prefer using a logger over `print`.
This class is meant to be subclassed, not used directly.
Using this class, your LlamaIndex Callback Handlers can now make use of vanilla Python logging handlers now.
One popular choice is https://rich.readthedocs.io/en/stable/logging.html#logging-handler.
"""
def __init__(
self,
event_starts_to_ignore: Optional[List[CBEventType]] = None,
event_ends_to_ignore: Optional[List[CBEventType]] = None,
logger: Optional[logging.Logger] = None,
) -> None:
self.logger: Optional[logging.Logger] = logger
super().__init__(
event_starts_to_ignore=event_starts_to_ignore or [],
event_ends_to_ignore=event_ends_to_ignore or [],
)
def _print(self, print_str: str) -> None:
if self.logger:
self.logger.debug(print_str)
else:
# This branch is to preserve existing behavior.
print(print_str, flush=True)
| PythonicallyPrintingBaseHandler |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/from_tensors_test.py | {
"start": 2432,
"end": 13152
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testFromTensors(self):
"""Test a dataset that represents a single tuple of tensors."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(
[c.shape for c in components],
nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)))
self.assertDatasetProduces(dataset, expected_output=[components])
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsDataset(self):
"""Test a dataset that represents a dataset."""
dataset = dataset_ops.Dataset.from_tensors(dataset_ops.Dataset.range(10))
dataset = dataset.flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=range(10))
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsTensorArray(self):
"""Test a dataset that represents a TensorArray."""
components = (
tensor_array_ops.TensorArray(dtypes.float32, element_shape=(), size=2)
.unstack([1.0, 2.0]))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertDatasetProduces(
dataset, expected_output=[[1.0, 2.0]], requires_initialization=True)
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsSparse(self):
"""Test a dataset that represents a single tuple of tensors."""
components = (sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1, 1]),
dense_shape=np.array([2, 2])))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(
[tensor_shape.TensorShape(c.dense_shape) for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset, expected_output=[components])
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsMixed(self):
"""Test an dataset that represents a single tuple of tensors."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1, 1]),
dense_shape=np.array([2, 2])))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual([
tensor_shape.TensorShape(c.dense_shape)
if sparse_tensor.is_sparse(c) else c.shape for c in components
], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset, expected_output=[components])
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsRagged(self):
components = (
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]),
ragged_factory_ops.constant_value([[[3]], [[4]], [[5]]]),
)
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertDatasetProduces(dataset, expected_output=[components])
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsNamedTuple(self):
Foo = collections.namedtuple("Foo", ["x", "y"])
element = Foo(x=1, y=2)
dataset = dataset_ops.Dataset.from_tensors(element)
self.assertDatasetProduces(dataset, expected_output=[element])
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsAttrs(self):
if attr is None:
self.skipTest("attr module is not available.")
@attr.s
class Foo:
x = attr.ib()
y = attr.ib()
element = Foo(x=1, y=2)
dataset = dataset_ops.Dataset.from_tensors(element)
self.assertDatasetProduces(dataset, expected_output=[element])
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsDataclass(self):
mt = MaskedTensor(mask=True, value=np.array([1]))
dataset = dataset_ops.Dataset.from_tensors(mt)
self.assertDatasetProduces(dataset, expected_output=[mt])
@combinations.generate(test_base.default_test_combinations())
def testFromTensorsMixedRagged(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0),
sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0]),
dense_shape=np.array([1])),
sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1, 1]),
dense_shape=np.array([2, 2])),
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]]]))
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertDatasetProduces(dataset, expected_output=[components])
@combinations.generate(
combinations.combine(
tf_api_version=[1],
mode=["graph"],
components=(np.array([1, 2, 3], dtype=np.int64),
(np.array([4., 5.]), np.array(
[6., 7.])), np.array([8, 9, 10], dtype=np.int64)),
expected_shapes=[[[None, 3], [None, 3], [None, 2], [None, 2]]]) +
combinations.combine(
tf_api_version=[1],
mode=["eager"],
components=(np.array([1, 2, 3], dtype=np.int64),
(np.array([4., 5.]), np.array(
[6., 7.])), np.array([8, 9, 10], dtype=np.int64)),
expected_shapes=[[[1, 3], [1, 3], [1, 2], [1, 2]]]))
def testNestedStructure(self, components, expected_shapes):
dataset = dataset_ops.Dataset.from_tensors(components)
dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
dataset = dataset.flat_map(
lambda x, y: dataset_ops.Dataset.from_tensors(
((x[0], x[1]), (y[0], y[1])))).batch(32)
get_next = self.getNext(dataset)
(w, x), (y, z) = get_next()
self.assertEqual(dtypes.int64, w.dtype)
self.assertEqual(dtypes.int64, x.dtype)
self.assertEqual(dtypes.float64, y.dtype)
self.assertEqual(dtypes.float64, z.dtype)
self.assertEqual(expected_shapes, [
w.shape.as_list(),
x.shape.as_list(),
y.shape.as_list(),
z.shape.as_list()
])
get_next = self.getNext(dataset)
(w, x), (y, z) = get_next()
self.assertEqual(dtypes.int64, w.dtype)
self.assertEqual(dtypes.int64, x.dtype)
self.assertEqual(dtypes.float64, y.dtype)
self.assertEqual(dtypes.float64, z.dtype)
self.assertEqual(expected_shapes, [
w.shape.as_list(),
x.shape.as_list(),
y.shape.as_list(),
z.shape.as_list()
])
@combinations.generate(test_base.default_test_combinations())
def testNestedDict(self):
components = {"a": {"aa": 1, "ab": [2.0, 2.0]}, "b": [3, 3, 3]}
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(dtypes.int32,
dataset_ops.get_legacy_output_types(dataset)["a"]["aa"])
self.assertEqual(dtypes.float32,
dataset_ops.get_legacy_output_types(dataset)["a"]["ab"])
self.assertEqual(dtypes.int32,
dataset_ops.get_legacy_output_types(dataset)["b"])
self.assertEqual([],
dataset_ops.get_legacy_output_shapes(dataset)["a"]["aa"])
self.assertEqual([2],
dataset_ops.get_legacy_output_shapes(dataset)["a"]["ab"])
self.assertEqual([3],
dataset_ops.get_legacy_output_shapes(dataset)["b"])
@combinations.generate(test_base.default_test_combinations())
def testNonSequenceNestedStructure(self):
components = np.array([1, 2, 3], dtype=np.int64)
dataset = dataset_ops.Dataset.from_tensors(components)
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.filter(
lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.map(lambda x: array_ops_stack.stack([x, x]))
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([2, 3], dataset_ops.get_legacy_output_shapes(dataset))
dataset = dataset.flat_map(
lambda x: dataset_ops.Dataset.from_tensor_slices(x))
self.assertEqual(dtypes.int64,
dataset_ops.get_legacy_output_types(dataset))
self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))
get_next = self.getNext(dataset)
self.assertEqual(dtypes.int64, get_next().dtype)
self.assertEqual([3], get_next().shape)
# TODO(b/121264236): needs mechanism for multiple device in eager mode.
@combinations.generate(test_base.graph_only_combinations())
def testSplitPipeline(self):
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
dataset = dataset_ops.Dataset.from_tensors(0)
# Define a pipeline that attempts to use variables on two
# different devices.
#
# Initialize the variables before creating to iterator, to avoid the
# placement algorithm overriding the DT_RESOURCE colocation constraints.
with ops.device("/cpu:0"):
var_0 = resource_variable_ops.ResourceVariable(initial_value=1)
dataset = dataset.map(lambda x: x + var_0.read_value())
sess.run(var_0.initializer)
with ops.device("/cpu:1"):
var_1 = resource_variable_ops.ResourceVariable(initial_value=1)
dataset = dataset.map(lambda x: x + var_1.read_value())
sess.run(var_1.initializer)
iterator = dataset_ops.make_initializable_iterator(dataset)
sess.run(iterator.initializer)
self.assertEqual(sess.run(iterator.get_next()), 2)
@combinations.generate(test_base.default_test_combinations())
def testName(self):
dataset = dataset_ops.Dataset.from_tensors(42, name="from_tensors")
self.assertDatasetProduces(dataset, [42])
| FromTensorsTest |
python | django__django | tests/utils_tests/test_http.py | {
"start": 589,
"end": 3352
} | class ____(SimpleTestCase):
cannot_encode_none_msg = (
"Cannot encode None for key 'a' in a query string. Did you mean to "
"pass an empty string or omit the value?"
)
def test_tuples(self):
self.assertEqual(urlencode((("a", 1), ("b", 2), ("c", 3))), "a=1&b=2&c=3")
def test_dict(self):
result = urlencode({"a": 1, "b": 2, "c": 3})
self.assertEqual(result, "a=1&b=2&c=3")
def test_dict_containing_sequence_not_doseq(self):
self.assertEqual(urlencode({"a": [1, 2]}, doseq=False), "a=%5B1%2C+2%5D")
def test_dict_containing_tuple_not_doseq(self):
self.assertEqual(urlencode({"a": (1, 2)}, doseq=False), "a=%281%2C+2%29")
def test_custom_iterable_not_doseq(self):
class IterableWithStr:
def __str__(self):
return "custom"
def __iter__(self):
yield from range(0, 3)
self.assertEqual(urlencode({"a": IterableWithStr()}, doseq=False), "a=custom")
def test_dict_containing_sequence_doseq(self):
self.assertEqual(urlencode({"a": [1, 2]}, doseq=True), "a=1&a=2")
def test_dict_containing_empty_sequence_doseq(self):
self.assertEqual(urlencode({"a": []}, doseq=True), "")
def test_multivaluedict(self):
result = urlencode(
MultiValueDict(
{
"name": ["Adrian", "Simon"],
"position": ["Developer"],
}
),
doseq=True,
)
self.assertEqual(result, "name=Adrian&name=Simon&position=Developer")
def test_dict_with_bytes_values(self):
self.assertEqual(urlencode({"a": b"abc"}, doseq=True), "a=abc")
def test_dict_with_sequence_of_bytes(self):
self.assertEqual(
urlencode({"a": [b"spam", b"eggs", b"bacon"]}, doseq=True),
"a=spam&a=eggs&a=bacon",
)
def test_dict_with_bytearray(self):
self.assertEqual(urlencode({"a": bytearray(range(2))}, doseq=True), "a=0&a=1")
def test_generator(self):
self.assertEqual(urlencode({"a": range(2)}, doseq=True), "a=0&a=1")
self.assertEqual(urlencode({"a": range(2)}, doseq=False), "a=range%280%2C+2%29")
def test_none(self):
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({"a": None})
def test_none_in_sequence(self):
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({"a": [None]}, doseq=True)
def test_none_in_generator(self):
def gen():
yield None
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({"a": gen()}, doseq=True)
| URLEncodeTests |
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-inversions.py | {
"start": 1266,
"end": 2319
} | class ____(object):
def numberOfPermutations(self, n, requirements):
"""
:type n: int
:type requirements: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
lookup = [-1]*n
for i, c in requirements:
lookup[i] = c
dp = [0]*(lookup[-1]+1)
dp[0] = 1
for i in xrange(n):
new_dp = [0]*len(dp)
if lookup[i] != -1: # optimized
new_dp[lookup[i]] = reduce(lambda total, i: (total+dp[i])%MOD, xrange(max(lookup[i]-i, 0), lookup[i]+1), 0)
else:
for j in xrange(len(dp)):
new_dp[j] = dp[j]
if j-1 >= 0:
new_dp[j] = (new_dp[j]+new_dp[j-1])%MOD
if j-(i+1) >= 0:
new_dp[j] = (new_dp[j]-dp[j-(i+1)])%MOD
dp = new_dp
return dp[-1]
# Time: O(n * k), k = max(cnt for _, cnt in requirements)
# Space: O(n + k)
# knapsack dp, combinatorics, sliding window, two pointers
| Solution2 |
python | django__django | tests/many_to_one_null/models.py | {
"start": 525,
"end": 620
} | class ____(models.Model):
make = models.CharField(max_length=100, null=True, unique=True)
| Car |
python | nedbat__coveragepy | tests/test_concurrency.py | {
"start": 1732,
"end": 5060
} | class ____(CoverageTest):
"""Test the helpers here."""
run_in_temp_dir = False
def test_line_count(self) -> None:
CODE = """
# Hey there!
x = 1
if x:
print("hello")
else:
print("bye")
print("done")
"""
assert line_count(CODE) == 5
# The code common to all the concurrency models.
SUM_RANGE_Q = """
# Above this will be imports defining queue and threading.
class Producer(threading.Thread):
def __init__(self, limit, q):
threading.Thread.__init__(self)
self.limit = limit
self.q = q
def run(self):
for i in range(self.limit):
self.q.put(i)
self.q.put(None)
class Consumer(threading.Thread):
def __init__(self, q, qresult):
threading.Thread.__init__(self)
self.q = q
self.qresult = qresult
def run(self):
sum = 0
while "no peephole".upper():
i = self.q.get()
if i is None:
break
sum += i
self.qresult.put(sum)
def sum_range(limit):
q = queue.Queue()
qresult = queue.Queue()
c = Consumer(q, qresult)
p = Producer(limit, q)
c.start()
p.start()
p.join()
c.join()
return qresult.get()
# Below this will be something using sum_range.
"""
PRINT_SUM_RANGE = """
print(sum_range({QLIMIT}))
"""
# Import the things to use threads.
THREAD = """
import threading
import queue
"""
# Import the things to use eventlet.
EVENTLET = """
import eventlet.green.threading as threading
import eventlet.queue as queue
"""
# Import the things to use gevent.
GEVENT = """
from gevent import monkey
monkey.patch_thread()
import threading
import gevent.queue as queue
"""
# Uncomplicated code that doesn't use any of the concurrency stuff, to test
# the simple case under each of the regimes.
SIMPLE = """
total = 0
for i in range({QLIMIT}):
total += i
print(total)
"""
def cant_trace_msg(concurrency: str, the_module: ModuleType | None) -> str | None:
"""What might coverage.py say about a concurrency setting and imported module?"""
# In the concurrency choices, "multiprocessing" doesn't count, so remove it.
if "multiprocessing" in concurrency:
parts = concurrency.split(",")
parts.remove("multiprocessing")
concurrency = ",".join(parts)
if testenv.SYS_MON and concurrency:
expected_out = f"Can't use core=sysmon: it doesn't support concurrency={concurrency}"
elif the_module is None:
# We don't even have the underlying module installed, we expect
# coverage to alert us to this fact.
expected_out = f"Couldn't trace with concurrency={concurrency}, the module isn't installed."
elif testenv.C_TRACER or concurrency == "thread" or concurrency == "":
expected_out = None
else:
expected_out = (
f"Can't support concurrency={concurrency} with {testenv.REQUESTED_TRACER_CLASS}, "
+ "only threads are supported."
)
return expected_out
| LineCountTest |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 7411,
"end": 7511
} | class ____(SyntaxException):
"""Invalid syntax within NatSpec docstring."""
| NatSpecSyntaxException |
python | PyCQA__pylint | doc/data/messages/i/inherit-non-class/good.py | {
"start": 0,
"end": 50
} | class ____:
def __bool__(self):
pass
| Fruit |
python | ray-project__ray | python/ray/llm/_internal/common/utils/cloud_utils.py | {
"start": 16164,
"end": 18476
} | class ____:
"""Unified accessor for models stored in cloud storage (S3 or GCS).
Args:
model_id: The model id to download or upload.
mirror_config: The mirror config for the model.
"""
def __init__(self, model_id: str, mirror_config: CloudMirrorConfig):
self.model_id = model_id
self.mirror_config = mirror_config
def _get_lock_path(self, suffix: str = "") -> Path:
return Path(
"~", f"{self.model_id.replace('/', '--')}{suffix}.lock"
).expanduser()
def _get_model_path(self) -> Path:
if Path(self.model_id).exists():
return Path(self.model_id)
# Delayed import to avoid circular dependencies
from transformers.utils.hub import TRANSFORMERS_CACHE
return Path(
TRANSFORMERS_CACHE, f"models--{self.model_id.replace('/', '--')}"
).expanduser()
def remote_object_cache(
max_size: int,
missing_expire_seconds: Optional[int] = None,
exists_expire_seconds: Optional[int] = None,
missing_object_value: Any = None,
) -> Callable[[Callable[..., T]], Callable[..., T]]:
"""A decorator that provides async caching using CloudObjectCache.
This is a direct replacement for the remote_object_cache/cachetools combination,
using CloudObjectCache internally to maintain cache state.
Args:
max_size: Maximum number of items to store in cache
missing_expire_seconds: How long to cache missing objects
exists_expire_seconds: How long to cache existing objects
missing_object_value: Value to use for missing objects
"""
def decorator(func: Callable[..., T]) -> Callable[..., T]:
# Create a single cache instance for this function
cache = CloudObjectCache(
max_size=max_size,
fetch_fn=func,
missing_expire_seconds=missing_expire_seconds,
exists_expire_seconds=exists_expire_seconds,
missing_object_value=missing_object_value,
)
async def wrapper(*args, **kwargs):
# Extract the key from either first positional arg or object_uri kwarg
key = args[0] if args else kwargs.get("object_uri")
return await cache.aget(key)
return wrapper
return decorator
| CloudModelAccessor |
python | huggingface__transformers | tests/models/gemma3n/test_modeling_gemma3n.py | {
"start": 33380,
"end": 36171
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (Gemma3nModel, Gemma3nForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (Gemma3nForConditionalGeneration,) if is_torch_available() else ()
test_missing_keys = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
# MP works but offload doesn't work when the SigLIP MultiheadAttention is offloaded
# TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"]
# in the dispatch_model function
test_cpu_offload = False
test_disk_offload_safetensors = False
test_disk_offload_bin = False
def setUp(self):
self.model_tester = Gemma3nVision2TextModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=Gemma3nConfig,
hidden_size=37,
text_config={"activation_sparsity_pattern": None},
)
@unittest.skip(reason="SiglipVisionModel (vision backbone) does not support standalone training")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="SiglipVisionModel (vision backbone) does not support standalone training")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="SiglipVisionModel (vision backbone) does not support standalone training")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(
reason="Siglip has no FLEX attention, and we don't have a proper way to set/test attn in VLMs. TODO @raushan"
)
def test_flex_attention_with_grads(self):
pass
@unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
def test_eager_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("Gemma3n applies key/query norm which doesn't work with packing")
def test_sdpa_padding_matches_padding_free_with_position_ids(self):
pass
def test_automodelforcausallm(self):
"""
Regression test for #36741 -- make sure `AutoModelForCausalLM` works with a Gemma3n config, i.e. that
`AutoModelForCausalLM.from_pretrained` pulls the text config before loading the model
"""
config = self.model_tester.get_config()
model = Gemma3nForConditionalGeneration(config)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
for_causal_lm = AutoModelForCausalLM.from_pretrained(tmp_dir)
self.assertIsInstance(for_causal_lm, Gemma3nForCausalLM)
@slow
@require_torch_accelerator
@require_read_token
| Gemma3nVision2TextModelTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/loop51.py | {
"start": 251,
"end": 410
} | class ____(StrEnum):
A = "A"
for _ in range(2):
x: dict[MyEnum, int] = {}
if MyEnum.A in x:
...
for _ in x.values():
...
| MyEnum |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType7.py | {
"start": 117,
"end": 276
} | class ____:
pass
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2", bound=ClassA)
_T2A = TypeVar("_T2A", bound=ClassA)
_T3 = TypeVar("_T3", ClassA, int, str)
| ClassA |
python | getsentry__sentry | src/sentry/db/models/fields/gzippeddict.py | {
"start": 303,
"end": 1799
} | class ____(TextField):
"""
Slightly different from a JSONField in the sense that the default
value is a dictionary.
"""
def contribute_to_class(self, cls: type[Model], name: str, private_only: bool = False) -> None:
"""
Add a descriptor for backwards compatibility
with previous Django behavior.
"""
super().contribute_to_class(cls, name, private_only=private_only)
setattr(cls, name, Creator(self))
def to_python(self, value):
try:
if not value:
return {}
return json.loads(value)
except (ValueError, TypeError):
if isinstance(value, str) and value:
try:
value = pickle.loads(decompress(value))
except Exception as e:
logger.exception(str(e))
return {}
elif not value:
return {}
return value
def from_db_value(self, value, expression, connection):
return self.to_python(value)
def get_prep_value(self, value):
if not value and self.null:
# save ourselves some storage
return None
elif isinstance(value, bytes):
value = value.decode("utf-8")
if value is None and self.null:
return None
return json.dumps(value)
def value_to_string(self, obj):
return self.get_prep_value(self.value_from_object(obj))
| GzippedDictField |
python | arrow-py__arrow | arrow/parser.py | {
"start": 2044,
"end": 3664
} | class ____(TypedDict, total=False):
"""
A dictionary that represents different parts of a datetime.
:class:`_Parts` is a TypedDict that represents various components of a date or time,
such as year, month, day, hour, minute, second, microsecond, timestamp, expanded_timestamp, tzinfo,
am_pm, day_of_week, and weekdate.
:ivar year: The year, if present, as an integer.
:ivar month: The month, if present, as an integer.
:ivar day_of_year: The day of the year, if present, as an integer.
:ivar day: The day, if present, as an integer.
:ivar hour: The hour, if present, as an integer.
:ivar minute: The minute, if present, as an integer.
:ivar second: The second, if present, as an integer.
:ivar microsecond: The microsecond, if present, as an integer.
:ivar timestamp: The timestamp, if present, as a float.
:ivar expanded_timestamp: The expanded timestamp, if present, as an integer.
:ivar tzinfo: The timezone info, if present, as a :class:`dt_tzinfo` object.
:ivar am_pm: The AM/PM indicator, if present, as a string literal "am" or "pm".
:ivar day_of_week: The day of the week, if present, as an integer.
:ivar weekdate: The week date, if present, as a tuple of three integers or None.
"""
year: int
month: int
day_of_year: int
day: int
hour: int
minute: int
second: int
microsecond: int
timestamp: float
expanded_timestamp: int
tzinfo: dt_tzinfo
am_pm: Literal["am", "pm"]
day_of_week: int
weekdate: Tuple[_WEEKDATE_ELEMENT, _WEEKDATE_ELEMENT, Optional[_WEEKDATE_ELEMENT]]
| _Parts |
python | Textualize__textual | tests/test_markdown.py | {
"start": 472,
"end": 705
} | class ____(MarkdownBlock):
def __init__(self, markdown: Markdown, token: Token) -> None:
super().__init__(markdown)
self._token = token
def __repr___(self) -> str:
return self._token.type
| UnhandledToken |
python | euske__pdfminer | pdfminer/layout.py | {
"start": 290,
"end": 644
} | class ____:
def __init__(self, index=0):
self.index = index
return
def run(self, obj):
if isinstance(obj, LTTextBox):
obj.index = self.index
self.index += 1
elif isinstance(obj, LTTextGroup):
for x in obj:
self.run(x)
return
## LAParams
##
| IndexAssigner |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_color__property.py | {
"start": 4169,
"end": 5873
} | class ____:
def test_valid(self) -> None:
prop = bcpc.RGB()
assert prop.is_valid(RGB(10, 20, 30))
def test_invalid(self) -> None:
prop = bcpc.RGB()
assert not prop.is_valid(None)
assert not prop.is_valid(False)
assert not prop.is_valid(True)
assert not prop.is_valid(0)
assert not prop.is_valid(1)
assert not prop.is_valid(0.0)
assert not prop.is_valid(1.0)
assert not prop.is_valid(1.0 + 1.0j)
assert not prop.is_valid("")
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
assert not prop.is_valid((0, 127, 255))
assert not prop.is_valid((0, 127, 255, 1.0))
assert not prop.is_valid((0, -127, 255))
assert not prop.is_valid((0, 127))
assert not prop.is_valid((0, 127, 1.0))
assert not prop.is_valid((0, 127, 255, 255))
assert not prop.is_valid("#00aaff")
assert not prop.is_valid("#00AAFF")
assert not prop.is_valid("#00AaFf")
assert not prop.is_valid("00aaff")
assert not prop.is_valid("00AAFF")
assert not prop.is_valid("00AaFf")
assert not prop.is_valid("#00AaFg")
assert not prop.is_valid("#00AaFff")
assert not prop.is_valid("blue")
assert not prop.is_valid("BLUE")
assert not prop.is_valid("foobar")
def test_has_ref(self) -> None:
prop = bcpc.RGB()
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpc.RGB()
assert str(prop) == "RGB"
| Test_RGB |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 15036,
"end": 16814
} | class ____(SingleContinuousDistribution):
_argnames = ('alpha', 'beta')
@staticmethod
def check(alpha, beta):
_value_check(alpha > 0, "Shape parameter Alpha must be positive.")
_value_check(beta > 0, "Shape parameter Beta must be positive.")
set = Interval(0, oo)
def pdf(self, x):
alpha, beta = self.alpha, self.beta
return x**(alpha - 1)*(1 + x)**(-alpha - beta)/beta_fn(alpha, beta)
def BetaPrime(name, alpha, beta):
r"""
Create a continuous random variable with a Beta prime distribution.
The density of the Beta prime distribution is given by
.. math::
f(x) := \frac{x^{\alpha-1} (1+x)^{-\alpha -\beta}}{B(\alpha,\beta)}
with :math:`x > 0`.
Parameters
==========
alpha : Real number, `\alpha > 0`, a shape
beta : Real number, `\beta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import BetaPrime, density
>>> from sympy import Symbol, pprint
>>> alpha = Symbol("alpha", positive=True)
>>> beta = Symbol("beta", positive=True)
>>> z = Symbol("z")
>>> X = BetaPrime("x", alpha, beta)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
alpha - 1 -alpha - beta
z *(z + 1)
-------------------------------
B(alpha, beta)
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta_prime_distribution
.. [2] https://mathworld.wolfram.com/BetaPrimeDistribution.html
"""
return rv(name, BetaPrimeDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Bounded Pareto Distribution --------------------------------------------------
| BetaPrimeDistribution |
python | fabric__fabric | tests/group.py | {
"start": 4167,
"end": 6407
} | class ____:
@mark.parametrize("method", ALL_METHODS)
def executes_arguments_on_contents_run_serially(self, method):
"executes arguments on contents' run() serially"
cxns = [Connection(x) for x in ("host1", "host2", "host3")]
args = ARGS_BY_METHOD[method]
kwargs = KWARGS_BY_METHOD[method]
for index, cxn in enumerate(cxns):
side_effect = _make_serial_tester(
method, cxns, index, args, kwargs
)
setattr(cxn, method, Mock(side_effect=side_effect))
g = SerialGroup.from_connections(cxns)
getattr(g, method)(*args, **kwargs)
# Sanity check, e.g. in case none of them were actually run
for cxn in cxns:
getattr(cxn, method).assert_called_with(*args, **kwargs)
@mark.parametrize("method", ALL_METHODS)
def errors_in_execution_capture_and_continue_til_end(self, method):
cxns = [Mock(name=x) for x in ("host1", "host2", "host3")]
class OhNoz(Exception):
pass
onoz = OhNoz()
getattr(cxns[1], method).side_effect = onoz
g = SerialGroup.from_connections(cxns)
try:
getattr(g, method)("whatever", hide=True)
except GroupException as e:
result = e.result
else:
assert False, "Did not raise GroupException!"
succeeded = {
cxns[0]: getattr(cxns[0], method).return_value,
cxns[2]: getattr(cxns[2], method).return_value,
}
failed = {cxns[1]: onoz}
expected = succeeded.copy()
expected.update(failed)
assert result == expected
assert result.succeeded == succeeded
assert result.failed == failed
@mark.parametrize("method", ALL_METHODS)
def returns_results_mapping(self, method):
cxns = [Mock(name=x) for x in ("host1", "host2", "host3")]
g = SerialGroup.from_connections(cxns)
result = getattr(g, method)("whatever", hide=True)
assert isinstance(result, GroupResult)
expected = {x: getattr(x, method).return_value for x in cxns}
assert result == expected
assert result.succeeded == expected
assert result.failed == {}
| SerialGroup_ |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 17040,
"end": 19231
} | class ____(TestCase):
def test_unique_values(self):
n = 8
expected = set(
x
for x in permutations(range(n))
if not any(x[i] == i for i in range(n))
)
for i, iterable in enumerate(
[
range(n),
list(range(n)),
set(range(n)),
]
):
actual = set(mi.derangements(iterable))
self.assertEqual(actual, expected)
def test_repeated_values(self):
self.assertEqual(
[''.join(x) for x in mi.derangements('AACD')],
[
'AADC',
'ACDA',
'ADAC',
'CADA',
'CDAA',
'CDAA',
'DAAC',
'DCAA',
'DCAA',
],
)
def test_unsortable_unhashable(self):
iterable = (0, True, ['Carol'])
actual = list(mi.derangements(iterable))
expected = [(True, ['Carol'], 0), (['Carol'], 0, True)]
self.assertListEqual(actual, expected)
def test_r(self):
s = 'ABCD'
for r, expected in [
(0, ['']),
(1, ['B', 'C', 'D']),
(2, ['BA', 'BC', 'BD', 'CA', 'CD', 'DA', 'DC']),
(
3,
[
'BAD',
'BCA',
'BCD',
'BDA',
'CAB',
'CAD',
'CDA',
'CDB',
'DAB',
'DCA',
'DCB',
],
),
(
4,
[
'BADC',
'BCDA',
'BDAC',
'CADB',
'CDAB',
'CDBA',
'DABC',
'DCAB',
'DCBA',
],
),
]:
with self.subTest(r=r):
actual = [''.join(x) for x in mi.derangements(s, r=r)]
self.assertEqual(actual, expected)
| DerangementsTests |
python | walkccc__LeetCode | solutions/1609. Even Odd Tree/1609.py | {
"start": 0,
"end": 660
} | class ____:
def isEvenOddTree(self, root: TreeNode | None) -> bool:
q = collections.deque([root])
isEven = True
while q:
prevVal = -math.inf if isEven else math.inf
for _ in range(sz):
node = q.popleft()
if isEven and (node.val % 2 == 0 or node.val <= prevVal):
return False # invalid case on even level
if not isEven and (node.val % 2 == 1 or node.val >= prevVal):
return False # invalid case on odd level
prevVal = node.val
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
isEven = not isEven
return True
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image16.py | {
"start": 315,
"end": 845
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image16.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("C2", self.image_dir + "issue32.png")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | squidfunk__mkdocs-material | material/plugins/info/config.py | {
"start": 1410,
"end": 1656
} | class ____(Config):
enabled = Type(bool, default = True)
enabled_on_serve = Type(bool, default = False)
# Settings for archive
archive = Type(bool, default = True)
archive_stop_on_violation = Type(bool, default = True)
| InfoConfig |
python | nedbat__coveragepy | tests/test_testing.py | {
"start": 13322,
"end": 13798
} | class ____(CoverageTest):
"""Tests of arcz/arcs helpers."""
run_in_temp_dir = False
@pytest.mark.parametrize(
"arcz, arcs",
[
(".1 12 2.", [(-1, 1), (1, 2), (2, -1)]),
("-11 12 2-5", [(-1, 1), (1, 2), (2, -5)]),
("-QA CB IT Z-A", [(-26, 10), (12, 11), (18, 29), (35, -10)]),
],
)
def test_arcz_to_arcs(self, arcz: str, arcs: list[TArc]) -> None:
assert arcz_to_arcs(arcz) == arcs
| ArczTest |
python | donnemartin__interactive-coding-challenges | graphs_trees/min_heap/test_min_heap.py | {
"start": 18,
"end": 1563
} | class ____(unittest.TestCase):
def test_min_heap(self):
heap = MinHeap()
self.assertEqual(heap.peek_min(), None)
self.assertEqual(heap.extract_min(), None)
heap.insert(20)
self.assertEqual(heap.array[0], 20)
heap.insert(5)
self.assertEqual(heap.array[0], 5)
self.assertEqual(heap.array[1], 20)
heap.insert(15)
self.assertEqual(heap.array[0], 5)
self.assertEqual(heap.array[1], 20)
self.assertEqual(heap.array[2], 15)
heap.insert(22)
self.assertEqual(heap.array[0], 5)
self.assertEqual(heap.array[1], 20)
self.assertEqual(heap.array[2], 15)
self.assertEqual(heap.array[3], 22)
heap.insert(40)
self.assertEqual(heap.array[0], 5)
self.assertEqual(heap.array[1], 20)
self.assertEqual(heap.array[2], 15)
self.assertEqual(heap.array[3], 22)
self.assertEqual(heap.array[4], 40)
heap.insert(3)
self.assertEqual(heap.array[0], 3)
self.assertEqual(heap.array[1], 20)
self.assertEqual(heap.array[2], 5)
self.assertEqual(heap.array[3], 22)
self.assertEqual(heap.array[4], 40)
self.assertEqual(heap.array[5], 15)
mins = []
while heap:
mins.append(heap.extract_min())
self.assertEqual(mins, [3, 5, 15, 20, 22, 40])
print('Success: test_min_heap')
def main():
test = TestMinHeap()
test.test_min_heap()
if __name__ == '__main__':
main()
| TestMinHeap |
python | mlflow__mlflow | mlflow/pyfunc/spark_model_cache.py | {
"start": 67,
"end": 2091
} | class ____:
"""Caches models in memory on Spark Executors, to avoid continually reloading from disk.
This class has to be part of a different module than the one that _uses_ it. This is
because Spark will pickle classes that are defined in the local scope, but relies on
Python's module loading behavior for classes in different modules. In this case, we
are relying on the fact that Python will load a module at-most-once, and can therefore
store per-process state in a static map.
"""
# Map from unique name --> (loaded model, local_model_path).
_models = {}
# Number of cache hits we've had, for testing purposes.
_cache_hits = 0
def __init__(self):
pass
@staticmethod
def add_local_model(spark, model_path):
"""Given a SparkSession and a model_path which refers to a pyfunc directory locally,
we will zip the directory up, enable it to be distributed to executors, and return
the "archive_path", which should be used as the path in get_or_load().
"""
return _SparkDirectoryDistributor.add_dir(spark, model_path)
@staticmethod
def get_or_load(archive_path):
"""Given a path returned by add_local_model(), this method will return a tuple of
(loaded_model, local_model_path).
If this Python process ever loaded the model before, we will reuse that copy.
"""
if archive_path in SparkModelCache._models:
SparkModelCache._cache_hits += 1
return SparkModelCache._models[archive_path]
local_model_dir = _SparkDirectoryDistributor.get_or_extract(archive_path)
# We must rely on a supposed cyclic import here because we want this behavior
# on the Spark Executors (i.e., don't try to pickle the load_model function).
from mlflow.pyfunc import load_model
SparkModelCache._models[archive_path] = (load_model(local_model_dir), local_model_dir)
return SparkModelCache._models[archive_path]
| SparkModelCache |
python | getsentry__sentry | src/sentry/backup/exports.py | {
"start": 3254,
"end": 11584
} | class ____(ExportCheckpointer):
"""
A noop checkpointer - that is, it doesn't write or read any checkpoints, always returning None.
This means that no checkpointing ever occurs.
"""
def __init__(self, crypto: EncryptorDecryptorPair | None, printer: Printer):
pass
def get(self, model_name: NormalizedModelName) -> RpcExportOk | None:
return None
def add(self, model_name: NormalizedModelName, json_data: str) -> None:
return None
def _export(
dest: IO[bytes],
scope: ExportScope,
*,
encryptor: Encryptor | None = None,
indent: int = 2,
filter_by: Filter | None = None,
printer: Printer,
checkpointer: ExportCheckpointer | None = None,
):
"""
Exports core data for the Sentry installation.
It is generally preferable to avoid calling this function directly, as there are certain
combinations of input parameters that should not be used together. Instead, use one of the other
wrapper functions in this file, named `export_in_XXX_scope()`.
"""
# Import here to prevent circular module resolutions.
from sentry.models.organization import Organization
from sentry.models.organizationmember import OrganizationMember
from sentry.users.models.user import User
if SiloMode.get_current_mode() == SiloMode.CONTROL:
errText = "Exports must be run in REGION or MONOLITH instances only"
printer.echo(errText, err=True)
raise RuntimeError(errText)
cache = checkpointer if checkpointer is not None else NoopExportCheckpointer(None, printer)
json_export = []
pk_map = PrimaryKeyMap()
allowed_relocation_scopes = scope.value
filters = []
if filter_by is not None:
filters.append(filter_by)
if filter_by.model == Organization:
if filter_by.field != "slug":
raise ValueError(
"Filter arguments must only apply to `Organization`'s `slug` field"
)
org_pks = set(
Organization.objects.filter(slug__in=filter_by.values).values_list("id", flat=True)
)
# Note: `user_id` can be NULL (for invited members that have not yet responded), but
# this is okay, because `Filter`s constructor explicitly filters out `None` members
# from the set.
user_pks = set(
OrganizationMember.objects.filter(organization_id__in=org_pks).values_list(
"user_id", flat=True
)
)
filters.append(Filter(User, "pk", set(user_pks)))
elif filter_by.model == User:
if filter_by.field not in {"pk", "id", "username"}:
raise ValueError("Filter arguments must only apply to `User`'s `username` field")
else:
raise ValueError("Filter arguments must only apply to `Organization` or `User` models")
# TODO(getsentry/team-ospo#190): Another optimization opportunity to use a generator with ijson
# # to print the JSON objects in a streaming manner.
for model in sorted_dependencies():
from sentry.db.models.base import BaseModel
if not issubclass(model, BaseModel):
continue
possible_relocation_scopes = model.get_possible_relocation_scopes()
includable = possible_relocation_scopes & allowed_relocation_scopes
if not includable or model._meta.proxy:
continue
model_name = get_model_name(model)
model_relations = dependencies().get(model_name)
if not model_relations:
continue
dep_models = {get_model_name(d) for d in model_relations.get_dependencies_for_relocation()}
export_by_model = ImportExportService.get_exporter_for_model(model)
cached_result = cache.get(model_name)
result = (
cached_result
if cached_result is not None
else export_by_model(
export_model_name=str(model_name),
scope=RpcExportScope.into_rpc(scope),
from_pk=0,
filter_by=[RpcFilter.into_rpc(f) for f in filters],
pk_map=RpcPrimaryKeyMap.into_rpc(pk_map.partition(dep_models)),
indent=indent,
)
)
if isinstance(result, RpcExportError):
printer.echo(result.pretty(), err=True)
raise ExportingError(result)
pk_map.extend(result.mapped_pks.from_rpc())
json_models = orjson.loads(result.json_data)
if cached_result is None:
cache.add(model_name, json_models)
# TODO(getsentry/team-ospo#190): Since the structure of this data is very predictable (an
# array of serialized model objects), we could probably avoid re-ingesting the JSON string
# as a future optimization.
for json_model in json_models:
json_export.append(json_model)
# If no `encryptor` argument was passed in, this is an unencrypted export, so we can just dump
# the JSON into the `dest` file and exit early.
if encryptor is None:
dest_wrapper = io.TextIOWrapper(dest, encoding="utf-8", newline="")
builtin_json.dump(json_export, dest_wrapper, indent=indent)
dest_wrapper.detach()
return
dest.write(create_encrypted_export_tarball(json_export, encryptor).getvalue())
def export_in_user_scope(
dest: IO[bytes],
*,
encryptor: Encryptor | None = None,
user_filter: set[str] | None = None,
indent: int = 2,
printer: Printer,
checkpointer: ExportCheckpointer | None = None,
):
"""
Perform an export in the `User` scope, meaning that only models with `RelocationScope.User` will
be exported from the provided `dest` file.
"""
# Import here to prevent circular module resolutions.
from sentry.users.models.user import User
return _export(
dest,
ExportScope.User,
encryptor=encryptor,
filter_by=Filter(User, "username", user_filter) if user_filter is not None else None,
indent=indent,
printer=printer,
checkpointer=checkpointer,
)
def export_in_organization_scope(
dest: IO[bytes],
*,
encryptor: Encryptor | None = None,
org_filter: set[str] | None = None,
indent: int = 2,
printer: Printer,
checkpointer: ExportCheckpointer | None = None,
):
"""
Perform an export in the `Organization` scope, meaning that only models with
`RelocationScope.User` or `RelocationScope.Organization` will be exported from the provided
`dest` file.
"""
# Import here to prevent circular module resolutions.
from sentry.models.organization import Organization
return _export(
dest,
ExportScope.Organization,
encryptor=encryptor,
filter_by=Filter(Organization, "slug", org_filter) if org_filter is not None else None,
indent=indent,
printer=printer,
checkpointer=checkpointer,
)
def export_in_config_scope(
dest: IO[bytes],
*,
encryptor: Encryptor | None = None,
indent: int = 2,
printer: Printer,
checkpointer: ExportCheckpointer | None = None,
):
"""
Perform an export in the `Config` scope, meaning that only models directly related to the global
configuration and administration of an entire Sentry instance will be exported.
"""
# Import here to prevent circular module resolutions.
from sentry.users.models.user import User
return _export(
dest,
ExportScope.Config,
encryptor=encryptor,
filter_by=Filter(User, "pk", import_export_service.get_all_globally_privileged_users()),
indent=indent,
printer=printer,
checkpointer=checkpointer,
)
def export_in_global_scope(
dest: IO[bytes],
*,
encryptor: Encryptor | None = None,
indent: int = 2,
printer: Printer,
checkpointer: ExportCheckpointer | None = None,
):
"""
Perform an export in the `Global` scope, meaning that all models will be exported from the
provided source file.
"""
return _export(
dest,
ExportScope.Global,
encryptor=encryptor,
indent=indent,
printer=printer,
checkpointer=checkpointer,
)
| NoopExportCheckpointer |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 37337,
"end": 39671
} | class ____:
"""Test fr_FR address provider methods"""
def test_street_prefix(self, faker, num_samples):
for _ in range(num_samples):
street_prefix = faker.street_prefix()
assert isinstance(street_prefix, str)
assert street_prefix in FrFrAddressProvider.street_prefixes
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in FrFrAddressProvider.city_prefixes
def test_region(self, faker, num_samples):
for _ in range(num_samples):
region = faker.region()
assert isinstance(region, str)
assert region in FrFrAddressProvider.regions
def test_department(self, faker, num_samples):
for _ in range(num_samples):
department = faker.department()
assert isinstance(department, tuple)
assert department in FrFrAddressProvider.departments
def test_department_name(self, faker, num_samples):
department_names = [dept_name for dept_num, dept_name in FrFrAddressProvider.departments]
for _ in range(num_samples):
department_name = faker.department_name()
assert isinstance(department_name, str)
assert department_name in department_names
def test_department_number(self, faker, num_samples):
department_numbers = [dept_num for dept_num, dept_name in FrFrAddressProvider.departments]
for _ in range(num_samples):
department_number = faker.department_number()
assert isinstance(department_number, str)
assert department_number in department_numbers
def test_postcode(self, faker, num_samples):
department_numbers = [dept_num for dept_num, dept_name in FrFrAddressProvider.departments]
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert len(postcode) == 5
assert (
postcode[:3] in department_numbers # for 3 digits departments number
or postcode[:2] == "20" # for Corsica : "2A" or "2B"
or postcode[:2] in department_numbers # any other
)
| TestFrFr |
python | django__django | tests/model_meta/tests.py | {
"start": 14866,
"end": 15100
} | class ____(SimpleTestCase):
def test_abstract_model_not_instantiated(self):
msg = "Abstract models cannot be instantiated."
with self.assertRaisesMessage(TypeError, msg):
AbstractPerson()
| AbstractModelTests |
python | keras-team__keras | keras/src/ops/math.py | {
"start": 3258,
"end": 4901
} | class ____(SegmentReduction):
def call(self, data, segment_ids):
_segment_reduce_validation(data, segment_ids)
return backend.math.segment_max(
data,
segment_ids,
num_segments=self.num_segments,
sorted=self.sorted,
)
@keras_export("keras.ops.segment_max")
def segment_max(data, segment_ids, num_segments=None, sorted=False):
"""Computes the max of segments in a tensor.
Args:
data: Input tensor.
segment_ids: A N-D tensor containing segment indices for each
element in `data`. data.shape[:len(segment_ids.shape)] should match.
num_segments: An integer representing the total number of
segments. If not specified, it is inferred from the maximum
value in `segment_ids`.
sorted: A boolean indicating whether `segment_ids` is sorted.
Defaults to `False`.
Returns:
A tensor containing the max of segments, where each element
represents the max of the corresponding segment in `data`.
Example:
>>> data = keras.ops.convert_to_tensor([1, 2, 10, 20, 100, 200])
>>> segment_ids = keras.ops.convert_to_tensor([0, 0, 1, 1, 2, 2])
>>> num_segments = 3
>>> keras.ops.segment_max(data, segment_ids, num_segments)
array([2, 20, 200], dtype=int32)
"""
_segment_reduce_validation(data, segment_ids)
if any_symbolic_tensors((data,)):
return SegmentMax(num_segments, sorted).symbolic_call(data, segment_ids)
return backend.math.segment_max(
data, segment_ids, num_segments=num_segments, sorted=sorted
)
| SegmentMax |
python | pypa__pip | src/pip/_vendor/resolvelib/providers.py | {
"start": 355,
"end": 8914
} | class ____(Generic[RT, CT, KT]):
"""Delegate class to provide the required interface for the resolver."""
def identify(self, requirement_or_candidate: RT | CT) -> KT:
"""Given a requirement or candidate, return an identifier for it.
This is used to identify, e.g. whether two requirements
should have their specifier parts merged or a candidate matches a
requirement via ``find_matches()``.
"""
raise NotImplementedError
def get_preference(
self,
identifier: KT,
resolutions: Mapping[KT, CT],
candidates: Mapping[KT, Iterator[CT]],
information: Mapping[KT, Iterator[RequirementInformation[RT, CT]]],
backtrack_causes: Sequence[RequirementInformation[RT, CT]],
) -> Preference:
"""Produce a sort key for given requirement based on preference.
As this is a sort key it will be called O(n) times per backtrack
step, where n is the number of `identifier`s, if you have a check
which is expensive in some sense. E.g. It needs to make O(n) checks
per call or takes significant wall clock time, consider using
`narrow_requirement_selection` to filter the `identifier`s, which
is applied before this sort key is called.
The preference is defined as "I think this requirement should be
resolved first". The lower the return value is, the more preferred
this group of arguments is.
:param identifier: An identifier as returned by ``identify()``. This
identifies the requirement being considered.
:param resolutions: Mapping of candidates currently pinned by the
resolver. Each key is an identifier, and the value is a candidate.
The candidate may conflict with requirements from ``information``.
:param candidates: Mapping of each dependency's possible candidates.
Each value is an iterator of candidates.
:param information: Mapping of requirement information of each package.
Each value is an iterator of *requirement information*.
:param backtrack_causes: Sequence of *requirement information* that are
the requirements that caused the resolver to most recently
backtrack.
A *requirement information* instance is a named tuple with two members:
* ``requirement`` specifies a requirement contributing to the current
list of candidates.
* ``parent`` specifies the candidate that provides (depended on) the
requirement, or ``None`` to indicate a root requirement.
The preference could depend on various issues, including (not
necessarily in this order):
* Is this package pinned in the current resolution result?
* How relaxed is the requirement? Stricter ones should probably be
worked on first? (I don't know, actually.)
* How many possibilities are there to satisfy this requirement? Those
with few left should likely be worked on first, I guess?
* Are there any known conflicts for this requirement? We should
probably work on those with the most known conflicts.
A sortable value should be returned (this will be used as the ``key``
parameter of the built-in sorting function). The smaller the value is,
the more preferred this requirement is (i.e. the sorting function
is called with ``reverse=False``).
"""
raise NotImplementedError
def find_matches(
self,
identifier: KT,
requirements: Mapping[KT, Iterator[RT]],
incompatibilities: Mapping[KT, Iterator[CT]],
) -> Matches[CT]:
"""Find all possible candidates that satisfy the given constraints.
:param identifier: An identifier as returned by ``identify()``. All
candidates returned by this method should produce the same
identifier.
:param requirements: A mapping of requirements that all returned
candidates must satisfy. Each key is an identifier, and the value
an iterator of requirements for that dependency.
:param incompatibilities: A mapping of known incompatibile candidates of
each dependency. Each key is an identifier, and the value an
iterator of incompatibilities known to the resolver. All
incompatibilities *must* be excluded from the return value.
This should try to get candidates based on the requirements' types.
For VCS, local, and archive requirements, the one-and-only match is
returned, and for a "named" requirement, the index(es) should be
consulted to find concrete candidates for this requirement.
The return value should produce candidates ordered by preference; the
most preferred candidate should come first. The return type may be one
of the following:
* A callable that returns an iterator that yields candidates.
* An collection of candidates.
* An iterable of candidates. This will be consumed immediately into a
list of candidates.
"""
raise NotImplementedError
def is_satisfied_by(self, requirement: RT, candidate: CT) -> bool:
"""Whether the given requirement can be satisfied by a candidate.
The candidate is guaranteed to have been generated from the
requirement.
A boolean should be returned to indicate whether ``candidate`` is a
viable solution to the requirement.
"""
raise NotImplementedError
def get_dependencies(self, candidate: CT) -> Iterable[RT]:
"""Get dependencies of a candidate.
This should return a collection of requirements that `candidate`
specifies as its dependencies.
"""
raise NotImplementedError
def narrow_requirement_selection(
self,
identifiers: Iterable[KT],
resolutions: Mapping[KT, CT],
candidates: Mapping[KT, Iterator[CT]],
information: Mapping[KT, Iterator[RequirementInformation[RT, CT]]],
backtrack_causes: Sequence[RequirementInformation[RT, CT]],
) -> Iterable[KT]:
"""
An optional method to narrow the selection of requirements being
considered during resolution. This method is called O(1) time per
backtrack step.
:param identifiers: An iterable of `identifiers` as returned by
``identify()``. These identify all requirements currently being
considered.
:param resolutions: A mapping of candidates currently pinned by the
resolver. Each key is an identifier, and the value is a candidate
that may conflict with requirements from ``information``.
:param candidates: A mapping of each dependency's possible candidates.
Each value is an iterator of candidates.
:param information: A mapping of requirement information for each package.
Each value is an iterator of *requirement information*.
:param backtrack_causes: A sequence of *requirement information* that are
the requirements causing the resolver to most recently
backtrack.
A *requirement information* instance is a named tuple with two members:
* ``requirement`` specifies a requirement contributing to the current
list of candidates.
* ``parent`` specifies the candidate that provides (is depended on for)
the requirement, or ``None`` to indicate a root requirement.
Must return a non-empty subset of `identifiers`, with the default
implementation being to return `identifiers` unchanged. Those `identifiers`
will then be passed to the sort key `get_preference` to pick the most
prefered requirement to attempt to pin, unless `narrow_requirement_selection`
returns only 1 requirement, in which case that will be used without
calling the sort key `get_preference`.
This method is designed to be used by the provider to optimize the
dependency resolution, e.g. if a check cost is O(m) and it can be done
against all identifiers at once then filtering the requirement selection
here will cost O(m) but making it part of the sort key in `get_preference`
will cost O(m*n), where n is the number of `identifiers`.
Returns:
Iterable[KT]: A non-empty subset of `identifiers`.
"""
return identifiers
| AbstractProvider |
python | allegroai__clearml | clearml/backend_api/services/v2_23/models.py | {
"start": 23863,
"end": 24960
} | class ____(Request):
"""
Archive models
:param ids: IDs of the models to archive
:type ids: Sequence[str]
"""
_service = "models"
_action = "archive_many"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "IDs of the models to archive",
"items": {"type": "string"},
"type": "array",
}
},
"required": ["ids"],
"type": "object",
}
def __init__(self, ids: List[str], **kwargs: Any) -> None:
super(ArchiveManyRequest, self).__init__(**kwargs)
self.ids = ids
@schema_property("ids")
def ids(self) -> List[str]:
return self._property_ids
@ids.setter
def ids(self, value: List[str]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
| ArchiveManyRequest |
python | scrapy__scrapy | tests/AsyncCrawlerProcess/asyncio_deferred_signal.py | {
"start": 188,
"end": 519
} | class ____:
async def _open_spider(self, spider):
spider.logger.info("async pipeline opened!")
await asyncio.sleep(0.1)
def open_spider(self, spider):
return deferred_from_coro(self._open_spider(spider))
def process_item(self, item):
return {"url": item["url"].upper()}
| UppercasePipeline |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.