language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | aimacode__aima-python | agents.py | {
"start": 28110,
"end": 28340
} | class ____(Agent):
holding = []
has_arrow = True
killed_by = ""
direction = Direction("right")
def can_grab(self, thing):
"""Explorer can only grab gold"""
return thing.__class__ == Gold
| Explorer |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/history.py | {
"start": 685,
"end": 2814
} | class ____(metaclass=ABCMeta):
"""
Base ``History`` class.
This also includes abstract methods for loading/storing history.
"""
def __init__(self) -> None:
# In memory storage for strings.
self._loaded = False
# History that's loaded already, in reverse order. Latest, most recent
# item first.
self._loaded_strings: list[str] = []
#
# Methods expected by `Buffer`.
#
async def load(self) -> AsyncGenerator[str, None]:
"""
Load the history and yield all the entries in reverse order (latest,
most recent history entry first).
This method can be called multiple times from the `Buffer` to
repopulate the history when prompting for a new input. So we are
responsible here for both caching, and making sure that strings that
were were appended to the history will be incorporated next time this
method is called.
"""
if not self._loaded:
self._loaded_strings = list(self.load_history_strings())
self._loaded = True
for item in self._loaded_strings:
yield item
def get_strings(self) -> list[str]:
"""
Get the strings from the history that are loaded so far.
(In order. Oldest item first.)
"""
return self._loaded_strings[::-1]
def append_string(self, string: str) -> None:
"Add string to the history."
self._loaded_strings.insert(0, string)
self.store_string(string)
#
# Implementation for specific backends.
#
@abstractmethod
def load_history_strings(self) -> Iterable[str]:
"""
This should be a generator that yields `str` instances.
It should yield the most recent items first, because they are the most
important. (The history can already be used, even when it's only
partially loaded.)
"""
while False:
yield
@abstractmethod
def store_string(self, string: str) -> None:
"""
Store the string in persistent storage.
"""
| History |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_length/invalid_length_returned.py | {
"start": 1153,
"end": 1246
} | class ____:
""" Uninferable return value """
__len__ = lambda self: Missing
| AmbigousLen |
python | huggingface__transformers | src/transformers/models/ernie/modeling_ernie.py | {
"start": 54268,
"end": 59291
} | class ____(ErniePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.ernie = ErnieModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
task_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
task_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Task type embedding is a special embedding to represent the characteristic of different tasks, such as
word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
config.task_type_vocab_size-1]
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.ernie(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
task_type_ids=task_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| ErnieForMultipleChoice |
python | mlflow__mlflow | mlflow/utils/async_logging/async_logging_queue.py | {
"start": 861,
"end": 1312
} | class ____(enum.Enum):
"""Status of the async queue"""
# The queue is listening to new data and logging enqueued data to MLflow.
ACTIVE = 1
# The queue is not listening to new data, but still logging enqueued data to MLflow.
TEAR_DOWN = 2
# The queue is neither listening to new data or logging enqueued data to MLflow.
IDLE = 3
_MAX_ITEMS_PER_BATCH = 1000
_MAX_PARAMS_PER_BATCH = 100
_MAX_TAGS_PER_BATCH = 100
| QueueStatus |
python | falconry__falcon | falcon/errors.py | {
"start": 5128,
"end": 6507
} | class ____(WebSocketDisconnected):
"""The server encountered an unexpected error."""
pass
HTTPErrorKeywordArguments = Union[str, int, None]
# TODO(vytas): Passing **kwargs down to HTTPError results in arg-type error in
# Mypy, because it is impossible to verify that, e.g., an int value was not
# erroneously passed to href instead of code, etc.
#
# It is hard to properly type this on older Pythons, so we just sprinkle type
# ignores on the super().__init__(...) calls below. In any case, this call is
# internal to the framework.
#
# On Python 3.11+, I have verified it is possible to properly type this
# pattern using typing.Unpack:
#
# class HTTPErrorKeywordArguments(TypedDict):
# href: Optional[str]
# href_text: Optional[str]
# code: Optional[int]
#
# class HTTPErrorSubclass(HTTPError):
# def __init__(
# self,
# *,
# title: Optional[str] = None,
# description: Optional[str] = None,
# headers: Optional[HeaderList] = None,
# **kwargs: Unpack[HTTPErrorKeywordArguments],
# ) -> None:
# super().__init__(
# status.HTTP_400,
# title=title,
# description=description,
# headers=headers,
# **kwargs,
# )
RetryAfter = Union[int, datetime, None]
| WebSocketServerError |
python | getsentry__sentry | src/sentry/users/api/endpoints/user_details.py | {
"start": 7117,
"end": 7381
} | class ____(SuperuserUserSerializer):
is_staff = serializers.BooleanField()
is_superuser = serializers.BooleanField()
class Meta:
model = User
fields = ("name", "username", "is_active", "is_staff", "is_superuser")
| PrivilegedUserSerializer |
python | django__django | tests/test_runner/test_discover_runner.py | {
"start": 1404,
"end": 3990
} | class ____(SimpleTestCase):
def get_parser(self):
parser = ArgumentParser()
DiscoverRunner.add_arguments(parser)
return parser
def test_parallel_default(self, *mocked_objects):
result = self.get_parser().parse_args([])
self.assertEqual(result.parallel, 0)
def test_parallel_flag(self, *mocked_objects):
result = self.get_parser().parse_args(["--parallel"])
self.assertEqual(result.parallel, "auto")
def test_parallel_auto(self, *mocked_objects):
result = self.get_parser().parse_args(["--parallel", "auto"])
self.assertEqual(result.parallel, "auto")
def test_parallel_count(self, *mocked_objects):
result = self.get_parser().parse_args(["--parallel", "17"])
self.assertEqual(result.parallel, 17)
def test_parallel_invalid(self, *mocked_objects):
with self.assertRaises(SystemExit), captured_stderr() as stderr:
self.get_parser().parse_args(["--parallel", "unaccepted"])
msg = "argument --parallel: 'unaccepted' is not an integer or the string 'auto'"
self.assertIn(msg, stderr.getvalue())
def test_get_max_test_processes(self, *mocked_objects):
self.assertEqual(get_max_test_processes(), 12)
@mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"})
def test_get_max_test_processes_env_var(self, *mocked_objects):
self.assertEqual(get_max_test_processes(), 7)
def test_get_max_test_processes_spawn(
self,
mocked_get_start_method,
mocked_cpu_count,
):
mocked_get_start_method.return_value = "spawn"
self.assertEqual(get_max_test_processes(), 12)
with mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"}):
self.assertEqual(get_max_test_processes(), 7)
def test_get_max_test_processes_forkserver(
self,
mocked_get_start_method,
mocked_cpu_count,
):
mocked_get_start_method.return_value = "forkserver"
self.assertEqual(get_max_test_processes(), 12)
with mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"}):
self.assertEqual(get_max_test_processes(), 7)
def test_get_max_test_processes_other(
self,
mocked_get_start_method,
mocked_cpu_count,
):
mocked_get_start_method.return_value = "other"
self.assertEqual(get_max_test_processes(), 1)
with mock.patch.dict(os.environ, {"DJANGO_TEST_PROCESSES": "7"}):
self.assertEqual(get_max_test_processes(), 1)
| DiscoverRunnerParallelArgumentTests |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_delayed_workflow.py | {
"start": 25690,
"end": 33478
} | class ____(TestDelayedWorkflowBase):
def setUp(self) -> None:
super().setUp()
assert self.workflow1.when_condition_group
assert self.workflow2.when_condition_group
self.data_condition_groups: list[DataConditionGroup] = (
[
self.workflow1.when_condition_group,
self.workflow2.when_condition_group,
]
+ self.workflow1_if_dcgs
+ self.workflow2_if_dcgs
)
self.workflows_to_envs = {self.workflow1.id: self.environment.id, self.workflow2.id: None}
self.condition_group_results: dict[UniqueConditionQuery, QueryResult] = {
UniqueConditionQuery(
handler=EventFrequencyQueryHandler,
interval="1h",
environment_id=self.environment.id,
): {self.group1.id: 101, self.group2.id: 101},
UniqueConditionQuery(
handler=EventFrequencyQueryHandler,
interval="1h",
comparison_interval="1w",
environment_id=self.environment.id,
): {self.group1.id: 50, self.group2.id: 50},
UniqueConditionQuery(
handler=EventFrequencyQueryHandler, interval="1h", environment_id=None
): {self.group1.id: 101, self.group2.id: 101},
UniqueConditionQuery(
handler=EventFrequencyQueryHandler,
interval="1h",
comparison_interval="1w",
environment_id=None,
): {self.group1.id: 202, self.group2.id: 202},
UniqueConditionQuery(
handler=EventUniqueUserFrequencyQueryHandler,
interval="1h",
environment_id=self.environment.id,
): {self.group1.id: 101, self.group2.id: 101},
UniqueConditionQuery(
handler=EventUniqueUserFrequencyQueryHandler, interval="1h", environment_id=None
): {self.group1.id: 50, self.group2.id: 50},
}
# add slow condition to workflow1 slow condition IF dcg (ALL), passes
self.create_data_condition(
condition_group=self.workflow1_if_dcgs[0],
type=Condition.EVENT_UNIQUE_USER_FREQUENCY_COUNT,
comparison={"interval": "1h", "value": 100},
condition_result=True,
)
# add slow condition to workflow2 WHEN dcg (ANY), passes
self.create_data_condition(
condition_group=self.workflow2.when_condition_group,
type=Condition.EVENT_UNIQUE_USER_FREQUENCY_COUNT,
comparison={"interval": "1h", "value": 20},
condition_result=True,
)
self.event_data = EventRedisData(
events={
EventKey.from_redis_key(
f"{self.workflow1.id}:{self.group1.id}:{self.workflow1.when_condition_group_id}:{self.workflow1_if_dcgs[0].id}:{self.workflow1_if_dcgs[1].id}"
): EventInstance(event_id="test-event-1"),
EventKey.from_redis_key(
f"{self.workflow2.id}:{self.group2.id}:{self.workflow2.when_condition_group_id}:{self.workflow2_if_dcgs[0].id}:{self.workflow2_if_dcgs[1].id}"
): EventInstance(event_id="test-event-2"),
}
)
self.dcg_to_slow_conditions = get_slow_conditions_for_groups(list(self.event_data.dcg_ids))
def test_simple(self) -> None:
result = get_groups_to_fire(
self.data_condition_groups,
self.workflows_to_envs,
self.event_data,
self.condition_group_results,
self.dcg_to_slow_conditions,
)
# NOTE: no WHEN DCGs. We only collect IF DCGs here to fire their actions in the fire_actions_for_groups function
assert result == {
self.group1.id: set(self.workflow1_if_dcgs),
self.group2.id: {
self.workflow2_if_dcgs[1]
}, # WHEN DCG passed so we have the passing if dcg here. IF DCG with slow condition did not pass
}
def test_missing_query_result_excludes_group(self) -> None:
existing_query = UniqueConditionQuery(
handler=EventUniqueUserFrequencyQueryHandler, interval="1h", environment_id=None
)
existing_result = self.condition_group_results[existing_query]
assert self.group2.id in existing_result
self.condition_group_results[existing_query] = {
self.group1.id: existing_result[self.group1.id]
}
result = get_groups_to_fire(
self.data_condition_groups,
self.workflows_to_envs,
self.event_data,
self.condition_group_results,
self.dcg_to_slow_conditions,
)
# group2 should be excluded because it's missing from the query result
assert result == {
self.group1.id: set(self.workflow1_if_dcgs),
}
def test_dcg_all_fails(self) -> None:
self.condition_group_results.update(
{
UniqueConditionQuery(
handler=EventUniqueUserFrequencyQueryHandler,
interval="1h",
environment_id=self.environment.id,
): {self.group1.id: 99}
}
)
result = get_groups_to_fire(
self.data_condition_groups,
self.workflows_to_envs,
self.event_data,
self.condition_group_results,
self.dcg_to_slow_conditions,
)
assert result == {
self.group1.id: {self.workflow1_if_dcgs[1]},
self.group2.id: {self.workflow2_if_dcgs[1]},
}
def test_dcg_any_fails(self) -> None:
self.condition_group_results.update(
{
UniqueConditionQuery(
handler=EventUniqueUserFrequencyQueryHandler, interval="1h", environment_id=None
): {self.group2.id: 10}
}
)
result = get_groups_to_fire(
self.data_condition_groups,
self.workflows_to_envs,
self.event_data,
self.condition_group_results,
self.dcg_to_slow_conditions,
)
assert result == {
self.group1.id: set(self.workflow1_if_dcgs),
}
def test_ignored_deleted_dcgs(self) -> None:
self.workflow1_if_dcgs[0].delete()
self.workflow2_if_dcgs[1].delete()
assert self.workflow1.when_condition_group
assert self.workflow2.when_condition_group
self.data_condition_groups = (
[
self.workflow1.when_condition_group,
self.workflow2.when_condition_group,
]
+ [self.workflow1_if_dcgs[1]]
+ [self.workflow2_if_dcgs[0]]
)
result = get_groups_to_fire(
self.data_condition_groups,
self.workflows_to_envs,
self.event_data,
self.condition_group_results,
self.dcg_to_slow_conditions,
)
# NOTE: same result as test_simple but without the deleted DCGs
assert result == {
self.group1.id: {self.workflow1_if_dcgs[1]},
}
def test_ignored_deleted_workflow(self) -> None:
self.workflow1.delete()
self.workflows_to_envs = {self.workflow2.id: None}
result = get_groups_to_fire(
self.data_condition_groups,
self.workflows_to_envs,
self.event_data,
self.condition_group_results,
self.dcg_to_slow_conditions,
)
# NOTE: same result as test_simple but without the deleted workflow
assert result == {self.group2.id: {self.workflow2_if_dcgs[1]}}
| TestGetGroupsToFire |
python | doocs__leetcode | solution/3500-3599/3590.Kth Smallest Path XOR Sum/Solution.py | {
"start": 0,
"end": 1528
} | class ____:
def __init__(self):
self.count = 0
self.children = [None, None]
def add(self, num: int, delta: int, bit=17):
self.count += delta
if bit < 0:
return
b = (num >> bit) & 1
if not self.children[b]:
self.children[b] = BinarySumTrie()
self.children[b].add(num, delta, bit - 1)
def collect(self, prefix=0, bit=17, output=None):
if output is None:
output = []
if self.count == 0:
return output
if bit < 0:
output.append(prefix)
return output
if self.children[0]:
self.children[0].collect(prefix, bit - 1, output)
if self.children[1]:
self.children[1].collect(prefix | (1 << bit), bit - 1, output)
return output
def exists(self, num: int, bit=17):
if self.count == 0:
return False
if bit < 0:
return True
b = (num >> bit) & 1
return self.children[b].exists(num, bit - 1) if self.children[b] else False
def find_kth(self, k: int, bit=17):
if k > self.count:
return -1
if bit < 0:
return 0
left_count = self.children[0].count if self.children[0] else 0
if k <= left_count:
return self.children[0].find_kth(k, bit - 1)
elif self.children[1]:
return (1 << bit) + self.children[1].find_kth(k - left_count, bit - 1)
else:
return -1
| BinarySumTrie |
python | getsentry__responses | responses/tests/test_responses.py | {
"start": 50096,
"end": 70111
} | class ____:
def test_passthrough_flag(self, httpserver):
httpserver.expect_request("/").respond_with_data(
"OK", content_type="text/plain"
)
url = httpserver.url_for("/")
response = Response(responses.GET, url, body="MOCK")
@responses.activate
def run_passthrough():
responses.add(response)
resp = requests.get(url)
assert_response(resp, "OK")
@responses.activate
def run_mocked():
responses.add(response)
resp = requests.get(url)
assert_response(resp, "MOCK")
run_mocked()
assert_reset()
response.passthrough = True
run_passthrough()
assert_reset()
def test_passthrough_kwarg(self, httpserver):
httpserver.expect_request("/").respond_with_data(
"OK", content_type="text/plain"
)
url = httpserver.url_for("/")
def configure_response(passthrough):
responses.get(url, body="MOCK", passthrough=passthrough)
@responses.activate
def run_passthrough():
configure_response(passthrough=True)
resp = requests.get(url)
assert_response(resp, "OK")
@responses.activate
def run_mocked():
configure_response(passthrough=False)
resp = requests.get(url)
assert_response(resp, "MOCK")
run_mocked()
assert_reset()
run_passthrough()
assert_reset()
def test_passthrough_response(self, httpserver):
httpserver.expect_request("/").respond_with_data(
"OK", content_type="text/plain"
)
url = httpserver.url_for("/")
@responses.activate
def run():
responses.add(PassthroughResponse(responses.GET, url))
responses.add(responses.GET, f"{url}/one", body="one")
responses.add(responses.GET, "http://example.com/two", body="two")
resp = requests.get("http://example.com/two")
assert_response(resp, "two")
resp = requests.get(f"{url}/one")
assert_response(resp, "one")
resp = requests.get(url)
assert_response(resp, "OK")
assert len(responses.calls) == 3
responses.assert_call_count(url, 1)
run()
assert_reset()
def test_passthrough_response_stream(self, httpserver):
httpserver.expect_request("/").respond_with_data(
"OK", content_type="text/plain"
)
@responses.activate
def run():
url = httpserver.url_for("/")
responses.add(PassthroughResponse(responses.GET, url))
content_1 = requests.get(url).content
with requests.get(url, stream=True) as resp:
content_2 = resp.raw.read()
assert content_1 == content_2
run()
assert_reset()
def test_passthru_prefixes(self, httpserver):
httpserver.expect_request("/").respond_with_data(
"OK", content_type="text/plain"
)
url = httpserver.url_for("/")
@responses.activate
def run_constructor_argument():
with responses.RequestsMock(passthru_prefixes=(url,)):
resp = requests.get(url)
assert_response(resp, "OK")
@responses.activate
def run_property_setter():
with responses.RequestsMock() as m:
m.passthru_prefixes = tuple([url])
resp = requests.get(url)
assert_response(resp, "OK")
run_constructor_argument()
assert_reset()
run_property_setter()
assert_reset()
def test_passthru(self, httpserver):
httpserver.expect_request("/").respond_with_data(
"OK", content_type="text/plain"
)
url = httpserver.url_for("/")
@responses.activate
def run():
responses.add_passthru(url)
responses.add(responses.GET, f"{url}/one", body="one")
responses.add(responses.GET, "http://example.com/two", body="two")
resp = requests.get("http://example.com/two")
assert_response(resp, "two")
resp = requests.get(f"{url}/one")
assert_response(resp, "one")
resp = requests.get(url)
assert_response(resp, "OK")
run()
assert_reset()
def test_passthru_regex(self, httpserver):
httpserver.expect_request(re.compile("^/\\w+")).respond_with_data(
"OK", content_type="text/plain"
)
url = httpserver.url_for("/")
@responses.activate
def run():
responses.add_passthru(re.compile(f"{url}/\\w+"))
responses.add(responses.GET, f"{url}/one", body="one")
responses.add(responses.GET, "http://example.com/two", body="two")
resp = requests.get("http://example.com/two")
assert_response(resp, "two")
resp = requests.get(f"{url}/one")
assert_response(resp, "one")
resp = requests.get(f"{url}/two")
assert_response(resp, "OK")
resp = requests.get(f"{url}/three")
assert_response(resp, "OK")
run()
assert_reset()
def test_passthru_does_not_persist_across_tests(self, httpserver):
"""
passthru should be erased on exit from context manager
see:
https://github.com/getsentry/responses/issues/322
"""
httpserver.expect_request("/").respond_with_data(
"mocked server", status=969, content_type="text/plain"
)
@responses.activate
def with_a_passthru():
assert not responses.mock.passthru_prefixes
responses.add_passthru(re.compile(".*"))
url = httpserver.url_for("/")
response = requests.get(url)
assert response.status_code == 969
assert response.text == "mocked server"
@responses.activate
def without_a_passthru():
assert not responses.mock.passthru_prefixes
with pytest.raises(requests.exceptions.ConnectionError):
requests.get("https://example.com")
with_a_passthru()
without_a_passthru()
def test_passthru_unicode(self):
@responses.activate
def run():
with responses.RequestsMock() as m:
url = "http://موقع.وزارة-الاتصالات.مصر/"
clean_url = "http://xn--4gbrim.xn----ymcbaaajlc6dj7bxne2c.xn--wgbh1c/"
m.add_passthru(url)
assert m.passthru_prefixes[0] == clean_url
run()
assert_reset()
def test_real_send_argument(self):
def run():
# the following mock will serve to catch the real send request from another mock and
# will "donate" `unbound_on_send` method
mock_to_catch_real_send = responses.RequestsMock(
assert_all_requests_are_fired=True
)
mock_to_catch_real_send.post(
"http://send-this-request-through.com", status=500
)
with responses.RequestsMock(
assert_all_requests_are_fired=True,
real_adapter_send=mock_to_catch_real_send.unbound_on_send(),
) as r_mock:
r_mock.add_passthru("http://send-this-request-through.com")
r_mock.add(responses.POST, "https://example.org", status=200)
response = requests.post("https://example.org")
assert response.status_code == 200
response = requests.post("http://send-this-request-through.com")
assert response.status_code == 500
run()
assert_reset()
def test_method_named_param():
@responses.activate
def run():
responses.add(method=responses.GET, url="http://example.com", body="OK")
resp = requests.get("http://example.com")
assert_response(resp, "OK")
run()
assert_reset()
def test_custom_target(monkeypatch):
requests_mock = responses.RequestsMock(target="something.else")
std_mock_mock = responses.std_mock.MagicMock()
patch_mock = std_mock_mock.patch
monkeypatch.setattr(responses, "std_mock", std_mock_mock)
requests_mock.start()
assert len(patch_mock.call_args_list) == 1
assert patch_mock.call_args[1]["target"] == "something.else"
@pytest.mark.parametrize(
"url",
(
"http://example.com",
"http://example.com/some/path",
"http://example.com/other/path/",
),
)
def test_request_param(url): # type: ignore[misc]
@responses.activate
def run():
params = {"hello": "world", "example": "params"}
responses.add(
method=responses.GET,
url=f"{url}?hello=world",
body="test",
match_querystring=False,
)
resp = requests.get(url, params=params)
assert_response(resp, "test")
assert_params(resp, params)
resp = requests.get(url)
assert_response(resp, "test")
assert_params(resp, {})
run()
assert_reset()
def test_request_param_with_multiple_values_for_the_same_key():
@responses.activate
def run():
url = "http://example.com"
params = {"key1": ["one", "two"], "key2": "three"}
responses.add(
method=responses.GET,
url=url,
body="test",
)
resp = requests.get(url, params=params)
assert_response(resp, "test")
assert_params(resp, params)
run()
assert_reset()
@pytest.mark.parametrize(
"url", ("http://example.com", "http://example.com?hello=world")
)
def test_assert_call_count(url): # type: ignore[misc]
@responses.activate
def run():
responses.add(responses.GET, url)
responses.add(responses.GET, "http://example1.com")
assert responses.assert_call_count(url, 0) is True
with pytest.raises(AssertionError) as excinfo:
responses.assert_call_count(url, 2)
assert "Expected URL '{}' to be called 2 times. Called 0 times.".format(
url
) in str(excinfo.value)
requests.get(url)
assert responses.assert_call_count(url, 1) is True
requests.get("http://example1.com")
assert responses.assert_call_count(url, 1) is True
requests.get(url)
with pytest.raises(AssertionError) as excinfo:
responses.assert_call_count(url, 3)
assert "Expected URL '{}' to be called 3 times. Called 2 times.".format(
url
) in str(excinfo.value)
run()
assert_reset()
def test_call_count_with_matcher():
@responses.activate
def run():
rsp = responses.add(
responses.GET,
"http://www.example.com",
match=(matchers.query_param_matcher({}),),
)
rsp2 = responses.add(
responses.GET,
"http://www.example.com",
match=(matchers.query_param_matcher({"hello": "world"}),),
status=777,
)
requests.get("http://www.example.com")
resp1 = requests.get("http://www.example.com")
requests.get("http://www.example.com?hello=world")
resp2 = requests.get("http://www.example.com?hello=world")
assert resp1.status_code == 200
assert resp2.status_code == 777
assert rsp.call_count == 2
assert rsp2.call_count == 2
run()
assert_reset()
def test_call_count_without_matcher():
@responses.activate
def run():
rsp = responses.add(responses.GET, "http://www.example.com")
requests.get("http://www.example.com")
requests.get("http://www.example.com")
requests.get("http://www.example.com?hello=world")
requests.get("http://www.example.com?hello=world")
assert rsp.call_count == 4
run()
assert_reset()
def test_response_calls_indexing_and_slicing():
@responses.activate
def run():
responses.add(responses.GET, "http://www.example.com")
responses.add(responses.GET, "http://www.example.com/1")
responses.add(responses.GET, "http://www.example.com/2")
requests.get("http://www.example.com")
requests.get("http://www.example.com/1")
requests.get("http://www.example.com/2")
requests.get("http://www.example.com/1")
requests.get("http://www.example.com")
# Use of a type hints here ensures mypy knows the difference between index and slice.
individual_call: Call = responses.calls[0]
call_slice: List[Call] = responses.calls[1:-1]
assert individual_call.request.url == "http://www.example.com/"
assert call_slice == [
responses.calls[1],
responses.calls[2],
responses.calls[3],
]
assert [c.request.url for c in call_slice] == [
"http://www.example.com/1",
"http://www.example.com/2",
"http://www.example.com/1",
]
run()
assert_reset()
def test_response_calls_and_registry_calls_are_equal():
@responses.activate
def run():
rsp1 = responses.add(responses.GET, "http://www.example.com")
rsp2 = responses.add(responses.GET, "http://www.example.com/1")
rsp3 = responses.add(
responses.GET, "http://www.example.com/2"
) # won't be requested
requests.get("http://www.example.com")
requests.get("http://www.example.com/1")
requests.get("http://www.example.com")
assert len(responses.calls) == len(rsp1.calls) + len(rsp2.calls) + len(
rsp3.calls
)
assert rsp1.call_count == 2
assert len(rsp1.calls) == 2
assert rsp1.calls[0] is responses.calls[0]
assert rsp1.calls[1] is responses.calls[2]
assert rsp2.call_count == 1
assert len(rsp2.calls) == 1
assert rsp2.calls[0] is responses.calls[1]
assert rsp3.call_count == 0
assert len(rsp3.calls) == 0
run()
assert_reset()
def test_fail_request_error():
"""
Validate that exception is raised if request URL/Method/kwargs don't match
:return:
"""
def run():
with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
rsps.add("POST", "http://example1.com")
rsps.add("GET", "http://example.com")
rsps.add_passthru("http://other.example.com")
with pytest.raises(ConnectionError) as excinfo:
requests.post("http://example.com", data={"id": "bad"})
msg = str(excinfo.value)
assert "- POST http://example1.com/ URL does not match" in msg
assert "- GET http://example.com/ Method does not match" in msg
assert "Passthru prefixes:\n- http://other.example.com" in msg
run()
assert_reset()
@pytest.mark.parametrize(
"response_params, expected_representation",
[
(
{"method": responses.GET, "url": "http://example.com/"},
(
"<Response(url='http://example.com/' status=200 "
"content_type='text/plain' headers='null')>"
),
),
(
{
"method": responses.POST,
"url": "http://another-domain.com/",
"content_type": "application/json",
"status": 404,
},
(
"<Response(url='http://another-domain.com/' status=404 "
"content_type='application/json' headers='null')>"
),
),
(
{
"method": responses.PUT,
"url": "http://abcd.com/",
"content_type": "text/html",
"status": 500,
"headers": {"X-Test": "foo"},
"body": {"it_wont_be": "considered"},
},
(
"<Response(url='http://abcd.com/' status=500 "
"content_type='text/html' headers='{\"X-Test\": \"foo\"}')>"
),
),
],
)
def test_response_representations(response_params, expected_representation): # type: ignore[misc]
response = Response(**response_params)
assert str(response) == expected_representation
assert repr(response) == expected_representation
def test_mocked_responses_list_registered():
@responses.activate
def run():
first_response = Response(
responses.GET,
"http://example.com/",
body="",
headers={"X-Test": "foo"},
status=404,
)
second_response = Response(
responses.GET, "http://example.com/", body="", headers={"X-Test": "foo"}
)
third_response = Response(
responses.POST,
"http://anotherdomain.com/",
)
responses.add(first_response)
responses.add(second_response)
responses.add(third_response)
mocks_list = responses.registered()
assert mocks_list == responses.mock.registered()
assert mocks_list == [first_response, second_response, third_response]
run()
assert_reset()
@pytest.mark.parametrize(
    "url,other_url",
    [
        ("http://service-A/foo?q=fizz", "http://service-a/foo?q=fizz"),
        ("http://service-a/foo", "http://service-A/foo"),
        ("http://someHost-AwAy/", "http://somehost-away/"),
        ("http://fizzbuzz/foo", "http://fizzbuzz/foo"),
    ],
)
def test_rfc_compliance(url, other_url):  # type: ignore[misc]
    """Host matching is case-insensitive: a URL registered with one casing
    matches a request using another (RFC 3986 treats the host as such)."""

    @responses.activate
    def run():
        responses.add(method=responses.GET, url=url)
        assert_response(requests.request("GET", other_url), "")

    run()
    assert_reset()
def test_requests_between_add():
    """A response registered after some requests have been served takes effect
    for all subsequent requests to the same URL."""

    @responses.activate
    def run():
        url = "https://example.com/"

        responses.add(responses.GET, url, json={"response": "old"})
        for _ in range(3):
            assert requests.get(url).content == b'{"response": "old"}'

        responses.add(responses.GET, url, json={"response": "new"})
        for _ in range(3):
            assert requests.get(url).content == b'{"response": "new"}'

    run()
    assert_reset()
def test_responses_reuse():
    """The same Response object may be registered multiple times; each
    registration is consumed independently when matching requests."""

    @responses.activate
    def run():
        url = "https://someapi.com/"
        fail_response = responses.Response(
            method="GET", url=url, body="fail", status=500
        )

        responses.add(responses.GET, url, "success", status=200)
        # One object, three registrations.
        for _ in range(3):
            responses.add(fail_response)
        responses.add(responses.GET, url, "success", status=200)
        responses.add(responses.GET, url, "", status=302)

        # First hit consumes the leading success registration ...
        assert requests.get(url).content == b"success"
        # ... then the three reused failure registrations fire in turn.
        for _ in range(3):
            assert requests.get(url).content == b"fail"

    run()
    assert_reset()
@pytest.mark.asyncio
async def test_async_calls():  # type: ignore[misc]
    """@responses.activate also works when decorating an async coroutine."""

    @responses.activate
    async def run():
        api_url = "http://twitter.com/api/1/foobar"
        responses.add(
            responses.GET,
            api_url,
            json={"error": "not found"},
            status=404,
        )

        resp = requests.get(api_url)
        assert resp.json() == {"error": "not found"}
        assert responses.calls[0].request.url == api_url

    await run()
    assert_reset()
| TestPassthru |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance3.py | {
"start": 250,
"end": 283
} | class ____:
c_val: int
@final
| C |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 161598,
"end": 162097
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of ClosePullRequest"""
__schema__ = github_schema
__field_names__ = ("pull_request_id", "client_mutation_id")
pull_request_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="pullRequestId")
"""ID of the pull request to be closed."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| ClosePullRequestInput |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/control_flow/control_flow_util_v2_test.py | {
"start": 1139,
"end": 2172
} | class ____(test.TestCase):
def setUp(self):
self._enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
def tearDown(self):
control_flow_util.ENABLE_CONTROL_FLOW_V2 = self._enable_control_flow_v2_old
def _create_control_flow(self, expect_in_defun):
"""Helper method for testInDefun."""
def body(i):
def branch():
self.assertEqual(control_flow_util_v2.in_defun(), expect_in_defun)
return i + 1
return cond.cond(constant_op.constant(True),
branch, lambda: 0)
return while_loop.while_loop(lambda i: i < 4, body,
[constant_op.constant(0)])
@test_util.run_in_graph_and_eager_modes
def testInDefun(self):
self._create_control_flow(False)
@def_function.function
def defun():
self._create_control_flow(True)
defun()
self.assertFalse(control_flow_util_v2.in_defun())
if __name__ == "__main__":
test.main()
| ControlFlowUtilV2Test |
python | pytorch__pytorch | torch/fx/_symbolic_trace.py | {
"start": 7436,
"end": 41822
} | class ____(TracerBase):
# Reference: https://github.com/pytorch/pytorch/issues/54354
# The first line of this docstring overrides the one Sphinx generates for the
# documentation. We need it so that Sphinx doesn't leak `math`s path from the
# build environment (e.g. `<module 'math' from '/leaked/path').
"""Tracer(autowrap_modules=(math,), autowrap_functions=())
``Tracer`` is the class that implements the symbolic tracing functionality
of ``torch.fx.symbolic_trace``. A call to ``symbolic_trace(m)`` is equivalent
to ``Tracer().trace(m)``.
Tracer can be subclassed to override various behaviors of the tracing
process. The different behaviors that can be overridden are described
in the docstrings of the methods on this class.
"""
# Not checking BC on this API because the default value for `autowrap_modules`
# includes the local filepath to the `math` module, which would jitter
# across machines.
@compatibility(is_backward_compatible=True)
def __init__(
self,
autowrap_modules: tuple[ModuleType] = (math,),
autowrap_functions: tuple[Callable, ...] = (),
param_shapes_constant: bool = False,
) -> None:
# This method's signature is overridden by the first line of this class'
# docstring. If this method's signature is modified, the signature that
# overrides it also should be modified accordingly.
"""
Construct a Tracer object.
Args:
autowrap_modules (Tuple[ModuleType]): defaults to `(math, )`,
Python modules whose functions should be wrapped automatically
without needing to use fx.wrap(). Backward-compatibility for
this parameter is guaranteed.
autowrap_functions (Tuple[Callable, ...]): defaults to `()`,
Python functions that should be wrapped automatically without
needing to use fx.wrap(). Backward compatibility for this
parameter is guaranteed.
param_shapes_constant (bool): When this flag is set, calls to shape,
size and a few other shape like attributes of a module's parameter
will be evaluated directly, rather than returning a new Proxy value
for an attribute access. Backward compatibility for this parameter
is guaranteed.
"""
super().__init__()
# Functions we will eagerly wrap when we see them while tracing
# this captures both `math.sqrt()` and `from math import sqrt` automatically
self._autowrap_function_ids: set[int] = {
id(value)
for name, value in chain.from_iterable(
m.__dict__.items() for m in autowrap_modules
)
if not name.startswith("_") and callable(value)
}
self._autowrap_function_ids.update({id(f) for f in autowrap_functions})
# Python modules to apply autowrap to at the start, in addition to
# modules we see while tracing
self._autowrap_search: list[ModuleType] = list(autowrap_modules)
self.param_shapes_constant = param_shapes_constant
self.submodule_paths: Optional[dict[torch.nn.Module, str]] = None
self.root_module_name: str = ""
# Maps the containing module's name to the operator name
self.scope = Scope("", None)
# Records the module call stack
self.module_stack = collections.OrderedDict()
self.num_calls: dict[str, int] = {}
# Mapping of node name to module scope
self.node_name_to_scope: dict[str, tuple[str, type]] = {}
_qualname_counter: dict[str, int] = collections.defaultdict(int)
@compatibility(is_backward_compatible=True)
def get_fresh_qualname(self, prefix: str) -> str:
"""
Gets a fresh name for a prefix and returns it. This function ensures
that it will not clash with an existing attribute on the graph.
"""
# The idea here is that if the module doesn't have this prefix at all we
# should reset the counter to start from the beginning
# It's a ... little bit hacky (doesn't cover all cases) but the precise
# naming of the prefixes isn't a correctness issue, just a niceness
# issue
qualname = f"{prefix}0"
if not hasattr(self.root, qualname):
self._qualname_counter[prefix] = 0
return qualname
i = self._qualname_counter[prefix]
while True:
qualname = f"{prefix}{i}"
i += 1
if not hasattr(self.root, qualname):
break
self._qualname_counter[prefix] = i
return qualname
@compatibility(is_backward_compatible=True)
def create_arg(self, a: Any) -> "Argument":
"""
A method to specify the behavior of tracing when preparing values to
be used as arguments to nodes in the ``Graph``.
By default, the behavior includes:
#. Iterate through collection types (e.g. tuple, list, dict) and recursively
call ``create_args`` on the elements.
#. Given a Proxy object, return a reference to the underlying IR ``Node``
#. Given a non-Proxy Tensor object, emit IR for various cases:
* For a Parameter, emit a ``get_attr`` node referring to that Parameter
* For a non-Parameter Tensor, store the Tensor away in a special
attribute referring to that attribute.
This method can be overridden to support more types.
Args:
a (Any): The value to be emitted as an ``Argument`` in the ``Graph``.
Returns:
The value ``a`` converted into the appropriate ``Argument``
"""
# The base tracer is used to construct Graphs when there is no associated
# module hierarchy, so it can never create parameter references.
# The default tracer adds the ability to refer to parameters when
# tracing modules.
if isinstance(a, torch.nn.Parameter):
for n, p in self.root.named_parameters():
if a is p:
return self.create_node("get_attr", n, (), {})
raise NameError("parameter is not a member of this module")
elif isinstance(a, torch.Tensor):
for n_, p_ in self.root.named_buffers():
if a is p_:
return self.create_node("get_attr", n_, (), {})
elif isinstance(a, torch.nn.Module):
for n_, p_ in self.root.named_modules():
if a is p_:
return self.create_node("get_attr", n_, (), {})
# For NamedTuple instances that appear literally as args, we emit
# a node to construct the NamedTuple and use that Node as the argument.
if isinstance(a, tuple) and hasattr(a, "_fields"):
args = tuple(self.create_arg(elem) for elem in a)
return self.create_node("call_function", a.__class__, args, {})
# Tensors do not have a reliable string repr() from which they can be
# constructed (and we probably don't want to rely on that, either), so
# for any constant Tensor values we encounter, first search for if they
# are an attribute of some module in the module hierarchy. If so, emit
# a get_attr to retrieve that tensor. Otherwise, we'll store away the
# tensor value into a special attribute on the Module s.t. we can
# retrieve it with a get_attr.
if isinstance(a, _constant_attribute_types) or is_opaque_type(type(a)):
qualname: Optional[str] = self.tensor_attrs.get(
a
) # pyrefly: ignore[no-matching-overload]
# Tensor was not found in the Module hierarchy, stow it away in a
# special attribute and set the qualname to refer to that
if not qualname:
if isinstance(a, torch.Tensor):
base_name = "_tensor_constant"
elif isinstance(a, (FakeScriptObject, ScriptObject)):
base_name = "_torchbind_obj"
elif isinstance(a, pytree.TreeSpec):
base_name = "_tree_spec_constant"
elif is_opaque_type(type(a)):
base_name = "_opaque_obj"
else:
raise RuntimeError(
f"cannot create constant arg for {a} of type {type(a)}."
)
qualname = self.get_fresh_qualname(base_name)
assert isinstance(qualname, str)
self.tensor_attrs[a] = ( # pyrefly: ignore[unsupported-operation]
qualname
)
setattr(self.root, qualname, a)
return self.create_node("get_attr", qualname, (), {})
if type(a) in _proxyable_classes:
# This is an instance of a proxyable class for which we did not
# witness its construction. Intern this as a constant attribute
# TODO: binary search
qualname = self.get_fresh_qualname(f"_{a.__class__.__name__}_constant_")
assert isinstance(qualname, str)
setattr(self.root, qualname, a)
return self.create_node("get_attr", qualname, (), {})
return super().create_arg(a)
@compatibility(is_backward_compatible=True)
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
"""
A method to specify whether a given ``nn.Module`` is a "leaf" module.
Leaf modules are the atomic units that appear in
the IR, referenced by ``call_module`` calls. By default,
Modules in the PyTorch standard library namespace (torch.nn)
are leaf modules. All other modules are traced through and
their constituent ops are recorded, unless specified otherwise
via this parameter.
Args:
m (Module): The module being queried about
module_qualified_name (str): The path to root of this module. For example,
if you have a module hierarchy where submodule ``foo`` contains
submodule ``bar``, which contains submodule ``baz``, that module will
appear with the qualified name ``foo.bar.baz`` here.
"""
return (
m.__module__.startswith("torch.nn")
or m.__module__.startswith("torch.ao.nn")
) and not isinstance(m, torch.nn.Sequential)
@compatibility(is_backward_compatible=True)
def path_of_module(self, mod: torch.nn.Module) -> str:
"""
Helper method to find the qualified name of ``mod`` in the Module hierarchy
of ``root``. For example, if ``root`` has a submodule named ``foo``, which has
a submodule named ``bar``, passing ``bar`` into this function will return
the string "foo.bar".
Args:
mod (str): The ``Module`` to retrieve the qualified name for.
"""
# Prefer the O(1) algorithm
if self.submodule_paths:
path = self.submodule_paths.get(mod)
if path is None:
raise NameError("module is not installed as a submodule")
assert isinstance(path, str)
return path
# O(N^2) fallback in the case that we didn't store the submodule
# paths.
else:
for n, p in self.root.named_modules():
if mod is p:
return n
raise NameError("module is not installed as a submodule")
@compatibility(is_backward_compatible=True)
def call_module(
self,
m: torch.nn.Module,
forward: Callable[..., Any],
args: tuple[Any, ...],
kwargs: dict[str, Any],
) -> Any:
"""
Method that specifies the behavior of this ``Tracer`` when it encounters
a call to an ``nn.Module`` instance.
By default, the behavior is to check if the called module is a leaf module
via ``is_leaf_module``. If it is, emit a ``call_module`` node referring to
``m`` in the ``Graph``. Otherwise, call the ``Module`` normally, tracing through
the operations in its ``forward`` function.
This method can be overridden to--for example--create nested traced
GraphModules, or any other behavior you would want while tracing across
``Module`` boundaries.
Args:
m (Module): The module for which a call is being emitted
forward (Callable): The forward() method of the ``Module`` to be invoked
args (Tuple): args of the module callsite
kwargs (Dict): kwargs of the module callsite
Return:
The return value from the Module call. In the case that a ``call_module``
node was emitted, this is a ``Proxy`` value. Otherwise, it is whatever
value was returned from the ``Module`` invocation.
"""
module_qualified_name = self.path_of_module(m)
with ScopeContextManager(
self.scope, Scope(module_qualified_name, type(m))
) as _scope:
# module_stack is an ordered dict so writing then deleting the
# entry is equivalent to push/pop on a list
num_calls = self.num_calls.get(module_qualified_name, 0)
module_key = (
f"{_scope.module_path}@{num_calls}"
if num_calls > 0
else _scope.module_path
)
self.module_stack[module_key] = (module_qualified_name, _scope.module_type)
self.num_calls[module_qualified_name] = num_calls + 1
if not self.is_leaf_module(m, module_qualified_name):
ret_val = forward(*args, **kwargs)
else:
ret_val = self.create_proxy(
"call_module", module_qualified_name, args, kwargs
)
key, _ = self.module_stack.popitem(last=True)
assert key == module_key, f" Unexpected key {key}"
return ret_val
@compatibility(is_backward_compatible=False)
def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: dict[str, Any]):
"""
Method that specifies the behavior of this ``Tracer`` when we call getattr
on a call to an ``nn.Module`` instance.
By default, the behavior is to return a proxy value for the attribute. It
also stores the proxy value in the ``parameter_proxy_cache``, so that future
calls will reuse the proxy rather than creating a new one.
This method can be overridden to --for example-- not return proxies when
querying parameters.
Args:
attr (str): The name of the attribute being queried
attr_val (Any): The value of the attribute
parameter_proxy_cache (Dict[str, Any]): A cache of attr names to proxies
Return:
The return value from the getattr call.
"""
def maybe_get_proxy_for_attr(
attr_val, collection_to_search, parameter_proxy_cache
):
for n, p in collection_to_search:
if attr_val is p:
if n not in parameter_proxy_cache:
kwargs = {}
if (
"proxy_factory_fn"
in inspect.signature(self.create_proxy).parameters
):
kwargs["proxy_factory_fn"] = (
# pyrefly: ignore [unsupported-operation]
None
if not self.param_shapes_constant
else lambda node: ParameterProxy(
self, node, n, attr_val
)
)
val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type]
parameter_proxy_cache[n] = val_proxy
return parameter_proxy_cache[n]
return None
if isinstance(attr_val, torch.nn.Parameter):
maybe_parameter_proxy = maybe_get_proxy_for_attr(
attr_val, self.root.named_parameters(), parameter_proxy_cache
)
if maybe_parameter_proxy is not None:
return maybe_parameter_proxy
if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor):
maybe_buffer_proxy = maybe_get_proxy_for_attr(
attr_val, self.root.named_buffers(), parameter_proxy_cache
)
if maybe_buffer_proxy is not None:
return maybe_buffer_proxy
return attr_val
# This method will be refactored
@compatibility(is_backward_compatible=False)
def create_args_for_root(self, root_fn, is_module, concrete_args=None):
"""
Create ``placeholder`` nodes corresponding to the signature of the ``root``
Module. This method introspects root's signature and emits those
nodes accordingly, also supporting ``*args`` and ``**kwargs``.
"""
# In some cases, a function or method has been decorated with a wrapper
# defined via ``functools.wraps``. In this case, the outer code object
# will likely not contain the actual parameters we care about, so unwrap
# the function to get to the innermost callable.
fn_for_analysis = inspect.unwrap(root_fn)
co = fn_for_analysis.__code__
total_args = co.co_argcount + co.co_kwonlyargcount
orig_args = list(co.co_varnames)
names_iter = iter(co.co_varnames)
args: list[Any] = []
skip_arg_idx = 0
if is_module:
if total_args == 0:
raise RuntimeError(
"``self`` argument cannot be part of *args expansion!"
)
skip_arg_idx = 1
next(names_iter) # skip self
args.append(self.root)
sig = inspect.signature(fn_for_analysis)
# This covers the very specific case where we are passing in flat
# concrete_args as a tuple, but our traced fn takes (*args, **kwargs).
# In this case, just take the concrete_args and pass them through.
name_idx = 0
if (
isinstance(concrete_args, tuple)
and len(concrete_args) > 0
and (co.co_flags & HAS_VARSTUFF)
and total_args == 1
):
for concrete_arg in concrete_args:
out = self.create_proxy("placeholder", f"input_{name_idx}", (), {})
if isinstance(concrete_arg, PHBase):
if concrete_arg != PH:
# Transfer attrs in the case where you're using a placeholder other
# than the singleton PH (PH has no attributes to transfer).
# Proxies were created out of the placeholders.
# Transfer any metadata (put on the placeholders in the form of
# attributes set by the user) from the placeholder to the
# underlying nodes (the proxy is unwrapped by the user, but
# the metadata should hold).
_transfer_attrs(fr=concrete_arg, to=out.node)
args.append(out)
name_idx += 1
return root_fn, args
arg_names = [next(names_iter) for idx in range(skip_arg_idx, total_args)]
if isinstance(concrete_args, tuple):
if len(arg_names) != len(concrete_args):
raise RuntimeError(
f"Tracing expected {len(arg_names)} arguments but got {len(concrete_args)} concrete arguments"
)
concrete_args = dict(zip(arg_names, concrete_args))
def proxy_placeholder(name):
return self._proxy_placeholder(name, concrete_args, sig, fn_for_analysis)
args.extend(proxy_placeholder(names) for names in arg_names)
if co.co_kwonlyargcount > 0 or co.co_flags & HAS_VARSTUFF:
# TODO: type annotations for *args and **kwargs
if co.co_flags & inspect.CO_VARARGS:
args.append(proxy_placeholder("*" + next(names_iter)))
if co.co_flags & inspect.CO_VARKEYWORDS:
args.append(proxy_placeholder("**" + next(names_iter)))
root_fn = _patch_function(root_fn, len(args))
flat_args, in_spec = pytree.tree_flatten(tuple(args))
if not all(child.is_leaf() for child in in_spec.children()):
# In the case that we have pytree-flattened inputs in
# `concrete_args`, generate a flattening wrapper around the
# original root function and return that.
self.graph._codegen = _PyTreeCodeGen( # type: ignore[has-type]
_PyTreeInfo(orig_args[:total_args], in_spec, None)
)
def flatten_fn(*args):
tree_args = pytree.tree_unflatten(list(args), in_spec)
tree_out = root_fn(*tree_args)
out_args, out_spec = pytree.tree_flatten(tree_out)
assert isinstance(self.graph._codegen, _PyTreeCodeGen) # type: ignore[has-type]
self.graph._codegen.pytree_info = (
self.graph._codegen.pytree_info._replace(out_spec=out_spec)
)
return out_args
return flatten_fn, flat_args
return root_fn, args
@compatibility(is_backward_compatible=True)
def trace(
self,
root: Union[torch.nn.Module, Callable[..., Any]],
concrete_args: Optional[dict[str, Any]] = None,
) -> Graph:
"""
Trace ``root`` and return the corresponding FX ``Graph`` representation. ``root``
can either be an ``nn.Module`` instance or a Python callable.
Note that after this call, ``self.root`` may be different from the ``root`` passed
in here. For example, when a free function is passed to ``trace()``, we will
create an ``nn.Module`` instance to use as the root and add embedded constants
to.
Args:
root (Union[Module, Callable]): Either a ``Module`` or a function to be
traced through. Backwards-compatibility for this parameter is
guaranteed.
concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
not be treated as Proxies. This parameter is experimental and
its backwards-compatibility is *NOT* guaranteed.
Returns:
A ``Graph`` representing the semantics of the passed-in ``root``.
"""
global _is_fx_tracing_flag
old_is_fx_tracing_flag = _is_fx_tracing_flag
_is_fx_tracing_flag = True
try:
if isinstance(root, torch.nn.Module):
# do real recompilation for _LazyGraphModule before retracing since the trace
# method can not trace the _lazy_forward method. Got error:
# https://gist.github.com/shunting314/75549c2e82ae07ac1139c94a3583d259
# without this.
from torch.fx._lazy_graph_module import _LazyGraphModule
_LazyGraphModule.force_recompile(root)
self.root = root
assert hasattr(type(root), self.traced_func_name), (
f"traced_func_name={self.traced_func_name} doesn't exist in {type(root).__name__}"
)
fn = getattr(type(root), self.traced_func_name)
self.root_module_name = root._get_name()
self.submodule_paths = {mod: name for name, mod in root.named_modules()}
else:
self.root = torch.nn.Module()
fn = root
tracer_cls: Optional[type[Tracer]] = getattr(self, "__class__", None)
self.graph = Graph(tracer_cls=tracer_cls)
if hasattr(fn, "__code__"):
code = fn.__code__
self.graph._co_fields = {
"co_name": code.co_name,
"co_filename": code.co_filename,
"co_firstlineno": code.co_firstlineno,
}
# When we encounter a Tensor value that's not a parameter, we look if it
# is some other attribute on the model. Construct a dict mapping Tensor
# values to the qualified name here for efficiency. This is used downstream
# in create_arg
self.tensor_attrs: dict[
_ConstantAttributeType,
str,
] = {}
def collect_tensor_attrs(m: torch.nn.Module, prefix_atoms: list[str]):
for k, v in m.__dict__.items():
if isinstance(v, _constant_attribute_types):
self.tensor_attrs[v] = ".".join(prefix_atoms + [k])
for k, v in m.named_children():
collect_tensor_attrs(v, prefix_atoms + [k])
collect_tensor_attrs(self.root, [])
assert isinstance(fn, FunctionType)
fn_globals = fn.__globals__ # run before it gets patched
fn, args = self.create_args_for_root(
fn, isinstance(root, torch.nn.Module), concrete_args
)
parameter_proxy_cache: dict[
str, Proxy
] = {} # Reduce number of get_attr calls
# Method dispatch on parameters is not recorded unless it's directly used.
# Thus, we need to insert a proxy when __getattr__ requests a parameter.
@functools.wraps(_orig_module_getattr)
def module_getattr_wrapper(mod, attr):
attr_val = _orig_module_getattr(mod, attr)
return self.getattr(attr, attr_val, parameter_proxy_cache)
@functools.wraps(_orig_module_call)
def module_call_wrapper(mod, *args, **kwargs):
def forward(*args, **kwargs):
return _orig_module_call(mod, *args, **kwargs)
_autowrap_check(
patcher, # type: ignore[has-type]
getattr(getattr(mod, "forward", mod), "__globals__", {}),
self._autowrap_function_ids,
)
return self.call_module(mod, forward, args, kwargs)
with _new_patcher() as patcher:
# allow duplicate patches to support the case of nested calls
patcher.patch_method(
torch.nn.Module,
"__getattr__",
module_getattr_wrapper,
deduplicate=False,
)
patcher.patch_method(
torch.nn.Module,
"__call__",
module_call_wrapper,
deduplicate=False,
)
_patch_wrapped_functions(patcher)
_autowrap_check(patcher, fn_globals, self._autowrap_function_ids)
for module in self._autowrap_search:
_autowrap_check(
patcher, module.__dict__, self._autowrap_function_ids
)
ann = inspect.get_annotations(inspect.unwrap(fn))
self.create_node(
"output",
"output",
(self.create_arg(fn(*args)),),
{},
type_expr=ann.get("return", None),
)
self.submodule_paths = None
except RuntimeError as e:
if e.args and isinstance(e.args[0], str) and "data-dependent" in e.args[0]:
partial_fx_graph = self.graph.python_code(
root_module="self",
verbose=True,
).src
e.partial_fx_graph = partial_fx_graph # type: ignore[attr-defined]
raise
raise
finally:
_is_fx_tracing_flag = old_is_fx_tracing_flag
return self.graph
def __deepcopy__(self, memo):
# _autowrap_search contains modules, which cannot be deepcopied.
new_tracer = Tracer.__new__(Tracer)
for k, v in self.__dict__.items():
if k == "_autowrap_search":
new_obj = copy.copy(v)
else:
new_obj = copy.deepcopy(v, memo)
new_tracer.__dict__[k] = new_obj
return new_tracer
def _proxy_placeholder(self, name, concrete_args, sig, fn_for_analysis):
if concrete_args is not None and name in concrete_args:
cnt = 0
def replace_ph(x):
nonlocal cnt
cnt += 1
param = sig.parameters[name]
default: tuple[Any, ...] = (
() if param.default is inspect.Parameter.empty else (param.default,)
)
out = self.create_proxy(
"placeholder", f"{name}_{str(cnt)}", default, {}
)
if isinstance(x, PHBase):
if x != PH:
# Transfer attrs in the case where you're using a placeholder other
# than the singleton PH (PH has no attributes to transfer).
# Proxies were created out of the placeholders.
# Transfer any metadata (put on the placeholders in the form of
# attributes set by the user) from the placeholder to the
# underlying nodes (the proxy is unwrapped by the user, but
# the metadata should hold).
_transfer_attrs(fr=x, to=out.node)
return out
# Union[int, bool] == bool in Python <= 3.6
if (
type(x) is bool
or type(x) in base_types
and type(x) is not torch.Tensor
):
torch._assert(
out == x,
f"{name} has been specialized to have value {x} but got another value",
)
elif x is None:
args = (
out,
f"{name} has been specialized to have value None but got another value",
)
self.create_proxy("call_function", _assert_is_none, args, {})
else:
warnings.warn(
f"Was not able to add assertion to guarantee correct input {name} to "
f"specialized function. It is up to the user to make sure that your inputs match the "
f"inputs you specialized the function with."
)
return x
return pytree.tree_map(replace_ph, concrete_args[name])
if name[0] == "*":
default: tuple[Any, ...] = ()
else:
param = sig.parameters[name]
default = ( # type: ignore[assignment]
() if param.default is inspect.Parameter.empty else (param.default,)
)
return self.create_proxy(
"placeholder",
name,
default,
{},
type_expr=fn_for_analysis.__annotations__.get(name, None),
)
# Dictionary of (id(globals dict), function name) => globals_dict to patch for
# the purposes of the wrap() API.
# We key by the globals dict id and function name to ensure we're wrapping a given
# function only once.
_wrapped_fns_to_patch: dict[tuple[int, str], dict] = {}
# List of methods on classes to wrap (class type, function name)
# this currently only works for Tensor.* methods that aren't traced properly
_wrapped_methods_to_patch: list[tuple[type, str]] = []
if os.environ.get("FX_PATCH_GETITEM") == "1":
# This change is needed to trace models like PositionalEmbedding from BERT:
# https://github.com/pytorch/benchmark/blob/master/torchbenchmark/models/BERT_pytorch/bert_pytorch/model/embedding/position.py
# but causes issues in quantization documented here:
# https://github.com/pytorch/pytorch/issues/50710
# once that is fixed we can make this the default behavior.
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
def _find_proxy(*objects_to_search):
"""
Recursively search a data structure for a Proxy() and return it,
return None if not found.
"""
proxy = None
def find_proxy(x):
nonlocal proxy
if isinstance(x, Proxy):
proxy = x
map_aggregate(objects_to_search, find_proxy)
return proxy
def _create_wrapped_func(orig_fn):
@functools.wraps(orig_fn)
def wrapped(*args, **kwargs):
"""
Given an closed-over ``orig_function`` to invoke, search the args and kwargs for
a Proxy object. If there is one, emit a ``call_function`` node to preserve the
call to this leaf function directly. Otherwise, just return the results of
this function call, as this function is not being traced.
"""
proxy = _find_proxy(args, kwargs)
if proxy is not None:
return_proxy = proxy.tracer.create_proxy(
"call_function", orig_fn, args, kwargs
)
return_proxy.node.meta["is_wrapped"] = True
return return_proxy
return orig_fn(*args, **kwargs)
return wrapped
def _create_wrapped_method(cls, name):
orig_fn = getattr(cls, name)
@functools.wraps(orig_fn)
def wrapped(*args, **kwargs):
"""
Search the args and kwargs for a Proxy object. If there is one,
emit a ``call_method`` node to preserve the call to this method
directly. Otherwise, just return the results of this function
call, as this function is not being traced.
"""
proxy = _find_proxy(args, kwargs)
if proxy is not None:
return proxy.tracer.create_proxy("call_method", name, args, kwargs)
return orig_fn(*args, **kwargs)
return wrapped
| Tracer |
python | sphinx-doc__sphinx | sphinx/domains/c/_symbol.py | {
"start": 1718,
"end": 27153
} | class ____:
debug_indent = 0
debug_indent_string = ' '
debug_lookup = False
debug_show_tree = False
def __copy__(self) -> Self:
raise AssertionError # shouldn't happen
def __deepcopy__(self, memo: Any) -> Symbol:
if self.parent:
raise AssertionError # shouldn't happen
# the domain base class makes a copy of the initial data, which is fine
return Symbol(None, None, None, None, None)
@staticmethod
def debug_print(*args: Any) -> None:
msg = Symbol.debug_indent_string * Symbol.debug_indent
msg += ''.join(str(e) for e in args)
logger.debug(msg)
def _assert_invariants(self) -> None:
if not self.parent:
# parent == None means global scope, so declaration means a parent
assert not self.declaration
assert not self.docname
else:
if self.declaration:
assert self.docname
def __setattr__(self, key: str, value: Any) -> None:
if key == 'children':
raise AssertionError
return super().__setattr__(key, value)
def __init__(
self,
parent: Symbol | None,
ident: ASTIdentifier | None,
declaration: ASTDeclaration | None,
docname: str | None,
line: int | None,
) -> None:
self.parent = parent
# declarations in a single directive are linked together
self.siblingAbove: Symbol | None = None
self.siblingBelow: Symbol | None = None
self.ident = ident
self.declaration = declaration
self.docname = docname
self.line = line
self.isRedeclaration = False
self._assert_invariants()
# These properties store the same children for different access patterns.
# ``_add_child()`` and ``_remove_child()`` should be used for modifying them.
self._children_by_name: dict[str, Symbol] = {}
self._children_by_docname: dict[str, dict[str, Symbol]] = {}
self._anon_children: set[Symbol] = set()
if self.parent:
self.parent._add_child(self)
if self.declaration:
self.declaration.symbol = self
# Do symbol addition after self._children has been initialised.
self._add_function_params()
def __repr__(self) -> str:
return f'<Symbol {self.to_string(indent=0)!r}>'
@property
def _children(self) -> Iterable[Symbol]:
return self._children_by_name.values()
def _add_child(self, child: Symbol) -> None:
name = child.ident.name
if name in self._children_by_name:
# Duplicate so don't add - will be reported in _add_symbols()
return
self._children_by_name[name] = child
self._children_by_docname.setdefault(child.docname, {})[name] = child
if child.ident.is_anonymous:
self._anon_children.add(child)
def _remove_child(self, child: Symbol) -> None:
name = child.ident.name
self._children_by_name.pop(name, None)
if children := self._children_by_docname.get(child.docname):
children.pop(name, None)
if child.ident.is_anonymous:
self._anon_children.discard(child)
def _fill_empty(self, declaration: ASTDeclaration, docname: str, line: int) -> None:
self._assert_invariants()
assert self.declaration is None
assert self.docname is None
assert self.line is None
assert declaration is not None
assert docname is not None
assert line is not None
self.declaration = declaration
self.declaration.symbol = self
self.docname = docname
self.line = line
self._assert_invariants()
# and symbol addition should be done as well
self._add_function_params()
def _add_function_params(self) -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('_add_function_params:')
# Note: we may be called from _fill_empty, so the symbols we want
# to add may actually already be present (as empty symbols).
# add symbols for function parameters, if any
if (
self.declaration is not None
and self.declaration.function_params is not None
):
for p in self.declaration.function_params:
if p.arg is None:
continue
nn = p.arg.name
if nn is None:
continue
# (comparing to the template params: we have checked that we are a declaration)
decl = ASTDeclaration('functionParam', None, p)
assert not nn.rooted
assert len(nn.names) == 1
self._add_symbols(nn, decl, self.docname, self.line)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
def remove(self) -> None:
if self.parent:
self.parent._remove_child(self)
self.parent = None
def clear_doc(self, docname: str) -> None:
if docname not in self._children_by_docname:
for child in self._children:
child.clear_doc(docname)
return
children: dict[str, Symbol] = self._children_by_docname.pop(docname)
for child in children.values():
child.declaration = None
child.docname = None
child.line = None
if child.siblingAbove is not None:
child.siblingAbove.siblingBelow = child.siblingBelow
if child.siblingBelow is not None:
child.siblingBelow.siblingAbove = child.siblingAbove
child.siblingAbove = None
child.siblingBelow = None
self._remove_child(child)
def get_all_symbols(self) -> Iterator[Symbol]:
yield self
for s_child in self._children:
yield from s_child.get_all_symbols()
@property
def children(self) -> Iterator[Symbol]:
yield from self._children
def get_lookup_key(self) -> LookupKey:
# The pickle files for the environment and for each document are distinct.
# The environment has all the symbols, but the documents has xrefs that
# must know their scope. A lookup key is essentially a specification of
# how to find a specific symbol.
symbols = []
s = self
while s.parent:
symbols.append(s)
s = s.parent
key = [
# TODO: do we need the ID?
(s.ident, None if s.declaration is None else s.declaration.get_newest_id())
for s in reversed(symbols)
]
return LookupKey(key)
def get_full_nested_name(self) -> ASTNestedName:
symbols = []
s = self
while s.parent:
symbols.append(s)
s = s.parent
symbols.reverse()
names = [s.ident for s in symbols]
return ASTNestedName(names, rooted=False)
def _symbol_lookup(
self,
nested_name: ASTNestedName,
on_missing_qualified_symbol: Callable[[Symbol, ASTIdentifier], Symbol | None],
ancestor_lookup_type: str | None,
match_self: bool,
recurse_in_anon: bool,
search_in_siblings: bool,
) -> SymbolLookupResult | None:
# TODO: further simplification from C++ to C
# ancestor_lookup_type: if not None, specifies the target type of the lookup
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('_symbol_lookup:')
Symbol.debug_indent += 1
Symbol.debug_print('self:')
logger.debug(self.to_string(Symbol.debug_indent + 1, addEndNewline=False))
Symbol.debug_print('nested_name: ', nested_name)
Symbol.debug_print('ancestor_lookup_type:', ancestor_lookup_type)
Symbol.debug_print('match_self: ', match_self)
Symbol.debug_print('recurse_in_anon: ', recurse_in_anon)
Symbol.debug_print('search_in_siblings: ', search_in_siblings)
names = nested_name.names
# find the right starting point for lookup
parent_symbol = self
if nested_name.rooted:
while parent_symbol.parent is not None:
parent_symbol = parent_symbol.parent
if ancestor_lookup_type is not None:
# walk up until we find the first identifier
first_name = names[0]
while parent_symbol.parent:
if first_name.name in parent_symbol._children_by_name:
break
parent_symbol = parent_symbol.parent
if Symbol.debug_lookup:
Symbol.debug_print('starting point:')
logger.debug(
parent_symbol.to_string(Symbol.debug_indent + 1, addEndNewline=False)
)
# and now the actual lookup
for ident in names[:-1]:
name = ident.name
if name in parent_symbol._children_by_name:
symbol = parent_symbol._children_by_name[name]
else:
symbol = on_missing_qualified_symbol(parent_symbol, ident)
if symbol is None:
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return None
parent_symbol = symbol
if Symbol.debug_lookup:
Symbol.debug_print('handle last name from:')
logger.debug(
parent_symbol.to_string(Symbol.debug_indent + 1, addEndNewline=False)
)
# handle the last name
ident = names[-1]
name = ident.name
symbol = parent_symbol._children_by_name.get(name)
if not symbol and recurse_in_anon:
for child in parent_symbol._anon_children:
if name in child._children_by_name:
symbol = child._children_by_name[name]
break
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
result = [symbol] if symbol else []
return SymbolLookupResult(result, parent_symbol, ident)
def _add_symbols(
self,
nested_name: ASTNestedName,
declaration: ASTDeclaration | None,
docname: str | None,
line: int | None,
) -> Symbol:
# TODO: further simplification from C++ to C
# Used for adding a whole path of symbols, where the last may or may not
# be an actual declaration.
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('_add_symbols:')
Symbol.debug_indent += 1
Symbol.debug_print('nn: ', nested_name)
Symbol.debug_print('decl: ', declaration)
Symbol.debug_print(f'location: {docname}:{line}')
def on_missing_qualified_symbol(
parent_symbol: Symbol, ident: ASTIdentifier
) -> Symbol:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('_add_symbols, on_missing_qualified_symbol:')
Symbol.debug_indent += 1
Symbol.debug_print('ident: ', ident)
Symbol.debug_indent -= 2
return Symbol(
parent=parent_symbol,
ident=ident,
declaration=None,
docname=None,
line=None,
)
lookup_result = self._symbol_lookup(
nested_name,
on_missing_qualified_symbol,
ancestor_lookup_type=None,
match_self=False,
recurse_in_anon=False,
search_in_siblings=False,
)
# we create symbols all the way, so that can't happen
assert lookup_result is not None
symbols = list(lookup_result.symbols)
if len(symbols) == 0:
if Symbol.debug_lookup:
Symbol.debug_print('_add_symbols, result, no symbol:')
Symbol.debug_indent += 1
Symbol.debug_print('ident: ', lookup_result.ident)
Symbol.debug_print('declaration: ', declaration)
Symbol.debug_print(f'location: {docname}:{line}')
Symbol.debug_indent -= 1
symbol = Symbol(
parent=lookup_result.parent_symbol,
ident=lookup_result.ident,
declaration=declaration,
docname=docname,
line=line,
)
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return symbol
if Symbol.debug_lookup:
Symbol.debug_print('_add_symbols, result, symbols:')
Symbol.debug_indent += 1
Symbol.debug_print('number symbols:', len(symbols))
Symbol.debug_indent -= 1
if not declaration:
if Symbol.debug_lookup:
Symbol.debug_print('no declaration')
Symbol.debug_indent -= 2
# good, just a scope creation
# TODO: what if we have more than one symbol?
return symbols[0]
no_decl = []
with_decl = []
dup_decl = []
for s in symbols:
if s.declaration is None:
no_decl.append(s)
elif s.isRedeclaration:
dup_decl.append(s)
else:
with_decl.append(s)
if Symbol.debug_lookup:
Symbol.debug_print('#no_decl: ', len(no_decl))
Symbol.debug_print('#with_decl:', len(with_decl))
Symbol.debug_print('#dup_decl: ', len(dup_decl))
# With partial builds we may start with a large symbol tree stripped of declarations.
# Essentially any combination of no_decl, with_decl, and dup_decls seems possible.
# TODO: make partial builds fully work. What should happen when the primary symbol gets
# deleted, and other duplicates exist? The full document should probably be rebuild.
# First check if one of those with a declaration matches.
# If it's a function, we need to compare IDs,
# otherwise there should be only one symbol with a declaration.
if len(with_decl) == 0:
cand_symbol = None
else:
cand_symbol = self._make_cand_symbol(
lookup_result, declaration, docname, line
)
if declaration.objectType != 'function':
assert len(with_decl) <= 1
self._handle_duplicate_declaration(
with_decl[0], cand_symbol, declaration
)
# (not reachable)
# a function, so compare IDs
cand_id = declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print('cand_id:', cand_id)
for symbol in with_decl:
old_id = symbol.declaration.get_newest_id()
if Symbol.debug_lookup:
Symbol.debug_print('old_id: ', old_id)
if cand_id == old_id:
self._handle_duplicate_declaration(symbol, cand_symbol, declaration)
# (not reachable)
# no candidate symbol found with matching ID
# if there is an empty symbol, fill that one
if len(no_decl) == 0:
if Symbol.debug_lookup:
Symbol.debug_print(
'no match, no empty, cand_sybmol is not None?:',
cand_symbol is not None,
)
Symbol.debug_indent -= 2
if cand_symbol is not None:
return cand_symbol
else:
return self._make_cand_symbol(lookup_result, declaration, docname, line)
else:
if Symbol.debug_lookup:
Symbol.debug_print(
'no match, but fill an empty declaration, cand_sybmol is not None?:',
cand_symbol is not None,
)
Symbol.debug_indent -= 2
if cand_symbol is not None:
cand_symbol.remove()
# assert len(no_decl) == 1
# TODO: enable assertion when we at some point find out how to do cleanup
# for now, just take the first one, it should work fine ... right?
symbol = no_decl[0]
# If someone first opened the scope, and then later
# declares it, e.g,
# .. namespace:: Test
# .. namespace:: nullptr
# .. class:: Test
symbol._fill_empty(declaration, docname, line)
return symbol
@staticmethod
def _make_cand_symbol(
lookup_result: SymbolLookupResult,
declaration: ASTDeclaration | None,
docname: str | None,
line: int | None,
) -> Symbol:
if Symbol.debug_lookup:
Symbol.debug_print('begin: creating candidate symbol')
symbol = Symbol(
parent=lookup_result.parent_symbol,
ident=lookup_result.ident,
declaration=declaration,
docname=docname,
line=line,
)
if Symbol.debug_lookup:
Symbol.debug_print('end: creating candidate symbol')
return symbol
@staticmethod
def _handle_duplicate_declaration(
symbol: Symbol, cand_symbol: Symbol, declaration: ASTDeclaration
) -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('redeclaration')
Symbol.debug_indent -= 1
Symbol.debug_indent -= 2
# Redeclaration of the same symbol.
# Let the new one be there, but raise an error to the client
# so it can use the real symbol as subscope.
# This will probably result in a duplicate id warning.
cand_symbol.isRedeclaration = True
raise _DuplicateSymbolError(symbol, declaration)
def merge_with(
self, other: Symbol, docnames: list[str], env: BuildEnvironment
) -> None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('merge_with:')
assert other is not None
for other_child in other._children:
other_name = other_child.ident.name
if other_name not in self._children_by_name:
# TODO: hmm, should we prune by docnames?
other_child.parent = self
self._add_child(other_child)
other_child._assert_invariants()
continue
our_child = self._children_by_name[other_name]
if other_child.declaration and other_child.docname in docnames:
if not our_child.declaration:
our_child._fill_empty(
other_child.declaration, other_child.docname, other_child.line
)
elif our_child.docname != other_child.docname:
name = str(our_child.declaration)
msg = __(
'Duplicate C declaration, also defined at %s:%s.\n'
"Declaration is '.. c:%s:: %s'."
)
logger.warning(
msg,
our_child.docname,
our_child.line,
our_child.declaration.directiveType,
name,
location=(other_child.docname, other_child.line),
type='duplicate_declaration',
subtype='c',
)
else:
# Both have declarations, and in the same docname.
# This can apparently happen, it should be safe to
# just ignore it, right?
pass
our_child.merge_with(other_child, docnames, env)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
def add_name(self, nestedName: ASTNestedName) -> Symbol:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('add_name:')
res = self._add_symbols(nestedName, declaration=None, docname=None, line=None)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
return res
def add_declaration(
self, declaration: ASTDeclaration, docname: str, line: int
) -> Symbol:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('add_declaration:')
assert declaration is not None
assert docname is not None
assert line is not None
nested_name = declaration.name
res = self._add_symbols(nested_name, declaration, docname, line)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
return res
def find_identifier(
self,
ident: ASTIdentifier,
matchSelf: bool,
recurseInAnon: bool,
searchInSiblings: bool,
) -> Symbol | None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('find_identifier:')
Symbol.debug_indent += 1
Symbol.debug_print('ident: ', ident)
Symbol.debug_print('matchSelf: ', matchSelf)
Symbol.debug_print('recurseInAnon: ', recurseInAnon)
Symbol.debug_print('searchInSiblings:', searchInSiblings)
logger.debug(self.to_string(Symbol.debug_indent + 1, addEndNewline=False))
Symbol.debug_indent -= 2
current = self
while current is not None:
if Symbol.debug_lookup:
Symbol.debug_indent += 2
Symbol.debug_print('trying:')
logger.debug(
current.to_string(Symbol.debug_indent + 1, addEndNewline=False)
)
Symbol.debug_indent -= 2
if matchSelf and current.ident == ident:
return current
name = ident.name
if name in current._children_by_name:
return current._children_by_name[name]
if recurseInAnon:
for child in current._anon_children:
if name in child._children_by_name:
return child._children_by_name[name]
if not searchInSiblings:
break
current = current.siblingAbove
return None
def direct_lookup(self, key: LookupKey) -> Symbol | None:
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('direct_lookup:')
Symbol.debug_indent += 1
s = self
for ident, id_ in key.data:
s = s._children_by_name.get(ident.name)
if Symbol.debug_lookup:
Symbol.debug_print('name: ', ident.name)
Symbol.debug_print('id: ', id_)
if s is not None:
logger.debug(
s.to_string(Symbol.debug_indent + 1, addEndNewline=False)
)
else:
Symbol.debug_print('not found')
if s is None:
break
if Symbol.debug_lookup:
Symbol.debug_indent -= 2
return s
def find_declaration(
self, nestedName: ASTNestedName, typ: str, matchSelf: bool, recurseInAnon: bool
) -> Symbol | None:
# templateShorthand: missing template parameter lists for templates is ok
if Symbol.debug_lookup:
Symbol.debug_indent += 1
Symbol.debug_print('find_declaration:')
def on_missing_qualified_symbol(
parent_symbol: Symbol, ident: ASTIdentifier
) -> Symbol | None:
return None
lookup_result = self._symbol_lookup(
nestedName,
on_missing_qualified_symbol,
ancestor_lookup_type=typ,
match_self=matchSelf,
recurse_in_anon=recurseInAnon,
search_in_siblings=False,
)
if Symbol.debug_lookup:
Symbol.debug_indent -= 1
if lookup_result is None:
return None
symbols = list(lookup_result.symbols)
if len(symbols) == 0:
return None
return symbols[0]
def to_string(self, indent: int, *, addEndNewline: bool = True) -> str:
res = [Symbol.debug_indent_string * indent]
if not self.parent:
res.append('::')
else:
if self.ident:
res.append(self.ident.name)
else:
res.append(str(self.declaration))
if self.declaration:
res.append(': ')
if self.isRedeclaration:
res.append('!!duplicate!! ')
res.append(str(self.declaration))
if self.docname:
res.extend((
'\t(',
self.docname,
')',
))
if addEndNewline:
res.append('\n')
return ''.join(res)
def dump(self, indent: int) -> str:
return ''.join([
self.to_string(indent),
*(c.dump(indent + 1) for c in self._children),
])
| Symbol |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/plugins/facets.py | {
"start": 1485,
"end": 2522
} | class ____(JobFacet):
"""
Composite Airflow job facet.
This facet encapsulates all the necessary information to re-create full scope of an Airflow DAG logic,
enabling reconstruction, visualization, and analysis of DAGs in a comprehensive manner.
It includes detailed representations of the tasks, task groups, and their hierarchical relationships,
making it possible to draw a graph that visually represents the entire DAG structure (like in Airflow UI).
It also indicates whether a task should emit an OpenLineage (OL) event, enabling consumers to anticipate
the number of events and identify the tasks from which they can expect these events.
Attributes:
taskTree: A dictionary representing the hierarchical structure of tasks in the DAG.
taskGroups: A dictionary that contains information about task groups within the DAG.
tasks: A dictionary detailing individual tasks within the DAG.
"""
taskTree: dict
taskGroups: dict
tasks: dict
@define
| AirflowJobFacet |
python | pytorch__pytorch | torch/testing/_internal/common_fsdp.py | {
"start": 25422,
"end": 25624
} | class ____(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
| DummyDDP |
python | kamyu104__LeetCode-Solutions | Python/count-words-obtained-after-adding-a-letter.py | {
"start": 29,
"end": 575
} | class ____(object):
def wordCount(self, startWords, targetWords):
"""
:type startWords: List[str]
:type targetWords: List[str]
:rtype: int
"""
def bitmask(w):
return reduce(lambda x, y: x|y, (1 << (ord(c)-ord('a')) for i, c in enumerate(w)))
lookup = set(bitmask(w) for w in startWords)
result = 0
for w in targetWords:
mask = bitmask(w)
result += any(mask ^ (1 << ord(c)-ord('a')) in lookup for c in w)
return result
| Solution |
python | pytorch__pytorch | test/functorch/test_control_flow.py | {
"start": 370805,
"end": 382608
} | class ____(TestCase):
def _get_example_val(self, ty: str):
from torch.fx.experimental.sym_node import SymNode
from torch.fx.experimental.symbolic_shapes import ShapeEnv
def create_symtype(cls, pytype, shape_env, val):
from torch._dynamo.source import ConstantSource
symbol = shape_env.create_symbol(
val,
source=ConstantSource(
f"__testing_hop_schema{len(shape_env.var_to_val)}"
),
)
return cls(SymNode(symbol, shape_env, pytype, hint=val))
if ty == "bool":
return True
elif ty == "int":
return 1
elif ty == "float":
return 1.0
elif ty == "str":
return "foo"
elif ty == "Tensor":
return torch.tensor(1)
elif ty == "SymInt":
shape_env = ShapeEnv()
return create_symtype(torch.SymInt, int, shape_env, 1)
elif ty == "SymBool":
shape_env = ShapeEnv()
return create_symtype(torch.SymBool, bool, shape_env, True)
elif ty == "GraphModule":
def f(x):
return x.sin()
return make_fx(f)(torch.ones(1))
elif ty == "ScriptObj":
from torch.testing._internal.torchbind_impls import (
init_torchbind_implementations,
)
init_torchbind_implementations()
foo = torch.classes._TorchScriptTesting._Foo(3, 4)
return foo
else:
raise NotImplementedError(ty)
@parametrize("schema_type", _hop_schema_test_schema_types)
def test_type_gen(self, schema_type):
from torchgen.gen_schema_utils import TypeGen
example_val = self._get_example_val(schema_type)
ty = TypeGen.from_example(example_val)
# Test the generated type can be parsed
self.assertEqual(ty.parse(str(ty)), ty)
@parametrize("schema_type", _hop_schema_test_schema_types)
def test_list_gen(self, schema_type):
from torchgen.gen_schema_utils import TypeGen
example_val = self._get_example_val(schema_type)
li1 = [example_val]
ty1 = TypeGen.from_example(li1)
ty2 = TypeGen.from_example(li1)
self.assertEqual(ty1.parse(str(ty1)), ty1)
self.assertEqual(ty2.parse(str(ty2)), ty2)
def test_function_schema_gen(self):
from torchgen.gen_schema_utils import FunctionSchemaGen
inps = [
(schema_type + "_v", self._get_example_val(schema_type))
for schema_type in _hop_schema_test_schema_types
]
schema1 = FunctionSchemaGen.from_example("test_op1", inps, torch.ones(1))
schema2 = FunctionSchemaGen.from_example(
"test_op2",
inps,
[
torch.ones(1),
],
)
schema3 = FunctionSchemaGen.from_example(
"test_op3", inps, [torch.ones(1), torch.ones(1)]
)
self.assertExpectedInline(
str(schema1),
"""test_op1(bool bool_v, int int_v, float float_v, str str_v, Tensor Tensor_v, SymInt SymInt_v, SymBool SymBool_v, GraphModule GraphModule_v, __torch__.torch.classes._Foo ScriptObj_v) -> Tensor""", # noqa: B950
)
self.assertExpectedInline(
str(schema2),
"""test_op2(bool bool_v, int int_v, float float_v, str str_v, Tensor Tensor_v, SymInt SymInt_v, SymBool SymBool_v, GraphModule GraphModule_v, __torch__.torch.classes._Foo ScriptObj_v) -> Tensor""", # noqa: B950
)
self.assertExpectedInline(
str(schema3),
"""test_op3(bool bool_v, int int_v, float float_v, str str_v, Tensor Tensor_v, SymInt SymInt_v, SymBool SymBool_v, GraphModule GraphModule_v, __torch__.torch.classes._Foo ScriptObj_v) -> (Tensor, Tensor)""", # noqa: B950,
)
self.assertEqual(schema1.parse(str(schema1)), schema1)
self.assertEqual(schema2.parse(str(schema2)), schema2)
self.assertEqual(schema3.parse(str(schema3)), schema3)
def test_schema_tree_spec(self):
schema_gen = HopSchemaGenerator(torch.ops.higher_order.cond)
args = (torch.randn(3, 4), torch.randn(2, 3))
with self.assertRaisesRegex(
RuntimeError, "Please only add flattened inputs to the hop schema"
):
schema_gen.add_arg("tuple_args", args)
for i, arg in enumerate(args):
schema_gen.add_arg(f"tuple_args{i}", arg)
schema_gen.add_schema_tree_spec(pytree.tree_flatten(args)[1])
flat_schema = schema_gen.gen_schema()
self.assertExpectedInline(
str(flat_schema), """cond(Tensor tuple_args0, Tensor tuple_args1) -> ()"""
)
def test_cond_gen_schema_tensor_inputs(self):
schema = torch.ops.higher_order.cond.gen_schema(
torch.tensor(True),
lambda x: x.sin(),
lambda x: x.cos(),
(torch.randn(3, 4),),
)
self.assertExpectedInline(
str(schema),
"""cond(Tensor pred, Any true_fn, Any false_fn, Tensor operand0) -> ((Tensor))""",
)
def test_cond_gen_schema_symbool_inputs(self):
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.symbolic_shapes import ShapeEnv
fake_mode = FakeTensorMode(shape_env=ShapeEnv())
with fake_mode, fake_mode.shape_env.ignore_fresh_unbacked_symbols():
sym_bool = torch.randn(3, 4).nonzero().size(0) == 0
schema = torch.ops.higher_order.cond.gen_schema(
sym_bool,
lambda x: x.sin(),
lambda x: x.cos(),
(torch.randn(3, 4),),
)
self.assertExpectedInline(
str(schema),
"""cond(SymBool pred, Any true_fn, Any false_fn, Tensor operand0) -> ((Tensor))""",
)
def test_while_loop_gen_schema_tensor_inputs(self):
def cond_fn(x, y):
return x.sum() < 10
def body_fn(x, y):
return x + 1, y.sin()
schema = torch.ops.higher_order.while_loop.gen_schema(
cond_fn,
body_fn,
(torch.randn(3, 4), torch.randn(2, 3)),
(),
)
self.assertExpectedInline(
str(schema),
"""while_loop(Any cond_fn, Any body_fn, Tensor carried_input0, Tensor carried_input1) -> (Tensor, Tensor)""",
)
def test_while_loop_gen_schema_with_additional_inputs(self):
def cond_fn(x, y, z):
return x.sum() < z
def body_fn(x, y, z):
return x + 1, y.sin()
schema = torch.ops.higher_order.while_loop.gen_schema(
cond_fn,
body_fn,
(torch.randn(3, 4), torch.randn(2, 3)),
(torch.tensor(10),),
)
self.assertExpectedInline(
str(schema),
"""while_loop(Any cond_fn, Any body_fn, Tensor carried_input0, Tensor carried_input1, Tensor additional_input0) -> (Tensor, Tensor)""", # noqa: B950
)
def test_scan_gen_schema_tensor_inputs(self):
def combine_fn(carry, x):
return carry + x, carry * x
schema = torch.ops.higher_order.scan.gen_schema(
combine_fn,
(torch.randn(3, 4),),
(torch.randn(5, 3, 4),),
(),
)
self.assertExpectedInline(
str(schema),
"""scan(Any combine_fn, Tensor init0, Tensor xs0) -> (Tensor, Tensor)""",
)
def test_scan_gen_schema_with_additional_inputs(self):
def combine_fn(carry, x, scale):
return carry + x * scale, carry * x
schema = torch.ops.higher_order.scan.gen_schema(
combine_fn,
(torch.randn(3, 4),),
(torch.randn(5, 3, 4),),
(torch.tensor(2.0),),
)
self.assertExpectedInline(
str(schema),
"""scan(Any combine_fn, Tensor init0, Tensor xs0, Tensor additional_input0) -> (Tensor, Tensor)""", # noqa: B950
)
def test_scan_gen_schema_multiple_inputs(self):
def combine_fn(carry1, carry2, x1, x2):
return carry1 + x1, carry2 * x2, carry1 - x1, carry2 + x2
schema = torch.ops.higher_order.scan.gen_schema(
combine_fn,
(torch.randn(3, 4), torch.randn(2, 3)),
(torch.randn(5, 3, 4), torch.randn(5, 2, 3)),
(),
)
self.assertExpectedInline(
str(schema),
"""scan(Any combine_fn, Tensor init0, Tensor init1, Tensor xs0, Tensor xs1) -> (Tensor, Tensor, Tensor, Tensor)""", # noqa: B950
)
def test_associative_scan_gen_schema_tensor_inputs(self):
def combine_fn(x, y):
return x + y
schema = torch.ops.higher_order.associative_scan.gen_schema(
combine_fn,
(torch.randn(5, 3, 4),),
(),
)
self.assertExpectedInline(
str(schema),
"""associative_scan(Any combine_fn, Tensor xs0) -> ((Tensor))""",
)
def test_associative_scan_gen_schema_with_additional_inputs(self):
def combine_fn(x, y, scale):
return x * y * scale
schema = torch.ops.higher_order.associative_scan.gen_schema(
combine_fn,
(torch.randn(5, 3, 4),),
(torch.tensor(2.0),),
)
self.assertExpectedInline(
str(schema),
"""associative_scan(Any combine_fn, Tensor xs0, Tensor additional_input0) -> ((Tensor))""",
)
def test_associative_scan_gen_schema_multiple_inputs(self):
def combine_fn(x1, x2, y1, y2):
return x1 + y1, x2 * y2
schema = torch.ops.higher_order.associative_scan.gen_schema(
combine_fn,
(torch.randn(5, 3, 4), torch.randn(5, 2, 3)),
(),
)
self.assertExpectedInline(
str(schema),
"""associative_scan(Any combine_fn, Tensor xs0, Tensor xs1) -> (Tensor, Tensor)""",
)
def test_while_loop_gen_schema_with_int_carries(self):
def cond_fn(x, y, z, c):
return x < y
def body_fn(x, y, z, c):
return x + 1, y - 1, z.sin(), c + x
schema = torch.ops.higher_order.while_loop.gen_schema(
cond_fn,
body_fn,
(2, 10, torch.randn(2, 3)),
(torch.tensor(10),),
)
self.assertExpectedInline(
str(schema),
"""while_loop(Any cond_fn, Any body_fn, int carried_input0, int carried_input1, Tensor carried_input2, Tensor additional_input0) -> (int, int, Tensor, Tensor)""", # noqa: B950
)
def test_while_loop_gen_schema_with_input_mutation(self):
def cond_fn(x, y, z, c):
return x < y
def body_fn(x, y, z, c):
x.add_(1)
y.sub_(1)
z.sin_()
c.add_(x)
return x, y, z
c = torch.randn(3, 3)
schema = torch.ops.higher_order.while_loop.gen_schema(
cond_fn,
body_fn,
(torch.randn(3, 3), torch.randn(3, 3), torch.randn(3, 3)),
(c,),
)
self.assertExpectedInline(
str(schema),
"""while_loop(Any cond_fn, Any body_fn, Tensor(a2!) carried_input0, Tensor(a3!) carried_input1, Tensor(a4!) carried_input2, Tensor(a5!) additional_input0) -> (Tensor, Tensor, Tensor)""", # noqa: B950
)
instantiate_parametrized_tests(TestHopSchema)
instantiate_parametrized_tests(TestControlFlowTraced)
instantiate_parametrized_tests(TestControlFlow)
instantiate_parametrized_tests(AssociativeScanTests)
if __name__ == "__main__":
run_tests()
| TestHopSchema |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_assigned_email.py | {
"start": 603,
"end": 1079
} | class ____(ActivityMailDebugView):
def get_activity(self, request: AuthenticatedHttpRequest, event):
return {
"type": ActivityType.ASSIGNED.value,
"user_id": request.user.id,
"data": {
"assignee": str(request.user.id),
"assigneeEmail": request.user.email,
"assigneeName": request.user.name,
"assigneeType": "user",
},
}
| DebugSelfAssignedEmailView |
python | sympy__sympy | sympy/polys/agca/modules.py | {
"start": 42516,
"end": 42821
} | class ____(ModuleElement):
"""Element of a quotient module."""
def eq(self, d1, d2):
"""Equality comparison."""
return self.module.killed_module.contains(d1 - d2)
def __repr__(self):
return repr(self.data) + " + " + repr(self.module.killed_module)
| QuotientModuleElement |
python | PrefectHQ__prefect | tests/runtime/test_flow_run.py | {
"start": 7680,
"end": 8585
} | class ____:
async def test_name_is_attribute(self):
assert "name" in dir(flow_run)
async def test_name_is_empty_when_not_set(self):
assert flow_run.name is None
async def test_name_returns_name_when_present_dynamically(self):
assert flow_run.name is None
with FlowRunContext.model_construct(
flow_run=FlowRun.model_construct(name="foo")
):
assert flow_run.name == "foo"
assert flow_run.name is None
async def test_name_pulls_from_api_when_needed(
self, monkeypatch: pytest.MonkeyPatch, prefect_client: PrefectClient
):
run = await prefect_client.create_flow_run(
flow=flow(lambda: None, name="test"), name="foo"
)
assert flow_run.name is None
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(run.id))
assert flow_run.name == "foo"
| TestName |
python | encode__django-rest-framework | tests/test_permissions.py | {
"start": 1209,
"end": 1502
} | class ____(generics.ListCreateAPIView):
serializer_class = BasicSerializer
authentication_classes = [authentication.BasicAuthentication]
permission_classes = [permissions.DjangoModelPermissions]
def get_queryset(self):
return BasicModel.objects.all()
| GetQuerySetListView |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0011_version-media-availability.py | {
"start": 150,
"end": 893
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0010_add-description-field-to-automation-rule"),
]
operations = [
migrations.AddField(
model_name="version",
name="has_epub",
field=models.BooleanField(default=False, verbose_name="Has ePub"),
),
migrations.AddField(
model_name="version",
name="has_htmlzip",
field=models.BooleanField(default=False, verbose_name="Has HTML Zip"),
),
migrations.AddField(
model_name="version",
name="has_pdf",
field=models.BooleanField(default=False, verbose_name="Has PDF"),
),
]
| Migration |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 78411,
"end": 82924
} | class ____(TypedDict, total=False):
type: Required[Literal['function-wrap']]
function: Required[WrapValidatorFunction]
schema: Required[CoreSchema]
ref: str
json_schema_input_schema: CoreSchema
metadata: dict[str, Any]
serialization: SerSchema
def no_info_wrap_validator_function(
function: NoInfoWrapValidatorFunction,
schema: CoreSchema,
*,
ref: str | None = None,
json_schema_input_schema: CoreSchema | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> WrapValidatorFunctionSchema:
"""
Returns a schema which calls a function with a `validator` callable argument which can
optionally be used to call inner validation with the function logic, this is much like the
"onion" implementation of middleware in many popular web frameworks, no `info` argument is passed, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
def fn(
v: str,
validator: core_schema.ValidatorFunctionWrapHandler,
) -> str:
return validator(input_value=v) + 'world'
schema = core_schema.no_info_wrap_validator_function(
function=fn, schema=core_schema.str_schema()
)
v = SchemaValidator(schema)
assert v.validate_python('hello ') == 'hello world'
```
Args:
function: The validator function to call
schema: The schema to validate the output of the validator function
ref: optional unique identifier of the schema, used to reference the schema in other places
json_schema_input_schema: The core schema to be used to generate the corresponding JSON Schema input type
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='function-wrap',
function={'type': 'no-info', 'function': function},
schema=schema,
json_schema_input_schema=json_schema_input_schema,
ref=ref,
metadata=metadata,
serialization=serialization,
)
def with_info_wrap_validator_function(
function: WithInfoWrapValidatorFunction,
schema: CoreSchema,
*,
field_name: str | None = None,
json_schema_input_schema: CoreSchema | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> WrapValidatorFunctionSchema:
"""
Returns a schema which calls a function with a `validator` callable argument which can
optionally be used to call inner validation with the function logic, this is much like the
"onion" implementation of middleware in many popular web frameworks, an `info` argument is also passed, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
def fn(
v: str,
validator: core_schema.ValidatorFunctionWrapHandler,
info: core_schema.ValidationInfo,
) -> str:
return validator(input_value=v) + 'world'
schema = core_schema.with_info_wrap_validator_function(
function=fn, schema=core_schema.str_schema()
)
v = SchemaValidator(schema)
assert v.validate_python('hello ') == 'hello world'
```
Args:
function: The validator function to call
schema: The schema to validate the output of the validator function
field_name: The name of the field this validator is applied to, if any (deprecated)
json_schema_input_schema: The core schema to be used to generate the corresponding JSON Schema input type
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
if field_name is not None:
warnings.warn(
'The `field_name` argument on `with_info_wrap_validator_function` is deprecated, it will be passed to the function through `ValidationState` instead.',
DeprecationWarning,
stacklevel=2,
)
return _dict_not_none(
type='function-wrap',
function=_dict_not_none(type='with-info', function=function, field_name=field_name),
schema=schema,
json_schema_input_schema=json_schema_input_schema,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| WrapValidatorFunctionSchema |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/test_system_message.py | {
"start": 16130,
"end": 20845
} | class ____:
"""Test multiple middleware modifying system message in sequence."""
def test_multiple_middleware_can_chain_modifications(self) -> None:
"""Test that multiple middleware can modify system message sequentially."""
def first_middleware(request: ModelRequest, handler) -> ModelResponse:
"""First middleware sets base system message."""
new_request = request.override(
system_message=SystemMessage(
content="Base prompt",
additional_kwargs={"middleware_1": "applied"},
)
)
return handler(new_request)
def second_middleware(request: ModelRequest, handler) -> ModelResponse:
"""Second middleware appends to system message."""
current_content = request.system_message.text
current_kwargs = request.system_message.additional_kwargs
new_request = request.override(
system_message=SystemMessage(
content=current_content + " + middleware 2",
additional_kwargs={**current_kwargs, "middleware_2": "applied"},
)
)
return handler(new_request)
def third_middleware(request: ModelRequest, handler) -> ModelResponse:
"""Third middleware appends to system message."""
current_content = request.system_message.text
current_kwargs = request.system_message.additional_kwargs
new_request = request.override(
system_message=SystemMessage(
content=current_content + " + middleware 3",
additional_kwargs={**current_kwargs, "middleware_3": "applied"},
)
)
return handler(new_request)
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
request = ModelRequest(
model=model,
system_message=None,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=cast("AgentState", {"messages": []}), # type: ignore[name-defined]
runtime=_fake_runtime(),
)
def final_handler(req: ModelRequest) -> ModelResponse:
# Verify all middleware applied
assert req.system_message.text == "Base prompt + middleware 2 + middleware 3"
assert req.system_message.additional_kwargs["middleware_1"] == "applied"
assert req.system_message.additional_kwargs["middleware_2"] == "applied"
assert req.system_message.additional_kwargs["middleware_3"] == "applied"
return ModelResponse(result=[AIMessage(content="response")])
# Chain middleware calls
first_middleware(
request,
lambda req: second_middleware(req, lambda req2: third_middleware(req2, final_handler)),
)
def test_middleware_can_mix_string_and_system_message_updates(self) -> None:
"""Test mixing string and SystemMessage updates across middleware."""
def string_middleware(request: ModelRequest, handler) -> ModelResponse:
"""Use string-based update."""
new_request = request.override(system_prompt="String prompt")
return handler(new_request)
def system_message_middleware(request: ModelRequest, handler) -> ModelResponse:
"""Use SystemMessage-based update."""
current_content = request.system_message.text if request.system_message else ""
new_request = request.override(
system_message=SystemMessage(
content=current_content + " + SystemMessage",
additional_kwargs={"metadata": "added"},
)
)
return handler(new_request)
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
request = ModelRequest(
model=model,
system_message=None,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=cast("AgentState", {"messages": []}), # type: ignore[name-defined]
runtime=_fake_runtime(),
)
def final_handler(req: ModelRequest) -> ModelResponse:
assert "String prompt + SystemMessage" == req.system_message.text
assert req.system_message.additional_kwargs["metadata"] == "added"
return ModelResponse(result=[AIMessage(content="response")])
string_middleware(request, lambda req: system_message_middleware(req, final_handler))
| TestMultipleMiddlewareChaining |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_histogram.py | {
"start": 1011,
"end": 10631
} | class ____(ColumnAggregateMetricProvider):
metric_name = "column.histogram"
value_keys = ("bins",)
@metric_value(engine=PandasExecutionEngine)
def _pandas(
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
):
df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
column = accessor_domain_kwargs["column"]
bins = metric_value_kwargs["bins"]
column_series: pd.Series = df[column]
column_null_elements_cond: pd.Series = column_series.isnull()
column_nonnull_elements: pd.Series = column_series[~column_null_elements_cond]
hist, _bin_edges = np.histogram(column_nonnull_elements, bins, density=False)
return list(hist)
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
):
"""return a list of counts corresponding to bins
Args:
column: the name of the column for which to get the histogram
bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching
""" # noqa: E501 # FIXME CoP
selectable, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
column = accessor_domain_kwargs["column"]
bins = metric_value_kwargs["bins"]
if isinstance(bins, np.ndarray):
bins = bins.tolist()
else:
bins = list(bins)
case_conditions = []
if len(bins) == 1 and not (
(
bins[0]
== get_sql_dialect_floating_point_infinity_value(schema="api_np", negative=True)
)
or (
bins[0]
== get_sql_dialect_floating_point_infinity_value(schema="api_cast", negative=True)
)
or (
bins[0]
== get_sql_dialect_floating_point_infinity_value(schema="api_np", negative=False)
)
or (
bins[0]
== get_sql_dialect_floating_point_infinity_value(schema="api_cast", negative=False)
)
):
# Single-valued column data are modeled using "impulse" (or "sample") distributions (on open interval). # noqa: E501 # FIXME CoP
case_conditions.append(
sa.func.sum(
sa.case(
(
sa.and_(
float(bins[0] - np.finfo(float).eps) < sa.column(column),
sa.column(column) < float(bins[0] + np.finfo(float).eps),
),
1,
),
else_=0,
)
).label("bin_0")
)
query = (
sa.select(*case_conditions)
.where(
sa.column(column) != None, # noqa: E711 # FIXME CoP
)
.select_from(selectable) # type: ignore[arg-type] # FIXME CoP
)
# Run the data through convert_to_json_serializable to ensure we do not have Decimal types # noqa: E501 # FIXME CoP
return convert_to_json_serializable(
list(execution_engine.execute_query(query).fetchone()) # type: ignore[arg-type] # FIXME CoP
)
idx = 0
# If we have an infinite lower bound, don't express that in sql
if (
bins[0] == get_sql_dialect_floating_point_infinity_value(schema="api_np", negative=True)
) or (
bins[0]
== get_sql_dialect_floating_point_infinity_value(schema="api_cast", negative=True)
):
case_conditions.append(
sa.func.sum(sa.case((sa.column(column) < bins[idx + 1], 1), else_=0)).label(
f"bin_{idx!s}"
)
)
idx += 1
negative_boundary: float
positive_boundary: float
for idx in range( # noqa: B020 # loop-variable-overrides-iterator
idx, len(bins) - 2
):
negative_boundary = float(bins[idx])
positive_boundary = float(bins[idx + 1])
case_conditions.append(
sa.func.sum(
sa.case(
(
sa.and_(
negative_boundary <= sa.column(column),
sa.column(column) < positive_boundary,
),
1,
),
else_=0,
)
).label(f"bin_{idx!s}")
)
if (
bins[-1]
== get_sql_dialect_floating_point_infinity_value(schema="api_np", negative=False)
) or (
bins[-1]
== get_sql_dialect_floating_point_infinity_value(schema="api_cast", negative=False)
):
negative_boundary = float(bins[-2])
case_conditions.append(
sa.func.sum(sa.case((negative_boundary <= sa.column(column), 1), else_=0)).label(
f"bin_{len(bins) - 1!s}"
)
)
else:
negative_boundary = float(bins[-2])
positive_boundary = float(bins[-1])
case_conditions.append(
sa.func.sum(
sa.case(
(
sa.and_(
negative_boundary <= sa.column(column),
sa.column(column) <= positive_boundary,
),
1,
),
else_=0,
)
).label(f"bin_{len(bins) - 1!s}")
)
query = (
sa.select(*case_conditions)
.where(
sa.column(column) != None, # noqa: E711 # FIXME CoP
)
.select_from(selectable) # type: ignore[arg-type] # FIXME CoP
)
# Run the data through convert_to_json_serializable to ensure we do not have Decimal types
return convert_to_json_serializable(list(execution_engine.execute_query(query).fetchone())) # type: ignore[arg-type] # FIXME CoP
@metric_value(engine=SparkDFExecutionEngine)
def _spark( # noqa: C901 # FIXME CoP
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
):
df, _, _accessor_domain_kwargs = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
bins = metric_value_kwargs["bins"]
column = metric_domain_kwargs["column"]
"""return a list of counts corresponding to bins"""
bins = list(copy.deepcopy(bins)) # take a copy since we are inserting and popping
if bins[0] == -np.inf or bins[0] == -float("inf"):
added_min = False
bins[0] = -float("inf")
else:
added_min = True
bins.insert(0, -float("inf"))
if bins[-1] == np.inf or bins[-1] == float("inf"):
added_max = False
bins[-1] = float("inf")
else:
added_max = True
bins.append(float("inf"))
temp_column = df.select(column).where(F.col(column).isNotNull())
bucketizer = pyspark.Bucketizer(splits=bins, inputCol=column, outputCol="buckets")
bucketed = bucketizer.setHandleInvalid("skip").transform(temp_column)
# This is painful to do, but: bucketizer cannot handle values outside of a range
# (hence adding -/+ infinity above)
# Further, it *always* follows the numpy convention of lower_bound <= bin < upper_bound
# for all but the last bin
# But, since the last bin in our case will often be +infinity, we need to
# find the number of values exactly equal to the upper bound to add those
# We'll try for an optimization by asking for it at the same time
if added_max:
upper_bound_count = temp_column.select(column).filter(F.col(column) == bins[-2]).count()
else:
upper_bound_count = 0
hist_rows = bucketed.groupBy("buckets").count().collect()
# Spark only returns buckets that have nonzero counts.
hist = [0] * (len(bins) - 1)
for row in hist_rows:
hist[int(row["buckets"])] = row["count"]
hist[-2] += upper_bound_count
if added_min:
below_bins = hist.pop(0)
bins.pop(0)
if below_bins > 0:
logger.warning("Discarding histogram values below lowest bin.")
if added_max:
above_bins = hist.pop(-1)
bins.pop(-1)
if above_bins > 0:
logger.warning("Discarding histogram values above highest bin.")
return hist
| ColumnHistogram |
python | huggingface__transformers | tests/quantization/vptq_integration/test_vptq.py | {
"start": 1524,
"end": 7175
} | class ____(unittest.TestCase):
model_name = "VPTQ-community/Meta-Llama-3.1-8B-Instruct-v12-k65536-4096-woft"
input_text = "Hello my name is"
max_new_tokens = 32
EXPECTED_OUTPUT = "Hello my name is Sarah and I am a 25 year old woman from the United States. I am a college graduate and I am currently working as a marketing specialist for a small"
device_map = "cuda"
# called only once for all test in this class
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.model_name,
device_map=cls.device_map,
)
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
gc.collect()
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_raise_if_non_quantized(self):
model_id = "facebook/opt-125m"
quantization_config = VptqConfig()
with self.assertRaises(ValueError):
_ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@require_torch_multi_gpu
def test_quantized_model_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto")
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens, do_sample=False)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from vptq import VQuantLinear
from transformers.integrations import replace_with_vptq_linear
model_id = "facebook/opt-350m"
config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
modules_to_not_convert = ["lm_head"]
names = [
"q_proj",
"k_proj",
"v_proj",
"out_proj",
"fc1",
"fc2",
]
value = {
"enable_norm": True,
"enable_perm": True,
"group_num": 1,
"group_size": 128,
"indices_as_float": False,
"num_centroids": [-1, 128],
"num_res_centroids": [-1, 128],
"outlier_size": 0,
"vector_lens": [-1, 12],
}
shared_layer_config = {}
for name in names:
shared_layer_config[name] = value
for i in range(24):
modules_to_not_convert.append(f"model.decoder.layers.{i}.fc1")
layer_configs = {}
layer_configs["model.decoder.project_out"] = value
layer_configs["model.decoder.project_in"] = value
quantization_config = VptqConfig(config_for_layers=layer_configs, shared_layer_config=shared_layer_config)
with init_empty_weights():
model = AutoModelForCausalLM.from_config(config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model, _ = replace_with_vptq_linear(model, quantization_config=quantization_config)
nb_vptq_linear = 0
for module in model.modules():
if isinstance(module, VQuantLinear):
nb_vptq_linear += 1
self.assertEqual(nb_linears - 1, nb_vptq_linear)
# Try with `linear_weights_not_to_quantize`
with init_empty_weights():
model = AutoModelForCausalLM.from_config(config)
quantization_config = VptqConfig(config_for_layers=layer_configs, shared_layer_config=shared_layer_config)
model, _ = replace_with_vptq_linear(
model, quantization_config=quantization_config, modules_to_not_convert=modules_to_not_convert
)
nb_vptq_linear = 0
for module in model.modules():
if isinstance(module, VQuantLinear):
nb_vptq_linear += 1
# 25 comes from 24 decoder.layers.{layer_idx}.fc1
# and the last lm_head
self.assertEqual(nb_linears - 25, nb_vptq_linear)
| VptqTest |
python | pytorch__pytorch | torch/ao/pruning/_experimental/pruner/parametrization.py | {
"start": 1472,
"end": 2047
} | class ____:
def __init__(self, parametrization, prune_bias):
self.param = parametrization
self.prune_bias = prune_bias
def __call__(self, module, input, output):
if getattr(module, "_bias", None) is not None:
bias = module._bias.data
if self.prune_bias:
bias[~self.param.mask] = 0
# reshape bias to broadcast over output dimensions
idx = [1] * len(output.shape)
idx[1] = -1
bias = bias.reshape(idx)
output += bias
return output
| BiasHook |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/agent_toolkits/vectorstore/toolkit.py | {
"start": 553,
"end": 2071
} | class ____(BaseToolkit):
"""Toolkit for interacting with a `VectorStore`."""
vectorstore_info: VectorStoreInfo = Field(exclude=True)
llm: BaseLanguageModel
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> list[BaseTool]:
"""Get the tools in the toolkit."""
try:
from langchain_community.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
except ImportError as e:
msg = "You need to install langchain-community to use this toolkit."
raise ImportError(msg) from e
description = VectorStoreQATool.get_description(
self.vectorstore_info.name,
self.vectorstore_info.description,
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name,
self.vectorstore_info.description,
)
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
return [qa_tool, qa_with_sources_tool]
| VectorStoreToolkit |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 27859,
"end": 27947
} | class ____(BinExpr):
"""Divides the left by the right node."""
operator = "/"
| Div |
python | celery__celery | t/unit/utils/test_serialization.py | {
"start": 487,
"end": 910
} | class ____:
@pytest.mark.masked_modules('cPickle')
def test_no_cpickle(self, mask_modules):
prev = sys.modules.pop('celery.utils.serialization', None)
try:
import pickle as orig_pickle
from celery.utils.serialization import pickle
assert pickle.dumps is orig_pickle.dumps
finally:
sys.modules['celery.utils.serialization'] = prev
| test_AAPickle |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/prefect_kubernetes/worker.py | {
"start": 6928,
"end": 22974
} | class ____(BaseJobConfiguration):
"""
Configuration class used by the Kubernetes worker.
An instance of this class is passed to the Kubernetes worker's `run` method
for each flow run. It contains all of the information necessary to execute
the flow run as a Kubernetes job.
Attributes:
name: The name to give to created Kubernetes job.
command: The command executed in created Kubernetes jobs to kick off
flow run execution.
env: The environment variables to set in created Kubernetes jobs.
labels: The labels to set on created Kubernetes jobs.
namespace: The Kubernetes namespace to create Kubernetes jobs in.
job_manifest: The Kubernetes job manifest to use to create Kubernetes jobs.
cluster_config: The Kubernetes cluster configuration to use for authentication
to a Kubernetes cluster.
job_watch_timeout_seconds: The number of seconds to wait for the job to
complete before timing out. If `None`, the worker will wait indefinitely.
pod_watch_timeout_seconds: The number of seconds to wait for the pod to
complete before timing out.
stream_output: Whether or not to stream the job's output.
"""
namespace: str = Field(default="default")
job_manifest: Dict[str, Any] = Field(
json_schema_extra=dict(template=_get_default_job_manifest_template())
)
cluster_config: Optional[KubernetesClusterConfig] = Field(default=None)
job_watch_timeout_seconds: Optional[int] = Field(default=None)
pod_watch_timeout_seconds: int = Field(default=60)
stream_output: bool = Field(default=True)
env: Union[Dict[str, Optional[str]], List[Dict[str, Any]]] = Field(
default_factory=dict
)
# internal-use only
_api_dns_name: Optional[str] = None # Replaces 'localhost' in API URL
@model_validator(mode="after")
def _validate_job_manifest(self) -> Self:
"""
Validates the job manifest by ensuring the presence of required fields
and checking for compatible values.
"""
job_manifest = self.job_manifest
# Ensure metadata is present
if "metadata" not in job_manifest:
job_manifest["metadata"] = {}
# Ensure labels is present in metadata
if "labels" not in job_manifest["metadata"]:
job_manifest["metadata"]["labels"] = {}
# Ensure namespace is present in metadata
if "namespace" not in job_manifest["metadata"]:
job_manifest["metadata"]["namespace"] = self.namespace
# Check if job includes all required components
patch = JsonPatch.from_diff(job_manifest, _get_base_job_manifest())
missing_paths = sorted([op["path"] for op in patch if op["op"] == "add"])
if missing_paths:
raise ValueError(
"Job is missing required attributes at the following paths: "
f"{', '.join(missing_paths)}"
)
# Check if job has compatible values
incompatible = sorted(
[
f"{op['path']} must have value {op['value']!r}"
for op in patch
if op["op"] == "replace"
]
)
if incompatible:
raise ValueError(
"Job has incompatible values for the following attributes: "
f"{', '.join(incompatible)}"
)
return self
@field_validator("env", mode="before")
@classmethod
def _coerce_env(cls, v):
if isinstance(v, list):
return v
return {k: str(v) if v is not None else None for k, v in v.items()}
@staticmethod
def _base_flow_run_labels(flow_run: "FlowRun") -> Dict[str, str]:
"""
Generate a dictionary of labels for a flow run job.
"""
return {
"prefect.io/flow-run-id": str(flow_run.id),
"prefect.io/flow-run-name": flow_run.name,
"prefect.io/version": _slugify_label_value(
prefect.__version__.split("+")[0]
),
}
def get_environment_variable_value(self, name: str) -> str | None:
"""
Returns the value of an environment variable from the job manifest.
"""
manifest_env: list[dict[str, Any]] = self.job_manifest["spec"]["template"][
"spec"
]["containers"][0].get("env", [])
return next(
(
env_entry.get("value")
for env_entry in manifest_env
if env_entry.get("name") == name
),
None,
)
def prepare_for_flow_run(
self,
flow_run: "FlowRun",
deployment: "DeploymentResponse | None" = None,
flow: "APIFlow | None" = None,
work_pool: "WorkPool | None" = None,
worker_name: str | None = None,
):
"""
Prepares the job configuration for a flow run.
Ensures that necessary values are present in the job manifest and that the
job manifest is valid.
Args:
flow_run: The flow run to prepare the job configuration for
deployment: The deployment associated with the flow run used for
preparation.
flow: The flow associated with the flow run used for preparation.
work_pool: The work pool associated with the flow run used for preparation.
worker_name: The name of the worker used for preparation.
"""
# Save special Kubernetes env vars (like those with valueFrom)
special_env_vars = []
if isinstance(self.env, list):
special_env_vars = [item for item in self.env if "valueFrom" in item]
original_env = {}
for item in self.env:
if "name" in item and "value" in item:
original_env[item["name"]] = item.get("value")
self.env = original_env
super().prepare_for_flow_run(flow_run, deployment, flow, work_pool, worker_name)
self._configure_eviction_handling()
self._update_prefect_api_url_if_local_server()
# Restore any special env vars with valueFrom before populating the manifest
if special_env_vars:
# Convert dict env back to list format
env_list = [{"name": k, "value": v} for k, v in self.env.items()]
# Add special env vars back in
env_list.extend(special_env_vars)
self.env = env_list
self._populate_env_in_manifest()
self._slugify_labels()
self._populate_image_if_not_present()
self._populate_command_if_not_present()
self._populate_generate_name_if_not_present()
self._propagate_labels_to_pod()
def _configure_eviction_handling(self):
"""
Configures eviction handling for the job pod. Needs to run before
If `backoffLimit` is set to 0 and `PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR` is
not set in env, we'll tell the Runner to reschedule its flow run when it receives
a SIGTERM.
If `backoffLimit` is set to a positive number, we'll ensure that the
reschedule SIGTERM handling is not set. Having both a `backoffLimit` and
reschedule handling set can cause duplicate flow run execution.
"""
# If backoffLimit is set to 0, we'll tell the Runner to reschedule
# its flow run when it receives a SIGTERM.
if self.job_manifest["spec"].get("backoffLimit") == 0:
if isinstance(self.env, dict):
if not self.env.get("PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR"):
self.env["PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR"] = "reschedule"
elif not any(
v.get("name") == "PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR"
for v in self.env
):
self.env.append(
{
"name": "PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR",
"value": "reschedule",
}
)
# Otherwise, we'll ensure that the reschedule SIGTERM handling is not set.
else:
if isinstance(self.env, dict):
self.env.pop("PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR", None)
elif any(
v.get("name") == "PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR"
for v in self.env
):
self.env = [
v
for v in self.env
if v.get("name") != "PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR"
]
def _populate_env_in_manifest(self):
"""
Populates environment variables in the job manifest.
When `env` is templated as a variable in the job manifest it comes in as a
dictionary. We need to convert it to a list of dictionaries to conform to the
Kubernetes job manifest schema.
This function also handles the case where the user has removed the `{{ env }}`
placeholder and hard coded a value for `env`. In this case, we need to prepend
our environment variables to the list to ensure Prefect setting propagation.
An example reason the a user would remove the `{{ env }}` placeholder to
hardcode Kubernetes secrets in the base job template.
"""
# Handle both dictionary and list formats for environment variables
if isinstance(self.env, dict):
transformed_env = [{"name": k, "value": v} for k, v in self.env.items()]
else:
# If env is already a list (k8s format), use it directly
transformed_env = self.env
template_env = self.job_manifest["spec"]["template"]["spec"]["containers"][
0
].get("env")
# If user has removed `{{ env }}` placeholder and hard coded a value for `env`,
# we need to prepend our environment variables to the list to ensure Prefect
# setting propagation.
if isinstance(template_env, list):
# Get the names of env vars we're about to add
transformed_env_names = {env["name"] for env in transformed_env}
# Filter out any env vars from template_env that are duplicates
# (these came from template rendering of work pool variables)
# Keep only user-hardcoded vars (not in transformed_env)
unique_template_env = [
env
for env in template_env
if env.get("name") not in transformed_env_names
]
self.job_manifest["spec"]["template"]["spec"]["containers"][0]["env"] = [
*transformed_env,
*unique_template_env,
]
# Current templating adds `env` as a dict when the kubernetes manifest requires
# a list of dicts. Might be able to improve this in the future with a better
# default `env` value and better typing.
else:
self.job_manifest["spec"]["template"]["spec"]["containers"][0]["env"] = (
transformed_env
)
def _update_prefect_api_url_if_local_server(self):
"""If the API URL has been set by the base environment rather than the by the
user, update the value to ensure connectivity when using a bridge network by
updating local connections to use the internal host
"""
if isinstance(self.env, dict):
if (api_url := self.env.get("PREFECT_API_URL")) and self._api_dns_name:
self.env["PREFECT_API_URL"] = api_url.replace(
"localhost", self._api_dns_name
).replace("127.0.0.1", self._api_dns_name)
else:
# Handle list format
for env_var in self.env:
if (
env_var.get("name") == "PREFECT_API_URL"
and self._api_dns_name
and (value := env_var.get("value"))
):
env_var["value"] = value.replace(
"localhost", self._api_dns_name
).replace("127.0.0.1", self._api_dns_name)
def _slugify_labels(self):
"""Slugifies the labels in the job manifest."""
all_labels = {**self.job_manifest["metadata"].get("labels", {}), **self.labels}
self.job_manifest["metadata"]["labels"] = {
_slugify_label_key(k): _slugify_label_value(v)
for k, v in all_labels.items()
}
def _populate_image_if_not_present(self):
"""Ensures that the image is present in the job manifest. Populates the image
with the default Prefect image if it is not present."""
try:
if (
"image"
not in self.job_manifest["spec"]["template"]["spec"]["containers"][0]
):
self.job_manifest["spec"]["template"]["spec"]["containers"][0][
"image"
] = get_prefect_image_name()
except KeyError:
raise ValueError(
"Unable to verify image due to invalid job manifest template."
)
def _populate_command_if_not_present(self):
"""
Ensures that the command is present in the job manifest. Populates the command
with the `prefect -m prefect.engine` if a command is not present.
"""
try:
command = self.job_manifest["spec"]["template"]["spec"]["containers"][
0
].get("args")
if command is None:
self.job_manifest["spec"]["template"]["spec"]["containers"][0][
"args"
] = shlex.split(self._base_flow_run_command())
elif isinstance(command, str):
self.job_manifest["spec"]["template"]["spec"]["containers"][0][
"args"
] = shlex.split(command)
elif not isinstance(command, list):
raise ValueError(
"Invalid job manifest template: 'command' must be a string or list."
)
except KeyError:
raise ValueError(
"Unable to verify command due to invalid job manifest template."
)
def _populate_generate_name_if_not_present(self):
"""Ensures that the generateName is present in the job manifest."""
manifest_generate_name = self.job_manifest["metadata"].get("generateName", "")
has_placeholder = len(find_placeholders(manifest_generate_name)) > 0
# if name wasn't present during template rendering, generateName will be
# just a hyphen
manifest_generate_name_templated_with_empty_string = (
manifest_generate_name == "-"
)
if (
not manifest_generate_name
or has_placeholder
or manifest_generate_name_templated_with_empty_string
or manifest_generate_name == "None-"
):
generate_name = None
if self.name:
generate_name = _slugify_name(self.name)
# _slugify_name will return None if the slugified name in an exception
if not generate_name:
generate_name = "prefect-job"
self.job_manifest["metadata"]["generateName"] = f"{generate_name}-"
def _propagate_labels_to_pod(self):
"""Propagates Prefect-specific labels to the pod in the job manifest."""
current_pod_metadata = self.job_manifest["spec"]["template"].get("metadata", {})
current_pod_labels = current_pod_metadata.get("labels", {})
all_labels = {**current_pod_labels, **self.labels}
current_pod_metadata["labels"] = {
_slugify_label_key(k): _slugify_label_value(v)
for k, v in all_labels.items()
}
self.job_manifest["spec"]["template"]["metadata"] = current_pod_metadata
| KubernetesWorkerJobConfiguration |
python | arrow-py__arrow | tests/test_arrow.py | {
"start": 53173,
"end": 55282
} | class ____:
def test_incorrect_input(self):
with pytest.raises(ValueError):
list(
arrow.Arrow.interval(
"month", datetime(2013, 1, 2), datetime(2013, 4, 15), 0
)
)
def test_correct(self):
result = list(
arrow.Arrow.interval(
"hour", datetime(2013, 5, 5, 12, 30), datetime(2013, 5, 5, 17, 15), 2
)
)
assert result == [
(
arrow.Arrow(2013, 5, 5, 12),
arrow.Arrow(2013, 5, 5, 13, 59, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 14),
arrow.Arrow(2013, 5, 5, 15, 59, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 16),
arrow.Arrow(2013, 5, 5, 17, 59, 59, 999999),
),
]
def test_bounds_param_is_passed(self):
result = list(
arrow.Arrow.interval(
"hour",
datetime(2013, 5, 5, 12, 30),
datetime(2013, 5, 5, 17, 15),
2,
bounds="[]",
)
)
assert result == [
(arrow.Arrow(2013, 5, 5, 12), arrow.Arrow(2013, 5, 5, 14)),
(arrow.Arrow(2013, 5, 5, 14), arrow.Arrow(2013, 5, 5, 16)),
(arrow.Arrow(2013, 5, 5, 16), arrow.Arrow(2013, 5, 5, 18)),
]
def test_exact(self):
result = list(
arrow.Arrow.interval(
"hour",
datetime(2013, 5, 5, 12, 30),
datetime(2013, 5, 5, 17, 15),
4,
exact=True,
)
)
expected = [
(
arrow.Arrow(2013, 5, 5, 12, 30),
arrow.Arrow(2013, 5, 5, 16, 29, 59, 999999),
),
(
arrow.Arrow(2013, 5, 5, 16, 30),
arrow.Arrow(2013, 5, 5, 17, 14, 59, 999999),
),
]
assert result == expected
@pytest.mark.usefixtures("time_2013_02_15")
| TestArrowInterval |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-oceanbase/llama_index/vector_stores/oceanbase/base.py | {
"start": 2199,
"end": 19401
} | class ____(BasePydanticVectorStore):
"""
OceanBase Vector Store.
You need to install `pyobvector` and run a standalone observer or OceanBase cluster.
See the following documentation for how to deploy OceanBase:
https://github.com/oceanbase/oceanbase-doc/blob/V4.3.1/en-US/400.deploy/500.deploy-oceanbase-database-community-edition/100.deployment-overview.md
IF USING L2/INNER_PRODUCT metric, IT IS HIGHLY SUGGESTED TO set `normalize = True`.
Args:
_client (ObVecClient): OceanBase vector store client.
Refer to `pyobvector` for more information.
dim (int): Dimension of embedding vector.
table_name (str): Which table name to use. Defaults to "llama_vector".
vidx_metric_type (str): Metric method of distance between vectors.
This parameter takes values in `l2` and `inner_product`. Defaults to `l2`.
vidx_algo_params (Optional[dict]): Which index params to use. Now OceanBase
supports HNSW only. Refer to `DEFAULT_OCEANBASE_HNSW_BUILD_PARAM`
for example.
drop_old (bool): Whether to drop the current table. Defaults
to False.
primary_field (str): Name of the primary key column. Defaults to "id".
doc_id_field (str): Name of the doc id column. Defaults to "doc_id".
vector_field (str): Name of the vector column. Defaults to "embedding".
text_field (str): Name of the text column. Defaults to "document".
metadata_field (Optional[str]): Name of the metadata column.
Defaults to "metadata". When `metadata_field` is specified,
the document's metadata will store as json.
vidx_name (str): Name of the vector index table.
partitions (ObPartition): Partition strategy of table. Refer to `pyobvector`'s
documentation for more examples.
extra_columns (Optional[List[Column]]): Extra sqlalchemy columns
to add to the table.
normalize (bool): normalize vector or not.
Examples:
`pip install llama-index-vector-stores-oceanbase`
```python
from llama_index.vector_stores.oceanbase import OceanBaseVectorStore
# Setup ObVecClient
from pyobvector import ObVecClient
client = ObVecClient(
uri=os.getenv("OB_URI", "127.0.0.1:2881"),
user=os.getenv("OB_USER", "root@test"),
password=os.getenv("OB_PWD", ""),
db_name=os.getenv("OB_DBNAME", "test"),
)
# Initialize OceanBaseVectorStore
oceanbase = OceanBaseVectorStore(
client=client,
dim=1024,
)
```
"""
stores_text: bool = True
_client: ObVecClient = PrivateAttr()
_dim: int = PrivateAttr()
_table_name: str = PrivateAttr()
_vidx_metric_type: str = PrivateAttr()
_vidx_algo_params: dict = PrivateAttr()
_primary_field: str = PrivateAttr()
_doc_id_field: str = PrivateAttr()
_vector_field: str = PrivateAttr()
_text_field: str = PrivateAttr()
_metadata_field: str = PrivateAttr()
_vidx_name: str = PrivateAttr()
_partitions: Optional[Any] = PrivateAttr()
_extra_columns: Optional[List[Column]] = PrivateAttr()
_hnsw_ef_search: int = PrivateAttr()
_normalize: bool = PrivateAttr()
def __init__(
self,
client: ObVecClient,
dim: int,
table_name: str = DEFAULT_OCEANBASE_VECTOR_TABLE_NAME,
vidx_metric_type: str = DEFAULT_OCEANBASE_VECTOR_METRIC_TYPE,
vidx_algo_params: Optional[dict] = None,
drop_old: bool = False,
*,
primary_field: str = DEFAULT_OCEANBASE_PFIELD,
doc_id_field: str = DEFAULT_OCEANBASE_DOCID_FIELD,
vector_field: str = DEFAULT_OCEANBASE_VEC_FIELD,
text_field: str = DEFAULT_OCEANBASE_DOC_FIELD,
metadata_field: str = DEFAULT_OCEANBASE_METADATA_FIELD,
vidx_name: str = DEFAULT_OCEANBASE_VEC_INDEX_NAME,
partitions: Optional[Any] = None,
extra_columns: Optional[List[Column]] = None,
normalize: bool = False,
**kwargs,
):
super().__init__()
try:
from pyobvector import ObVecClient
except ImportError:
raise ImportError(
"Could not import pyobvector package. "
"Please install it with `pip install pyobvector`."
)
if client is not None:
if not isinstance(client, ObVecClient):
raise ValueError("client must be of type pyobvector.ObVecClient")
else:
raise ValueError("client not specified")
self._dim = dim
self._client: ObVecClient = client
self._table_name = table_name
self._extra_columns = extra_columns
self._vidx_metric_type = vidx_metric_type.lower()
if self._vidx_metric_type not in ("l2", "inner_product"):
raise ValueError(
"`vidx_metric_type` should be set in `l2`/`inner_product`."
)
self._vidx_algo_params = vidx_algo_params or DEFAULT_OCEANBASE_HNSW_BUILD_PARAM
self._primary_field = primary_field
self._doc_id_field = doc_id_field
self._vector_field = vector_field
self._text_field = text_field
self._metadata_field = metadata_field
self._vidx_name = vidx_name
self._partition = partitions
self._hnsw_ef_search = -1
self._normalize = normalize
if drop_old:
self._client.drop_table_if_exist(table_name=self._table_name)
self._create_table_with_index()
def _enhance_filter_key(self, filter_key: str) -> str:
return f"{self._metadata_field}->'$.{filter_key}'"
def _to_oceanbase_filter(
self, metadata_filters: Optional[MetadataFilters] = None
) -> str:
filters = []
for filter in metadata_filters.filters:
if isinstance(filter, MetadataFilters):
filters.append(f"({self._to_oceanbase_filter(filter)})")
continue
filter_value = _parse_filter_value(filter.value)
if filter_value is None and filter.operator != FilterOperator.IS_EMPTY:
continue
if filter.operator == FilterOperator.EQ:
filters.append(f"{self._enhance_filter_key(filter.key)}={filter_value}")
elif filter.operator == FilterOperator.GT:
filters.append(f"{self._enhance_filter_key(filter.key)}>{filter_value}")
elif filter.operator == FilterOperator.LT:
filters.append(f"{self._enhance_filter_key(filter.key)}<{filter_value}")
elif filter.operator == FilterOperator.NE:
filters.append(
f"{self._enhance_filter_key(filter.key)}!={filter_value}"
)
elif filter.operator == FilterOperator.GTE:
filters.append(
f"{self._enhance_filter_key(filter.key)}>={filter_value}"
)
elif filter.operator == FilterOperator.LTE:
filters.append(
f"{self._enhance_filter_key(filter.key)}<={filter_value}"
)
elif filter.operator == FilterOperator.IN:
filters.append(
f"{self._enhance_filter_key(filter.key)} in {filter_value}"
)
elif filter.operator == FilterOperator.NIN:
filters.append(
f"{self._enhance_filter_key(filter.key)} not in {filter_value}"
)
elif filter.operator == FilterOperator.TEXT_MATCH:
filters.append(
f"{self._enhance_filter_key(filter.key)} like {_parse_filter_value(filter.value, True)}"
)
elif filter.operator == FilterOperator.IS_EMPTY:
filters.append(f"{self._enhance_filter_key(filter.key)} IS NULL")
else:
raise ValueError(
f'Operator {filter.operator} ("{filter.operator.value}") is not supported by OceanBase.'
)
return f" {metadata_filters.condition.value} ".join(filters)
def _parse_metric_type_str_to_dist_func(self) -> Any:
if self._vidx_metric_type == "l2":
return func.l2_distance
if self._vidx_metric_type == "cosine":
return func.cosine_distance
if self._vidx_metric_type == "inner_product":
return func.negative_inner_product
raise ValueError(f"Invalid vector index metric type: {self._vidx_metric_type}")
def _load_table(self) -> None:
table = Table(
self._table_name,
self._client.metadata_obj,
autoload_with=self._client.engine,
)
column_names = [column.name for column in table.columns]
optional_len = len(self._extra_columns or [])
assert len(column_names) == (5 + optional_len)
logging.info(f"load exist table with {column_names} columns")
self._primary_field = column_names[0]
self._doc_id_field = column_names[1]
self._vector_field = column_names[2]
self._text_field = column_names[3]
self._metadata_field = column_names[4]
def _create_table_with_index(self):
if self._client.check_table_exists(self._table_name):
self._load_table()
return
cols = [
Column(
self._primary_field, String(4096), primary_key=True, autoincrement=False
),
Column(self._doc_id_field, String(4096)),
Column(self._vector_field, VECTOR(self._dim)),
Column(self._text_field, LONGTEXT),
Column(self._metadata_field, JSON),
]
if self._extra_columns is not None:
cols.extend(self._extra_columns)
vidx_params = self._client.prepare_index_params()
vidx_params.add_index(
field_name=self._vector_field,
index_type=OCEANBASE_SUPPORTED_VECTOR_INDEX_TYPE,
index_name=self._vidx_name,
metric_type=self._vidx_metric_type,
params=self._vidx_algo_params,
)
self._client.create_table_with_index_params(
table_name=self._table_name,
columns=cols,
indexes=None,
vidxs=vidx_params,
partitions=self._partition,
)
@classmethod
def class_name(cls) -> str:
return "OceanBaseVectorStore"
@property
def client(self) -> Any:
"""Get client."""
return self._client
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""
Get nodes from OceanBase.
Args:
node_ids (Optional[List[str]], optional): IDs of nodes to delete.
Defaults to None.
filters (Optional[MetadataFilters], optional): Metadata filters.
Defaults to None.
Returns:
List[BaseNode]: List of text nodes.
"""
if filters is not None:
filter = self._to_oceanbase_filter(filters)
else:
filter = None
res = self._client.get(
table_name=self._table_name,
ids=node_ids,
where_clause=[text(filter)] if filter is not None else None,
output_column_name=[
self._text_field,
self._metadata_field,
],
)
return [
metadata_dict_to_node(
metadata=(json.loads(r[1]) if not isinstance(r[1], dict) else r[1]),
text=r[0],
)
for r in res.fetchall()
]
def add(
self,
nodes: List[BaseNode],
batch_size: Optional[int] = None,
extras: Optional[List[dict]] = None,
) -> List[str]:
"""
Add nodes into OceanBase.
Args:
nodes (List[BaseNode]): List of nodes with embeddings
to insert.
batch_size (Optional[int]): Insert nodes in batch.
extras (Optional[List[dict]]): If `extra_columns` is set
when initializing `OceanBaseVectorStore`, you can add
nodes with extra infos.
Returns:
List[str]: List of ids inserted.
"""
batch_size = batch_size or DEFAULT_OCEANBASE_BATCH_SIZE
extra_data = extras or [{} for _ in nodes]
if len(nodes) != len(extra_data):
raise ValueError("nodes size & extras size mismatch")
data = [
{
self._primary_field: node.id_,
self._doc_id_field: node.ref_doc_id or None,
self._vector_field: (
node.get_embedding()
if not self._normalize
else _normalize(node.get_embedding())
),
self._text_field: node.get_content(metadata_mode=MetadataMode.NONE),
self._metadata_field: node_to_metadata_dict(node, remove_text=True),
**extra,
}
for node, extra in zip(nodes, extra_data)
]
for data_batch in iter_batch(data, batch_size):
self._client.insert(self._table_name, data_batch)
return [node.id_ for node in nodes]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
self._client.delete(
table_name=self._table_name,
where_clause=[text(f"{self._doc_id_field}='{ref_doc_id}'")],
)
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""
Deletes nodes.
Args:
node_ids (Optional[List[str]], optional): IDs of nodes to delete.
Defaults to None.
filters (Optional[MetadataFilters], optional): Metadata filters.
Defaults to None.
"""
if filters is not None:
filter = self._to_oceanbase_filter(filters)
else:
filter = None
self._client.delete(
table_name=self._table_name,
ids=node_ids,
where_clause=[text(filter)] if filter is not None else None,
)
def clear(self) -> None:
"""Clears table."""
self._client.perform_raw_text_sql(f"TRUNCATE TABLE {self._table_name}")
def _parse_distance_to_similarities(self, distance: float) -> float:
if self._vidx_metric_type == "l2":
return _euclidean_similarity(distance)
elif self._vidx_metric_type == "inner_product":
return _neg_inner_product_similarity(distance)
raise ValueError(f"Metric Type {self._vidx_metric_type} is not supported")
def query(
self, query: VectorStoreQuery, param: Optional[dict] = None, **kwargs: Any
) -> VectorStoreQueryResult:
"""
Perform top-k ANN search.
Args:
query (VectorStoreQuery): query infos
param (Optional[dict]): The search params for the index type.
Defaults to None. Refer to `DEFAULT_OCEANBASE_HNSW_SEARCH_PARAM`
for example.
"""
search_param = (
param if param is not None else DEFAULT_OCEANBASE_HNSW_SEARCH_PARAM
)
ef_search = search_param.get(
"efSearch", DEFAULT_OCEANBASE_HNSW_SEARCH_PARAM["efSearch"]
)
if ef_search != self._hnsw_ef_search:
self._client.set_ob_hnsw_ef_search(ef_search)
self._hnsw_ef_search = ef_search
if query.filters:
qfilters = self._to_oceanbase_filter(query.filters)
else:
qfilters = None
res = self._client.ann_search(
table_name=self._table_name,
vec_data=(
query.query_embedding
if not self._normalize
else _normalize(query.query_embedding)
),
vec_column_name=self._vector_field,
distance_func=self._parse_metric_type_str_to_dist_func(),
with_dist=True,
output_column_names=[
self._primary_field,
self._text_field,
self._metadata_field,
],
topk=query.similarity_top_k,
where_clause=([text(qfilters)] if qfilters else None),
)
records = []
for r in res.fetchall():
records.append(r)
return VectorStoreQueryResult(
nodes=[
metadata_dict_to_node(
metadata=json.loads(r[2]),
text=r[1],
)
for r in records
],
similarities=[self._parse_distance_to_similarities(r[3]) for r in records],
ids=[r[0] for r in records],
)
| OceanBaseVectorStore |
python | Netflix__metaflow | metaflow/exception.py | {
"start": 2614,
"end": 2693
} | class ____(MetaflowException):
headline = "Object not found"
| MetaflowNotFound |
python | pypa__warehouse | tests/unit/admin/views/test_malware_reports.py | {
"start": 1126,
"end": 6647
} | class ____:
def test_malware_reports_project_list(self, db_request):
project = ProjectFactory.create()
assert views.malware_reports_project_list(project, db_request) == {
"project": project,
"malware_reports": [],
}
def test_malware_reports_project_list_with_project(self, db_request):
project = ProjectFactory.create()
report = ProjectObservationFactory.create(kind="is_malware", related=project)
assert views.malware_reports_project_list(project, db_request) == {
"project": project,
"malware_reports": [report],
}
def test_malware_reports_project_list_with_project_and_actions(self, db_request):
project = ProjectFactory.create()
ProjectObservationFactory.create(
kind="is_malware", related=project, actions={"foo": "bar"}
)
assert views.malware_reports_project_list(project, db_request) == {
"project": project,
"malware_reports": [],
}
def test_malware_reports_project_verdict_not_malware(self, db_request):
project = ProjectFactory.create()
report = ProjectObservationFactory.create(kind="is_malware", related=project)
ProjectObservationFactory.create(kind="something_else", related=project)
db_request.POST["confirm_project_name"] = project.name
db_request.POST["reason"] = "This is a test"
db_request.route_path = lambda a: "/admin/malware_reports/"
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = UserFactory.create()
result = views.malware_reports_project_verdict_not_malware(project, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/admin/malware_reports/"
assert db_request.session.flash.calls == [
pretend.call(
f"Project {project.name} marked as not malware.\n"
"Please update related Help Scout conversations.",
queue="success",
)
]
assert len(report.actions) == 1
action_record = list(report.actions.values())[0]
assert action_record["action"] == "verdict_not_malware"
assert action_record["actor"] == db_request.user.username
assert isinstance(datetime.fromisoformat(action_record["created_at"]), datetime)
assert action_record["reason"] == "This is a test"
def test_malware_reports_project_verdict_quarantine(self, db_request):
owner_user = UserFactory.create(is_frozen=False)
project = ProjectFactory.create()
RoleFactory(user=owner_user, project=project, role_name="Owner")
report = ProjectObservationFactory.create(kind="is_malware", related=project)
db_request.route_path = lambda a: "/admin/malware_reports/"
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = UserFactory.create()
result = views.malware_reports_project_verdict_quarantine(project, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/admin/malware_reports/"
assert db_request.session.flash.calls == [
pretend.call(
f"Project {project.name} quarantined.\n"
"Please update related Help Scout conversations.",
queue="success",
)
]
assert project.lifecycle_status == LifecycleStatus.QuarantineEnter
assert project.lifecycle_status_changed is not None
assert (
project.lifecycle_status_note
== f"Quarantined by {db_request.user.username}."
)
assert len(report.actions) == 0
assert owner_user.is_frozen is True
def test_malware_reports_project_verdict_remove_malware(self, db_request):
owner_user = UserFactory.create(is_frozen=False)
project = ProjectFactory.create()
RoleFactory(user=owner_user, project=project, role_name="Owner")
report = ProjectObservationFactory.create(
kind="is_malware",
related=project,
additional={
"helpscout_conversation_url": "https://example.com/conversation/123"
},
)
db_request.POST["confirm_project_name"] = project.name
db_request.route_path = lambda a: "/admin/malware_reports/"
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.user = UserFactory.create()
result = views.malware_reports_project_verdict_remove_malware(
project, db_request
)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/admin/malware_reports/"
assert db_request.session.flash.calls == [
pretend.call(f"Deleted the project '{project.name}'", queue="success"),
pretend.call(
f"Malware Project {project.name} removed.\n"
"Please update related Help Scout conversations.",
queue="success",
),
]
assert len(report.actions) == 1
assert db_request.db.get(Project, project.id) is None
assert owner_user.is_frozen is True
assert owner_user.observations[0].kind == "account_abuse"
| TestMalwareReportsProjectList |
python | django__django | tests/db_functions/text/test_length.py | {
"start": 195,
"end": 1694
} | class ____(TestCase):
def test_basic(self):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.annotate(
name_length=Length("name"),
alias_length=Length("alias"),
)
self.assertQuerySetEqual(
authors.order_by("name"),
[(10, 6), (6, None)],
lambda a: (a.name_length, a.alias_length),
)
self.assertEqual(authors.filter(alias_length__lte=Length("name")).count(), 1)
def test_ordering(self):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="John Smith", alias="smithj1")
Author.objects.create(name="Rhonda", alias="ronny")
authors = Author.objects.order_by(Length("name"), Length("alias"))
self.assertQuerySetEqual(
authors,
[
("Rhonda", "ronny"),
("John Smith", "smithj"),
("John Smith", "smithj1"),
],
lambda a: (a.name, a.alias),
)
def test_transform(self):
with register_lookup(CharField, Length):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.filter(name__length__gt=7)
self.assertQuerySetEqual(
authors.order_by("name"), ["John Smith"], lambda a: a.name
)
| LengthTests |
python | doocs__leetcode | solution/2500-2599/2533.Number of Good Binary Strings/Solution.py | {
"start": 0,
"end": 440
} | class ____:
def goodBinaryStrings(
self, minLength: int, maxLength: int, oneGroup: int, zeroGroup: int
) -> int:
mod = 10**9 + 7
f = [1] + [0] * maxLength
for i in range(1, len(f)):
if i - oneGroup >= 0:
f[i] += f[i - oneGroup]
if i - zeroGroup >= 0:
f[i] += f[i - zeroGroup]
f[i] %= mod
return sum(f[minLength:]) % mod
| Solution |
python | keras-team__keras | keras/src/layers/preprocessing/image_preprocessing/random_elastic_transform.py | {
"start": 304,
"end": 10208
} | class ____(BaseImagePreprocessingLayer):
"""A preprocessing layer that applies random elastic transformations.
This layer distorts input images by applying elastic deformations,
simulating a physically realistic transformation. The magnitude of the
distortion is controlled by the `scale` parameter, while the `factor`
determines the probability of applying the transformation.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
factor: A single float or a tuple of two floats.
`factor` controls the probability of applying the transformation.
- `factor=0.0` ensures no erasing is applied.
- `factor=1.0` means erasing is always applied.
- If a tuple `(min, max)` is provided, a probability value
is sampled between `min` and `max` for each image.
- If a single float is provided, a probability is sampled
between `0.0` and the given float.
Default is 1.0.
scale: A float or a tuple of two floats defining the magnitude of
the distortion applied.
- If a tuple `(min, max)` is provided, a random scale value is
sampled within this range.
- If a single float is provided, a random scale value is sampled
between `0.0` and the given float.
Default is 1.0.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
fill_mode: Points outside the boundaries of the input are filled
according to the given mode. Available methods are `"constant"`,
`"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"constant"`.
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The input is extended by reflecting about the edge of the last
pixel.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The input is extended by filling all values beyond
the edge with the same constant value k specified by
`fill_value`.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The input is extended by wrapping around to the opposite edge.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The input is extended by the nearest pixel.
Note that when using torch backend, `"reflect"` is redirected to
`"mirror"` `(c d c b | a b c d | c b a b)` because torch does not
support `"reflect"`.
Note that torch backend does not support `"wrap"`.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
value_range: the range of values the incoming images will have.
Represented as a two-number tuple written `[low, high]`. This is
typically either `[0, 1]` or `[0, 255]` depending on how your
preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
"""
_USE_BASE_FACTOR = False
_FACTOR_BOUNDS = (0, 1)
_SUPPORTED_INTERPOLATION = ("nearest", "bilinear")
_SUPPORTED_FILL_MODES = {
"constant",
"nearest",
"wrap",
"mirror",
"reflect",
}
def __init__(
self,
factor=1.0,
scale=1.0,
interpolation="bilinear",
fill_mode="reflect",
fill_value=0.0,
value_range=(0, 255),
seed=None,
data_format=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self._set_factor(factor)
self.scale = self._set_factor_by_name(scale, "scale")
self.interpolation = interpolation
self.fill_mode = fill_mode
self.fill_value = fill_value
self.value_range = value_range
self.seed = seed
self.generator = SeedGenerator(seed)
if interpolation not in self._SUPPORTED_INTERPOLATION:
raise NotImplementedError(
f"Unknown `interpolation` {interpolation}. Expected of one "
f"{self._SUPPORTED_INTERPOLATION}."
)
if fill_mode not in self._SUPPORTED_FILL_MODES:
raise NotImplementedError(
f"Unknown `fill_mode` {fill_mode}. Expected of one "
f"{self._SUPPORTED_FILL_MODES}."
)
if self.data_format == "channels_first":
self.height_axis = -2
self.width_axis = -1
self.channel_axis = -3
else:
self.height_axis = -3
self.width_axis = -2
self.channel_axis = -1
def _set_factor_by_name(self, factor, name):
error_msg = (
f"The `{name}` argument should be a number "
"(or a list of two numbers) "
"in the range "
f"[{self._FACTOR_BOUNDS[0]}, {self._FACTOR_BOUNDS[1]}]. "
f"Received: factor={factor}"
)
if isinstance(factor, (tuple, list)):
if len(factor) != 2:
raise ValueError(error_msg)
if (
factor[0] > self._FACTOR_BOUNDS[1]
or factor[1] < self._FACTOR_BOUNDS[0]
):
raise ValueError(error_msg)
lower, upper = sorted(factor)
elif isinstance(factor, (int, float)):
if (
factor < self._FACTOR_BOUNDS[0]
or factor > self._FACTOR_BOUNDS[1]
):
raise ValueError(error_msg)
factor = abs(factor)
lower, upper = [max(-factor, self._FACTOR_BOUNDS[0]), factor]
else:
raise ValueError(error_msg)
return lower, upper
def get_random_transformation(self, data, training=True, seed=None):
if not training:
return None
if (self.scale[1] == 0) or (self.factor[1] == 0):
return None
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
unbatched = len(images_shape) == 3
if unbatched:
batch_size = 1
else:
batch_size = images_shape[0]
seed = seed or self._get_seed_generator(self.backend._backend)
transformation_probability = self.backend.random.uniform(
shape=(batch_size,),
minval=self.factor[0],
maxval=self.factor[1],
seed=seed,
)
random_threshold = self.backend.random.uniform(
shape=(batch_size,),
minval=0.0,
maxval=1.0,
seed=seed,
)
apply_transform = random_threshold < transformation_probability
distortion_factor = self.backend.random.uniform(
shape=(),
minval=self.scale[0],
maxval=self.scale[1],
seed=seed,
dtype=self.compute_dtype,
)
return {
"apply_transform": apply_transform,
"distortion_factor": distortion_factor,
"seed": seed,
}
def get_elastic_transform_params(self, height, width, factor):
alpha_scale = 0.1 * factor
sigma_scale = 0.05 * factor
alpha = max(height, width) * alpha_scale
sigma = min(height, width) * sigma_scale
return alpha, sigma
def transform_images(self, images, transformation, training=True):
images = self.backend.cast(images, self.compute_dtype)
if training and transformation is not None:
apply_transform = transformation["apply_transform"]
distortion_factor = transformation["distortion_factor"]
seed = transformation["seed"]
height, width = (
images.shape[self.height_axis],
images.shape[self.width_axis],
)
alpha, sigma = self.get_elastic_transform_params(
height, width, distortion_factor
)
transformed_images = self.backend.image.elastic_transform(
images,
alpha=alpha,
sigma=sigma,
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
seed=seed,
data_format=self.data_format,
)
apply_transform = (
apply_transform[:, None, None]
if len(images.shape) == 3
else apply_transform[:, None, None, None]
)
images = self.backend.numpy.where(
apply_transform,
transformed_images,
images,
)
images = self.backend.numpy.clip(
images, self.value_range[0], self.value_range[1]
)
images = self.backend.cast(images, self.compute_dtype)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return self.transform_images(
segmentation_masks, transformation, training=training
)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"factor": self.factor,
"scale": self.scale,
"interpolation": self.interpolation,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"value_range": self.value_range,
"seed": self.seed,
}
return {**base_config, **config}
| RandomElasticTransform |
python | davidhalter__jedi | jedi/inference/compiled/access.py | {
"start": 4059,
"end": 4652
} | class ____:
def __init__(self, accesses):
self.accesses = accesses
def create_access_path(inference_state, obj) -> AccessPath:
access = create_access(inference_state, obj)
return AccessPath(access.get_access_path_tuples())
def get_api_type(obj):
if inspect.isclass(obj):
return 'class'
elif inspect.ismodule(obj):
return 'module'
elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \
or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj):
return 'function'
# Everything else...
return 'instance'
| AccessPath |
python | django__django | tests/field_defaults/models.py | {
"start": 738,
"end": 1317
} | class ____(models.Model):
"""
Values or expressions can be passed as the db_default parameter to a field.
When the object is created without an explicit value passed in, the
database will insert the default value automatically.
"""
headline = models.CharField(max_length=100, db_default="Default headline")
pub_date = models.DateTimeField(db_default=Now())
cost = models.DecimalField(
max_digits=3, decimal_places=2, db_default=Decimal("3.33")
)
class Meta:
required_db_features = {"supports_expression_defaults"}
| DBArticle |
python | huggingface__transformers | src/transformers/models/instructblipvideo/video_processing_instructblipvideo.py | {
"start": 1082,
"end": 3765
} | class ____(BaseVideoProcessor):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 384, "width": 384}
default_to_square = True
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
do_sample_frames = False # Set to False for BC, recommended to set `True` in new models
model_input_names = ["pixel_values"]
def _preprocess(
self,
videos: list["torch.Tensor"],
do_convert_rgb: bool,
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchFeature:
# Group videos by size for batched resizing
grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
resized_videos_grouped = {}
for shape, stacked_videos in grouped_videos.items():
if do_convert_rgb:
stacked_videos = self.convert_to_rgb(stacked_videos)
if do_resize:
stacked_videos = self.resize(stacked_videos, size=size, interpolation=interpolation)
resized_videos_grouped[shape] = stacked_videos
resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)
# Group videos by size for further processing
# Needed in case do_resize is False, or resize returns videos with different sizes
grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
processed_videos_grouped = {}
for shape, stacked_videos in grouped_videos.items():
if do_center_crop:
stacked_videos = self.center_crop(stacked_videos, crop_size)
# Fused rescale and normalize
stacked_videos = self.rescale_and_normalize(
stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_videos_grouped[shape] = stacked_videos
processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
processed_videos = torch.stack(processed_videos, dim=0) if return_tensors else processed_videos
return BatchFeature(data={"pixel_values": processed_videos}, tensor_type=return_tensors)
__all__ = ["InstructBlipVideoVideoProcessor"]
| InstructBlipVideoVideoProcessor |
python | PrefectHQ__prefect | src/prefect/events/clients.py | {
"start": 24221,
"end": 25132
} | class ____(PrefectCloudEventSubscriber):
def __init__(
self,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
filter: Optional["EventFilter"] = None,
reconnection_attempts: int = 10,
):
"""
Args:
api_url: The base URL for a Prefect Cloud workspace
api_key: The API of an actor with the manage_events scope
reconnection_attempts: When the client is disconnected, how many times
the client should attempt to reconnect
"""
api_url, api_key = _get_api_url_and_key(api_url, api_key)
account_api_url, _, _ = api_url.partition("/workspaces/")
super().__init__(
api_url=account_api_url,
filter=filter,
reconnection_attempts=reconnection_attempts,
)
self._api_key = api_key
| PrefectCloudAccountEventSubscriber |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 9963,
"end": 10035
} | class ____(sqltypes.DateTime):
render_bind_cast = True
| AsyncpgDateTime |
python | huggingface__transformers | src/transformers/models/sam3/configuration_sam3.py | {
"start": 7796,
"end": 9934
} | class ____(PreTrainedConfig):
r"""
Configuration class for SAM3 Geometry Encoder.
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the encoder layers.
num_layers (`int`, *optional*, defaults to 3):
Number of transformer encoder layers for processing geometry prompts.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads in the geometry encoder.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the feedforward layers.
dropout (`float`, *optional*, defaults to 0.1):
Dropout probability.
hidden_act (`str`, *optional*, defaults to `"relu"`):
Activation function in FFN.
hidden_dropout (`float`, *optional*, defaults to 0.0):
Dropout probability for hidden states.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
Epsilon for layer normalization.
roi_size (`int`, *optional*, defaults to 7):
ROI size for box pooling operations.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing weight matrices.
"""
model_type = "sam3_geometry_encoder"
def __init__(
self,
hidden_size=256,
num_layers=3,
num_attention_heads=8,
intermediate_size=2048,
dropout=0.1,
hidden_act="relu",
hidden_dropout=0.0,
layer_norm_eps=1e-6,
roi_size=7,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.hidden_act = hidden_act
self.hidden_dropout = hidden_dropout
self.layer_norm_eps = layer_norm_eps
self.roi_size = roi_size
self.initializer_range = initializer_range
| Sam3GeometryEncoderConfig |
python | spack__spack | lib/spack/spack/modules/lmod.py | {
"start": 3226,
"end": 9507
} | class ____(BaseConfiguration):
"""Configuration class for lmod module files."""
default_projections = {"all": "{name}/{version}"}
compiler: Optional[spack.spec.Spec]
def __init__(self, spec: spack.spec.Spec, module_set_name: str, explicit: bool) -> None:
super().__init__(spec, module_set_name, explicit)
candidates = collections.defaultdict(list)
for node in spec.traverse(deptype=("link", "run")):
candidates["c"].extend(node.dependencies(virtuals=("c",)))
candidates["cxx"].extend(node.dependencies(virtuals=("c",)))
if candidates["c"]:
self.compiler = candidates["c"][0]
if len(set(candidates["c"])) > 1:
warnings.warn(
f"{spec.short_spec} uses more than one compiler, and might not fit the "
f"LMod hierarchy. Using {self.compiler.short_spec} as the LMod compiler."
)
elif not candidates["c"]:
self.compiler = None
@property
def core_compilers(self) -> List[spack.spec.Spec]:
"""Returns the list of "Core" compilers
Raises:
CoreCompilersNotFoundError: if the key was not specified in the configuration file or
the sequence is empty
"""
compilers = []
for c in configuration(self.name).get("core_compilers", []):
compilers.extend(spack.spec.Spec(f"%{c}").dependencies())
if not compilers:
compilers = guess_core_compilers(self.name, store=True)
if not compilers:
msg = 'the key "core_compilers" must be set in modules.yaml'
raise CoreCompilersNotFoundError(msg)
return compilers
@property
def core_specs(self):
"""Returns the list of "Core" specs"""
return configuration(self.name).get("core_specs", [])
@property
def filter_hierarchy_specs(self):
"""Returns the dict of specs with modified hierarchies"""
return configuration(self.name).get("filter_hierarchy_specs", {})
@property
@lang.memoized
def hierarchy_tokens(self):
"""Returns the list of tokens that are part of the modulefile
hierarchy. ``compiler`` is always present.
"""
tokens = configuration(self.name).get("hierarchy", [])
# Check if all the tokens in the hierarchy are virtual specs.
# If not warn the user and raise an error.
not_virtual = [t for t in tokens if t != "compiler" and not spack.repo.PATH.is_virtual(t)]
if not_virtual:
msg = "Non-virtual specs in 'hierarchy' list for lmod: {0}\n"
msg += "Please check the 'modules.yaml' configuration files"
msg = msg.format(", ".join(not_virtual))
raise NonVirtualInHierarchyError(msg)
# Append 'compiler' which is always implied
tokens.append("compiler")
# Deduplicate tokens in case duplicates have been coded
tokens = list(lang.dedupe(tokens))
return tokens
@property
@lang.memoized
def requires(self):
"""Returns a dictionary mapping all the requirements of this spec to the actual provider.
The ``compiler`` key is always present among the requirements.
"""
# If it's a core_spec, lie and say it requires a core compiler
if (
any(self.spec.satisfies(core_spec) for core_spec in self.core_specs)
or self.compiler is None
):
return {"compiler": self.core_compilers[0]}
hierarchy_filter_list = []
for spec, filter_list in self.filter_hierarchy_specs.items():
if self.spec.satisfies(spec):
hierarchy_filter_list = filter_list
break
# Keep track of the requirements that this package has in terms
# of virtual packages that participate in the hierarchical structure
requirements = {"compiler": self.compiler}
# For each virtual dependency in the hierarchy
for x in self.hierarchy_tokens:
# Skip anything filtered for this spec
if x in hierarchy_filter_list:
continue
# If I depend on it
if x in self.spec and not self.spec.package.provides(x):
requirements[x] = self.spec[x] # record the actual provider
return requirements
@property
def provides(self):
"""Returns a dictionary mapping all the services provided by this
spec to the spec itself.
"""
provides = {}
# Treat the 'compiler' case in a special way, as compilers are not
# virtual dependencies in spack
# If it is in the list of supported compilers family -> compiler
if self.spec.name in spack.compilers.config.supported_compilers():
provides["compiler"] = spack.spec.Spec(self.spec.format("{name}{@versions}"))
elif self.spec.name in BUILTIN_TO_LEGACY_COMPILER:
# If it is the package for a supported compiler, but of a different name
cname = BUILTIN_TO_LEGACY_COMPILER[self.spec.name]
provides["compiler"] = spack.spec.Spec(cname, self.spec.versions)
# All the other tokens in the hierarchy must be virtual dependencies
for x in self.hierarchy_tokens:
if self.spec.package.provides(x):
provides[x] = self.spec
return provides
@property
def available(self):
"""Returns a dictionary of the services that are currently
available.
"""
available = {}
# What is available is what I require plus what I provide.
# 'compiler' is the only key that may be overridden.
available.update(self.requires)
available.update(self.provides)
return available
@property
@lang.memoized
def missing(self):
"""Returns the list of tokens that are not available."""
return [x for x in self.hierarchy_tokens if x not in self.available]
@property
def hidden(self):
# Never hide a module that opens a hierarchy
if any(self.spec.package.provides(x) for x in self.hierarchy_tokens):
return False
return super().hidden
| LmodConfiguration |
python | bokeh__bokeh | src/bokeh/sphinxext/_internal/bokeh_enum.py | {
"start": 2140,
"end": 3904
} | class ____(BokehDirective):
has_content = True
required_arguments = 1
option_spec = {
"module": unchanged,
"noindex": lambda x: True, # directives.flag weirdly returns None
}
def run(self):
enum_name = self.arguments[0]
module_name = self.options["module"]
try:
module = importlib.import_module(module_name)
except ImportError:
raise SphinxError(f"Could not generate reference docs for {enum_name!r}: could not import module {module_name}")
enum = getattr(module, enum_name, None)
fullrepr = repr(enum)
if len(fullrepr) > 180:
shortrepr = f"{fullrepr[:40]} .... {fullrepr[-40:]}"
fullrepr = _wrapper.wrap(fullrepr)
else:
shortrepr = fullrepr
fullrepr = None
rst_text = ENUM_DETAIL.render(
name=enum_name,
module=self.options["module"],
noindex=self.options.get("noindex", False),
content=self.content,
shortrepr=shortrepr,
fullrepr=fullrepr,
)
return self.parse(rst_text, f"<bokeh-enum: {enum_name}>")
def setup(app):
""" Required Sphinx extension setup function. """
app.add_directive_to_domain("py", "bokeh-enum", BokehEnumDirective)
return PARALLEL_SAFE
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
_wrapper = textwrap.TextWrapper(subsequent_indent=" ")
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| BokehEnumDirective |
python | bokeh__bokeh | src/bokeh/models/annotations/geometry.py | {
"start": 16025,
"end": 17354
} | class ____(Annotation):
""" Render a horizontal or vertical line span.
See :ref:`ug_basic_annotations_spans` for information on plotting spans.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
location = Nullable(CoordinateLike, help="""
The location of the span, along ``dimension``.
""")
location_units = Enum(CoordinateUnits, default='data', help="""
The unit type for the location attribute. Interpreted as "data space"
units by default.
""")
dimension = Enum(Dimension, default='width', help="""
The direction of the span can be specified by setting this property
to "height" (``y`` direction) or "width" (``x`` direction).
""")
editable = Bool(default=False, help="""
Allows to interactively modify the geometry of this span.
.. note::
This property is experimental and may change at any point.
""")
line_props = Include(ScalarLineProps, help="""
The {prop} values for the span.
""")
hover_line_props = Include(ScalarLineProps, prefix="hover", help="""
The {prop} values for the span when hovering over.
""")
hover_line_color = Override(default=None)
hover_line_alpha = Override(default=0.3)
| Span |
python | getsentry__sentry | tests/sentry/workflow_engine/migrations/test_0085_crons_link_detectors_to_all_workflows.py | {
"start": 313,
"end": 8427
} | class ____(TestMigrations):
migrate_from = "0084_crons_dedupe_workflows"
migrate_to = "0085_crons_link_detectors_to_all_workflows"
app = "workflow_engine"
def setup_initial_state(self) -> None:
# Create organizations and projects
self.org1 = self.create_organization(name="org1")
self.org2 = self.create_organization(name="org2")
self.project1 = self.create_project(organization=self.org1)
self.project1b = self.create_project(organization=self.org1) # Another project in same org
self.project2 = self.create_project(organization=self.org2)
# Create workflows for project1
self.project1_rule1 = self.create_project_rule(project=self.project1)
self.project1_workflow1 = IssueAlertMigrator(self.project1_rule1).run()
self.project1_rule2 = self.create_project_rule(project=self.project1)
self.project1_workflow2 = IssueAlertMigrator(self.project1_rule2).run()
# Create a workflow for project1b (same org, different project)
self.project1b_rule1 = self.create_project_rule(project=self.project1b)
self.project1b_workflow1 = IssueAlertMigrator(self.project1b_rule1).run()
# Create a workflow in org2/project2
self.project2_rule1 = self.create_project_rule(project=self.project2)
self.project2_workflow1 = IssueAlertMigrator(self.project2_rule1).run()
# Create cron detectors (type="monitor_check_in_failure") for project1
self.cron_detector1 = self.create_detector(
project=self.project1, name="cron-detector-1", type="monitor_check_in_failure"
)
self.cron_detector2 = self.create_detector(
project=self.project1, name="cron-detector-2", type="monitor_check_in_failure"
)
# Create a cron detector for project1b (different project, same org)
self.cron_detector3 = self.create_detector(
project=self.project1b, name="cron-detector-3", type="monitor_check_in_failure"
)
# Create a cron detector in org2/project2
self.cron_detector4 = self.create_detector(
project=self.project2, name="cron-detector-4", type="monitor_check_in_failure"
)
# Create non-cron detectors (should not be affected)
self.regular_detector1 = self.create_detector(
project=self.project1, name="regular-detector-1", type="error" # Not a cron detector
)
self.regular_detector2 = self.create_detector(
project=self.project2, name="regular-detector-2", type="error" # Not a cron detector
)
# Create some existing DetectorWorkflow entries to ensure we don't duplicate
# Link cron_detector1 to project1_workflow1 (this should not be duplicated)
DetectorWorkflow.objects.create(
detector=self.cron_detector1, workflow=self.project1_workflow1
)
# Link regular_detector1 to project1_workflow1 (should be preserved but not affect cron linking)
DetectorWorkflow.objects.create(
detector=self.regular_detector1, workflow=self.project1_workflow1
)
def test_migration(self) -> None:
# Verify project1 cron detectors are linked to all project1 workflows only
project1_workflows = [self.project1_workflow1, self.project1_workflow2]
project1_cron_detectors = [self.cron_detector1, self.cron_detector2]
for detector in project1_cron_detectors:
detector_workflows = DetectorWorkflow.objects.filter(detector=detector)
assert detector_workflows.count() == len(project1_workflows), (
f"Cron detector {detector.name} should be linked to all {len(project1_workflows)} "
f"workflows in project1, but found {detector_workflows.count()}"
)
linked_workflow_ids = set(detector_workflows.values_list("workflow_id", flat=True))
expected_workflow_ids = {w.id for w in project1_workflows}
assert linked_workflow_ids == expected_workflow_ids, (
f"Cron detector {detector.name} should be linked to workflows "
f"{expected_workflow_ids}, but found {linked_workflow_ids}"
)
# Verify project1b cron detector is linked to project1b workflow only
project1b_detector_workflows = DetectorWorkflow.objects.filter(detector=self.cron_detector3)
assert project1b_detector_workflows.count() == 1, (
f"Project1b cron detector should be linked to 1 workflow, "
f"but found {project1b_detector_workflows.count()}"
)
project1b_detector_workflows_first = project1b_detector_workflows.first()
assert project1b_detector_workflows_first
assert (
project1b_detector_workflows_first.workflow_id == self.project1b_workflow1.id
), "Project1b cron detector should be linked to project1b workflow only"
# Verify project2 cron detector is linked to project2 workflow only
project2_detector_workflows = DetectorWorkflow.objects.filter(detector=self.cron_detector4)
assert project2_detector_workflows.count() == 1, (
f"Project2 cron detector should be linked to 1 workflow, "
f"but found {project2_detector_workflows.count()}"
)
project2_detector_workflows_first = project2_detector_workflows.first()
assert project2_detector_workflows_first
assert (
project2_detector_workflows_first.workflow_id == self.project2_workflow1.id
), "Project2 cron detector should be linked to project2 workflow"
# Verify cron detectors are NOT linked to workflows from other projects in same org
# cron_detector1 and cron_detector2 should NOT be linked to project1b_workflow1
for detector in project1_cron_detectors:
wrong_project_links = DetectorWorkflow.objects.filter(
detector=detector, workflow=self.project1b_workflow1
)
assert wrong_project_links.count() == 0, (
f"Cron detector {detector.name} from project1 should NOT be linked to "
f"project1b workflow, but found {wrong_project_links.count()} links"
)
# Verify regular detectors are not linked to all workflows
regular_detector1_workflows = DetectorWorkflow.objects.filter(
detector=self.regular_detector1
)
assert regular_detector1_workflows.count() == 1, (
f"Regular detector should still have only 1 workflow link, "
f"but found {regular_detector1_workflows.count()}"
)
regular_detector2_workflows = DetectorWorkflow.objects.filter(
detector=self.regular_detector2
)
assert regular_detector2_workflows.count() == 0, (
f"Regular detector2 should have no workflow links, "
f"but found {regular_detector2_workflows.count()}"
)
# Verify no duplicate DetectorWorkflow entries were created
# cron_detector1 already had a link to project1_workflow1, should still be just 1
detector1_workflow1_links = DetectorWorkflow.objects.filter(
detector=self.cron_detector1, workflow=self.project1_workflow1
)
assert (
detector1_workflow1_links.count() == 1
), "Should not create duplicate DetectorWorkflow entries"
# Verify total counts
total_cron_detector_workflows = DetectorWorkflow.objects.filter(
detector__type="monitor_check_in_failure"
).count()
expected_total = (
len(project1_cron_detectors)
* len(project1_workflows) # project1: 2 detectors * 2 workflows = 4
+ 1 # project1b: 1 detector * 1 workflow = 1
+ 1 # project2: 1 detector * 1 workflow = 1
)
assert total_cron_detector_workflows == expected_total, (
f"Expected {expected_total} total cron DetectorWorkflow entries, "
f"but found {total_cron_detector_workflows}"
)
| LinkCronDetectorsToAllWorkflowsTest |
python | spack__spack | lib/spack/spack/patch.py | {
"start": 5112,
"end": 7953
} | class ____(Patch):
"""Describes a patch that is retrieved from a file in the repository."""
_sha256: Optional[str] = None
def __init__(
self,
pkg: PatchPackageType,
relative_path: str,
level: int,
working_dir: str,
reverse: bool = False,
ordering_key: Optional[Tuple[str, int]] = None,
) -> None:
"""Initialize a new FilePatch instance.
Args:
pkg: the class object for the package that owns the patch
relative_path: path to patch, relative to the repository directory for a package.
level: level to pass to patch command
working_dir: path within the source directory where patch should be applied
reverse: reverse the patch
ordering_key: key used to ensure patches are applied in a consistent order
"""
self.relative_path = relative_path
# patches may be defined by relative paths to parent classes
# search mro to look for the file
abs_path: Optional[str] = None
# At different times we call FilePatch on instances and classes
pkg_cls = pkg if isinstance(pkg, type) else pkg.__class__
for cls in pkg_cls.__mro__: # type: ignore
if not hasattr(cls, "module"):
# We've gone too far up the MRO
break
# Cannot use pkg.package_dir because it's a property and we have
# classes, not instances.
pkg_dir = os.path.abspath(os.path.dirname(cls.module.__file__))
path = os.path.join(pkg_dir, self.relative_path)
if os.path.exists(path):
abs_path = path
break
if abs_path is None:
msg = "FilePatch: Patch file %s for " % relative_path
msg += "package %s.%s does not exist." % (pkg.namespace, pkg.name)
raise ValueError(msg)
super().__init__(pkg, abs_path, level, working_dir, reverse, ordering_key)
self.path = abs_path
@property
def sha256(self) -> str:
"""Get the patch checksum.
Returns:
The sha256 of the patch file.
"""
if self._sha256 is None and self.path is not None:
self._sha256 = checksum(hashlib.sha256, self.path)
assert isinstance(self._sha256, str)
return self._sha256
@sha256.setter
def sha256(self, value: str) -> None:
"""Set the patch checksum.
Args:
value: the sha256
"""
self._sha256 = value
def to_dict(self) -> Dict[str, Any]:
"""Dictionary representation of the patch.
Returns:
A dictionary representation.
"""
data = super().to_dict()
data["relative_path"] = self.relative_path
return data
| FilePatch |
python | huggingface__transformers | src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py | {
"start": 1528,
"end": 2323
} | class ____(nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.zeros(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float())
# Llama does x.to(float16) * w whilst RecurrentGemma is (x * w).to(float16)
# See https://github.com/huggingface/transformers/pull/29402
output = output * (1.0 + self.weight.float())
return output.type_as(x)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.eps}"
# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->RecurrentGemma
| RecurrentGemmaRMSNorm |
python | allegroai__clearml | clearml/utilities/pyhocon/exceptions.py | {
"start": 163,
"end": 231
} | class ____(ConfigException, KeyError):
pass
| ConfigMissingException |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 13880,
"end": 13943
} | class ____:
def __init__(self, arg):
pass
| UnknownType |
python | walkccc__LeetCode | solutions/1488. Avoid Flood in The City/1488.py | {
"start": 41,
"end": 1028
} | class ____:
def avoidFlood(self, rains: list[int]) -> list[int]:
ans = [-1] * len(rains)
lakeIdToFullDay = {}
emptyDays = SortedSet() # indices of rains[i] == 0
for i, lakeId in enumerate(rains):
if lakeId == 0:
emptyDays.add(i)
continue
# The lake was full in a previous day. Greedily find the closest day
# to make the lake empty.
if lakeId in lakeIdToFullDay:
fullDay = lakeIdToFullDay[lakeId]
emptyDayIndex = emptyDays.bisect_right(fullDay)
if emptyDayIndex == len(emptyDays): # Not found.
return []
# Empty the lake at this day.
emptyDay = emptyDays[emptyDayIndex]
ans[emptyDay] = lakeId
emptyDays.discard(emptyDay)
# The lake with `lakeId` becomes full at the day `i`.
lakeIdToFullDay[lakeId] = i
# Empty an arbitrary lake if there are remaining empty days.
for emptyDay in emptyDays:
ans[emptyDay] = 1
return ans
| Solution |
python | huggingface__transformers | tests/models/glm4/test_modeling_glm4.py | {
"start": 1272,
"end": 1475
} | class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = Glm4ModelTester
_is_stateful = True
model_split_percents = [0.5, 0.6]
@slow
@require_torch_large_accelerator
| Glm4ModelTest |
python | jd__tenacity | tenacity/retry.py | {
"start": 8738,
"end": 9032
} | class ____(retry_base):
"""Retries if all the retries condition are valid."""
def __init__(self, *retries: retry_base) -> None:
self.retries = retries
def __call__(self, retry_state: "RetryCallState") -> bool:
return all(r(retry_state) for r in self.retries)
| retry_all |
python | pypa__warehouse | warehouse/accounts/models.py | {
"start": 11011,
"end": 11666
} | class ____(db.Model):
__tablename__ = "user_security_keys"
__table_args__ = (
UniqueConstraint("label", "user_id", name="_user_security_keys_label_uc"),
)
user_id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True),
ForeignKey("users.id", deferrable=True, initially="DEFERRED"),
nullable=False,
index=True,
)
user: Mapped[User] = orm.relationship(back_populates="webauthn")
label: Mapped[str]
credential_id: Mapped[str] = mapped_column(unique=True)
public_key: Mapped[str | None] = mapped_column(unique=True)
sign_count: Mapped[int | None] = mapped_column(default=0)
| WebAuthn |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/71_class_meth.py | {
"start": 0,
"end": 43
} | class ____:
def foo(self):
self
| C |
python | django__django | tests/template_tests/syntax_tests/i18n/test_translate.py | {
"start": 10367,
"end": 11900
} | class ____(MultipleLocaleActivationTestCase):
tag_name = "trans"
def get_template(self, template_string):
return Template(
template_string.replace("{{% translate ", "{{% {}".format(self.tag_name))
)
def test_single_locale_activation(self):
"""
Simple baseline behavior with one locale for all the supported i18n
constructs.
"""
with translation.override("fr"):
self.assertEqual(
self.get_template("{% load i18n %}{% translate 'Yes' %}").render(
Context({})
),
"Oui",
)
def test_multiple_locale_trans(self):
with translation.override("de"):
t = self.get_template("{% load i18n %}{% translate 'No' %}")
with translation.override(self._old_language), translation.override("nl"):
self.assertEqual(t.render(Context({})), "Nee")
def test_multiple_locale_deactivate_trans(self):
with translation.override("de", deactivate=True):
t = self.get_template("{% load i18n %}{% translate 'No' %}")
with translation.override("nl"):
self.assertEqual(t.render(Context({})), "Nee")
def test_multiple_locale_direct_switch_trans(self):
with translation.override("de"):
t = self.get_template("{% load i18n %}{% translate 'No' %}")
with translation.override("nl"):
self.assertEqual(t.render(Context({})), "Nee")
| MultipleLocaleActivationTransTagTests |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_dtype.py | {
"start": 10766,
"end": 11189
} | class ____(TestCase):
def test_dtypes_are_true(self):
# test for gh-6294
assert bool(np.dtype("f8"))
assert bool(np.dtype("i8"))
@xpassIfTorchDynamo_np # (reason="No keyword arg for dtype ctor.")
def test_keyword_argument(self):
# test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
| TestMisc |
python | PyCQA__pycodestyle | pycodestyle.py | {
"start": 83883,
"end": 84011
} | class ____(BaseReport):
"""Collect the results of the checks and print the filenames."""
print_filename = True
| FileReport |
python | django__django | tests/invalid_models_tests/test_models.py | {
"start": 64072,
"end": 64568
} | class ____(TestCase):
def test_multiple_autofields(self):
msg = (
"Model invalid_models_tests.MultipleAutoFields can't have more "
"than one auto-generated field."
)
with self.assertRaisesMessage(ValueError, msg):
class MultipleAutoFields(models.Model):
auto1 = models.AutoField(primary_key=True)
auto2 = models.AutoField(primary_key=True)
@isolate_apps("invalid_models_tests")
| MultipleAutoFieldsTests |
python | realpython__materials | langchain-rag-app/source_code_final/chatbot_api/src/models/hospital_rag_query.py | {
"start": 86,
"end": 189
} | class ____(BaseModel):
input: str
output: str
intermediate_steps: list[str]
| HospitalQueryOutput |
python | tensorflow__tensorflow | tensorflow/python/trackable/base.py | {
"start": 9399,
"end": 42402
} | class ____(object):
"""Base class for `Trackable` objects without automatic dependencies.
This class has no __setattr__ override for performance reasons. Dependencies
must be added explicitly. Unless attribute assignment is performance-critical,
use `AutoTrackable` instead. Use `Trackable` for `isinstance`
checks.
"""
# For compatibility with wrapt.ObjectProxy, attributes are all prefixed with
# _self_. We have some properties to forward semi-public attributes to their
# _self_ equivalents.
@property
def _setattr_tracking(self):
if not hasattr(self, "_self_setattr_tracking"):
self._self_setattr_tracking = True
return self._self_setattr_tracking
@_setattr_tracking.setter
def _setattr_tracking(self, value):
self._self_setattr_tracking = value
@property
def _update_uid(self):
return self._self_update_uid
@_update_uid.setter
def _update_uid(self, value):
self._self_update_uid = value
@property
def _unconditional_checkpoint_dependencies(self):
return self._self_unconditional_checkpoint_dependencies
@property
def _unconditional_dependency_names(self):
return self._self_unconditional_dependency_names
@property
def _name_based_restores(self):
return self._self_name_based_restores
# Trackable does not do automatic dependency tracking, but uses the
# no_automatic_dependency_tracking decorator so it can avoid adding
# dependencies if a subclass is Trackable / inherits from Model (both of
# which have __setattr__ overrides).
@no_automatic_dependency_tracking
def _maybe_initialize_trackable(self):
"""Initialize dependency management.
Not __init__, since most objects will forget to call it.
"""
if hasattr(self, "_self_unconditional_checkpoint_dependencies"):
# __init__ already called. This check means that we don't need
# Trackable.__init__() in the constructor of every TensorFlow object.
return
# A list of TrackableReference objects. Some classes implementing
# `Trackable`, notably `Optimizer`s, may override the
# _checkpoint_dependencies property with conditional dependencies
# (e.g. based on the current graph when saving).
self._self_unconditional_checkpoint_dependencies = []
# Maps names -> Trackable objects
self._self_unconditional_dependency_names = {}
# Restorations for other Trackable objects on which this object may
# eventually depend. Maps local name -> CheckpointPosition list. Optimizers
# tack on conditional dependencies, and so need separate management of
# deferred dependencies too.
self._self_unconditional_deferred_dependencies = {}
# The UID of the highest assignment to this object. Used to ensure that the
# last requested assignment determines the final value of an object.
if hasattr(self, "_self_update_uid"):
raise AssertionError(
"Internal error: the object had an update UID set before its "
"initialization code was run.")
self._self_update_uid = -1
# When executing eagerly, holds a collection of _NameBasedRestoreCoordinator
# instances, which should be checked when creating variables or other
# saveables. These are passed on recursively to all dependencies, since
# unlike object-based checkpoint restores we don't know which subgraph is
# being restored in advance. This mechanism is only necessary for
# restore-on-create when executing eagerly, and so is unused when graph
# building.
self._self_name_based_restores = set()
# Dictionary of SaveableObjects factories. This dictionary is defined when
# the object is loaded from the SavedModel. When writing a custom class,
# prefer overriding "_gather_saveables_from_checkpoint" to using this
# attribute.
self._self_saveable_object_factories = {}
@property
def _object_identifier(self):
"""String used to identify this object in a SavedModel.
THIS FIELD HAS BEEN DEPRECATED IN FAVOR OF THE NAME REGISTERED WITH
`register_serializable`.
Generally, the object identifier is constant across objects of the same
class, while the metadata field is used for instance-specific data.
Returns:
String object identifier.
"""
return "_generic_user_object"
def _no_dependency(self, value):
"""If automatic dependency tracking is enabled, ignores `value`."""
return value
def _name_based_attribute_restore(self, checkpoint):
"""Restore the object's attributes from a name-based checkpoint."""
self._self_name_based_restores.add(checkpoint)
if self._self_update_uid < checkpoint.restore_uid:
checkpoint.eager_restore(self)
self._self_update_uid = checkpoint.restore_uid
@property
def _checkpoint_dependencies(self):
"""All dependencies of this object.
May be overridden to include conditional dependencies.
Returns:
A list of `TrackableReference` objects indicating named
`Trackable` dependencies which should be saved along with this
object.
"""
return self._self_unconditional_checkpoint_dependencies
@property
def _deferred_dependencies(self):
"""A dictionary with deferred dependencies.
Stores restorations for other Trackable objects on which this object
may eventually depend. May be overridden by sub-classes (e.g. Optimizers use
conditional dependencies based the current graph, and so need separate
management of deferred dependencies too).
Returns:
A dictionary mapping from local name to a list of CheckpointPosition
objects.
"""
return self._self_unconditional_deferred_dependencies
def _lookup_dependency(self, name, cached_dependencies=None):
"""Look up a dependency by name.
May be overridden to include conditional dependencies.
Args:
name: The local name of the dependency.
cached_dependencies: Optional dict containing all computed dependencies
returned by `self._trackable_children()`.
Returns:
A `Trackable` object, or `None` if no dependency by this name was
found.
"""
if cached_dependencies:
return cached_dependencies.get(name)
return self._self_unconditional_dependency_names.get(name)
  def _add_variable_with_custom_getter(self,
                                       name,
                                       shape=None,
                                       dtype=dtypes.float32,
                                       initializer=None,
                                       getter=None,
                                       overwrite=False,
                                       **kwargs_for_getter):
    """Restore-on-create for a variable to be saved with this `Trackable`.

    If the user has requested that this object or another `Trackable` which
    depends on this object be restored from a checkpoint (deferred loading
    before variable object creation), `initializer` may be ignored and the
    value from the checkpoint used instead.

    Args:
      name: A name for the variable. Must be unique within this object.
      shape: The shape of the variable.
      dtype: The data type of the variable.
      initializer: The initializer to use. Ignored if there is a deferred
        restoration stored in the Trackable.
      getter: The getter to wrap which actually fetches the variable.
      overwrite: If True, disables unique name and type checks.
      **kwargs_for_getter: Passed to the getter.

    Returns:
      The new variable object.

    Raises:
      ValueError: If the variable name is not unique.
    """
    self._maybe_initialize_trackable()
    with ops.init_scope():
      if context.executing_eagerly():
        # If this is a variable with a single Tensor stored in the checkpoint,
        # we can set that value as an initializer rather than initializing and
        # then assigning (when executing eagerly). This call returns None if
        # there is nothing to restore.
        checkpoint_initializer = self._preload_simple_restoration(name=name)
      else:
        checkpoint_initializer = None
      if (checkpoint_initializer is not None and
          not (isinstance(initializer, CheckpointInitialValueCallable) and
               (initializer.restore_uid > checkpoint_initializer.restore_uid))):
        # If multiple Trackable objects are "creating" the same variable
        # via the magic of custom getters, the one with the highest restore UID
        # (the one called last) has to make the final initializer. If another
        # custom getter interrupts this process by overwriting the initializer,
        # then we'll catch that when we call _track_trackable. So this is
        # "best effort" to set the initializer with the highest restore UID.
        initializer = checkpoint_initializer
    new_variable = getter(
        name=name,
        shape=shape,
        dtype=dtype,
        initializer=initializer,
        **kwargs_for_getter)
    # If we set an initializer and the variable processed it, tracking will not
    # assign again. It will add this variable to our dependencies, and if there
    # is a non-trivial restoration queued, it will handle that. This also
    # handles slot variables.
    if not overwrite or isinstance(new_variable, Trackable):
      return self._track_trackable(new_variable, name=name, overwrite=overwrite)
    else:
      # TODO(allenl): Some variable types are not yet supported. Remove this
      # fallback once all get_variable() return types are Trackable.
      return new_variable
  def _preload_simple_restoration(self, name):
    """Return a dependency's value for restore-on-create.

    Note the restoration is not deleted; if for some reason preload is called
    and then not assigned to the variable (for example because a custom getter
    overrides the initializer), the assignment will still happen once the
    variable is tracked (determined based on checkpoint.restore_uid).

    Args:
      name: The object-local name of the dependency holding the variable's
        value.

    Returns:
      A callable for use as a variable's initializer/initial_value, or None if
      one should not be set (either because there was no variable with this
      name in the checkpoint or because it needs more complex deserialization).
      Any non-trivial deserialization will happen when the variable object is
      tracked.
    """
    deferred_dependencies_list = self._deferred_dependencies.get(name, ())
    if not deferred_dependencies_list:
      # Nothing to do; we don't have a restore for this dependency queued up.
      return
    for checkpoint_position in deferred_dependencies_list:
      if not checkpoint_position.is_simple_variable():
        # If _any_ pending restoration is too complicated to fit in an
        # initializer (because it has dependencies, or because there are
        # multiple Tensors to restore), bail and let the general tracking code
        # handle it.
        return None
    # Multiple restores may be queued; the most recent checkpoint (highest
    # restore UID) wins, matching the "last restore wins" rule used elsewhere.
    checkpoint_position = max(
        deferred_dependencies_list,
        key=lambda restore: restore.checkpoint.restore_uid)
    return CheckpointInitialValueCallable(
        checkpoint_position=checkpoint_position)
  def _track_trackable(self, trackable, name, overwrite=False):
    """Declare a dependency on another `Trackable` object.

    Indicates that checkpoints for this object should include variables from
    `trackable`.

    Variables in a checkpoint are mapped to `Trackable`s based on the names
    provided when the checkpoint was written. To avoid breaking existing
    checkpoints when modifying a class, neither variable names nor dependency
    names (the names passed to `_track_trackable`) may change.

    Args:
      trackable: A `Trackable` which this object depends on.
      name: A local name for `trackable`, used for loading checkpoints into
        the correct objects.
      overwrite: Boolean, whether silently replacing dependencies is OK. Used
        for __setattr__, where throwing an error on attribute reassignment
        would be inappropriate.

    Returns:
      `trackable`, for convenience when declaring a dependency and
      assigning to a member variable in one statement.

    Raises:
      TypeError: If `trackable` does not inherit from `Trackable`.
      ValueError: If another object is already tracked by this name.
    """
    self._maybe_initialize_trackable()
    if not isinstance(trackable, Trackable):
      raise TypeError(
          "Trackable._track_trackable() can only be used to track objects of "
          f"type Trackable. Got type {type(trackable)}.")
    if not getattr(self, "_manual_tracking", True):
      # Tracking is temporarily disabled (e.g. inside a no-dependency scope);
      # hand the object back without recording anything.
      return trackable
    new_reference = TrackableReference(name=name, ref=trackable)
    current_object = self._lookup_dependency(name)
    if (current_object is not None and current_object is not trackable):
      if not overwrite:
        raise ValueError(
            f"Called Trackable._track_trackable() with name='{name}', "
            "but a Trackable with this name is already declared as a "
            "dependency. Names must be unique (or overwrite=True).")
      # This is a weird thing to do, but we're not going to stop people from
      # using __setattr__.
      for index, (old_name, _) in enumerate(
          self._self_unconditional_checkpoint_dependencies):
        if name == old_name:
          self._self_unconditional_checkpoint_dependencies[
              index] = new_reference
    elif current_object is None:
      self._self_unconditional_checkpoint_dependencies.append(new_reference)
    # Flush any restores that were queued waiting for a dependency with this
    # name, then record the name -> object mapping.
    self._handle_deferred_dependencies(name=name, trackable=trackable)
    self._self_unconditional_dependency_names[name] = trackable
    return trackable
  def _handle_deferred_dependencies(self, name, trackable):
    """Pop and load any deferred checkpoint restores into `trackable`.

    This method does not add a new dependency on `trackable`, but it does
    check if any outstanding/deferred dependencies have been queued waiting
    for this dependency to be added (matched based on `name`). If so,
    `trackable` and its dependencies are restored. The restorations are
    considered fulfilled and so are deleted.

    `_track_trackable` is more appropriate for adding a normal/unconditional
    dependency, and includes handling for deferred restorations. This method
    allows objects such as `Optimizer` to use the same restoration logic
    while managing conditional dependencies themselves, by overriding
    `_checkpoint_dependencies` and `_lookup_dependency` to change the
    object's dependencies based on the context it is saved/restored in (a
    single optimizer instance can have state associated with multiple
    graphs).

    Args:
      name: The name of the dependency within this object (`self`), used to
        match `trackable` with values saved in a checkpoint.
      trackable: The Trackable object to restore (inheriting from
        `Trackable`).
    """
    self._maybe_initialize_trackable()
    trackable._maybe_initialize_trackable()  # pylint: disable=protected-access
    # `pop` (not `get`): once restored, the deferred entries are fulfilled.
    deferred_dependencies_list = self._deferred_dependencies.pop(name, ())
    # Restore newest-first so the highest restore UID wins any conflicts.
    for checkpoint_position in sorted(
        deferred_dependencies_list,
        key=lambda restore: restore.checkpoint.restore_uid,
        reverse=True):
      checkpoint_position.restore(trackable)
    # Pass on any name-based restores queued in this object.
    for name_based_restore in sorted(
        self._self_name_based_restores,
        key=lambda checkpoint: checkpoint.restore_uid,
        reverse=True):
      trackable._name_based_attribute_restore(name_based_restore)  # pylint: disable=protected-access
def _gather_saveables_for_checkpoint(self):
"""Returns a dictionary of values to checkpoint with this object.
NOTE: This method is deprecated, prefer implementing `_serialize_to_tensors`
and `_restore_from_tensors` instead. This method is only used in the
deprecated `tf.compat.v1.train.Saver`.
Keys in the returned dictionary are local to this object and in a separate
namespace from dependencies. Values may either be `SaveableObject` factories
or variables easily converted to `SaveableObject`s (as in
`tf.compat.v1.train.Saver`'s
`var_list` constructor argument).
`SaveableObjects` have a name set, which Trackable needs to generate
itself. So rather than returning `SaveableObjects` directly, this method
should return a dictionary of callables which take `name` arguments and
return `SaveableObjects` with that name.
If this object may also be passed to the global-name-based
`tf.compat.v1.train.Saver`,
the returned callables should have a default value for their name argument
(i.e. be callable with no arguments).
Returned values must be saved only by this object; if any value may be
shared, it should instead be a dependency. For example, variable objects
save their own values with the key `VARIABLE_VALUE_KEY`, but objects which
reference variables simply add a dependency.
**AsyncCheckpoint Support**
If your Trackable implements `_gather_saveables_for_checkpoint`,
`_copy_trackable_to_cpu` needs to be implemented as well to support
asynchronous checkpoint.
Returns:
The dictionary mapping attribute names to `SaveableObject` factories
described above. For example:
{VARIABLE_VALUE_KEY:
lambda name="global_name_for_this_object":
SaveableObject(name=name, ...)}
"""
return getattr(self, "_self_saveable_object_factories", {})
  def _serialize_to_tensors(self):
    """Gathers tensors to save to the checkpoint.

    You should only override `_serialize_to_tensors` and
    `_restore_from_tensors` if you are defining a custom resource or variable
    with custom ops. Otherwise, please store the state of your trackable in
    `tf.Variable` objects and add them to the Trackable object hierarchy
    using `setattr` (for subclasses of `AutoTrackable`) or overriding the
    `_trackable_children` method.

    For an example of a valid implementation of these two methods, please see
    `DenseHashTable`.

    **Invalid implementation**

    ```
    class NamedTrackable(Trackable):
      def __init__(self, name: str):
        self.name = name
      def _serialize_to_tensors(self):
        return {"name": self.name}
      def _restore_from_tensors(self, restored_tensors):
        self.name = restored_tensors["name"]
    ```

    In this example, `NamedTrackable` can be saved and restored from
    checkpoints, but is incompatible with SavedModel, which tries to convert
    the serialize/restore functions into tf.functions. This fails because
    attribute assignment (`self.attr = new_value`) is not graph-friendly.

    **Suggested fix**

    ```
    class NamedTrackable(Trackable):
      def __init__(self, name: str):
        self.name = tf.Variable(name)
      def _trackable_children(self):
        return {"name": self.name}
    ```

    If the `name` attribute should be saved to the checkpoint, then convert
    it to a `tf.Variable`.

    **TF1 Saver Compatibility**

    If your Trackable needs to be compatible with `tf.compat.v1.train.Saver`,
    implement `_gather_saveables_for_checkpoint`.

    **AsyncCheckpoint Support**

    If your Trackable implements `_serialize_to_tensors`,
    `_copy_trackable_to_cpu` needs to be implemented as well to support
    asynchronous checkpoint.

    Returns:
      A dictionary mapping names to tensors.
    """
    raise NotImplementedError
  def _restore_from_tensors(self, restored_tensors):
    """Restores checkpointed values to this `Trackable`.

    Please see the documentation for `Trackable._serialize_to_tensors`.

    Args:
      restored_tensors: A dictionary mapping names to tensors. The keys of
        this dictionary match the names passed to `_serialize_to_tensors`.

    Returns:
      An op that runs the restoration.
    """
    raise NotImplementedError
def _serialize_to_proto(self, object_proto=None, **kwargs):
"""Returns a proto of any type to be saved into the SavedModel.
Trackable classes decorated with `register_serializable` should overwrite
this method to save metadata for this object to the SavedModel. The proto
returned by this function will be passed to `_deserialize_from_proto` in the
form of a `google.protobuf.Any` proto.
This data is only saved and used by the Python API. Existing C++ loading
APIs such as `tensorflow::LoadSavedModel` will not read this field at all.
Args:
object_proto: A `SavedObject` proto that may be filled by this function.
Only the core serializable types (Variable, Function, Constant, Asset)
should modify this argument.
**kwargs: Future keyword arguments passed to the object during saving.
Returns:
A proto that serializes this class's type.
"""
del object_proto, kwargs # Unused.
return None
  @classmethod
  def _deserialize_from_proto(cls,
                              proto=None,
                              dependencies=None,
                              object_proto=None,
                              export_dir=None,
                              asset_file_def=None,
                              operation_attributes=None,
                              **kwargs):
    """Returns a new object restored by the SavedModel.

    Trackable classes decorated with `register_serializable` should override
    this method to change how the object is loaded from SavedModel. By
    default, the object is initialized with no arguments.

    Example:

    ```
    def _serialize_to_proto(self, **unused_kwargs):
      return Message(name="a")

    @classmethod
    def _deserialize_from_proto(cls, proto, **unused_kwargs):
      if proto.Is(Message.DESCRIPTOR):
        unpacked = Message()
        proto.Unpack(unpacked)
        return cls(unpacked.name)
      else:
        return cls()
    ```

    This function is only used by the Python API. C++ and TensorFlow Serving
    do not have access to your registered class and cannot execute any of the
    non-tf.functions attached to the Python class. However, all signatures
    and tf.functions are still accessible.

    **Avoid creating duplicate trackables**

    SavedModel is saved by recursively gathering all of the trackables and
    their children. SavedModel loading reverses those steps by creating all
    trackables, then reconnecting the children trackables to their parents
    using `Trackable._add_trackable_child`.

    That means that if `_deserialize_from_proto` calls the `__init__`
    function, which creates all of the children trackables, then those
    children end up being created *twice*.

    To avoid this, structure your code so that Trackables are not created
    when deserialized from SavedModel:

    ```
    @register_serializable()
    class Serializable(Trackable):
      def __init__(self, from_proto=False):
        create_non_trackable_objects()
        if not from_proto:
          create_variables_and_other_trackables()

      def _deserialize_from_proto(cls, **kwargs):
        return cls(from_proto=True)

      def _add_trackable_child(self, name, value):
        self.__setattr__(name, value)
    ```

    Args:
      proto: A `google.protobuf.Any` proto read from the `SavedModel`.
      dependencies: A dictionary mapping names to dependencies (see
        `_deserialization_dependencies`)
      object_proto: The `SavedObject` proto for this object.
      export_dir: The `SavedModel` directory
      asset_file_def: The `MetaGraphDef`'s `asset_file_def` field.
      operation_attributes: Dictionary mapping nodes to attribute from the
        imported `GraphDef`.
      **kwargs: Future keyword arguments passed to the object when loading.

    Returns:
      A new object.
    """
    del (proto, dependencies, object_proto, export_dir, asset_file_def,
         operation_attributes, kwargs)
    return cls()
  def _add_trackable_child(self, name, value):
    """Restores a connection between trackables when loading from SavedModel.

    SavedModel stores both the object metadata and its list of children. When
    loading, this function is used along with `_deserialize_from_proto` to
    load objects from the SavedModel: First, all saved objects are created
    with `_deserialize_from_proto`. After that is complete, the children are
    connected using `_add_trackable_child`.

    **Example**

    `tf.Module`, `tf.keras.Model` and Keras layers use `__setattr__` to track
    children. This is why users can call `model.v = tf.Variable(...)`, and
    the variable will be automatically saved to the checkpoint. The
    implementation of this method for the listed objects is:

    ```
    def _add_trackable_child(self, name, value):
      self.__setattr__(name, value)
    ```

    Args:
      name: The name of the connection between the parent and child
        `Trackable`.
      value: The child `Trackable` object.
    """
    # overwrite=True: re-attaching a child during loading must not raise the
    # duplicate-name error that manual tracking would.
    self._track_trackable(value, name, overwrite=True)
def _deserialization_dependencies(self, children):
"""Returns a dictionary containing `Trackables` that this object depends on.
Dependencies define the order to serialize and deserialize objects in the
SavedModel. For example:
class A(Trackable):
b = B()
def _deserialization_dependencies(self, children):
return {'b': self.b}
class B(Trackable):
pass
We say that object `a=A()` depends on `a.b`.
Dependencies are guaranteed to be serialized and deserialized before the
object depending on them. The following methods use dependencies:
- `_deserialize_from_proto` [loading]
SavedModel loads with the bottom-up approach, by first creating all objects
in the order defined by the dependencies, then connecting the children.
Unlike `_trackable_children`, this function does not define the
`SavedObjectGraph`. It only changes the order in which things are
saved/loaded. Therefore, if there are dependencies that are not in the
`SavedObjectGraph`, saving will fail.
Args:
children: Dict returned from `_trackable_children`.
Returns:
A dictionary mapping names to `Trackable`.
"""
del children # Unused.
return {}
  def _trackable_children(self,
                          save_type=SaveType.CHECKPOINT,
                          cache=None,
                          **kwargs):
    """Returns this object's `Trackable` attributes.

    This method is used to build the object graph (or the object hierarchy,
    in pickling terms) for checkpoint save/restore, and `SavedModel` export.

    Override this method to define the children of this instance. Please read
    the implementation restrictions:

    **Rule 1: All children must be convertible to `Trackable`.**

    Must pass `isinstance` check or `converter.convert_to_trackable`.

    **Rule 2: [Checkpoint-only] Do not create new objects.**

    When saving to a `SavedModel`, this method is called *exactly once* for
    each `Trackable` in the object graph. When saving or restoring from a
    checkpoint, this method may be called *multiple times*. Thus, this method
    may create new Trackables when `save_type == SaveType.SAVEDMODEL` but not
    when `save_type == SaveType.CHECKPOINT`.

    When saving to `SavedModel`, new `Trackable` children can be created to
    save non-Trackable attributes to the `SavedModel`. In the example below,
    `hyper` is a regular python float hyperparameter. To save this value, a
    new Variable is created to store the value of `hyper`:

    ```
    def __init__(self):
      self.hyper = 1e-5

    def _trackable_children(self, save_type, **unused_kwargs):
      # Correct implementation
      children = {}
      if save_type == SaveType.SAVEDMODEL:
        children['hyper'] = tf.Variable(self.hyper)
      return children
    ```

    An incorrect implementation of `_trackable_children` is shown below. This
    function would cause failures when loading the checkpoint, and calling
    `load_status.assert_consumed()` or
    `load_status.assert_existing_objects_matched`. If you want a value to be
    saved in the checkpoint, hyper must be defined as a `tf.Variable` from
    the start.

    ```
    def _trackable_children(self, save_type, **unused_kwargs):
      # Incorrect implementation
      return {'hyper': tf.Variable(self.hyper)}
    ```

    **Rule 3: [`SavedModel`-only] Watch out for un-traced tf.functions.**

    At the beginning of `_trackable_children`, always call
    `get_concrete_function()` for any `tf.function` that has an input
    signature. When `tf.functions` are saved to `SavedModel`, any
    `tf.functions` that have an input signature and have never been called
    are traced at export time in order to copy the op graph into the
    `SavedModel`. `tf.functions` that are traced for the first time are
    allowed to create new state:

    ```
    @tf.function(input_signature=[])
    def fn(self):
      if self.v is None:
        self.v = tf.Variable(1.)
      return self.v
    ```

    A problem occurs when there is a `Trackable` that returns `fn` as one of
    its children and `self.v` has not been created yet. When `fn` is traced,
    `self.v` is added to the `Trackable`, but `SavedModel` does not see this
    modification since the `Trackable`'s children have already been gathered.

    Therefore, as a precaution, call `get_concrete_function()` at the very
    start of `_trackable_children` to ensure that the function is traced:

    ```
    def _trackable_children(self):
      self.fn.get_concrete_function()
      return {"v": self.v, "fn": self.fn}
    ```

    Args:
      save_type: A string, can be 'savedmodel' or 'checkpoint'. Defaults to
        SaveType.CHECKPOINT.
      cache: May be `None`, or a dictionary. When `save_type == savedmodel`,
        a new cache is created at the start of the SavedModel export, and
        shared between all `Trackables` in the same object graph. This cache
        may be used for advanced saving functionality.
      **kwargs: Additional kwargs that may be added at a later time.

    Returns:
      Dictionary mapping names to child trackables.
    """
    del save_type, cache, kwargs  # Unused.
    self._maybe_initialize_trackable()
    # `_checkpoint_dependencies` yields (name, ref) pairs; expose them as a
    # name -> trackable mapping.
    return {name: ref for name, ref in self._checkpoint_dependencies}
def _export_to_saved_model_graph(self,
object_map,
tensor_map,
options,
**kwargs):
"""Creates a copy of this object's tensors onto SavedModel graph.
Needs to be overridden if the class contains tensors that must be saved
into the graph. This method should update the `object_map` and `tensor_map`
dictionaries.
This method is called on all nodes in the Trackable Graph (generated by
`_trackable_children`). The nodes are traversed in the order defined by
`_deserialization_dependencies`
All usages of _map_resources should be migrated to this method.
Args:
object_map: A dictionary that maps original Trackables to the copied
Trackables. This only needs to be updated if the object is a
tf.function, or if the copied tensors are necessary for checkpointing
this object.
tensor_map: Dictionary mapping original tensors to copied tensors.
options: A `tf.saved_model.SaveOptions` object.
**kwargs: Additional kwargs that may be added at a later time.
Returns:
Flat list of original tensors that have been copied.
"""
_, _, _ = object_map, tensor_map, options
del kwargs
return []
def _copy_trackable_to_cpu(self, object_map):
"""Creates a copy of this object onto CPU, also copies values over.
Needs to be overridden if the `Trackable` requires AsyncCheckpoint support.
The method first checks whether a copy of `self` is already created in
`object_map`, and creates one if not already created. Then the method copies
the **values** of itself over to its copy mapped by `object_map`.
Args:
object_map: A dictionary that maps original Trackables to the copied
Trackables, which reside in the CPU.
"""
del object_map # Unused
raise NotImplementedError("Need to implement _copy_trackable_to_cpu() if "
"the Trackable requires AsyncCheckpoint support.")
def _checkpoint_adapter(self, path: str):
"""Returns a checkpoint adapter for this object.
Needs to be overridden if the `Trackable` requires adapter at restore.
Override this method to define callbacks for checkpoint positions to be
applied at restore time.
Args:
path: Checkpoint path.
Returns:
A subclass of AbstractCheckpointAdapter that defines callbacks at restore
for this trackable.
"""
del path
return None
| Trackable |
python | kamyu104__LeetCode-Solutions | Python/heaters.py | {
"start": 122,
"end": 799
} | class ____(object):
def findRadius(self, houses, heaters):
"""
:type houses: List[int]
:type heaters: List[int]
:rtype: int
"""
heaters.sort()
min_radius = 0
for house in houses:
equal_or_larger = bisect.bisect_left(heaters, house)
curr_radius = float("inf")
if equal_or_larger != len(heaters):
curr_radius = heaters[equal_or_larger] - house
if equal_or_larger != 0:
smaller = equal_or_larger-1
curr_radius = min(curr_radius, house - heaters[smaller])
min_radius = max(min_radius, curr_radius)
return min_radius
| Solution |
python | django__django | tests/invalid_models_tests/test_ordinary_fields.py | {
"start": 32556,
"end": 34956
} | class ____(SimpleTestCase):
maxDiff = None
    def test_fix_default_value(self):
        """Time/Date fields with a value evaluated at class-definition time as
        `default` should each raise the fields.W161 warning; a callable
        default (`now` itself) should not."""

        class Model(models.Model):
            field_dt = models.TimeField(default=now())
            field_t = models.TimeField(default=now().time())
            # Timezone-aware time object (when USE_TZ=True).
            field_tz = models.TimeField(default=now().timetz())
            field_now = models.DateField(default=now)

        names = ["field_dt", "field_t", "field_tz", "field_now"]
        fields = [Model._meta.get_field(name) for name in names]
        errors = []
        for field in fields:
            errors.extend(field.check())
        self.assertEqual(
            errors,
            [
                DjangoWarning(
                    "Fixed default value provided.",
                    hint="It seems you set a fixed date / time / datetime "
                    "value as default for this field. This may not be "
                    "what you want. If you want to have the current date "
                    "as default, use `django.utils.timezone.now`",
                    obj=fields[0],
                    id="fields.W161",
                ),
                DjangoWarning(
                    "Fixed default value provided.",
                    hint="It seems you set a fixed date / time / datetime "
                    "value as default for this field. This may not be "
                    "what you want. If you want to have the current date "
                    "as default, use `django.utils.timezone.now`",
                    obj=fields[1],
                    id="fields.W161",
                ),
                DjangoWarning(
                    "Fixed default value provided.",
                    hint=(
                        "It seems you set a fixed date / time / datetime value as "
                        "default for this field. This may not be what you want. "
                        "If you want to have the current date as default, use "
                        "`django.utils.timezone.now`"
                    ),
                    obj=fields[2],
                    id="fields.W161",
                ),
                # field_now doesn't raise a warning.
            ],
        )
    @override_settings(USE_TZ=True)
    def test_fix_default_value_tz(self):
        # Same checks must hold when USE_TZ is enabled.
        self.test_fix_default_value()
@isolate_apps("invalid_models_tests")
| TimeFieldTests |
python | pytorch__pytorch | torch/_inductor/codegen/wrapper.py | {
"start": 15064,
"end": 15357
} | class ____(WrapperLine):
line: LineContext
    def codegen(self, code: IndentedBuffer) -> None:
        # Emit the stored comment line verbatim into the generated wrapper code.
        code.writeline(self.line)
    @staticmethod
    def codegen_fx(converter: FxConverter) -> FxConversionFunc:
        # FX conversion routes comment lines to the converter's comment handler.
        return converter._generate_comment
@dataclasses.dataclass
| CommentLine |
python | langchain-ai__langchain | libs/partners/openai/langchain_openai/middleware/openai_moderation.py | {
"start": 732,
"end": 1434
} | class ____(RuntimeError):
"""Raised when OpenAI flags content and `exit_behavior` is set to ``"error"``."""
    def __init__(
        self,
        *,
        content: str,
        stage: ViolationStage,
        result: Moderation,
        message: str,
    ) -> None:
        """Initialize the error with violation details.

        Args:
            content: The content that was flagged.
            stage: The stage where the violation occurred.
            result: The moderation result from OpenAI.
            message: The error message.
        """
        super().__init__(message)
        # Keep the raw details on the instance so callers catching the error
        # can inspect what was flagged and at which stage.
        self.content = content
        self.stage = stage
        self.result = result
| OpenAIModerationError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 374244,
"end": 376079
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateSponsorshipPreferences"""
__schema__ = github_schema
__field_names__ = (
"sponsor_id",
"sponsor_login",
"sponsorable_id",
"sponsorable_login",
"receive_emails",
"privacy_level",
"client_mutation_id",
)
sponsor_id = sgqlc.types.Field(ID, graphql_name="sponsorId")
"""The ID of the user or organization who is acting as the sponsor,
paying for the sponsorship. Required if sponsorLogin is not given.
"""
sponsor_login = sgqlc.types.Field(String, graphql_name="sponsorLogin")
"""The username of the user or organization who is acting as the
sponsor, paying for the sponsorship. Required if sponsorId is not
given.
"""
sponsorable_id = sgqlc.types.Field(ID, graphql_name="sponsorableId")
"""The ID of the user or organization who is receiving the
sponsorship. Required if sponsorableLogin is not given.
"""
sponsorable_login = sgqlc.types.Field(String, graphql_name="sponsorableLogin")
"""The username of the user or organization who is receiving the
sponsorship. Required if sponsorableId is not given.
"""
receive_emails = sgqlc.types.Field(Boolean, graphql_name="receiveEmails")
"""Whether the sponsor should receive email updates from the
sponsorable.
"""
privacy_level = sgqlc.types.Field(SponsorshipPrivacy, graphql_name="privacyLevel")
"""Specify whether others should be able to see that the sponsor is
sponsoring the sponsorable. Public visibility still does not
reveal which tier is used.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateSponsorshipPreferencesInput |
python | charliermarsh__ruff | crates/ruff_python_ast/generate.py | {
"start": 2932,
"end": 3618
} | class ____:
    name: str
    nodes: list[Node]
    owned_enum_ty: str
    add_suffix_to_is_methods: bool
    anynode_is_label: str
    doc: str | None

    def __init__(self, group_name: str, group: dict[str, Any]) -> None:
        """Build a node group from its name and parsed definition mapping.

        `group` is the parsed mapping describing this group; optional keys
        fall back to defaults derived from the group name.
        """
        self.name = group_name
        self.owned_enum_ty = group_name
        # NOTE(review): `ref_enum_ty` is assigned but not declared in the
        # annotations above — confirm whether that is intentional.
        self.ref_enum_ty = group_name + "Ref"
        self.add_suffix_to_is_methods = group.get("add_suffix_to_is_methods", False)
        self.anynode_is_label = group.get("anynode_is_label", to_snake_case(group_name))
        self.doc = group.get("doc")
        self.nodes = [
            Node(self, node_name, node) for node_name, node in group["nodes"].items()
        ]
@dataclass
| Group |
python | huggingface__transformers | src/transformers/models/xmod/modeling_xmod.py | {
"start": 18389,
"end": 20709
} | class ____(nn.Module):
    def __init__(self, config):
        """Output projection plus per-language adapter modules.

        Builds one `XmodAdapter` per entry in `config.languages`, keyed by
        the stringified language identifier.
        """
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Whether the residual is captured before or after normalization in
        # `lang_adapter`.
        self.ln_before_adapter = config.ln_before_adapter
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Optional dedicated LayerNorm in front of the adapters; when absent,
        # `adapter_reuse_layer_norm` may reuse `self.LayerNorm` instead.
        if config.adapter_layer_norm:
            self.adapter_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        else:
            self.adapter_layer_norm = None
        self.adapter_reuse_layer_norm = config.adapter_reuse_layer_norm
        self.adapter_modules = nn.ModuleDict({})
        for language in config.languages:
            self.adapter_modules[str(language)] = XmodAdapter(config)
    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, lang_ids: torch.Tensor) -> torch.Tensor:
        # Project back to hidden size, apply dropout, add the residual input,
        # then route through the language-specific adapters.
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + input_tensor
        hidden_states = self.lang_adapter(lang_ids, hidden_states)
        return hidden_states
def lang_adapter(self, lang_ids: torch.Tensor, hidden_states: torch.Tensor):
# Process subsequent samples with the same lang_id in parallel
lang_ids, lang_lengths = torch.unique_consecutive(lang_ids, return_counts=True)
if not self.ln_before_adapter:
residual = hidden_states
if self.adapter_layer_norm is not None:
hidden_states = self.adapter_layer_norm(hidden_states)
elif self.adapter_reuse_layer_norm:
hidden_states = self.LayerNorm(hidden_states)
if self.ln_before_adapter:
residual = hidden_states
split_hidden_states = torch.split(hidden_states, lang_lengths.tolist(), 0)
lang_wise_outputs = []
for i, (lang_id, split_hidden_state) in enumerate(zip(lang_ids, split_hidden_states)):
lang = list(self.adapter_modules.keys())[int(lang_id.item())]
lang_wise_outputs.append(self.adapter_modules[lang](split_hidden_state))
hidden_states = torch.cat(lang_wise_outputs, 0)
hidden_states = self.dropout(hidden_states)
hidden_states += residual
return hidden_states
| XmodOutput |
python | pallets__werkzeug | src/werkzeug/routing/rules.py | {
"start": 7436,
"end": 9885
class ____(RuleFactory):
    """A factory that fills in template variables into rules. Used by
    `RuleTemplate` internally.

    :internal:
    """

    def __init__(
        self, rules: t.Iterable[RuleFactory], context: dict[str, t.Any]
    ) -> None:
        # `context` maps template variable names to their substitution values.
        self.rules = rules
        self.context = context

    def get_rules(self, map: Map) -> t.Iterator[Rule]:
        """Yield new :class:`Rule` objects with all string fields (rule
        string, string defaults, subdomain, string endpoints) passed through
        :class:`string.Template` substitution against ``self.context``.
        """
        for rulefactory in self.rules:
            for rule in rulefactory.get_rules(map):
                new_defaults = subdomain = None

                if rule.defaults:
                    new_defaults = {}
                    # Only string default values are templated; other types
                    # are copied through unchanged.
                    for key, value in rule.defaults.items():
                        if isinstance(value, str):
                            value = Template(value).substitute(self.context)
                        new_defaults[key] = value

                if rule.subdomain is not None:
                    subdomain = Template(rule.subdomain).substitute(self.context)

                # Endpoints may be arbitrary objects; only template strings.
                new_endpoint = rule.endpoint
                if isinstance(new_endpoint, str):
                    new_endpoint = Template(new_endpoint).substitute(self.context)

                yield Rule(
                    Template(rule.rule).substitute(self.context),
                    new_defaults,
                    subdomain,
                    rule.methods,
                    rule.build_only,
                    new_endpoint,
                    rule.strict_slashes,
                )
_ASTT = t.TypeVar("_ASTT", bound=ast.AST)


def _prefix_names(src: str, expected_type: type[_ASTT]) -> _ASTT:
    """ast parse and prefix names with `.` to avoid collision with user vars"""
    node: ast.AST = ast.parse(src).body[0]

    # An expression statement wraps the interesting node; unwrap it.
    if isinstance(node, ast.Expr):
        node = node.value

    if not isinstance(node, expected_type):
        raise TypeError(
            f"AST node is of type {type(node).__name__}, not {expected_type.__name__}"
        )

    for descendant in ast.walk(node):
        if isinstance(descendant, ast.Name):
            descendant.id = f".{descendant.id}"

    return node
_CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()"
_IF_KWARGS_URL_ENCODE_CODE = """\
if kwargs:
params = self._encode_query_vars(kwargs)
q = "?" if params else ""
else:
q = params = ""
"""
_IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE, ast.If)
_URL_ENCODE_AST_NAMES = (
_prefix_names("q", ast.Name),
_prefix_names("params", ast.Name),
)
| RuleTemplateFactory |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 91497,
"end": 92866
class ____(gdb.Command):
    # gdb command that evaluates Python source in the debugged (inferior)
    # process via PythonCodeExecutor.

    def readcode(self, expr):
        """Return ``(source, input_type)``.

        If ``expr`` was given on the command line it is evaluated as a single
        interactive statement; otherwise read lines interactively until EOF
        or a line consisting of ``end``, and treat them as a file-style block.
        """
        if expr:
            return expr, PythonCodeExecutor.Py_single_input
        else:
            lines = []
            while True:
                try:
                    line = input('>')
                except EOFError:
                    break
                else:
                    if line.rstrip() == 'end':
                        break
                    lines.append(line)
            return '\n'.join(lines), PythonCodeExecutor.Py_file_input

    @dont_suppress_errors
    def invoke(self, expr, from_tty):
        # Compile/run the code in the inferior and drop the reference to the
        # result object (xdecref) so we don't leak it in the debuggee.
        expr, input_type = self.readcode(expr)
        executor = PythonCodeExecutor()
        executor.xdecref(_evalcode_python(executor, input_type, global_dict, local_dict))
# Module-level command registration, run at import time inside gdb.
gdb.execute('set breakpoint pending on')

# `gdb.GdbError` only exists on gdb >= 7.2 (see the warning below), so its
# presence gates registration of the command objects.
if hasattr(gdb, 'GdbError'):
    # Wrap py-step and py-next in gdb defines to make them repeatable.
    py_step = PyStep('-py-step', PythonInfo())
    py_next = PyNext('-py-next', PythonInfo())
    register_defines()
    py_finish = PyFinish('py-finish', PythonInfo())
    py_run = PyRun('py-run', PythonInfo())
    py_cont = PyCont('py-cont', PythonInfo())

    py_exec = FixGdbCommand('py-exec', '-py-exec')
    _py_exec = PyExec("-py-exec", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
else:
    warnings.warn("Use gdb 7.2 or higher to use the py-exec command.")
| PyExec |
python | networkx__networkx | networkx/algorithms/tree/tests/test_mst.py | {
"start": 466,
"end": 8826
class ____:
    """Base class for test classes for minimum spanning tree algorithms.

    This class contains some common tests that will be inherited by
    subclasses. Each subclass must have a class attribute
    :data:`algorithm` that is a string representing the algorithm to
    run, as described under the ``algorithm`` keyword argument for the
    :func:`networkx.minimum_spanning_edges` function. Subclasses can
    then implement any algorithm-specific tests.
    """

    def setup_method(self, method):
        """Creates an example graph and stores the expected minimum and
        maximum spanning tree edges.
        """
        # This stores the class attribute `algorithm` in an instance attribute.
        self.algo = self.algorithm
        # This example graph comes from Wikipedia:
        # https://en.wikipedia.org/wiki/Kruskal's_algorithm
        edges = [
            (0, 1, 7),
            (0, 3, 5),
            (1, 2, 8),
            (1, 3, 9),
            (1, 4, 7),
            (2, 4, 5),
            (3, 4, 15),
            (3, 5, 6),
            (4, 5, 8),
            (4, 6, 9),
            (5, 6, 11),
        ]
        self.G = nx.Graph()
        self.G.add_weighted_edges_from(edges)
        # Expected MST / maximum spanning tree edge lists for the graph above.
        self.minimum_spanning_edgelist = [
            (0, 1, {"weight": 7}),
            (0, 3, {"weight": 5}),
            (1, 4, {"weight": 7}),
            (2, 4, {"weight": 5}),
            (3, 5, {"weight": 6}),
            (4, 6, {"weight": 9}),
        ]
        self.maximum_spanning_edgelist = [
            (0, 1, {"weight": 7}),
            (1, 2, {"weight": 8}),
            (1, 3, {"weight": 9}),
            (3, 4, {"weight": 15}),
            (4, 6, {"weight": 9}),
            (5, 6, {"weight": 11}),
        ]

    def test_minimum_edges(self):
        edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo)
        # Edges from the spanning edges functions don't come in sorted
        # orientation, so we need to sort each edge individually.
        actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges)
        assert edges_equal(actual, self.minimum_spanning_edgelist)

    def test_maximum_edges(self):
        edges = nx.maximum_spanning_edges(self.G, algorithm=self.algo)
        # Edges from the spanning edges functions don't come in sorted
        # orientation, so we need to sort each edge individually.
        actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges)
        assert edges_equal(actual, self.maximum_spanning_edgelist)

    def test_without_data(self):
        # data=False should yield bare (u, v) pairs.
        edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo, data=False)
        # Edges from the spanning edges functions don't come in sorted
        # orientation, so we need to sort each edge individually.
        actual = sorted((min(u, v), max(u, v)) for u, v in edges)
        expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist]
        assert edges_equal(actual, expected)

    def test_nan_weights(self):
        # Edge weights NaN never appear in the spanning tree. see #2164
        G = self.G
        G.add_edge(0, 12, weight=float("nan"))
        edges = nx.minimum_spanning_edges(
            G, algorithm=self.algo, data=False, ignore_nan=True
        )
        actual = sorted((min(u, v), max(u, v)) for u, v in edges)
        expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist]
        assert edges_equal(actual, expected)
        # Now test for raising exception
        edges = nx.minimum_spanning_edges(
            G, algorithm=self.algo, data=False, ignore_nan=False
        )
        with pytest.raises(ValueError):
            list(edges)
        # test default for ignore_nan as False
        edges = nx.minimum_spanning_edges(G, algorithm=self.algo, data=False)
        with pytest.raises(ValueError):
            list(edges)

    def test_nan_weights_MultiGraph(self):
        # NaN handling must also raise for multigraphs (prim only here).
        G = nx.MultiGraph()
        G.add_edge(0, 12, weight=float("nan"))
        edges = nx.minimum_spanning_edges(
            G, algorithm="prim", data=False, ignore_nan=False
        )
        with pytest.raises(ValueError):
            list(edges)
        # test default for ignore_nan as False
        edges = nx.minimum_spanning_edges(G, algorithm="prim", data=False)
        with pytest.raises(ValueError):
            list(edges)

    def test_nan_weights_order(self):
        # now try again with a nan edge at the beginning of G.nodes
        edges = [
            (0, 1, 7),
            (0, 3, 5),
            (1, 2, 8),
            (1, 3, 9),
            (1, 4, 7),
            (2, 4, 5),
            (3, 4, 15),
            (3, 5, 6),
            (4, 5, 8),
            (4, 6, 9),
            (5, 6, 11),
        ]
        G = nx.Graph()
        # Shift all node labels by +1 so node 0 (the NaN edge) sorts first.
        G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges])
        G.add_edge(0, 7, weight=float("nan"))
        edges = nx.minimum_spanning_edges(
            G, algorithm=self.algo, data=False, ignore_nan=True
        )
        actual = sorted((min(u, v), max(u, v)) for u, v in edges)
        shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist]
        assert edges_equal(actual, shift)

    def test_isolated_node(self):
        # now try again with an isolated node
        edges = [
            (0, 1, 7),
            (0, 3, 5),
            (1, 2, 8),
            (1, 3, 9),
            (1, 4, 7),
            (2, 4, 5),
            (3, 4, 15),
            (3, 5, 6),
            (4, 5, 8),
            (4, 6, 9),
            (5, 6, 11),
        ]
        G = nx.Graph()
        G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges])
        G.add_node(0)
        edges = nx.minimum_spanning_edges(
            G, algorithm=self.algo, data=False, ignore_nan=True
        )
        actual = sorted((min(u, v), max(u, v)) for u, v in edges)
        shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist]
        assert edges_equal(actual, shift)

    def test_minimum_tree(self):
        T = nx.minimum_spanning_tree(self.G, algorithm=self.algo)
        actual = sorted(T.edges(data=True))
        assert edges_equal(actual, self.minimum_spanning_edgelist)

    def test_maximum_tree(self):
        T = nx.maximum_spanning_tree(self.G, algorithm=self.algo)
        actual = sorted(T.edges(data=True))
        assert edges_equal(actual, self.maximum_spanning_edgelist)

    def test_disconnected(self):
        # A disconnected graph yields a spanning *forest*.
        G = nx.Graph([(0, 1, {"weight": 1}), (2, 3, {"weight": 2})])
        T = nx.minimum_spanning_tree(G, algorithm=self.algo)
        assert nodes_equal(list(T), list(range(4)))
        assert edges_equal(list(T.edges()), [(0, 1), (2, 3)])

    def test_empty_graph(self):
        G = nx.empty_graph(3)
        T = nx.minimum_spanning_tree(G, algorithm=self.algo)
        assert nodes_equal(sorted(T), list(range(3)))
        assert T.number_of_edges() == 0

    def test_attributes(self):
        # Graph and edge attributes must be preserved on the returned tree.
        G = nx.Graph()
        G.add_edge(1, 2, weight=1, color="red", distance=7)
        G.add_edge(2, 3, weight=1, color="green", distance=2)
        G.add_edge(1, 3, weight=10, color="blue", distance=1)
        G.graph["foo"] = "bar"
        T = nx.minimum_spanning_tree(G, algorithm=self.algo)
        assert T.graph == G.graph
        assert nodes_equal(T, G)
        for u, v in T.edges():
            assert T.adj[u][v] == G.adj[u][v]

    def test_weight_attribute(self):
        # A non-default weight key should drive edge selection.
        G = nx.Graph()
        G.add_edge(0, 1, weight=1, distance=7)
        G.add_edge(0, 2, weight=30, distance=1)
        G.add_edge(1, 2, weight=1, distance=1)
        G.add_node(3)
        T = nx.minimum_spanning_tree(G, algorithm=self.algo, weight="distance")
        assert nodes_equal(sorted(T), list(range(4)))
        assert edges_equal(sorted(T.edges()), [(0, 2), (1, 2)])
        T = nx.maximum_spanning_tree(G, algorithm=self.algo, weight="distance")
        assert nodes_equal(sorted(T), list(range(4)))
        assert edges_equal(sorted(T.edges()), [(0, 1), (0, 2)])

    def test_minimum_spanning_edges_directed_raises(self):
        DG = nx.DiGraph()
        DG.add_edge(0, 1, weight=1)
        with pytest.raises(nx.NetworkXNotImplemented):
            list(nx.minimum_spanning_edges(DG, algorithm=self.algo))

        with pytest.raises(nx.NetworkXNotImplemented):
            list(nx.maximum_spanning_edges(DG, algorithm=self.algo))
| MinimumSpanningTreeTestBase |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_app_components.py | {
"start": 762,
"end": 2001
class ____(APITestCase):
    """Tests the sentry-app-components endpoint against a published app
    with a single issue-link component."""

    endpoint = "sentry-api-0-sentry-app-components"

    def setUp(self) -> None:
        self.superuser = self.create_user(email="a@example.com", is_superuser=True)
        self.user = self.create_user(email="boop@example.com")
        self.org = self.create_organization(owner=self.user)
        # Published app with exactly one component (an issue-link schema).
        self.sentry_app = self.create_sentry_app(
            name="Test",
            organization=self.org,
            published=True,
            schema={"elements": [self.create_issue_link_schema()]},
        )
        self.component = self.sentry_app.components.first()
        self.login_as(user=self.user)

    def test_retrieves_all_components(self) -> None:
        response = self.get_success_response(self.sentry_app.slug)
        # The serialized component echoes the schema plus app identity fields.
        assert response.data[0] == {
            "uuid": str(self.component.uuid),
            "type": "issue-link",
            "schema": self.component.schema,
            "error": "",
            "sentryApp": {
                "uuid": self.sentry_app.uuid,
                "slug": self.sentry_app.slug,
                "name": self.sentry_app.name,
                "avatars": get_sentry_app_avatars(self.sentry_app),
            },
        }
@control_silo_test
| SentryAppComponentsTest |
python | kamyu104__LeetCode-Solutions | Python/inorder-successor-in-bst-ii.py | {
"start": 54,
"end": 238
class ____(object):
    """A BST node holding a value, left/right children, and a parent link."""

    def __init__(self, val, left, right, parent):
        # Plain data holder; all four fields are stored as-is.
        self.val, self.left, self.right, self.parent = val, left, right, parent
| Node |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_mixin.py | {
"start": 65967,
"end": 78214
class ____(DeclarativeTestBase, testing.AssertsCompiledSQL):
    """Tests for @declared_attr behavior on mixins: per-class memoization,
    cascading, directives, and column-copy ordering."""

    __dialect__ = "default"

    def test_singleton_behavior_within_decl(self):
        counter = mock.Mock()

        class Mixin:
            @declared_attr
            def my_prop(cls):
                counter(cls)
                return Column("x", Integer)

        class A(Base, Mixin):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)

            @declared_attr
            def my_other_prop(cls):
                return column_property(cls.my_prop + 5)

        eq_(counter.mock_calls, [mock.call(A)])

        class B(Base, Mixin):
            __tablename__ = "b"
            id = Column(Integer, primary_key=True)

            @declared_attr
            def my_other_prop(cls):
                return column_property(cls.my_prop + 5)

        eq_(counter.mock_calls, [mock.call(A), mock.call(B)])

        # this is why we need singleton-per-class behavior. We get
        # an un-bound "x" column otherwise here, because my_prop() generates
        # multiple columns.
        a_col = A.my_other_prop.__clause_element__().element.left
        b_col = B.my_other_prop.__clause_element__().element.left
        is_(a_col.table, A.__table__)
        is_(b_col.table, B.__table__)
        is_(a_col, A.__table__.c.x)
        is_(b_col, B.__table__.c.x)

        s = fixture_session()
        self.assert_compile(
            s.query(A),
            "SELECT a.x + :x_1 AS anon_1, a.id AS a_id, a.x AS a_x FROM a",
        )
        self.assert_compile(
            s.query(B),
            "SELECT b.x + :x_1 AS anon_1, b.id AS b_id, b.x AS b_x FROM b",
        )

    @testing.requires.predictable_gc
    def test_singleton_gc(self):
        # The memoized declared_attr result must not keep the class alive.
        counter = mock.Mock()

        class Mixin:
            @declared_attr
            def my_prop(cls):
                counter(cls.__name__)
                return Column("x", Integer)

        class A(Base, Mixin):
            __tablename__ = "b"
            id = Column(Integer, primary_key=True)

            @declared_attr
            def my_other_prop(cls):
                return column_property(cls.my_prop + 5)

        eq_(counter.mock_calls, [mock.call("A")])
        del A
        gc_collect()

        from sqlalchemy.orm.clsregistry import _key_is_empty

        assert _key_is_empty(
            "A",
            Base.registry._class_registry,
            lambda cls: hasattr(cls, "my_other_prop"),
        )

    def test_can_we_access_the_mixin_straight(self):
        # Accessing a declared_attr on an unmapped mixin warns.
        class Mixin:
            @declared_attr
            def my_prop(cls):
                return Column("x", Integer)

        with expect_warnings(
            "Unmanaged access of declarative attribute my_prop "
            "from non-mapped class Mixin"
        ):
            Mixin.my_prop

    def test_can_we_access_the_mixin_straight_special_names(self):
        # .directive attrs (dunder names) are accessible without warning.
        class Mixin:
            @declared_attr.directive
            def __table_args__(cls):
                return (1, 2, 3)

            @declared_attr.directive
            def __arbitrary__(cls):
                return (4, 5, 6)

        eq_(Mixin.__table_args__, (1, 2, 3))
        eq_(Mixin.__arbitrary__, (4, 5, 6))

    def test_non_decl_access(self):
        counter = mock.Mock()

        class Mixin:
            @declared_attr.directive
            def __tablename__(cls):
                counter(cls)
                return "foo"

        class Foo(Mixin, Base):
            id = Column(Integer, primary_key=True)

            @declared_attr.directive
            def x(cls):
                cls.__tablename__

            @declared_attr.directive
            def y(cls):
                cls.__tablename__

        eq_(counter.mock_calls, [mock.call(Foo)])

        eq_(Foo.__tablename__, "foo")
        eq_(Foo.__tablename__, "foo")

        # here we are testing that access of __tablename__ does in fact
        # call the user-defined function, as we are no longer in the
        # "declarative_scan" phase. the class *is* mapped here.
        eq_(
            counter.mock_calls,
            [mock.call(Foo), mock.call(Foo), mock.call(Foo)],
        )

    def test_property_noncascade(self):
        # Plain declared_attr is invoked only for the class that defines it.
        counter = mock.Mock()

        class Mixin:
            @declared_attr
            def my_prop(cls):
                counter(cls)
                return column_property(cls.x + 2)

        class A(Base, Mixin):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)
            x = Column(Integer)

        class B(A):
            pass

        eq_(counter.mock_calls, [mock.call(A)])

    def test_property_cascade_mixin(self):
        # .cascading re-invokes the function for each subclass.
        counter = mock.Mock()

        class Mixin:
            @declared_attr.cascading
            def my_prop(cls):
                counter(cls)
                return column_property(cls.x + 2)

        class A(Base, Mixin):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)
            x = Column(Integer)

        class B(A):
            pass

        eq_(counter.mock_calls, [mock.call(A), mock.call(B)])

    def test_property_cascade_mixin_override(self):
        # Overriding a cascading attr on a subclass warns and is skipped.
        counter = mock.Mock()

        class Mixin:
            @declared_attr.cascading
            def my_prop(cls):
                counter(cls)
                return column_property(cls.x + 2)

        class A(Base, Mixin):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)
            x = Column(Integer)

        with expect_warnings(
            "Attribute 'my_prop' on class .*B.* "
            "cannot be processed due to @declared_attr.cascading; "
            "skipping"
        ):

            class B(A):
                my_prop = Column("foobar", Integer)

        eq_(counter.mock_calls, [mock.call(A), mock.call(B)])

    def test_property_cascade_abstract(self):
        # Cascading also applies when declared on an __abstract__ base.
        counter = mock.Mock()

        class Abs(Base):
            __abstract__ = True

            @declared_attr.cascading
            def my_prop(cls):
                counter(cls)
                return column_property(cls.x + 2)

        class A(Abs):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)
            x = Column(Integer)

        class B(A):
            pass

        eq_(counter.mock_calls, [mock.call(A), mock.call(B)])

    def test_warn_cascading_used_w_tablename(self):
        # .cascading is meaningless for __tablename__ and warns.
        class Mixin:
            @declared_attr.cascading
            def __tablename__(cls):
                return "foo"

        with expect_warnings(
            "@declared_attr.cascading is not supported on the "
            "__tablename__ attribute on class .*A."
        ):

            class A(Mixin, Base):
                id = Column(Integer, primary_key=True)

        eq_(A.__table__.name, "foo")

    def test_col_prop_attrs_associated_w_class_for_mapper_args(self):
        from sqlalchemy import Column
        import collections

        asserted = collections.defaultdict(set)

        class Mixin:
            @declared_attr.cascading
            def my_attr(cls):
                if has_inherited_table(cls):
                    id_ = Column(ForeignKey("a.my_attr"), primary_key=True)
                    asserted["b"].add(id_)
                else:
                    id_ = Column(Integer, primary_key=True)
                    asserted["a"].add(id_)
                return id_

        class A(Base, Mixin):
            __tablename__ = "a"

            @declared_attr
            def __mapper_args__(cls):
                asserted["a"].add(cls.my_attr)
                return {}

        # here:
        # 1. A is mapped. so A.my_attr is now the InstrumentedAttribute.
        # 2. B wants to call my_attr also. Due to .cascading, it has been
        # invoked specific to B, and is present in the dict_ that will
        # be used when we map the class. But except for the
        # special setattr() we do in _scan_attributes() in this case, would
        # otherwise not been set on the class as anything from this call;
        # the usual mechanics of calling it from the descriptor also do not
        # work because A is fully mapped and because A set it up, is currently
        # that non-expected InstrumentedAttribute and replaces the
        # descriptor from being invoked.
        class B(A):
            __tablename__ = "b"

            @declared_attr
            def __mapper_args__(cls):
                asserted["b"].add(cls.my_attr)
                return {}

        eq_(
            asserted,
            {
                "a": {A.my_attr.property.columns[0]},
                "b": {B.my_attr.property.columns[0]},
            },
        )

    def test_column_pre_map(self):
        # declared_attr columns are invoked before the class is mapped.
        counter = mock.Mock()

        class Mixin:
            @declared_attr
            def my_col(cls):
                counter(cls)
                assert not orm_base._mapper_or_none(cls)
                return Column("x", Integer)

        class A(Base, Mixin):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)

        eq_(counter.mock_calls, [mock.call(A)])

    def test_mixin_attr_refers_to_column_copies(self):
        # this @declared_attr can refer to User.id
        # freely because we now do the "copy column" operation
        # before the declared_attr is invoked.
        counter = mock.Mock()

        class HasAddressCount:
            id = Column(Integer, primary_key=True)

            @declared_attr
            def address_count(cls):
                counter(cls.id)
                return column_property(
                    select(func.count(Address.id))
                    .where(Address.user_id == cls.id)
                    .scalar_subquery()
                )

        class Address(Base):
            __tablename__ = "address"
            id = Column(Integer, primary_key=True)
            user_id = Column(ForeignKey("user.id"))

        class User(Base, HasAddressCount):
            __tablename__ = "user"

        eq_(counter.mock_calls, [mock.call(User.id)])

        sess = fixture_session()
        self.assert_compile(
            sess.query(User).having(User.address_count > 5),
            "SELECT (SELECT count(address.id) AS "
            'count_1 FROM address WHERE address.user_id = "user".id) '
            'AS anon_1, "user".id AS user_id FROM "user" '
            "HAVING (SELECT count(address.id) AS "
            'count_1 FROM address WHERE address.user_id = "user".id) '
            "> :param_1",
        )

    def test_multilevel_mixin_attr_refers_to_column_copies(self):
        """test #8190.

        This test is the same idea as test_mixin_attr_refers_to_column_copies
        but tests the column copies from superclasses.

        """
        counter = mock.Mock()

        class SomeOtherMixin:
            status = Column(String)

        class HasAddressCount(SomeOtherMixin):
            id = Column(Integer, primary_key=True)

            @declared_attr
            def address_count(cls):
                counter(cls.id)
                counter(cls.status)
                return column_property(
                    select(func.count(Address.id))
                    .where(Address.user_id == cls.id)
                    .where(cls.status == "some status")
                    .scalar_subquery()
                )

        class Address(Base):
            __tablename__ = "address"
            id = Column(Integer, primary_key=True)
            user_id = Column(ForeignKey("user.id"))

        class User(Base, HasAddressCount):
            __tablename__ = "user"

        eq_(counter.mock_calls, [mock.call(User.id), mock.call(User.status)])

        sess = fixture_session()
        self.assert_compile(
            sess.query(User).having(User.address_count > 5),
            "SELECT (SELECT count(address.id) AS count_1 FROM address "
            'WHERE address.user_id = "user".id AND "user".status = :param_1) '
            'AS anon_1, "user".id AS user_id, "user".status AS user_status '
            'FROM "user" HAVING (SELECT count(address.id) AS count_1 '
            'FROM address WHERE address.user_id = "user".id '
            'AND "user".status = :param_1) > :param_2',
        )
| DeclaredAttrTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/langhelpers.py | {
"start": 60276,
"end": 67810
class ____:
    r"""Apply translation of functions to accept \**kw arguments if they
    don't already.

    Used to ensure cross-compatibility with third party legacy code, for things
    like compiler visit methods that need to accept ``**kw`` arguments,
    but may have been copied from old code that didn't accept them.

    """

    ensure_kwarg: str
    """a regular expression that indicates method names for which the method
    should accept ``**kw`` arguments.

    The class will scan for methods matching the name template and decorate
    them if necessary to ensure ``**kw`` parameters are accepted.

    """

    def __init_subclass__(cls) -> None:
        # At subclass-definition time, wrap any method whose name matches
        # ensure_kwarg and whose signature lacks a **kw catch-all.
        fn_reg = cls.ensure_kwarg
        clsdict = cls.__dict__
        if fn_reg:
            for key in clsdict:
                m = re.match(fn_reg, key)
                if m:
                    fn = clsdict[key]
                    spec = compat.inspect_getfullargspec(fn)
                    if not spec.varkw:
                        wrapped = cls._wrap_w_kw(fn)
                        setattr(cls, key, wrapped)
        super().__init_subclass__()

    @classmethod
    def _wrap_w_kw(cls, fn: Callable[..., Any]) -> Callable[..., Any]:
        # Returned wrapper accepts and silently discards **kw.
        def wrap(*arg: Any, **kw: Any) -> Any:
            return fn(*arg)

        return update_wrapper(wrap, fn)
def wrap_callable(wrapper, fn):
    """Augment functools.update_wrapper() to work with objects with
    a ``__call__()`` method.

    :param fn:
      object with __call__ method

    """
    # Plain functions carry __name__; defer entirely to update_wrapper.
    if hasattr(fn, "__name__"):
        return update_wrapper(wrapper, fn)

    # Callable instance: copy identity metadata over by hand.
    wrapper.__name__ = fn.__class__.__name__
    if hasattr(fn, "__module__"):
        wrapper.__module__ = fn.__module__

    # Prefer the __call__ docstring, falling back to the object's own.
    call_doc = getattr(fn.__call__, "__doc__", None)
    if call_doc:
        wrapper.__doc__ = call_doc
    elif fn.__doc__:
        wrapper.__doc__ = fn.__doc__

    return wrapper
def quoted_token_parser(value):
    """Parse a dotted identifier with accommodation for quoted names.

    Includes support for SQL-style double quotes as a literal character.

    E.g.::

        >>> quoted_token_parser("name")
        ['name']
        >>> quoted_token_parser("schema.name")
        ['schema', 'name']
        >>> quoted_token_parser('"Schema"."Name"')
        ['Schema', 'Name']
        >>> quoted_token_parser('"Schema"."Name""Foo"')
        ['Schema', 'Name"Foo']

    """
    # Fast path: no quoting involved, a plain split suffices.
    if '"' not in value:
        return value.split(".")

    # 0 = outside of quotes
    # 1 = inside of quotes
    state = 0
    result: List[List[str]] = [[]]
    idx = 0
    lv = len(value)
    while idx < lv:
        char = value[idx]
        if char == '"':
            # A doubled quote inside a quoted section is an escaped literal
            # quote (SQL convention); otherwise toggle quoting state.
            if state == 1 and idx < lv - 1 and value[idx + 1] == '"':
                result[-1].append('"')
                idx += 1
            else:
                state ^= 1
        elif char == "." and state == 0:
            # Unquoted dot separates tokens.
            result.append([])
        else:
            result[-1].append(char)
        idx += 1
    return ["".join(token) for token in result]
def add_parameter_text(params: Any, text: str) -> Callable[[_F], _F]:
params = _collections.to_list(params)
def decorate(fn):
doc = fn.__doc__ is not None and fn.__doc__ or ""
if doc:
doc = inject_param_text(doc, {param: text for param in params})
fn.__doc__ = doc
return fn
return decorate
def _dedent_docstring(text: str) -> str:
    """Dedent a docstring, leaving an unindented first line untouched.

    ``textwrap.dedent`` would refuse to dedent anything when the first line
    has no leading whitespace (the common docstring shape), so that line is
    split off and the remainder dedented on its own.
    """
    head, sep, tail = text.partition("\n")
    if not sep:
        # Single line: nothing to dedent.
        return text
    if head.startswith(" "):
        return textwrap.dedent(text)
    return head + "\n" + textwrap.dedent(tail)
def inject_docstring_text(
    given_doctext: Optional[str], injecttext: str, pos: int
) -> str:
    """Insert ``injecttext`` into a docstring at the ``pos``-th blank-line
    boundary (0 = at the top), returning the combined text."""
    doctext: str = _dedent_docstring(given_doctext or "")
    lines = doctext.split("\n")
    if len(lines) == 1:
        # Ensure there is at least one trailing line to anchor insertion.
        lines.append("")
    injectlines = textwrap.dedent(injecttext).split("\n")
    if injectlines[0]:
        injectlines.insert(0, "")

    # Candidate insertion points: the start plus every blank line.
    blanks = [num for num, line in enumerate(lines) if not line.strip()]
    blanks.insert(0, 0)

    # Clamp pos to the available boundaries.
    inject_pos = blanks[min(pos, len(blanks) - 1)]
    lines = lines[0:inject_pos] + injectlines + lines[inject_pos:]
    return "\n".join(lines)
# Matches a reST ":param name:" line, capturing its indent and the name.
_param_reg = re.compile(r"(\s+):param (.+?):")


def inject_param_text(doctext: str, inject_params: Dict[str, str]) -> str:
    """Append extra text to the body of selected ``:param:`` entries in a
    docstring; ``inject_params`` maps parameter name -> text to add."""
    doclines = collections.deque(doctext.splitlines())
    lines = []
    # TODO: this is not working for params like ":param case_sensitive=True:"

    # Text pending insertion once the current param's body ends.
    to_inject = None
    while doclines:
        line = doclines.popleft()

        m = _param_reg.match(line)

        if to_inject is None:
            if m:
                param = m.group(2).lstrip("*")
                if param in inject_params:
                    # default indent to that of :param: plus one
                    indent = " " * len(m.group(1)) + " "

                    # but if the next line has text, use that line's
                    # indentation
                    if doclines:
                        m2 = re.match(r"(\s+)\S", doclines[0])
                        if m2:
                            indent = " " * len(m2.group(1))
                    to_inject = indent + inject_params[param]
        elif m:
            # Next :param: starts; flush pending text before it.
            lines.extend(["\n", to_inject, "\n"])
            to_inject = None
        elif not line.rstrip():
            # Blank line ends the param body; flush pending text.
            lines.extend([line, to_inject, "\n"])
            to_inject = None
        elif line.endswith("::"):
            # TODO: this still won't cover if the code example itself has
            # blank lines in it, need to detect those via indentation.
            lines.extend([line, doclines.popleft()])
            continue
        lines.append(line)

    return "\n".join(lines)
def repr_tuple_names(names: List[str]) -> Optional[str]:
    """Trims a list of strings from the middle and return a string of up to
    four elements. Strings greater than 11 characters will be truncated"""
    if not names:
        return None

    # Up to four names are shown verbatim; longer lists keep the first
    # three plus the last, with an ellipsis between.
    short_enough = len(names) <= 4
    shown = names[:4] if short_enough else names[:3] + names[-1:]
    trimmed = [n if len(n) <= 11 else f"{n[:11]}.." for n in shown]

    if short_enough:
        return ", ".join(trimmed)
    return f"{', '.join(trimmed[:3])}, ..., {trimmed[-1]}"
def has_compiled_ext(raise_=False):
    """Return True if the cython-compiled extensions are present.

    :param raise_: when True, raise ``ImportError`` instead of returning
     False if the extensions are missing.
    """
    from ._has_cython import HAS_CYEXTENSION

    if HAS_CYEXTENSION:
        return True
    elif raise_:
        raise ImportError(
            "cython extensions were expected to be installed, "
            "but are not present"
        )
    else:
        return False
def load_uncompiled_module(module: _M) -> _M:
    """Load the non-compied version of a module that is also
    compiled with cython.

    Locates the pure-python ``<name>.py`` file next to the package's
    ``__init__.py`` and executes it as a fresh module object, leaving the
    already-imported (compiled) module untouched.
    """
    full_name = module.__name__
    assert module.__spec__
    parent_name = module.__spec__.parent
    assert parent_name
    parent_module = sys.modules[parent_name]
    assert parent_module.__spec__
    # Derive the sibling .py path from the package's __init__.py location.
    package_path = parent_module.__spec__.origin
    assert package_path and package_path.endswith("__init__.py")
    name = full_name.split(".")[-1]
    module_path = package_path.replace("__init__.py", f"{name}.py")

    py_spec = importlib.util.spec_from_file_location(full_name, module_path)
    assert py_spec
    py_module = importlib.util.module_from_spec(py_spec)
    assert py_spec.loader
    py_spec.loader.exec_module(py_module)
    return cast(_M, py_module)
| EnsureKWArg |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess3.py | {
"start": 263,
"end": 323
class ____(A):
    # Subclass of A (defined elsewhere); adds a string instance attribute.
    def __init__(self):
        # NOTE(review): does not call super().__init__() — confirm A's
        # initializer is safe to skip.
        self.y = "hi"
| B |
python | doocs__leetcode | solution/3000-3099/3075.Maximize Happiness of Selected Children/Solution.py | {
"start": 0,
"end": 254
class ____:
    def maximumHappinessSum(self, happiness: List[int], k: int) -> int:
        """Sum of the k best picks where the i-th pick (0-based) loses i
        happiness, floored at zero.

        Note: sorts ``happiness`` in place (descending), matching the
        original implementation's side effect.
        """
        happiness.sort(reverse=True)
        total = 0
        for turn, value in enumerate(happiness[:k]):
            gain = value - turn
            if gain > 0:
                total += gain
        return total
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategy_options.py | {
"start": 52005,
"end": 63002
} | class ____(
cache_key.HasCacheKey, traversals.HasShallowCopy, visitors.Traversible
):
"""represents strategy information to select for a LoaderStrategy
and pass options to it.
:class:`._LoadElement` objects provide the inner datastructure
stored by a :class:`_orm.Load` object and are also the object passed
to methods like :meth:`.LoaderStrategy.setup_query`.
.. versionadded:: 2.0
"""
__slots__ = (
"path",
"strategy",
"propagate_to_loaders",
"local_opts",
"_extra_criteria",
"_reconcile_to_other",
)
__visit_name__ = "load_element"
_traverse_internals = [
("path", visitors.ExtendedInternalTraversal.dp_has_cache_key),
("strategy", visitors.ExtendedInternalTraversal.dp_plain_obj),
(
"local_opts",
visitors.ExtendedInternalTraversal.dp_string_multi_dict,
),
("_extra_criteria", visitors.InternalTraversal.dp_clauseelement_list),
("propagate_to_loaders", visitors.InternalTraversal.dp_plain_obj),
("_reconcile_to_other", visitors.InternalTraversal.dp_plain_obj),
]
_cache_key_traversal = None
_extra_criteria: Tuple[Any, ...]
_reconcile_to_other: Optional[bool]
strategy: Optional[_StrategyKey]
path: PathRegistry
propagate_to_loaders: bool
local_opts: util.immutabledict[str, Any]
is_token_strategy: bool
is_class_strategy: bool
def __hash__(self) -> int:
return id(self)
def __eq__(self, other):
return traversals.compare(self, other)
@property
def is_opts_only(self) -> bool:
return bool(self.local_opts and self.strategy is None)
def _clone(self, **kw: Any) -> _LoadElement:
cls = self.__class__
s = cls.__new__(cls)
self._shallow_copy_to(s)
return s
def _update_opts(self, **kw: Any) -> _LoadElement:
new = self._clone()
new.local_opts = new.local_opts.union(kw)
return new
def __getstate__(self) -> Dict[str, Any]:
d = self._shallow_to_dict()
d["path"] = self.path.serialize()
return d
def __setstate__(self, state: Dict[str, Any]) -> None:
state["path"] = PathRegistry.deserialize(state["path"])
self._shallow_from_dict(state)
def _raise_for_no_match(self, parent_loader, mapper_entities):
path = parent_loader.path
found_entities = False
for ent in mapper_entities:
ezero = ent.entity_zero
if ezero:
found_entities = True
break
if not found_entities:
raise sa_exc.ArgumentError(
"Query has only expression-based entities; "
f"attribute loader options for {path[0]} can't "
"be applied here."
)
else:
raise sa_exc.ArgumentError(
f"Mapped class {path[0]} does not apply to any of the "
f"root entities in this query, e.g. "
f"""{
", ".join(
str(x.entity_zero)
for x in mapper_entities if x.entity_zero
)}. Please """
"specify the full path "
"from one of the root entities to the target "
"attribute. "
)
def _adjust_effective_path_for_current_path(
self, effective_path: PathRegistry, current_path: PathRegistry
) -> Optional[PathRegistry]:
"""receives the 'current_path' entry from an :class:`.ORMCompileState`
instance, which is set during lazy loads and secondary loader strategy
loads, and adjusts the given path to be relative to the
current_path.
E.g. given a loader path and current path:
.. sourcecode:: text
lp: User -> orders -> Order -> items -> Item -> keywords -> Keyword
cp: User -> orders -> Order -> items
The adjusted path would be:
.. sourcecode:: text
Item -> keywords -> Keyword
"""
chopped_start_path = Load._chop_path(
effective_path.natural_path, current_path
)
if not chopped_start_path:
return None
tokens_removed_from_start_path = len(effective_path) - len(
chopped_start_path
)
loader_lead_path_element = self.path[tokens_removed_from_start_path]
effective_path = PathRegistry.coerce(
(loader_lead_path_element,) + chopped_start_path[1:]
)
return effective_path
def _init_path(
self, path, attr, wildcard_key, attr_group, raiseerr, extra_criteria
):
"""Apply ORM attributes and/or wildcard to an existing path, producing
a new path.
This method is used within the :meth:`.create` method to initialize
a :class:`._LoadElement` object.
"""
raise NotImplementedError()
def _prepare_for_compile_state(
self,
parent_loader,
compile_state,
mapper_entities,
reconciled_lead_entity,
raiseerr,
):
"""implemented by subclasses."""
raise NotImplementedError()
def process_compile_state(
self,
parent_loader,
compile_state,
mapper_entities,
reconciled_lead_entity,
raiseerr,
):
"""populate ORMCompileState.attributes with loader state for this
_LoadElement.
"""
keys = self._prepare_for_compile_state(
parent_loader,
compile_state,
mapper_entities,
reconciled_lead_entity,
raiseerr,
)
for key in keys:
if key in compile_state.attributes:
compile_state.attributes[key] = _LoadElement._reconcile(
self, compile_state.attributes[key]
)
else:
compile_state.attributes[key] = self
@classmethod
def create(
cls,
path: PathRegistry,
attr: Union[_AttrType, _StrPathToken, None],
strategy: Optional[_StrategyKey],
wildcard_key: Optional[_WildcardKeyType],
local_opts: Optional[_OptsType],
propagate_to_loaders: bool,
raiseerr: bool = True,
attr_group: Optional[_AttrGroupType] = None,
reconcile_to_other: Optional[bool] = None,
extra_criteria: Optional[Tuple[Any, ...]] = None,
) -> _LoadElement:
"""Create a new :class:`._LoadElement` object."""
opt = cls.__new__(cls)
opt.path = path
opt.strategy = strategy
opt.propagate_to_loaders = propagate_to_loaders
opt.local_opts = (
util.immutabledict(local_opts) if local_opts else util.EMPTY_DICT
)
opt._extra_criteria = ()
if reconcile_to_other is not None:
opt._reconcile_to_other = reconcile_to_other
elif strategy is None and not local_opts:
opt._reconcile_to_other = True
else:
opt._reconcile_to_other = None
path = opt._init_path(
path, attr, wildcard_key, attr_group, raiseerr, extra_criteria
)
if not path:
return None # type: ignore
assert opt.is_token_strategy == path.is_token
opt.path = path
return opt
def __init__(self) -> None:
raise NotImplementedError()
def _recurse(self) -> _LoadElement:
cloned = self._clone()
cloned.path = PathRegistry.coerce(self.path[:] + self.path[-2:])
return cloned
def _prepend_path_from(self, parent: Load) -> _LoadElement:
"""adjust the path of this :class:`._LoadElement` to be
a subpath of that of the given parent :class:`_orm.Load` object's
path.
This is used by the :meth:`_orm.Load._apply_to_parent` method,
which is in turn part of the :meth:`_orm.Load.options` method.
"""
if not any(
orm_util._entity_corresponds_to_use_path_impl(
elem,
self.path.odd_element(0),
)
for elem in (parent.path.odd_element(-1),)
+ parent.additional_source_entities
):
raise sa_exc.ArgumentError(
f'Attribute "{self.path[1]}" does not link '
f'from element "{parent.path[-1]}".'
)
return self._prepend_path(parent.path)
def _prepend_path(self, path: PathRegistry) -> _LoadElement:
cloned = self._clone()
assert cloned.strategy == self.strategy
assert cloned.local_opts == self.local_opts
assert cloned.is_class_strategy == self.is_class_strategy
cloned.path = PathRegistry.coerce(path[0:-1] + cloned.path[:])
return cloned
@staticmethod
def _reconcile(
replacement: _LoadElement, existing: _LoadElement
) -> _LoadElement:
"""define behavior for when two Load objects are to be put into
the context.attributes under the same key.
:param replacement: ``_LoadElement`` that seeks to replace the
existing one
:param existing: ``_LoadElement`` that is already present.
"""
# mapper inheritance loading requires fine-grained "block other
# options" / "allow these options to be overridden" behaviors
# see test_poly_loading.py
if replacement._reconcile_to_other:
return existing
elif replacement._reconcile_to_other is False:
return replacement
elif existing._reconcile_to_other:
return replacement
elif existing._reconcile_to_other is False:
return existing
if existing is replacement:
return replacement
elif (
existing.strategy == replacement.strategy
and existing.local_opts == replacement.local_opts
):
return replacement
elif replacement.is_opts_only:
existing = existing._clone()
existing.local_opts = existing.local_opts.union(
replacement.local_opts
)
existing._extra_criteria += replacement._extra_criteria
return existing
elif existing.is_opts_only:
replacement = replacement._clone()
replacement.local_opts = replacement.local_opts.union(
existing.local_opts
)
replacement._extra_criteria += existing._extra_criteria
return replacement
elif replacement.path.is_token:
# use 'last one wins' logic for wildcard options. this is also
# kind of inconsistent vs. options that are specific paths which
# will raise as below
return replacement
raise sa_exc.InvalidRequestError(
f"Loader strategies for {replacement.path} conflict"
)
| _LoadElement |
python | spyder-ide__spyder | external-deps/spyder-kernels/spyder_kernels/utils/pythonenv.py | {
"start": 725,
"end": 3910
} | class ____(TypedDict):
"""Schema for Python environment information."""
path: str
env_type: PythonEnvType
name: str
python_version: str
# These keys are necessary to build the console banner in Spyder
ipython_version: str
sys_version: str
def add_quotes(path):
"""Return quotes if needed for spaces on path."""
quotes = '"' if ' ' in path and '"' not in path else ''
return '{quotes}{path}{quotes}'.format(quotes=quotes, path=path)
def get_conda_env_path(pyexec, quote=False):
"""
Return the full path to the conda environment from a given python
executable.
If `quote` is True, then quotes are added if spaces are found in the path.
"""
pyexec = pyexec.replace('\\', '/')
if os.name == 'nt':
conda_env = os.path.dirname(pyexec)
else:
conda_env = os.path.dirname(os.path.dirname(pyexec))
if quote:
conda_env = add_quotes(conda_env)
return conda_env
def get_pixi_manifest_path_and_env_name(pyexec, quote=False):
pyexec_path = Path(pyexec.replace("\\", "/"))
pixi_env_path = pyexec_path.parents[0 if os.name == "nt" else 1]
pixi_env_name = pixi_env_path.name
pixi_dir_path = pixi_env_path.parents[1]
pixi_manifest_path = None
pixi_manifest_paths = [
pixi_dir_path.parent / "pixi.toml",
pixi_dir_path.parent / "pyproject.toml",
pixi_dir_path.parent / "manifests" / "pixi-global.toml",
]
for manifest_path in pixi_manifest_paths:
if manifest_path.exists():
pixi_manifest_path = manifest_path
break
if not pixi_manifest_path:
raise FileNotFoundError(
"No manifest file for your pixi environment was found!"
)
if quote:
pixi_env_name = add_quotes(pixi_env_name)
return str(pixi_manifest_path), pixi_env_name
def is_conda_env(prefix=None, pyexec=None):
"""Check if prefix or python executable are in a conda environment."""
if pyexec is not None:
pyexec = pyexec.replace('\\', '/')
if (prefix is None and pyexec is None) or (prefix and pyexec):
raise ValueError('Only `prefix` or `pyexec` should be provided!')
if pyexec and prefix is None:
prefix = get_conda_env_path(pyexec).replace('\\', '/')
return os.path.exists(os.path.join(prefix, 'conda-meta'))
def is_pyenv_env(pyexec):
"""Check if a python executable is a Pyenv environment."""
path = Path(pyexec)
return "pyenv" in path.parts[:-1]
def is_pixi_env(pyexec):
"""Check if a python executable is a Pixi environment."""
path = Path(pyexec)
return ".pixi" in path.parts[:-1]
def get_env_dir(interpreter, only_dir=False):
"""Get the environment directory from the interpreter executable."""
path = Path(interpreter)
if os.name == 'nt':
# This is enough for Conda and Pyenv envs
env_dir = path.parent
# This is necessary for envs created with `python -m venv`
if env_dir.parts[-1].lower() == "scripts":
env_dir = path.parents[1]
else:
env_dir = path.parents[1]
return env_dir.parts[-1] if only_dir else str(env_dir)
| PythonEnvInfo |
python | spyder-ide__spyder | spyder/widgets/config.py | {
"start": 2483,
"end": 46022
} | class ____(SidebarPage, ConfigAccessMixin):
"""
Page that can display graphical elements connected to our config system.
"""
# Signals
apply_button_enabled = Signal(bool)
# Constants
CONF_SECTION = None
LOAD_FROM_CONFIG = True
def __init__(self, parent):
SidebarPage.__init__(self, parent)
# Callback to call before saving settings to disk
self.pre_apply_callback = None
# Callback to call after saving settings to disk
self.apply_callback = lambda: self._apply_settings_tabs(
self.changed_options
)
self.checkboxes = {}
self.radiobuttons = {}
self.lineedits = {}
self.textedits = {}
self.validate_data = {}
self.spinboxes = {}
self.comboboxes = {}
self.fontboxes = {}
self.coloredits = {}
self.scedits = {}
self.cross_section_options = {}
self.changed_options = set()
self.restart_options = dict() # Dict to store name and localized text
self.default_button_group = None
self.tabs = None
self.is_modified = False
if getattr(parent, "main", None):
self.main = parent.main
else:
self.main = None
def initialize(self):
"""Initialize configuration page."""
self.setup_page()
if self.LOAD_FROM_CONFIG:
self.load_from_conf()
def _apply_settings_tabs(self, options):
if self.tabs is not None:
for i in range(self.tabs.count()):
tab = self.tabs.widget(i)
layout = tab.layout()
for i in range(layout.count()):
widget = layout.itemAt(i).widget()
if hasattr(widget, 'apply_settings'):
if issubclass(type(widget), BaseConfigTab):
options |= widget.apply_settings()
self.apply_settings(options)
def apply_settings(self, options):
raise NotImplementedError
def apply_changes(self):
"""Apply changes callback"""
if self.is_modified:
if self.pre_apply_callback is not None:
self.pre_apply_callback()
self.save_to_conf()
if self.apply_callback is not None:
self.apply_callback()
# Since the language cannot be retrieved by CONF and the language
# is needed before loading CONF, this is an extra method needed to
# ensure that when changes are applied, they are copied to a
# specific file storing the language value. This only applies to
# the main section config.
if self.CONF_SECTION == 'main':
self._save_lang()
restart = False
for restart_option in self.restart_options:
if restart_option in self.changed_options:
restart = self.prompt_restart_required()
break # Ensure a single popup is displayed
# Don't call set_modified() when restart() is called: The
# latter triggers closing of the application. Calling the former
# afterwards may result in an error because the underlying C++ Qt
# object of 'self' may be deleted at that point.
if restart:
self.restart()
else:
self.set_modified(False)
def check_settings(self):
"""This method is called to check settings after configuration
dialog has been shown"""
pass
def set_modified(self, state):
self.is_modified = state
self.apply_button_enabled.emit(state)
if not state:
self.changed_options = set()
def is_valid(self):
"""Return True if all widget contents are valid"""
status = True
for lineedit in self.lineedits:
if lineedit in self.validate_data and lineedit.isEnabled():
validator, invalid_msg = self.validate_data[lineedit]
text = str(lineedit.text())
if not validator(text):
QMessageBox.critical(self, self.get_name(),
f"{invalid_msg}:<br><b>{text}</b>",
QMessageBox.Ok)
return False
if self.tabs is not None and status:
for i in range(self.tabs.count()):
tab = self.tabs.widget(i)
layout = tab.layout()
for i in range(layout.count()):
widget = layout.itemAt(i).widget()
if issubclass(type(widget), BaseConfigTab):
status &= widget.is_valid()
if not status:
return status
return status
def reset_widget_dicts(self):
"""Reset the dicts of widgets tracked in the page."""
self.checkboxes = {}
self.radiobuttons = {}
self.lineedits = {}
self.textedits = {}
self.validate_data = {}
self.spinboxes = {}
self.comboboxes = {}
self.fontboxes = {}
self.coloredits = {}
self.scedits = {}
self.cross_section_options = {}
def load_from_conf(self):
"""Load settings from configuration file."""
for checkbox, (sec, option, default) in list(self.checkboxes.items()):
checkbox.setChecked(self.get_option(option, default, section=sec))
checkbox.clicked[bool].connect(lambda _, opt=option, sect=sec:
self.has_been_modified(sect, opt))
if checkbox.restart_required:
if sec is None:
self.restart_options[option] = checkbox.text()
else:
self.restart_options[(sec, option)] = checkbox.text()
for radiobutton, (sec, option, default) in list(
self.radiobuttons.items()):
radiobutton.setChecked(self.get_option(option, default,
section=sec))
radiobutton.toggled.connect(lambda _foo, opt=option, sect=sec:
self.has_been_modified(sect, opt))
if radiobutton.restart_required:
if sec is None:
self.restart_options[option] = radiobutton.label_text
else:
self.restart_options[(sec, option)] = radiobutton.label_text
for lineedit, (sec, option, default) in list(self.lineedits.items()):
data = self.get_option(
option,
default,
section=sec,
secure=True
if (hasattr(lineedit, "password") and lineedit.password)
else False,
)
if getattr(lineedit, 'content_type', None) == list:
data = ', '.join(data)
else:
# Make option value a string to prevent errors when using it
# as widget text.
# See spyder-ide/spyder#18929
data = str(data)
lineedit.setText(data)
lineedit.textChanged.connect(lambda _, opt=option, sect=sec:
self.has_been_modified(sect, opt))
if lineedit.restart_required:
if sec is None:
self.restart_options[option] = lineedit.label_text
else:
self.restart_options[(sec, option)] = lineedit.label_text
for textedit, (sec, option, default) in list(self.textedits.items()):
data = self.get_option(option, default, section=sec)
if getattr(textedit, 'content_type', None) == list:
data = ', '.join(data)
elif getattr(textedit, 'content_type', None) == dict:
data = str(data)
textedit.setPlainText(data)
textedit.textChanged.connect(lambda opt=option, sect=sec:
self.has_been_modified(sect, opt))
if textedit.restart_required:
if sec is None:
self.restart_options[option] = textedit.label_text
else:
self.restart_options[(sec, option)] = textedit.label_text
for spinbox, (sec, option, default) in list(self.spinboxes.items()):
spinbox.setValue(self.get_option(option, default, section=sec))
spinbox.valueChanged.connect(lambda _foo, opt=option, sect=sec:
self.has_been_modified(sect, opt))
for combobox, (sec, option, default) in list(self.comboboxes.items()):
value = self.get_option(option, default, section=sec)
for index in range(combobox.count()):
data = from_qvariant(combobox.itemData(index), str)
# For PyQt API v2, it is necessary to convert `data` to
# unicode in case the original type was not a string, like an
# integer for example (see qtpy.compat.from_qvariant):
if str(data) == str(value):
break
else:
if combobox.count() == 0:
index = None
if index:
combobox.setCurrentIndex(index)
combobox.currentIndexChanged.connect(
lambda _foo, opt=option, sect=sec:
self.has_been_modified(sect, opt))
if combobox.restart_required:
if sec is None:
self.restart_options[option] = combobox.label_text
else:
self.restart_options[(sec, option)] = combobox.label_text
for (fontbox, sizebox), option in list(self.fontboxes.items()):
font = self.get_font(option)
fontbox.setCurrentFont(font)
sizebox.setValue(font.pointSize())
fontbox.currentIndexChanged.connect(
lambda _foo, opt=option: self.has_been_modified(None, opt))
sizebox.valueChanged.connect(
lambda _foo, opt=option: self.has_been_modified(None, opt))
if fontbox.restart_required:
self.restart_options[option] = fontbox.label_text
if sizebox.restart_required:
self.restart_options[option] = sizebox.label_text
for clayout, (sec, option, default) in list(self.coloredits.items()):
edit = clayout.lineedit
btn = clayout.colorbtn
edit.setText(self.get_option(option, default, section=sec))
# QAbstractButton works differently for PySide and PyQt
if not API == 'pyside':
btn.clicked.connect(lambda _foo, opt=option, sect=sec:
self.has_been_modified(sect, opt))
else:
btn.clicked.connect(lambda opt=option, sect=sec:
self.has_been_modified(sect, opt))
edit.textChanged.connect(lambda _foo, opt=option, sect=sec:
self.has_been_modified(sect, opt))
for (clayout, cb_bold, cb_italic
), (sec, option, default) in list(self.scedits.items()):
edit = clayout.lineedit
btn = clayout.colorbtn
options = self.get_option(option, default, section=sec)
if options:
color, bold, italic = options
edit.setText(color)
cb_bold.setChecked(bold)
cb_italic.setChecked(italic)
edit.textChanged.connect(lambda _foo, opt=option, sect=sec:
self.has_been_modified(sect, opt))
btn.clicked[bool].connect(lambda _foo, opt=option, sect=sec:
self.has_been_modified(sect, opt))
cb_bold.clicked[bool].connect(lambda _foo, opt=option, sect=sec:
self.has_been_modified(sect, opt))
cb_italic.clicked[bool].connect(lambda _foo, opt=option, sect=sec:
self.has_been_modified(sect, opt))
def save_to_conf(self):
"""Save settings to configuration file"""
for checkbox, (sec, option, _default) in list(
self.checkboxes.items()):
if (
option in self.changed_options
or (sec, option) in self.changed_options
or not self.LOAD_FROM_CONFIG
):
value = checkbox.isChecked()
self.set_option(option, value, section=sec,
recursive_notification=False)
for radiobutton, (sec, option, _default) in list(
self.radiobuttons.items()):
if (
option in self.changed_options
or (sec, option) in self.changed_options
or not self.LOAD_FROM_CONFIG
) and option is not None:
self.set_option(option, radiobutton.isChecked(), section=sec,
recursive_notification=False)
for lineedit, (sec, option, _default) in list(self.lineedits.items()):
if (
option in self.changed_options
or (sec, option) in self.changed_options
or not self.LOAD_FROM_CONFIG
):
data = lineedit.text()
content_type = getattr(lineedit, 'content_type', None)
if content_type == list:
data = [item.strip() for item in data.split(',')]
else:
data = str(data)
self.set_option(
option,
data,
section=sec,
recursive_notification=False,
secure=True
if (hasattr(lineedit, "password") and lineedit.password)
else False,
)
for textedit, (sec, option, _default) in list(self.textedits.items()):
if (
option in self.changed_options
or (sec, option) in self.changed_options
or not self.LOAD_FROM_CONFIG
):
data = textedit.toPlainText()
content_type = getattr(textedit, 'content_type', None)
if content_type == dict:
if data:
data = ast.literal_eval(data)
else:
data = textedit.content_type()
elif content_type in (tuple, list):
data = [item.strip() for item in data.split(',')]
else:
data = str(data)
self.set_option(option, data, section=sec,
recursive_notification=False)
for spinbox, (sec, option, _default) in list(self.spinboxes.items()):
if (
option in self.changed_options
or (sec, option) in self.changed_options
or not self.LOAD_FROM_CONFIG
):
self.set_option(option, spinbox.value(), section=sec,
recursive_notification=False)
for combobox, (sec, option, _default) in list(self.comboboxes.items()):
if (
option in self.changed_options
or (sec, option) in self.changed_options
or not self.LOAD_FROM_CONFIG
):
data = combobox.itemData(combobox.currentIndex())
self.set_option(option, from_qvariant(data, str),
section=sec, recursive_notification=False)
for (fontbox, sizebox), option in list(self.fontboxes.items()):
if option in self.changed_options or not self.LOAD_FROM_CONFIG:
font = fontbox.currentFont()
font.setPointSize(sizebox.value())
self.set_font(font, option)
for clayout, (sec, option, _default) in list(self.coloredits.items()):
if (
option in self.changed_options
or (sec, option) in self.changed_options
or not self.LOAD_FROM_CONFIG
):
self.set_option(option,
str(clayout.lineedit.text()),
section=sec, recursive_notification=False)
for (clayout, cb_bold, cb_italic), (sec, option, _default) in list(
self.scedits.items()):
if (
option in self.changed_options
or (sec, option) in self.changed_options
or not self.LOAD_FROM_CONFIG
):
color = str(clayout.lineedit.text())
bold = cb_bold.isChecked()
italic = cb_italic.isChecked()
self.set_option(option, (color, bold, italic), section=sec,
recursive_notification=False)
@Slot(str)
def has_been_modified(self, section, option):
self.set_modified(True)
if section is None:
self.changed_options.add(option)
else:
self.changed_options.add((section, option))
def add_help_info_label(self, layout, tip_text):
help_label = TipWidget(
tip_text=tip_text,
icon=ima.icon('question_tip'),
hover_icon=ima.icon('question_tip_hover')
)
layout.addWidget(help_label)
layout.addStretch(100)
return layout, help_label
def create_checkbox(self, text, option, default=NoDefault,
tip=None, msg_warning=None, msg_info=None,
msg_if_enabled=False, section=None, restart=False):
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
checkbox = QCheckBox(text)
layout.addWidget(checkbox)
self.checkboxes[checkbox] = (section, option, default)
if section is not None and section != self.CONF_SECTION:
self.cross_section_options[option] = section
if msg_warning is not None or msg_info is not None:
def show_message(is_checked=False):
if is_checked or not msg_if_enabled:
if msg_warning is not None:
QMessageBox.warning(self, self.get_name(),
msg_warning, QMessageBox.Ok)
if msg_info is not None:
QMessageBox.information(self, self.get_name(),
msg_info, QMessageBox.Ok)
checkbox.clicked.connect(show_message)
checkbox.restart_required = restart
widget = QWidget(self)
widget.checkbox = checkbox
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
widget.help_label = help_label
widget.setLayout(layout)
return widget
def create_radiobutton(self, text, option, default=NoDefault,
tip=None, msg_warning=None, msg_info=None,
msg_if_enabled=False, button_group=None,
restart=False, section=None, id_=None):
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
radiobutton = QRadioButton(text)
layout.addWidget(radiobutton)
if section is not None and section != self.CONF_SECTION:
self.cross_section_options[option] = section
if button_group is None:
if self.default_button_group is None:
self.default_button_group = QButtonGroup(self)
button_group = self.default_button_group
if id_ is None:
button_group.addButton(radiobutton)
else:
button_group.addButton(radiobutton, id=id_)
self.radiobuttons[radiobutton] = (section, option, default)
if msg_warning is not None or msg_info is not None:
def show_message(is_checked):
if is_checked or not msg_if_enabled:
if msg_warning is not None:
QMessageBox.warning(self, self.get_name(),
msg_warning, QMessageBox.Ok)
if msg_info is not None:
QMessageBox.information(self, self.get_name(),
msg_info, QMessageBox.Ok)
radiobutton.toggled.connect(show_message)
radiobutton.restart_required = restart
radiobutton.label_text = text
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
radiobutton.help_label = help_label
widget = QWidget(self)
widget.radiobutton = radiobutton
widget.setLayout(layout)
return widget
def create_lineedit(self, text, option, default=NoDefault,
tip=None, alignment=Qt.Vertical, regex=None,
restart=False, word_wrap=True, placeholder=None,
content_type=None, section=None, status_icon=None,
password=False, validate_callback=None,
validate_reason=None):
if section is not None and section != self.CONF_SECTION:
self.cross_section_options[option] = section
label = QLabel(text)
label.setWordWrap(word_wrap)
if validate_callback:
if not validate_reason:
raise RuntimeError(
"You need to provide a validate_reason if you want to use "
"a validate_callback"
)
edit = ValidationLineEdit(
validate_callback=validate_callback,
validate_reason=validate_reason,
)
status_action = edit.error_action
else:
edit = QLineEdit()
edit.content_type = content_type
if password:
edit.setEchoMode(QLineEdit.Password)
if status_icon is not None:
status_action = QAction(self)
edit.addAction(status_action, QLineEdit.TrailingPosition)
status_action.setIcon(status_icon)
status_action.setVisible(False)
if alignment == Qt.Vertical:
layout = QVBoxLayout()
# This is necessary to correctly align `label` and `edit` to the
# left when they are displayed vertically.
edit.setStyleSheet("margin-left: 5px")
if tip is not None:
label_layout = QHBoxLayout()
label_layout.setSpacing(0)
label_layout.addWidget(label)
label_layout, help_label = self.add_help_info_label(
label_layout, tip
)
layout.addLayout(label_layout)
else:
layout.addWidget(label)
layout.addWidget(edit)
else:
layout = QHBoxLayout()
layout.addWidget(label)
layout.addWidget(edit)
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
layout.setContentsMargins(0, 0, 0, 0)
if regex:
edit.setValidator(
QRegularExpressionValidator(QRegularExpression(regex))
)
if placeholder:
edit.setPlaceholderText(placeholder)
self.lineedits[edit] = (section, option, default)
widget = QWidget(self)
widget.label = label
widget.textbox = edit
if tip is not None:
widget.help_label = help_label
if status_icon is not None or validate_callback is not None:
widget.status_action = status_action
widget.setLayout(layout)
edit.restart_required = restart
edit.label_text = text
edit.password = password
return widget
def create_textedit(self, text, option, default=NoDefault,
tip=None, restart=False, content_type=None,
section=None):
if section is not None and section != self.CONF_SECTION:
self.cross_section_options[option] = section
label = QLabel(text)
label.setWordWrap(True)
edit = QPlainTextEdit()
edit.content_type = content_type
edit.setWordWrapMode(QTextOption.WordWrap)
layout = QVBoxLayout()
layout.addWidget(label)
layout.addWidget(edit)
layout.setContentsMargins(0, 0, 0, 0)
self.textedits[edit] = (section, option, default)
widget = QWidget(self)
widget.label = label
widget.textbox = edit
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
widget.help_label = help_label
widget.setLayout(layout)
edit.restart_required = restart
edit.label_text = text
return widget
def create_browsedir(self, text, option, default=NoDefault, section=None,
tip=None, alignment=Qt.Horizontal, status_icon=None):
widget = self.create_lineedit(
text,
option,
default,
section=section,
alignment=alignment,
# We need the tip to be added by the lineedit if the alignment is
# vertical. If not, it'll be added below when setting the layout.
tip=tip if (tip and alignment == Qt.Vertical) else None,
status_icon=status_icon,
)
for edit in self.lineedits:
if widget.isAncestorOf(edit):
break
msg = _("Invalid directory path")
self.validate_data[edit] = (osp.isdir, msg)
browse_btn = QPushButton(ima.icon('DirOpenIcon'), '', self)
browse_btn.setToolTip(_("Select directory"))
browse_btn.clicked.connect(lambda: self.select_directory(edit))
browse_btn.setIconSize(
QSize(AppStyle.ConfigPageIconSize, AppStyle.ConfigPageIconSize)
)
if alignment == Qt.Vertical:
button_layout = QVBoxLayout()
button_layout.setContentsMargins(0, 0, 0, 0)
button_layout.addWidget(QLabel(""))
button_layout.addWidget(browse_btn)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(widget)
layout.addLayout(button_layout)
else:
# This is necessary to position browse_btn vertically centered with
# respect to the lineedit.
browse_btn.setStyleSheet("margin-top: 2px")
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(widget)
layout.addWidget(browse_btn)
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
browsedir = QWidget(self)
browsedir.textbox = widget.textbox
if status_icon:
browsedir.status_action = widget.status_action
browsedir.setLayout(layout)
return browsedir
def select_directory(self, edit):
"""Select directory"""
basedir = str(edit.text())
if not osp.isdir(basedir):
basedir = get_home_dir()
title = _("Select directory")
directory = getexistingdirectory(self, title, basedir)
if directory:
edit.setText(directory)
def create_browsefile(self, text, option, default=NoDefault, section=None,
tip=None, filters=None, alignment=Qt.Horizontal,
status_icon=None, validate_callback=None,
validate_reason=None):
widget = self.create_lineedit(
text,
option,
default,
section=section,
alignment=alignment,
# We need the tip to be added by the lineedit if the alignment is
# vertical. If not, it'll be added below when setting the layout.
tip=tip if (tip and alignment == Qt.Vertical) else None,
status_icon=status_icon,
validate_callback=validate_callback,
validate_reason=validate_reason,
)
for edit in self.lineedits:
if widget.isAncestorOf(edit):
break
msg = _('Invalid file path')
self.validate_data[edit] = (osp.isfile, msg)
browse_btn = QPushButton(ima.icon('DirOpenIcon'), '', self)
browse_btn.setToolTip(_("Select file"))
browse_btn.clicked.connect(lambda: self.select_file(edit, filters))
browse_btn.setIconSize(
QSize(AppStyle.ConfigPageIconSize, AppStyle.ConfigPageIconSize)
)
if alignment == Qt.Vertical:
button_layout = QVBoxLayout()
button_layout.setContentsMargins(0, 0, 0, 0)
button_layout.addWidget(QLabel(""))
button_layout.addWidget(browse_btn)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(widget)
layout.addLayout(button_layout)
else:
# This is necessary to position browse_btn vertically centered with
# respect to the lineedit.
browse_btn.setStyleSheet("margin-top: 2px")
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(widget)
layout.addWidget(browse_btn)
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
browsefile = QWidget(self)
browsefile.textbox = widget.textbox
if status_icon:
browsefile.status_action = widget.status_action
browsefile.setLayout(layout)
return browsefile
def select_file(self, edit, filters=None, **kwargs):
"""Select File"""
basedir = osp.dirname(str(edit.text()))
if not osp.isdir(basedir):
basedir = get_home_dir()
if filters is None:
filters = _("All files (*)")
title = _("Select file")
filename, _selfilter = getopenfilename(self, title, basedir, filters,
**kwargs)
if filename:
edit.setText(filename)
edit.setFocus()
def create_spinbox(self, prefix, suffix, option, default=NoDefault,
min_=None, max_=None, step=None, tip=None,
section=None):
if section is not None and section != self.CONF_SECTION:
self.cross_section_options[option] = section
widget = QWidget(self)
if prefix:
plabel = QLabel(prefix)
widget.plabel = plabel
else:
plabel = None
if suffix:
slabel = QLabel(suffix)
widget.slabel = slabel
else:
slabel = None
if step is not None:
if type(step) is int:
spinbox = QSpinBox()
else:
spinbox = QDoubleSpinBox()
spinbox.setDecimals(1)
spinbox.setSingleStep(step)
else:
spinbox = QSpinBox()
if min_ is not None:
spinbox.setMinimum(min_)
if max_ is not None:
spinbox.setMaximum(max_)
self.spinboxes[spinbox] = (section, option, default)
layout = QHBoxLayout()
for subwidget in (plabel, spinbox, slabel):
if subwidget is not None:
layout.addWidget(subwidget)
layout.addStretch(1)
layout.setContentsMargins(0, 0, 0, 0)
widget.spinbox = spinbox
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
widget.help_label = help_label
widget.setLayout(layout)
return widget
def create_coloredit(self, text, option, default=NoDefault, tip=None,
without_layout=False, section=None):
if section is not None and section != self.CONF_SECTION:
self.cross_section_options[option] = section
label = QLabel(text)
clayout = ColorLayout(QColor(Qt.black), self)
clayout.lineedit.setMaximumWidth(80)
self.coloredits[clayout] = (section, option, default)
if without_layout:
return label, clayout
layout = QHBoxLayout()
layout.addWidget(label)
layout.addLayout(clayout)
layout.addStretch(1)
layout.setContentsMargins(0, 0, 0, 0)
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
widget = QWidget(self)
widget.setLayout(layout)
return widget
def create_scedit(self, text, option, default=NoDefault, tip=None,
without_layout=False, section=None):
if section is not None and section != self.CONF_SECTION:
self.cross_section_options[option] = section
label = QLabel(text)
clayout = ColorLayout(QColor(Qt.black), self)
clayout.lineedit.setMaximumWidth(80)
cb_bold = QCheckBox()
cb_bold.setIcon(ima.icon('bold'))
cb_bold.setToolTip(_("Bold"))
cb_italic = QCheckBox()
cb_italic.setIcon(ima.icon('italic'))
cb_italic.setToolTip(_("Italic"))
self.scedits[(clayout, cb_bold, cb_italic)] = (section, option,
default)
if without_layout:
return label, clayout, cb_bold, cb_italic
layout = QHBoxLayout()
layout.addWidget(label)
layout.addLayout(clayout)
layout.addSpacing(10)
layout.addWidget(cb_bold)
layout.addWidget(cb_italic)
layout.addStretch(1)
layout.setContentsMargins(0, 0, 0, 0)
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
widget = QWidget(self)
widget.setLayout(layout)
return widget
def create_combobox(self, text, choices, option, default=NoDefault,
tip=None, restart=False, section=None,
items_elide_mode=None, alignment=Qt.Horizontal):
"""choices: couples (name, key)"""
if section is not None and section != self.CONF_SECTION:
self.cross_section_options[option] = section
# Widgets
label = QLabel(text)
combobox = SpyderComboBox(items_elide_mode=items_elide_mode)
for name, key in choices:
if not (name is None and key is None):
combobox.addItem(name, to_qvariant(key))
# Insert separators
count = 0
for index, item in enumerate(choices):
name, key = item
if name is None and key is None:
combobox.insertSeparator(index + count)
count += 1
self.comboboxes[combobox] = (section, option, default)
if alignment == Qt.Vertical:
layout = QVBoxLayout()
if tip is not None:
label_layout = QHBoxLayout()
label_layout.setSpacing(0)
label_layout.addWidget(label)
label_layout, help_label = self.add_help_info_label(
label_layout, tip
)
layout.addLayout(label_layout)
else:
layout.addWidget(label)
layout.addWidget(combobox)
else:
layout = QHBoxLayout()
layout.addWidget(label)
layout.addWidget(combobox)
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
layout.addStretch(1)
layout.setContentsMargins(0, 0, 0, 0)
widget = QWidget(self)
widget.label = label
widget.combobox = combobox
if tip is not None:
widget.help_label = help_label
widget.setLayout(layout)
combobox.restart_required = restart
combobox.label_text = text
return widget
def create_file_combobox(self, text, choices, option, default=NoDefault,
tip=None, restart=False, filters=None,
adjust_to_contents=False,
default_line_edit=False, section=None,
validate_callback=None):
"""choices: couples (name, key)"""
if section is not None and section != self.CONF_SECTION:
self.cross_section_options[option] = section
combobox = FileComboBox(self, adjust_to_contents=adjust_to_contents,
default_line_edit=default_line_edit)
combobox.restart_required = restart
combobox.label_text = text
edit = combobox.lineEdit()
edit.label_text = text
edit.restart_required = restart
self.lineedits[edit] = (section, option, default)
combobox.addItems(choices)
combobox.choices = choices
msg = _('Invalid file path')
self.validate_data[edit] = (
validate_callback if validate_callback else osp.isfile,
msg
)
browse_btn = QPushButton(ima.icon('DirOpenIcon'), '', self)
browse_btn.setToolTip(_("Select file"))
options = QFileDialog.DontResolveSymlinks
browse_btn.clicked.connect(
lambda: self.select_file(edit, filters, options=options)
)
browse_btn.setIconSize(
QSize(AppStyle.ConfigPageIconSize, AppStyle.ConfigPageIconSize)
)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(combobox)
layout.addWidget(browse_btn)
layout.addStretch()
widget = QWidget(self)
widget.combobox = combobox
widget.browse_btn = browse_btn
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
widget.help_label = help_label
widget.setLayout(layout)
return widget
def create_fontgroup(self, option=None, text=None, title=None,
tip=None, fontfilters=None, without_group=False,
restart=False):
"""Option=None -> setting plugin font"""
if title:
fontlabel = QLabel(title)
else:
fontlabel = QLabel(_("Font"))
fontbox = SpyderFontComboBox()
fontbox.restart_required = restart
fontbox.label_text = _("{} font").format(title)
if fontfilters is not None:
fontbox.setFontFilters(fontfilters)
sizebox = QSpinBox()
sizebox.setRange(7, 100)
sizebox.restart_required = restart
sizebox.label_text = _("{} font size").format(title)
self.fontboxes[(fontbox, sizebox)] = option
layout = QHBoxLayout()
for subwidget in (fontlabel, fontbox, sizebox):
layout.addWidget(subwidget)
layout.addStretch(1)
if not without_group:
if text is None:
text = _("Font style")
group = QGroupBox(text)
group.setLayout(layout)
if tip is not None:
layout, help_label = self.add_help_info_label(layout, tip)
return group
else:
widget = QWidget(self)
widget.fontlabel = fontlabel
widget.fontbox = fontbox
widget.sizebox = sizebox
widget.setLayout(layout)
return widget
def create_button(
self,
callback,
text=None,
icon=None,
tooltip=None,
set_modified_on_click=False,
):
if icon is not None:
btn = QPushButton(icon, "", parent=self)
btn.setIconSize(
QSize(AppStyle.ConfigPageIconSize, AppStyle.ConfigPageIconSize)
)
else:
btn = QPushButton(text, parent=self)
btn.clicked.connect(callback)
if tooltip is not None:
btn.setToolTip(tooltip)
if set_modified_on_click:
btn.clicked.connect(
lambda checked=False, opt="": self.has_been_modified(
self.CONF_SECTION, opt
)
)
return btn
def create_tab(self, name, widgets):
"""
Create a tab widget page.
Parameters
----------
name: str
Name of the tab
widgets: list or QWidget
List of widgets to add to the tab. This can be also a single
widget.
Notes
-----
* Widgets are added in a vertical layout.
"""
if self.tabs is None:
self.tabs = QTabWidget(self)
self.tabs.setUsesScrollButtons(True)
self.tabs.setElideMode(Qt.ElideNone)
vlayout = QVBoxLayout()
vlayout.addWidget(self.tabs)
self.setLayout(vlayout)
if not isinstance(widgets, list):
widgets = [widgets]
tab = QWidget(self)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
# This is necessary to make Qt respect the declared vertical spacing
# for widgets. In other words, it prevents text to be cropped when the
# total height of the page is too large.
layout.setSizeConstraint(QLayout.SetFixedSize)
for w in widgets:
# We need to set a min width so that pages are not shown too thin
# due to setting the layout size constraint above.
w.setMinimumWidth(
self.MAX_WIDTH - (60 if MAC else (80 if WIN else 70))
)
layout.addWidget(w)
layout.addStretch(1)
tab.setLayout(layout)
self.tabs.addTab(tab, name)
def prompt_restart_required(self) -> bool:
"""
Prompt the user with a request to restart.
It returns ``True`` when the request is accepted, ``False`` otherwise.
"""
message = _(
"One or more of the settings you changed requires a restart to be "
"applied.<br><br>"
"Do you wish to restart now?"
)
answer = QMessageBox.information(
self,
_("Information"),
message,
QMessageBox.Yes | QMessageBox.No
)
return answer == QMessageBox.Yes
def restart(self):
"""Restart Spyder."""
self.main.restart(close_immediately=True)
def _add_tab(self, Widget):
widget = Widget(self)
if self.tabs is None:
# In case a preference page does not have any tabs, we need to
# add a tab with the widgets that already exist and then add the
# new tab.
layout = self.layout()
main_widget = QWidget(self)
main_widget.setLayout(layout)
self.create_tab(_('General'), main_widget)
self.create_tab(Widget.TITLE, widget)
else:
self.create_tab(Widget.TITLE, widget)
self.load_from_conf()
| SpyderConfigPage |
python | MongoEngine__mongoengine | tests/fields/test_enum_field.py | {
"start": 5765,
"end": 6182
} | class ____(MongoDBTestCase):
def test_enum_incompatible_bson_type_fails_during_save(self):
class FunkyColor(Enum):
YELLOW = object()
class ModelWithFunkyColor(Document):
color = EnumField(FunkyColor)
m = ModelWithFunkyColor(color=FunkyColor.YELLOW)
with pytest.raises(InvalidDocument, match="[cC]annot encode object"):
m.save()
| TestFunkyEnumField |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/associationproxy.py | {
"start": 10921,
"end": 11096
} | class ____(Protocol[_T]):
def __call__(
self,
) -> Union[
MutableSet[_T], MutableMapping[Any, _T], MutableSequence[_T]
]: ...
| _LazyCollectionProtocol |
python | python-pillow__Pillow | src/PIL/DdsImagePlugin.py | {
"start": 903,
"end": 990
} | class ____(IntFlag):
COMPLEX = 0x8
TEXTURE = 0x1000
MIPMAP = 0x400000
| DDSCAPS |
python | ray-project__ray | python/ray/_common/formatters.py | {
"start": 3939,
"end": 4585
} | class ____(AbstractFormatter):
def __init__(self, fmt=None, datefmt=None, style="%", validate=True) -> None:
super().__init__(fmt, datefmt, style, validate)
self._inner_formatter = logging.Formatter(LOGGER_FORMAT)
def format(self, record: logging.LogRecord) -> str:
s = self._inner_formatter.format(record)
record_format_attrs = self.generate_record_format_attrs(
record, exclude_default_standard_attrs=True
)
additional_attrs = " ".join(
[f"{key}={value}" for key, value in record_format_attrs.items()]
)
return f"{s} {additional_attrs}"
| TextFormatter |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_database_backend.py | {
"start": 12703,
"end": 22890
} | class ____(RuleBasedStateMachine):
"""
This is a state machine that tests agreement of GitHubArtifactDatabase
with DirectoryBasedExampleDatabase (as a reference implementation).
"""
def __init__(self):
super().__init__()
self.temp_directory = Path(tempfile.mkdtemp())
self.path = self.temp_directory / "github-artifacts"
# This is where we will store the contents for the zip file
timestamp = datetime.now(timezone.utc).isoformat().replace(":", "_")
self.zip_destination = self.path / f"{timestamp}.zip"
# And this is where we want to create it
self.zip_content_path = self.path / timestamp
self.zip_content_path.mkdir(parents=True, exist_ok=True)
# We use a DirectoryBasedExampleDatabase to create the contents
self.directory_db = DirectoryBasedExampleDatabase(str(self.zip_content_path))
self.zip_db = GitHubArtifactDatabase("mock", "mock", path=self.path)
# Create zip file for the first time
self._archive_directory_db()
self.zip_db._initialize_db()
def _make_zip(self, tree_path: Path, zip_path: Path):
destination = zip_path.parent.absolute() / zip_path.stem
make_archive(
str(destination),
"zip",
root_dir=tree_path,
)
def _archive_directory_db(self):
# Delete all of the zip files in the directory
for file in self.path.glob("*.zip"):
file.unlink()
self._make_zip(self.zip_content_path, self.zip_destination)
keys = Bundle("keys")
values = Bundle("values")
@rule(target=keys, k=st.binary())
def k(self, k):
return k
@rule(target=values, v=st.binary())
def v(self, v):
return v
@rule(k=keys, v=values)
def save(self, k, v):
self.directory_db.save(k, v)
self._archive_directory_db()
self.zip_db = GitHubArtifactDatabase("mock", "mock", path=self.path)
self.zip_db._initialize_db()
@rule(k=keys)
def values_agree(self, k):
v1 = set(self.directory_db.fetch(k))
v2 = set(self.zip_db.fetch(k))
assert v1 == v2
def teardown(self):
shutil.rmtree(self.temp_directory)
TestGADReads = GitHubArtifactMocks.TestCase
def test_gadb_coverage():
# Ensure that we always cover the nonempty-archive case, which can otherwise
# cause rare incomplete-coverage failures.
state = GitHubArtifactMocks()
state.save(b"key", b"value")
state.values_agree(b"key")
@pytest.mark.parametrize("dirs", [[], ["subdir"]])
def test_database_directory_inaccessible(dirs, tmp_path, monkeypatch):
monkeypatch.setattr(
configuration, "__hypothesis_home_directory", tmp_path.joinpath(*dirs)
)
try:
tmp_path.chmod(0o000)
with (
nullcontext()
if WINDOWS
else pytest.warns(
HypothesisWarning, match=".*the default location is unusable"
)
):
database = _db_for_path(not_set)
database.save(b"fizz", b"buzz")
finally:
tmp_path.chmod(0o600) # So that pytest can clean up tmp_path later
@skipif_emscripten
def test_background_write_database():
db = BackgroundWriteDatabase(InMemoryExampleDatabase())
db.save(b"a", b"b")
db.save(b"a", b"c")
db.save(b"a", b"d")
assert set(db.fetch(b"a")) == {b"b", b"c", b"d"}
db.move(b"a", b"a2", b"b")
assert set(db.fetch(b"a")) == {b"c", b"d"}
assert set(db.fetch(b"a2")) == {b"b"}
db.delete(b"a", b"c")
assert set(db.fetch(b"a")) == {b"d"}
@given(lists(nodes()))
# covering examples
@example(nodes_inline(True))
@example(nodes_inline(1))
@example(nodes_inline(0.0))
@example(nodes_inline(-0.0))
@example(nodes_inline("a"))
@example(nodes_inline(b"a"))
@example(nodes_inline(b"a" * 50))
@example(nodes_inline(b"1" * 100_000)) # really long bytes
def test_nodes_roundtrips(nodes1):
s1 = choices_to_bytes([n.value for n in nodes1])
assert isinstance(s1, bytes)
ir2 = choices_from_bytes(s1)
assert len(nodes1) == len(ir2)
for n1, v2 in zip(nodes1, ir2, strict=True):
assert choice_equal(n1.value, v2)
s2 = choices_to_bytes(ir2)
assert s1 == s2
@given(st.integers(min_value=0))
def test_uleb_128_roundtrips(n1):
buffer1 = _pack_uleb128(n1)
idx, n2 = _unpack_uleb128(buffer1)
assert idx == len(buffer1)
assert n1 == n2
def _database_conforms_to_listener_api(
create_db,
*,
flush=None,
supports_value_delete=True,
parent_settings=None,
):
# this function is a big mess to support a bunch of different special cases
# for different databases, sorry. In return, we get one big stateful test
# we can use to test the listener api for all of our databases.
#
# * create_db is a callable which accepts one argument (a path to a temporary
# directory) and returns a database instance.
# * flush is a callable which takes the instantiated db as an argument, and
# is called on every step as an invariant. This lets the database do things
# like, time.sleep to give time for events to fire.
# * suports_value_delete is True if the db supports passing
# the exact value of a deleted key in "delete" events. The directory database
# notably does not support this, and passes None instead.
@settings(parent_settings, suppress_health_check=[HealthCheck.too_slow])
class TestDatabaseListener(RuleBasedStateMachine):
# this tests that if we call .delete, .save, or .move in a database, and
# that operation changes the state of the database, any registered listeners
# get called a corresponding number of times.
keys = Bundle("keys")
values = Bundle("values")
def __init__(self):
super().__init__()
self.temp_dir = Path(tempfile.mkdtemp())
self.db = create_db(self.temp_dir)
self.expected_events = []
self.actual_events = []
def listener(event):
self.actual_events.append(event)
self.listener = listener
self.active_listeners = []
self.add_listener()
def _expect_event(self, event_type, args):
for _ in range(len(self.active_listeners)):
self.expected_events.append((event_type, args))
def _expect_delete(self, k, v):
if not supports_value_delete:
v = None
self._expect_event("delete", (k, v))
def _expect_save(self, k, v):
self._expect_event("save", (k, v))
@rule(target=keys, k=st.binary())
def k(self, k):
return k
@rule(target=values, v=st.binary())
def v(self, v):
return v
@precondition(lambda self: not self.active_listeners)
@rule()
def add_listener(self):
self.db.add_listener(self.listener)
self.active_listeners.append(self.listener)
@precondition(lambda self: self.listener in self.active_listeners)
@rule()
def remove_listener(self):
self.db.remove_listener(self.listener)
self.active_listeners.remove(self.listener)
@rule()
def clear_listeners(self):
self.db.clear_listeners()
self.active_listeners.clear()
@rule(k=keys)
def fetch(self, k):
# we don't expect this to do anything, but that's the point. if this
# fires a listener call then that's bad and will fail.
self.db.fetch(k)
@rule(k=keys, v=values)
def save(self, k, v):
changed = v not in set(self.db.fetch(k))
self.db.save(k, v)
if changed:
self._expect_save(k, v)
@rule(k=keys, v=values)
def delete(self, k, v):
changed = v in set(self.db.fetch(k))
self.db.delete(k, v)
if changed:
self._expect_delete(k, v)
@rule(k1=keys, k2=keys, v=values)
def move(self, k1, k2, v):
in_k1 = v in set(self.db.fetch(k1))
save_changed = v not in set(self.db.fetch(k2))
delete_changed = k1 != k2 and in_k1
self.db.move(k1, k2, v)
# A move gets emitted as a delete followed by a save. The
# delete may be omitted if k1==k2, and the save if v in db.fetch(k2).
if delete_changed:
self._expect_delete(k1, v)
if save_changed:
self._expect_save(k2, v)
# it would be nice if this was an @rule, but that runs into race condition
# failures where an event listener is removed immediately after a
# save/delete/move operation, before the listener can fire. This is only
# relevant for DirectoryBasedExampleDatabase.
@invariant()
def events_agree(self):
if flush is not None:
flush(self.db)
wait_for(
lambda: len(self.expected_events) == len(self.actual_events), timeout=60
)
# events *generally* don't arrive out of order, but we've had
# flakes reported here, especially on weirder / older machines.
# see https://github.com/HypothesisWorks/hypothesis/issues/4274
assert Counter(self.expected_events) == Counter(self.actual_events)
def teardown(self):
shutil.rmtree(self.temp_dir)
run_state_machine_as_test(TestDatabaseListener)
def test_database_listener_memory():
_database_conforms_to_listener_api(lambda path: InMemoryExampleDatabase())
@skipif_emscripten
@pytest.mark.skipif(
settings.get_current_profile_name() == "crosshair", reason="takes ages"
)
def test_database_listener_background_write():
_database_conforms_to_listener_api(
lambda path: BackgroundWriteDatabase(InMemoryExampleDatabase()),
flush=lambda db: db._join(),
)
def test_can_remove_nonexistent_listener():
db = InMemoryExampleDatabase()
db.remove_listener(lambda event: event)
| GitHubArtifactMocks |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_transitive_extends.py | {
"start": 609,
"end": 668
} | class ____:
def foo(self, attribute):
...
| Test2_C |
python | pytorch__pytorch | torch/nn/modules/loss.py | {
"start": 37225,
"end": 40375
} | class ____(_Loss):
r"""Measures the loss given an input tensor :math:`x` and a labels tensor :math:`y`
(containing 1 or -1).
This is usually used for measuring whether two inputs are similar or
dissimilar, e.g. using the L1 pairwise distance as :math:`x`, and is typically
used for learning nonlinear embeddings or semi-supervised learning.
The loss function for :math:`n`-th sample in the mini-batch is
.. math::
l_n = \begin{cases}
x_n, & \text{if}\; y_n = 1,\\
\max \{0, margin - x_n\}, & \text{if}\; y_n = -1,
\end{cases}
and the total loss functions is
.. math::
\ell(x, y) = \begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
where :math:`L = \{l_1,\dots,l_N\}^\top`.
Args:
margin (float, optional): Has a default value of `1`.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(*)` where :math:`*` means, any number of dimensions. The sum operation
operates over all the elements.
- Target: :math:`(*)`, same shape as the input
- Output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input
"""
__constants__ = ["margin", "reduction"]
margin: float
def __init__(
self,
margin: float = 1.0,
size_average=None,
reduce=None,
reduction: str = "mean",
) -> None:
super().__init__(size_average, reduce, reduction)
self.margin = margin
def forward(self, input: Tensor, target: Tensor) -> Tensor:
"""Runs the forward pass."""
return F.hinge_embedding_loss(
input, target, margin=self.margin, reduction=self.reduction
)
| HingeEmbeddingLoss |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_task_runner.py | {
"start": 116430,
"end": 131270
} | class ____:
class _Failure(Exception):
"""Exception raised in a failed execution and received by the failure callback."""
def _execute_success(self, context):
self.results.append("execute success")
def _execute_skipped(self, context):
from airflow.sdk.exceptions import AirflowSkipException
self.results.append("execute skipped")
raise AirflowSkipException
def _execute_failure(self, context):
self.results.append("execute failure")
raise self._Failure("sorry!")
@pytest.mark.parametrize(
("execute_impl", "should_retry", "expected_state", "expected_results"),
[
pytest.param(
_execute_success,
False,
TaskInstanceState.SUCCESS,
["on-execute callback", "execute success", "on-success callback"],
id="success",
),
pytest.param(
_execute_skipped,
False,
TaskInstanceState.SKIPPED,
["on-execute callback", "execute skipped", "on-skipped callback"],
id="skipped",
),
pytest.param(
_execute_failure,
False,
TaskInstanceState.FAILED,
["on-execute callback", "execute failure", "on-failure callback"],
id="failure",
),
pytest.param(
_execute_failure,
True,
TaskInstanceState.UP_FOR_RETRY,
["on-execute callback", "execute failure", "on-retry callback"],
id="retry",
),
],
)
def test_task_runner_calls_callback(
self,
create_runtime_ti,
execute_impl,
should_retry,
expected_state,
expected_results,
):
collected_results = []
def custom_callback(context, *, kind):
collected_results.append(f"on-{kind} callback")
def failure_callback(context):
custom_callback(context, kind="failure")
assert isinstance(context["exception"], self._Failure)
class CustomOperator(BaseOperator):
results = collected_results
execute = execute_impl
task = CustomOperator(
task_id="task",
on_execute_callback=functools.partial(custom_callback, kind="execute"),
on_skipped_callback=functools.partial(custom_callback, kind="skipped"),
on_success_callback=functools.partial(custom_callback, kind="success"),
on_failure_callback=failure_callback,
on_retry_callback=functools.partial(custom_callback, kind="retry"),
)
runtime_ti = create_runtime_ti(dag_id="dag", task=task, should_retry=should_retry)
log = mock.MagicMock()
context = runtime_ti.get_template_context()
state, _, error = run(runtime_ti, context, log)
finalize(runtime_ti, state, context, log, error)
assert state == expected_state
assert collected_results == expected_results
@pytest.mark.parametrize(
("base_url", "expected_url"),
[
("http://localhost:8080/", "http://localhost:8080/dags/test_dag/runs/test_run/tasks/test_task"),
("http://localhost:8080", "http://localhost:8080/dags/test_dag/runs/test_run/tasks/test_task"),
(
"https://airflow.example.com/",
"https://airflow.example.com/dags/test_dag/runs/test_run/tasks/test_task",
),
(
"https://airflow.example.com",
"https://airflow.example.com/dags/test_dag/runs/test_run/tasks/test_task",
),
],
ids=["localhost_with_slash", "localhost_no_slash", "domain_with_slash", "domain_no_slash"],
)
def test_runtime_task_instance_log_url_property(self, create_runtime_ti, base_url, expected_url):
"""Test that RuntimeTaskInstance.log_url property correctly handles various base_url formats."""
task = BaseOperator(task_id="test_task")
runtime_ti = create_runtime_ti(task=task, dag_id="test_dag", run_id="test_run", try_number=0)
with conf_vars({("api", "base_url"): base_url}):
log_url = runtime_ti.log_url
assert log_url == expected_url
def test_task_runner_on_failure_callback_context(self, create_runtime_ti):
"""Test that on_failure_callback context has end_date and duration."""
def failure_callback(context):
ti = context["task_instance"]
assert isinstance(ti.end_date, datetime)
duration = (ti.end_date - ti.start_date).total_seconds()
assert duration is not None
assert duration >= 0
class FailingOperator(BaseOperator):
def execute(self, context):
raise AirflowException("Failing task")
task = FailingOperator(task_id="failing_task", on_failure_callback=failure_callback)
runtime_ti = create_runtime_ti(dag_id="dag", task=task)
log = mock.MagicMock()
context = runtime_ti.get_template_context()
state, _, error = run(runtime_ti, context, log)
finalize(runtime_ti, state, context, log, error)
assert state == TaskInstanceState.FAILED
def test_task_runner_on_success_callback_context(self, create_runtime_ti):
"""Test that on_success_callback context has end_date and duration."""
callback_data = {} # Store callback data for inspection
def success_callback(context):
ti = context["task_instance"]
callback_data["end_date"] = ti.end_date
callback_data["duration"] = (ti.end_date - ti.start_date).total_seconds() if ti.end_date else None
callback_data["start_date"] = ti.start_date
class SuccessOperator(BaseOperator):
def execute(self, context):
return "success"
task = SuccessOperator(task_id="success_task", on_success_callback=success_callback)
runtime_ti = create_runtime_ti(dag_id="dag", task=task)
log = mock.MagicMock()
context = runtime_ti.get_template_context()
state, _, error = run(runtime_ti, context, log)
finalize(runtime_ti, state, context, log, error)
assert state == TaskInstanceState.SUCCESS
# Verify callback was called and data was captured
assert "end_date" in callback_data, "Success callback should have been called"
assert isinstance(callback_data["end_date"], datetime), (
f"end_date should be datetime, got {type(callback_data['end_date'])}"
)
assert callback_data["duration"] is not None, (
f"duration should not be None, got {callback_data['duration']}"
)
assert callback_data["duration"] >= 0, f"duration should be >= 0, got {callback_data['duration']}"
def test_task_runner_both_callbacks_have_timing_info(self, create_runtime_ti):
"""Test that both success and failure callbacks receive accurate timing information."""
import time
success_data = {}
failure_data = {}
def success_callback(context):
ti = context["task_instance"]
success_data["end_date"] = ti.end_date
success_data["start_date"] = ti.start_date
success_data["duration"] = (ti.end_date - ti.start_date).total_seconds() if ti.end_date else None
def failure_callback(context):
ti = context["task_instance"]
failure_data["end_date"] = ti.end_date
failure_data["start_date"] = ti.start_date
failure_data["duration"] = (ti.end_date - ti.start_date).total_seconds() if ti.end_date else None
# Test success callback
class SuccessOperator(BaseOperator):
def execute(self, context):
time.sleep(0.01) # Add small delay to ensure measurable duration
return "success"
success_task = SuccessOperator(task_id="success_task", on_success_callback=success_callback)
success_runtime_ti = create_runtime_ti(dag_id="dag", task=success_task)
success_log = mock.MagicMock()
success_context = success_runtime_ti.get_template_context()
success_state, _, success_error = run(success_runtime_ti, success_context, success_log)
finalize(success_runtime_ti, success_state, success_context, success_log, success_error)
# Test failure callback
class FailureOperator(BaseOperator):
def execute(self, context):
time.sleep(0.01) # Add small delay to ensure measurable duration
raise AirflowException("Test failure")
failure_task = FailureOperator(task_id="failure_task", on_failure_callback=failure_callback)
failure_runtime_ti = create_runtime_ti(dag_id="dag", task=failure_task)
failure_log = mock.MagicMock()
failure_context = failure_runtime_ti.get_template_context()
failure_state, _, failure_error = run(failure_runtime_ti, failure_context, failure_log)
finalize(failure_runtime_ti, failure_state, failure_context, failure_log, failure_error)
# Assertions for success callback
assert success_state == TaskInstanceState.SUCCESS
assert "end_date" in success_data, "Success callback should have been called"
assert isinstance(success_data["end_date"], datetime)
assert isinstance(success_data["start_date"], datetime)
assert success_data["duration"] is not None
assert success_data["duration"] >= 0.01, (
f"Success duration should be >= 0.01, got {success_data['duration']}"
)
# Assertions for failure callback
assert failure_state == TaskInstanceState.FAILED
assert "end_date" in failure_data, "Failure callback should have been called"
assert isinstance(failure_data["end_date"], datetime)
assert isinstance(failure_data["start_date"], datetime)
assert failure_data["duration"] is not None
assert failure_data["duration"] >= 0.01, (
f"Failure duration should be >= 0.01, got {failure_data['duration']}"
)
@pytest.mark.parametrize(
(
"callback_to_test",
"execute_impl",
"should_retry",
"expected_state",
"expected_results",
"extra_exceptions",
),
[
pytest.param(
"on_success_callback",
_execute_success,
False,
TaskInstanceState.SUCCESS,
["on-execute 1", "on-execute 3", "execute success", "on-success 1", "on-success 3"],
[],
id="success",
),
pytest.param(
"on_skipped_callback",
_execute_skipped,
False,
TaskInstanceState.SKIPPED,
["on-execute 1", "on-execute 3", "execute skipped", "on-skipped 1", "on-skipped 3"],
[],
id="skipped",
),
pytest.param(
"on_failure_callback",
_execute_failure,
False,
TaskInstanceState.FAILED,
["on-execute 1", "on-execute 3", "execute failure", "on-failure 1", "on-failure 3"],
[(1, mock.call("Task failed with exception"))],
id="failure",
),
pytest.param(
"on_retry_callback",
_execute_failure,
True,
TaskInstanceState.UP_FOR_RETRY,
["on-execute 1", "on-execute 3", "execute failure", "on-retry 1", "on-retry 3"],
[(1, mock.call("Task failed with exception"))],
id="retry",
),
],
)
def test_task_runner_not_fail_on_failed_callback(
self,
create_runtime_ti,
callback_to_test,
execute_impl,
should_retry,
expected_state,
expected_results,
extra_exceptions,
):
collected_results = []
def custom_callback_1(context, *, kind):
collected_results.append(f"on-{kind} 1")
def custom_callback_2(context, *, kind):
raise Exception("sorry!")
def custom_callback_3(context, *, kind):
collected_results.append(f"on-{kind} 3")
class CustomOperator(BaseOperator):
results = collected_results
execute = execute_impl
task = CustomOperator(
task_id="task",
on_execute_callback=[
functools.partial(custom_callback_1, kind="execute"),
functools.partial(custom_callback_2, kind="execute"),
functools.partial(custom_callback_3, kind="execute"),
],
on_skipped_callback=[
functools.partial(custom_callback_1, kind="skipped"),
functools.partial(custom_callback_2, kind="skipped"),
functools.partial(custom_callback_3, kind="skipped"),
],
on_success_callback=[
functools.partial(custom_callback_1, kind="success"),
functools.partial(custom_callback_2, kind="success"),
functools.partial(custom_callback_3, kind="success"),
],
on_failure_callback=[
functools.partial(custom_callback_1, kind="failure"),
functools.partial(custom_callback_2, kind="failure"),
functools.partial(custom_callback_3, kind="failure"),
],
on_retry_callback=[
functools.partial(custom_callback_1, kind="retry"),
functools.partial(custom_callback_2, kind="retry"),
functools.partial(custom_callback_3, kind="retry"),
],
)
runtime_ti = create_runtime_ti(dag_id="dag", task=task, should_retry=should_retry)
log = mock.MagicMock()
context = runtime_ti.get_template_context()
state, _, error = run(runtime_ti, context, log)
finalize(runtime_ti, state, context, log, error)
assert state == expected_state, error
assert collected_results == expected_results
expected_exception_logs = [
mock.call("Failed to run task callback", kind="on_execute_callback", index=1, callback=mock.ANY),
mock.call("Failed to run task callback", kind=callback_to_test, index=1, callback=mock.ANY),
]
for index, calls in extra_exceptions:
expected_exception_logs.insert(index, calls)
assert log.exception.mock_calls == expected_exception_logs
| TestTaskRunnerCallsCallbacks |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pep8_naming/ignore_names/N804.py | {
"start": 26,
"end": 246
} | class ____:
def __init_subclass__(self, default_name, **kwargs):
...
@classmethod
def badAllowed(self, x, /, other):
...
@classmethod
def stillBad(self, x, /, other):
...
| Class |
python | ray-project__ray | rllib/examples/envs/classes/fast_image_env.py | {
"start": 88,
"end": 574
} | class ____(gym.Env):
def __init__(self, config):
self.zeros = np.zeros((84, 84, 4))
self.action_space = Discrete(2)
self.observation_space = Box(0.0, 1.0, shape=(84, 84, 4), dtype=np.float32)
self.i = 0
def reset(self, *, seed=None, options=None):
self.i = 0
return self.zeros, {}
def step(self, action):
self.i += 1
done = truncated = self.i > 1000
return self.zeros, 1, done, truncated, {}
| FastImageEnv |
python | numpy__numpy | numpy/distutils/npy_pkg_config.py | {
"start": 1857,
"end": 3943
} | class ____:
"""
Object containing build information about a library.
Parameters
----------
name : str
The library name.
description : str
Description of the library.
version : str
Version string.
sections : dict
The sections of the configuration file for the library. The keys are
the section headers, the values the text under each header.
vars : class instance
A `VariableSet` instance, which contains ``(name, value)`` pairs for
variables defined in the configuration file for the library.
requires : sequence, optional
The required libraries for the library to be installed.
Notes
-----
All input parameters (except "sections" which is a method) are available as
attributes of the same name.
"""
def __init__(self, name, description, version, sections, vars, requires=None):
self.name = name
self.description = description
if requires:
self.requires = requires
else:
self.requires = []
self.version = version
self._sections = sections
self.vars = vars
def sections(self):
"""
Return the section headers of the config file.
Parameters
----------
None
Returns
-------
keys : list of str
The list of section headers.
"""
return list(self._sections.keys())
def cflags(self, section="default"):
val = self.vars.interpolate(self._sections[section]['cflags'])
return _escape_backslash(val)
def libs(self, section="default"):
val = self.vars.interpolate(self._sections[section]['libs'])
return _escape_backslash(val)
def __str__(self):
m = ['Name: %s' % self.name, 'Description: %s' % self.description]
if self.requires:
m.append('Requires:')
else:
m.append('Requires: %s' % ",".join(self.requires))
m.append('Version: %s' % self.version)
return "\n".join(m)
| LibraryInfo |
python | matplotlib__matplotlib | lib/matplotlib/sphinxext/plot_directive.py | {
"start": 12507,
"end": 17031
} | class ____(EnvironmentCollector):
def process_doc(self, app, doctree):
pass
def clear_doc(self, app, env, docname):
if docname in env.mpl_plot_image_basenames:
del env.mpl_plot_image_basenames[docname]
def merge_other(self, app, env, docnames, other):
for docname in other.mpl_plot_image_basenames:
env.mpl_plot_image_basenames[docname].update(
other.mpl_plot_image_basenames[docname])
# -----------------------------------------------------------------------------
# Doctest handling
# -----------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def _split_code_at_show(text, function_name):
"""Split code at plt.show()."""
is_doctest = contains_doctest(text)
if function_name is None:
parts = []
part = []
for line in text.split("\n"):
if ((not is_doctest and line.startswith('plt.show(')) or
(is_doctest and line.strip() == '>>> plt.show()')):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
else:
parts = [text]
return is_doctest, parts
# -----------------------------------------------------------------------------
# Template
# -----------------------------------------------------------------------------
_SOURCECODE = """
{{ source_code }}
.. only:: html
{% if src_name or (html_show_formats and not multi_image) %}
(
{%- if src_name -%}
:download:`Source code <{{ build_dir }}/{{ src_name }}>`
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if src_name or not loop.first -%}, {% endif -%}
:download:`{{ fmt }} <{{ build_dir }}/{{ img.basename }}.{{ fmt }}>`
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
"""
TEMPLATE_SRCSET = _SOURCECODE + """
{% for img in images %}
.. figure-mpl:: {{ build_dir }}/{{ img.basename }}.{{ default_fmt }}
{% for option in options -%}
{{ option }}
{% endfor %}
{%- if caption -%}
{{ caption }} {# appropriate leading whitespace added beforehand #}
{% endif -%}
{%- if srcset -%}
:srcset: {{ build_dir }}/{{ img.basename }}.{{ default_fmt }}
{%- for sr in srcset -%}
, {{ build_dir }}/{{ img.basename }}.{{ sr }}.{{ default_fmt }} {{sr}}
{%- endfor -%}
{% endif %}
{% if html_show_formats and multi_image %}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
:download:`{{ fmt }} <{{ build_dir }}/{{ img.basename }}.{{ fmt }}>`
{%- endfor -%}
)
{% endif %}
{% endfor %}
.. only:: not html
{% for img in images %}
.. figure-mpl:: {{ build_dir }}/{{ img.basename }}.*
{% for option in options -%}
{{ option }}
{% endfor -%}
{{ caption }} {# appropriate leading whitespace added beforehand #}
{% endfor %}
"""
TEMPLATE = _SOURCECODE + """
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.{{ default_fmt }}
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
:download:`{{ fmt }} <{{ build_dir }}/{{ img.basename }}.{{ fmt }}>`
{%- endfor -%}
)
{%- endif -%}
{{ caption }} {# appropriate leading whitespace added beforehand #}
{% endfor %}
.. only:: not html
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.*
{% for option in options -%}
{{ option }}
{% endfor -%}
{{ caption }} {# appropriate leading whitespace added beforehand #}
{% endfor %}
"""
exception_template = """
.. only:: html
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
| _FilenameCollector |
python | mlflow__mlflow | mlflow/tracing/utils/search.py | {
"start": 2493,
"end": 3010
} | class ____(NamedTuple):
"""
Represents a parsed field from a string of the form 'span_name.[inputs|outputs]' or
'span_name.[inputs|outputs].field_name'.
"""
span_name: str
field_type: Literal["inputs", "outputs"]
field_name: str | None
def __str__(self) -> str:
return (
f"{self.span_name}.{self.field_type}.{self.field_name}"
if self.field_name is not None
else f"{self.span_name}.{self.field_type}"
)
_BACKTICK = "`"
| _ParsedField |
python | sanic-org__sanic | sanic/mixins/listeners.py | {
"start": 307,
"end": 863
} | class ____(str, Enum):
def _generate_next_value_(name: str, *args) -> str: # type: ignore
return name.lower()
BEFORE_SERVER_START = "server.init.before"
AFTER_SERVER_START = "server.init.after"
BEFORE_SERVER_STOP = "server.shutdown.before"
AFTER_SERVER_STOP = "server.shutdown.after"
MAIN_PROCESS_START = auto()
MAIN_PROCESS_READY = auto()
MAIN_PROCESS_STOP = auto()
RELOAD_PROCESS_START = auto()
RELOAD_PROCESS_STOP = auto()
BEFORE_RELOAD_TRIGGER = auto()
AFTER_RELOAD_TRIGGER = auto()
| ListenerEvent |
python | sanic-org__sanic | sanic/handlers/directory.py | {
"start": 422,
"end": 3592
} | class ____:
"""Serve files from a directory.
Args:
uri (str): The URI to serve the files at.
directory (Path): The directory to serve files from.
directory_view (bool): Whether to show a directory listing or not.
index (Optional[Union[str, Sequence[str]]]): The index file(s) to
serve if the directory is requested. Defaults to None.
"""
def __init__(
self,
uri: str,
directory: Path,
directory_view: bool = False,
index: Optional[Union[str, Sequence[str]]] = None,
) -> None:
if isinstance(index, str):
index = [index]
elif index is None:
index = []
self.base = uri.strip("/")
self.directory = directory
self.directory_view = directory_view
self.index = tuple(index)
async def handle(self, request: Request, path: str):
"""Handle the request.
Args:
request (Request): The incoming request object.
path (str): The path to the file to serve.
Raises:
NotFound: If the file is not found.
IsADirectoryError: If the path is a directory and directory_view is False.
Returns:
Response: The response object.
""" # noqa: E501
current = path.strip("/")[len(self.base) :].strip("/") # noqa: E203
for file_name in self.index:
index_file = self.directory / current / file_name
if index_file.is_file():
return await file(index_file)
if self.directory_view:
return self._index(
self.directory / current, path, request.app.debug
)
if self.index:
raise NotFound("File not found")
raise IsADirectoryError(f"{self.directory.as_posix()} is a directory")
def _index(self, location: Path, path: str, debug: bool):
# Remove empty path elements, append slash
if "//" in path or not path.endswith("/"):
return redirect(
"/" + "".join([f"{p}/" for p in path.split("/") if p])
)
# Render file browser
page = DirectoryPage(self._iter_files(location), path, debug)
return html(page.render())
def _prepare_file(self, path: Path) -> dict[str, Union[int, str]]:
stat = path.stat()
modified = (
datetime.fromtimestamp(stat.st_mtime)
.isoformat()[:19]
.replace("T", " ")
)
is_dir = S_ISDIR(stat.st_mode)
icon = "📁" if is_dir else "📄"
file_name = path.name
if is_dir:
file_name += "/"
return {
"priority": is_dir * -1,
"file_name": file_name,
"icon": icon,
"file_access": modified,
"file_size": stat.st_size,
}
def _iter_files(self, location: Path) -> Iterable[FileInfo]:
prepared = [self._prepare_file(f) for f in location.iterdir()]
for item in sorted(prepared, key=itemgetter("priority", "file_name")):
del item["priority"]
yield cast(FileInfo, item)
| DirectoryHandler |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.