language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0075_change_mkdocs_name.py | {
"start": 149,
"end": 1068
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0074_backport_indexes"),
]
operations = [
migrations.AlterField(
model_name="project",
name="documentation_type",
field=models.CharField(
choices=[
("sphinx", "Sphinx Html"),
("mkdocs", "Mkdocs"),
("sphinx_htmldir", "Sphinx HtmlDir"),
("sphinx_singlehtml", "Sphinx Single Page HTML"),
],
default="sphinx",
help_text='Type of documentation you are building. <a href="http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info on sphinx builders</a>.',
max_length=20,
verbose_name="Documentation type",
),
),
]
| Migration |
python | cookiecutter__cookiecutter | cookiecutter/prompt.py | {
"start": 1267,
"end": 5042
} | class ____(Confirm):
"""A prompt that returns a boolean for yes/no questions."""
yes_choices = ["1", "true", "t", "yes", "y", "on"]
no_choices = ["0", "false", "f", "no", "n", "off"]
def process_response(self, value: str) -> bool:
"""Convert choices to a bool."""
value = value.strip().lower()
if value in self.yes_choices:
return True
if value in self.no_choices:
return False
raise InvalidResponse(self.validate_error_message)
def read_user_yes_no(var_name, default_value, prompts=None, prefix: str = ""):
"""Prompt the user to reply with 'yes' or 'no' (or equivalent values).
- These input values will be converted to ``True``:
"1", "true", "t", "yes", "y", "on"
- These input values will be converted to ``False``:
"0", "false", "f", "no", "n", "off"
Actual parsing done by :func:`prompt`; Check this function codebase change in
case of unexpected behaviour.
:param str question: Question to the user
:param default_value: Value that will be returned if no input happens
"""
question = (
prompts[var_name]
if prompts and var_name in prompts and prompts[var_name]
else var_name
)
return YesNoPrompt.ask(f"{prefix}{question}", default=default_value)
def read_repo_password(question: str) -> str:
"""Prompt the user to enter a password.
:param question: Question to the user
"""
return Prompt.ask(question, password=True)
def read_user_choice(var_name: str, options: list, prompts=None, prefix: str = ""):
"""Prompt the user to choose from several options for the given variable.
The first item will be returned if no input happens.
:param var_name: Variable as specified in the context
:param list options: Sequence of options that are available to select from
:return: Exactly one item of ``options`` that has been chosen by the user
"""
if not options:
raise ValueError
choice_map = OrderedDict((f'{i}', value) for i, value in enumerate(options, 1))
choices = choice_map.keys()
question = f"Select {var_name}"
choice_lines: Iterator[str] = starmap(
" [bold magenta]{}[/] - [bold]{}[/]".format, choice_map.items()
)
# Handle if human-readable prompt is provided
if prompts and var_name in prompts:
if isinstance(prompts[var_name], str):
question = prompts[var_name]
else:
if "__prompt__" in prompts[var_name]:
question = prompts[var_name]["__prompt__"]
choice_lines = (
f" [bold magenta]{i}[/] - [bold]{prompts[var_name][p]}[/]"
if p in prompts[var_name]
else f" [bold magenta]{i}[/] - [bold]{p}[/]"
for i, p in choice_map.items()
)
prompt = '\n'.join(
(
f"{prefix}{question}",
"\n".join(choice_lines),
" Choose from",
)
)
user_choice = Prompt.ask(prompt, choices=list(choices), default=next(iter(choices)))
return choice_map[user_choice]
DEFAULT_DISPLAY = 'default'
def process_json(user_value: str):
"""Load user-supplied value as a JSON dict.
:param user_value: User-supplied value to load as a JSON dict
"""
try:
user_dict = json.loads(user_value, object_pairs_hook=OrderedDict)
except Exception as error:
# Leave it up to click to ask the user again
msg = 'Unable to decode to JSON.'
raise InvalidResponse(msg) from error
if not isinstance(user_dict, dict):
# Leave it up to click to ask the user again
msg = 'Requires JSON dict.'
raise InvalidResponse(msg)
return user_dict
| YesNoPrompt |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 17414,
"end": 20240
} | class ____(BaseField):
"""Datetime field.
Uses the python-dateutil library if available alternatively use time.strptime
to parse the dates. Note: python-dateutil's parser is fully featured and when
installed you can utilise it to convert varying types of date formats into valid
python datetime objects.
Note: To default the field to the current datetime, use: DateTimeField(default=datetime.utcnow)
Note: Microseconds are rounded to the nearest millisecond.
Pre UTC microsecond support is effectively broken.
Use :class:`~mongoengine.fields.ComplexDateTimeField` if you
need accurate microsecond support.
"""
def validate(self, value):
new_value = self.to_mongo(value)
if not isinstance(new_value, (datetime.datetime, datetime.date)):
self.error('cannot parse date "%s"' % value)
def to_mongo(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
if callable(value):
return value()
if isinstance(value, str):
return self._parse_datetime(value)
else:
return None
@staticmethod
def _parse_datetime(value):
# Attempt to parse a datetime from a string
value = value.strip()
if not value:
return None
if dateutil:
try:
return dateutil.parser.parse(value)
except (TypeError, ValueError, OverflowError):
return None
# split usecs, because they are not recognized by strptime.
if "." in value:
try:
value, usecs = value.split(".")
usecs = int(usecs)
except ValueError:
return None
else:
usecs = 0
kwargs = {"microsecond": usecs}
try: # Seconds are optional, so try converting seconds first.
return datetime.datetime(
*time.strptime(value, "%Y-%m-%d %H:%M:%S")[:6], **kwargs
)
except ValueError:
try: # Try without seconds.
return datetime.datetime(
*time.strptime(value, "%Y-%m-%d %H:%M")[:5], **kwargs
)
except ValueError: # Try without hour/minutes/seconds.
try:
return datetime.datetime(
*time.strptime(value, "%Y-%m-%d")[:3], **kwargs
)
except ValueError:
return None
def prepare_query_value(self, op, value):
return super().prepare_query_value(op, self.to_mongo(value))
| DateTimeField |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 95959,
"end": 96931
} | class ____(system_info):
section = 'x11'
notfounderror = X11NotFoundError
_lib_names = ['X11']
def __init__(self):
system_info.__init__(self,
default_lib_dirs=default_x11_lib_dirs,
default_include_dirs=default_x11_include_dirs)
def calc_info(self):
if sys.platform in ['win32']:
return
lib_dirs = self.get_lib_dirs()
include_dirs = self.get_include_dirs()
opt = self.get_option_single('x11_libs', 'libraries')
x11_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, x11_libs, [])
if info is None:
return
inc_dir = None
for d in include_dirs:
if self.combine_paths(d, 'X11/X.h'):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
self.set_info(**info)
| x11_info |
python | getsentry__sentry-python | sentry_sdk/integrations/argv.py | {
"start": 268,
"end": 911
} | class ____(Integration):
identifier = "argv"
@staticmethod
def setup_once():
# type: () -> None
@add_global_event_processor
def processor(event, hint):
# type: (Event, Optional[Hint]) -> Optional[Event]
if sentry_sdk.get_client().get_integration(ArgvIntegration) is not None:
extra = event.setdefault("extra", {})
# If some event processor decided to set extra to e.g. an
# `int`, don't crash. Not here.
if isinstance(extra, dict):
extra["sys.argv"] = sys.argv
return event
| ArgvIntegration |
python | ijl__orjson | test/test_jsonchecker.py | {
"start": 1192,
"end": 6218
} | class ____:
def _run_fail_json(self, filename, exc=orjson.JSONDecodeError):
data = read_fixture_str(filename, "jsonchecker")
pytest.raises(exc, orjson.loads, data)
def _run_pass_json(self, filename, match=""):
data = read_fixture_str(filename, "jsonchecker")
assert orjson.dumps(orjson.loads(data)) == match
def test_fail01(self):
"""
fail01.json
"""
self._run_pass_json(
"fail01.json",
b'"A JSON payload should be an object or array, not a string."',
)
def test_fail02(self):
"""
fail02.json
"""
self._run_fail_json("fail02.json", orjson.JSONDecodeError) # EOF
def test_fail03(self):
"""
fail03.json
"""
self._run_fail_json("fail03.json")
def test_fail04(self):
"""
fail04.json
"""
self._run_fail_json("fail04.json")
def test_fail05(self):
"""
fail05.json
"""
self._run_fail_json("fail05.json")
def test_fail06(self):
"""
fail06.json
"""
self._run_fail_json("fail06.json")
def test_fail07(self):
"""
fail07.json
"""
self._run_fail_json("fail07.json")
def test_fail08(self):
"""
fail08.json
"""
self._run_fail_json("fail08.json")
def test_fail09(self):
"""
fail09.json
"""
self._run_fail_json("fail09.json")
def test_fail10(self):
"""
fail10.json
"""
self._run_fail_json("fail10.json")
def test_fail11(self):
"""
fail11.json
"""
self._run_fail_json("fail11.json")
def test_fail12(self):
"""
fail12.json
"""
self._run_fail_json("fail12.json")
def test_fail13(self):
"""
fail13.json
"""
self._run_fail_json("fail13.json")
def test_fail14(self):
"""
fail14.json
"""
self._run_fail_json("fail14.json")
def test_fail15(self):
"""
fail15.json
"""
self._run_fail_json("fail15.json")
def test_fail16(self):
"""
fail16.json
"""
self._run_fail_json("fail16.json")
def test_fail17(self):
"""
fail17.json
"""
self._run_fail_json("fail17.json")
def test_fail18(self):
"""
fail18.json
"""
self._run_pass_json(
"fail18.json",
b'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
)
def test_fail19(self):
"""
fail19.json
"""
self._run_fail_json("fail19.json")
def test_fail20(self):
"""
fail20.json
"""
self._run_fail_json("fail20.json")
def test_fail21(self):
"""
fail21.json
"""
self._run_fail_json("fail21.json")
def test_fail22(self):
"""
fail22.json
"""
self._run_fail_json("fail22.json")
def test_fail23(self):
"""
fail23.json
"""
self._run_fail_json("fail23.json")
def test_fail24(self):
"""
fail24.json
"""
self._run_fail_json("fail24.json")
def test_fail25(self):
"""
fail25.json
"""
self._run_fail_json("fail25.json")
def test_fail26(self):
"""
fail26.json
"""
self._run_fail_json("fail26.json")
def test_fail27(self):
"""
fail27.json
"""
self._run_fail_json("fail27.json")
def test_fail28(self):
"""
fail28.json
"""
self._run_fail_json("fail28.json")
def test_fail29(self):
"""
fail29.json
"""
self._run_fail_json("fail29.json")
def test_fail30(self):
"""
fail30.json
"""
self._run_fail_json("fail30.json")
def test_fail31(self):
"""
fail31.json
"""
self._run_fail_json("fail31.json")
def test_fail32(self):
"""
fail32.json
"""
self._run_fail_json("fail32.json", orjson.JSONDecodeError) # EOF
def test_fail33(self):
"""
fail33.json
"""
self._run_fail_json("fail33.json")
def test_pass01(self):
"""
pass01.json
"""
self._run_pass_json("pass01.json", PATTERN_1)
def test_pass02(self):
"""
pass02.json
"""
self._run_pass_json(
"pass02.json",
b'[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]',
)
def test_pass03(self):
"""
pass03.json
"""
self._run_pass_json(
"pass03.json",
b'{"JSON Test Pattern pass3":{"The outermost value":"must be '
b'an object or array.","In this test":"It is an object."}}',
)
| TestJsonChecker |
python | pandas-dev__pandas | pandas/tests/indexes/period/test_indexing.py | {
"start": 27107,
"end": 27899
} | class ____:
def test_asof_locs_mismatched_type(self):
dti = date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
pi2 = dti.to_period("h")
mask = np.array([0, 1, 0], dtype=bool)
msg = "must be DatetimeIndex or PeriodIndex"
with pytest.raises(TypeError, match=msg):
pi.asof_locs(pd.Index(pi.asi8, dtype=np.int64), mask)
with pytest.raises(TypeError, match=msg):
pi.asof_locs(pd.Index(pi.asi8, dtype=np.float64), mask)
with pytest.raises(TypeError, match=msg):
# TimedeltaIndex
pi.asof_locs(dti - dti, mask)
msg = "Input has different freq=h"
with pytest.raises(libperiod.IncompatibleFrequency, match=msg):
pi.asof_locs(pi2, mask)
| TestAsOfLocs |
python | pytorch__pytorch | test/test_sparse.py | {
"start": 189579,
"end": 192303
} | class ____(TestCase):
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_cuda_from_cpu(self):
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device"):
torch.sparse_coo_tensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4),
[3, 4, 4])
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device"):
torch.sparse_coo_tensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4, 0),
[3, 4, 4, 0])
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device"):
torch.sparse_coo_tensor(torch.empty(1, 0).long().cuda(),
torch.randn(0, 4, 4, 0),
[0, 4, 4, 0])
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_cuda_sparse_cpu_dense_add(self):
x = torch.zeros(3, 4, 4)
sparse_y = torch.sparse_coo_tensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4).cuda(),
[3, 4, 4])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
x = torch.zeros(3, 4, 4, 0)
sparse_y = torch.sparse_coo_tensor(torch.zeros(1, 4).long().cuda(),
torch.randn(4, 4, 4, 0).cuda(),
[3, 4, 4, 0])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
x = torch.zeros(0, 4, 4, 0)
sparse_y = torch.sparse_coo_tensor(torch.empty(1, 0).long().cuda(),
torch.randn(0, 4, 4, 0).cuda(),
[0, 4, 4, 0])
with self.assertRaisesRegex(RuntimeError, "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"):
x + sparse_y
def _sparse_to_dense(tensor):
if tensor.dtype != torch.bool:
return tensor.to_dense(masked_grad=True)
# to_dense uses coalesce which isn't implemented for bool
return tensor.to(torch.int8).to_dense().to(torch.bool)
_sparse_unary_ops = ops(mps_ops_modifier(sparse_unary_ufuncs, sparse=True), dtypes=OpDTypes.supported,
allowed_dtypes=all_types_and_complex())
| TestSparseOneOff |
python | apache__airflow | providers/atlassian/jira/tests/unit/atlassian/jira/operators/test_jira.py | {
"start": 1607,
"end": 4447
} | class ____:
@pytest.fixture(autouse=True)
def setup_test_cases(self, monkeypatch):
monkeypatch.setenv(
"AIRFLOW_CONN_JIRA_DEFAULT",
connection_as_json(
Connection(
conn_id="jira_default",
conn_type="jira",
host="https://localhost/jira/",
port=443,
extra='{"verify": false, "project": "AIRFLOW"}',
)
),
)
# Mock task instance for xcom_push
mock_ti = mock.Mock()
mock_ti.xcom_push = mock.Mock(return_value=None)
self.mock_ti = mock_ti
self.mock_context = {"task_instance": mock_ti}
def test_operator_init_with_optional_args(self):
jira_operator = JiraOperator(task_id="jira_list_issue_types", jira_method="issue_types")
assert jira_operator.jira_method_args == {}
assert jira_operator.result_processor is None
assert jira_operator.get_jira_resource_method is None
def test_project_issue_count(self, mocked_jira_client):
mocked_jira_client.return_value.get_project_issues_count.return_value = 10
op = JiraOperator(
task_id="get-issue-count",
jira_method="get_project_issues_count",
jira_method_args={"project": "ABC"},
)
op.execute(self.mock_context)
assert mocked_jira_client.called
assert mocked_jira_client.return_value.get_project_issues_count.called
self.mock_ti.xcom_push.assert_called_once_with(key="id", value=None)
def test_issue_search(self, mocked_jira_client):
jql_str = "issuekey=TEST-1226"
mocked_jira_client.return_value.jql_get_list_of_tickets.return_value = MINIMAL_TEST_TICKET
op = JiraOperator(
task_id="search-ticket-test",
jira_method="jql_get_list_of_tickets",
jira_method_args={"jql": jql_str, "limit": "1"},
)
op.execute(self.mock_context)
assert mocked_jira_client.called
assert mocked_jira_client.return_value.jql_get_list_of_tickets.called
self.mock_ti.xcom_push.assert_called_once_with(key="id", value="911539")
def test_update_issue(self, mocked_jira_client):
mocked_jira_client.return_value.issue_add_comment.return_value = MINIMAL_TEST_TICKET
op = JiraOperator(
task_id="add_comment_test",
jira_method="issue_add_comment",
jira_method_args={"issue_key": MINIMAL_TEST_TICKET.get("key"), "comment": "this is test comment"},
)
op.execute(self.mock_context)
assert mocked_jira_client.called
assert mocked_jira_client.return_value.issue_add_comment.called
self.mock_ti.xcom_push.assert_called_once_with(key="id", value="911539")
| TestJiraOperator |
python | huggingface__transformers | src/transformers/models/gptj/modeling_gptj.py | {
"start": 40282,
"end": 43995
} | class ____(GPTJPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = GPTJModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, QuestionAnsweringModelOutput]:
r"""
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1).to(start_logits.device)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1).to(end_logits.device)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"GPTJForCausalLM",
"GPTJForQuestionAnswering",
"GPTJForSequenceClassification",
"GPTJModel",
"GPTJPreTrainedModel",
]
| GPTJForQuestionAnswering |
python | qdrant__qdrant-client | tests/congruence_tests/test_sparse_idf_search.py | {
"start": 493,
"end": 3227
} | class ____:
__test__ = False
def __init__(self):
self.query_text = generate_random_sparse_vector(sparse_text_vector_size, density=0.1)
def simple_search_text(self, client: QdrantBase) -> list[models.ScoredPoint]:
return client.query_points(
collection_name=COLLECTION_NAME,
using="sparse-text",
query=self.query_text,
with_payload=True,
with_vectors=["sparse-text"],
limit=10,
).points
def test_simple_search():
fixture_points = generate_sparse_fixtures(
vectors_sizes={"sparse-text": sparse_text_vector_size},
even_sparse=False,
with_payload=False,
)
searcher = TestSimpleSparseSearcher()
local_client = init_local()
init_client(
local_client,
fixture_points,
sparse_vectors_config=sparse_vectors_idf_config,
vectors_config={},
)
assert (
local_client.get_collection(COLLECTION_NAME)
.config.params.sparse_vectors["sparse-text"]
.modifier
== models.Modifier.IDF
)
remote_client = init_remote()
init_client(
remote_client,
fixture_points,
sparse_vectors_config=sparse_vectors_idf_config,
vectors_config={},
)
compare_client_results(local_client, remote_client, searcher.simple_search_text)
local_client.update_collection(
collection_name=COLLECTION_NAME,
sparse_vectors_config={
"sparse-text": models.SparseVectorParams(
modifier=models.Modifier.NONE,
)
},
)
assert (
local_client.get_collection(COLLECTION_NAME)
.config.params.sparse_vectors["sparse-text"]
.modifier
== models.Modifier.NONE
)
def test_search_with_persistence():
import tempfile
fixture_points = generate_sparse_fixtures(
vectors_sizes={"sparse-text": sparse_text_vector_size},
even_sparse=False,
with_payload=False,
)
searcher = TestSimpleSparseSearcher()
with tempfile.TemporaryDirectory() as tmpdir:
local_client = init_local(tmpdir)
init_client(
local_client,
fixture_points,
sparse_vectors_config=sparse_vectors_idf_config,
vectors_config={},
)
del local_client
local_client_2 = init_local(tmpdir)
remote_client = init_remote()
init_client(
remote_client,
fixture_points,
sparse_vectors_config=sparse_vectors_idf_config,
vectors_config={},
)
compare_client_results(local_client_2, remote_client, searcher.simple_search_text)
| TestSimpleSparseSearcher |
python | tensorflow__tensorflow | tensorflow/python/distribute/tpu_strategy.py | {
"start": 36742,
"end": 79888
} | class ____(distribute_lib.StrategyExtendedV1):
"""Implementation of TPUStrategy."""
def __init__(
self,
container_strategy,
tpu_cluster_resolver=None,
steps_per_run=None,
device_assignment=None,
use_spmd_for_xla_partitioning=False,
):
super().__init__(container_strategy)
if tpu_cluster_resolver is None:
tpu_cluster_resolver = tpu_cluster_resolver_lib.TPUClusterResolver("")
if steps_per_run is None:
# TODO(frankchn): Warn when we are being used by DS/Keras and this is
# not specified.
steps_per_run = 1
# `self._tpu_function_cache` is a dict of `tf.function`s, thus if a
# `tf.function` is passed into `strategy.run` in eager mode, the
# `tf.function` won't get retraced.
self._tpu_function_cache = weakref.WeakKeyDictionary()
self._tpu_cluster_resolver = tpu_cluster_resolver
self._tpu_metadata = self._tpu_cluster_resolver.get_tpu_system_metadata()
self._device_assignment = device_assignment
tpu_devices_flat = [
d.name for d in self._tpu_metadata.devices if "device:TPU:" in d.name]
# `self._tpu_devices` is a two-dimensional NumPy array of strings. It is
# indexed using `[replica_id][logical_device_id]`.
if device_assignment is None:
self._tpu_devices = np.array(
[[d] for d in tpu_devices_flat], dtype=object)
else:
job_name = device_spec.DeviceSpecV2.from_string(tpu_devices_flat[0]).job
tpu_devices = []
for replica_id in range(device_assignment.num_replicas):
replica_devices = []
for logical_core in range(device_assignment.num_cores_per_replica):
replica_devices.append(
device_util.canonicalize(
device_assignment.tpu_device(
replica=replica_id,
logical_core=logical_core,
job=job_name)))
tpu_devices.append(replica_devices)
self._tpu_devices = np.array(tpu_devices, dtype=object)
self._host_device = device_util.get_host_for_device(self._tpu_devices[0][0])
# Preload the data onto the TPUs. Currently we always preload onto logical
# device 0 for each replica.
# TODO(cjfj): Create `InputWorkers` lazily, allowing users to place the
# input onto a different logical device?
self._device_input_worker_devices = collections.OrderedDict()
self._host_input_worker_devices = collections.OrderedDict()
for tpu_device in self._tpu_devices[:, 0]:
host_device = device_util.get_host_for_device(tpu_device)
self._device_input_worker_devices.setdefault(host_device, [])
self._device_input_worker_devices[host_device].append(tpu_device)
self._host_input_worker_devices.setdefault(host_device, [])
self._host_input_worker_devices[host_device].append(host_device)
# TODO(sourabhbajaj): Remove this once performance of running one step
# at a time is comparable to multiple steps.
self.steps_per_run = steps_per_run
self._require_static_shapes = True
self.experimental_enable_get_next_as_optional = True
self._logical_device_stack = [0]
if context.executing_eagerly():
# In async remote eager, we want to sync the executors before exiting the
# program.
atexit.register(context.async_wait)
# Flag to turn on VariablePolicy. Var policy is deprecated because there is
# another effort unifying DistributedVariables (see values_v2.py). SPMD XLA
# partitioning is not implemented for var policies.
# TODO(b/202048882): remove var policy from TPUStrategy.
self._use_var_policy = not use_spmd_for_xla_partitioning
# Flag to enable XLA SPMD partitioning.
self._use_spmd_for_xla_partitioning = use_spmd_for_xla_partitioning
self._using_custom_device = False
devices = self._tpu_devices[:, self._logical_device_stack[-1]]
for d in devices:
if context.is_custom_device(d):
self._using_custom_device = True
break
# This is a flag to enable data reorder which is used
# to match IteratorGetNext's device with the TPUExecute device.
self._enable_data_reorder = False
def _place_input_on_local_cpu_devices(self):
"""Place input on local CPU devices.
For example, if the tpu_devices are:
'/job:worker/replica:0/task:0/device:TPU:0',
'/job:worker/replica:0/task:1/device:TPU:0',
'/job:worker/replica:0/task:1/device:TPU:1',
'/job:worker/replica:0/task:0/device:TPU:1',
the host_input_worker_devices will be:
{
'/job:worker/replica:0/task:0/device:CPU:0': [
'/job:worker/replica:0/task:0/device:TPU:0',
],
'/job:worker/replica:0/task:1/device:CPU:0', [
'/job:worker/replica:0/task:1/device:TPU:0',
],
'/job:worker/replica:0/task:1/device:CPU:1': [
'/job:worker/replica:0/task:1/device:TPU:1',
],
'/job:worker/replica:0/task:0/device:CPU:1': [
'/job:worker/replica:0/task:0/device:TPU:1',
],
}
This will make sure that the input is placed on the corresponding host CPU
device if the device assignment is set.
"""
self._device_input_worker_devices = collections.OrderedDict()
self._host_input_worker_devices = collections.OrderedDict()
for tpu_device in self._tpu_devices[:, 0]:
host_device = device_util.get_host_for_device(
tpu_device,
device_index=tf_device.DeviceSpec.from_string(
tpu_device
).device_index,
)
self._device_input_worker_devices.setdefault(host_device, [])
self._device_input_worker_devices[host_device].append(tpu_device)
self._host_input_worker_devices.setdefault(host_device, [])
self._host_input_worker_devices[host_device].append(host_device)
def _get_replica_order(self):
"""Get the replica order based on the tpu device order.
For example, if the tpu_devices are:
'/job:worker/replica:0/task:0/device:TPU:0',
'/job:worker/replica:0/task:0/device:TPU:2',
'/job:worker/replica:0/task:1/device:TPU:0',
'/job:worker/replica:0/task:1/device:TPU:2',
'/job:worker/replica:0/task:1/device:TPU:6',
'/job:worker/replica:0/task:1/device:TPU:4',
'/job:worker/replica:0/task:0/device:TPU:6',
'/job:worker/replica:0/task:0/device:TPU:4',
the returned replica order will be:
[0, 1, 7, 6, 2, 3, 5, 4]
This replica order will be used to reorder the data returned by the
iterators,
so that they can be placed on the same node as their computation graphs.
Returns:
A list containing the order ids of corresponding TPU devices.
"""
if not self._enable_data_reorder:
return None
tpu_devices = self._tpu_devices[:, 0]
devices_with_ids = []
for i, tpu_device in enumerate(tpu_devices):
spec = tf_device.DeviceSpec.from_string(tpu_device)
devices_with_ids.append((
(
spec.job,
spec.replica,
spec.device_type,
spec.task,
spec.device_index,
),
i,
))
return [i for _, i in sorted(devices_with_ids)]
def _validate_colocate_with_variable(self, colocate_with_variable):
distribute_utils.validate_colocate(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
"""Make iterators for each of the TPU hosts."""
input_workers = input_lib.InputWorkers(
tuple(self._device_input_worker_devices.items()))
return input_lib_v1.DatasetIterator(
dataset,
input_workers,
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
input_contexts = []
input_workers = input_lib.InputWorkers(
tuple(self._device_input_worker_devices.items()))
num_workers = input_workers.num_workers
for i in range(num_workers):
input_contexts.append(
distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
return input_lib_v1.InputFunctionIterator(input_fn, input_workers,
input_contexts,
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, numpy_dataset.SingleDevice(self._host_device),
session)
def _get_input_workers(self, options):
if not options or options.experimental_fetch_to_device:
return input_lib.InputWorkers(
tuple(self._device_input_worker_devices.items()))
else:
return input_lib.InputWorkers(
tuple(self._host_input_worker_devices.items()))
def _check_spec(self, element_spec):
if isinstance(element_spec, values.PerReplicaSpec):
element_spec = element_spec._component_specs # pylint: disable=protected-access
specs = nest.flatten_with_joined_string_paths(element_spec)
for path, spec in specs:
if isinstance(spec, (sparse_tensor.SparseTensorSpec,
ragged_tensor.RaggedTensorSpec)):
raise ValueError(
"Found tensor {} with spec {}. TPUStrategy does not support "
"distributed datasets with device prefetch when using sparse or "
"ragged tensors. If you intend to use sparse or ragged tensors, "
"please pass a tf.distribute.InputOptions object with "
"experimental_fetch_to_device set to False to your dataset "
"distribution function.".format(path, type(spec)))
def _experimental_distribute_dataset(self, dataset, options):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function`."
)
if options is None or options.experimental_fetch_to_device:
self._check_spec(dataset.element_spec)
return input_util.get_distributed_dataset(
dataset,
self._get_input_workers(options),
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync,
options=options,
replica_order=self._get_replica_order(),
)
def _distribute_datasets_from_function(self, dataset_fn, options):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
" `experimental_distribute_datasets_from_function` "
"of tf.distribute.MirroredStrategy")
input_workers = self._get_input_workers(options)
input_contexts = []
num_workers = input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=self._num_replicas_in_sync))
distributed_dataset = input_util.get_distributed_datasets_from_function(
dataset_fn,
input_workers,
input_contexts,
self._container_strategy(),
options=options,
replica_order=self._get_replica_order(),
)
# We can only check after the dataset_fn is called.
if options is None or options.experimental_fetch_to_device:
self._check_spec(distributed_dataset.element_spec)
return distributed_dataset
def _experimental_distribute_values_from_function(self, value_fn):
per_replica_values = []
for replica_id in range(self._num_replicas_in_sync):
per_replica_values.append(
value_fn(distribute_lib.ValueContext(replica_id,
self._num_replicas_in_sync)))
return distribute_utils.regroup(per_replica_values, always_wrap=True)
  # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
  # TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
  # a mechanism to infer the outputs of `fn`. Pending b/110550782.
  def _experimental_run_steps_on_iterator(
      self, fn, multi_worker_iterator, iterations, initial_loop_values=None):
    """Runs `fn` for `iterations` steps, replicated over the TPU devices.

    Args:
      fn: step function taking `(ctx, inputs)`; `ctx` is a `MultiStepContext`
        used to communicate last-step outputs back to the caller.
      multi_worker_iterator: iterator yielding per-replica inputs.
      iterations: number of steps to run inside the TPU training loop.
      initial_loop_values: optional structure of initial loop values;
        flattened and tiled per replica before being fed to the loop.

    Returns:
      The `MultiStepContext` with `run_op` and last-step outputs populated.
    """
    # Wrap `fn` for repeat.
    if initial_loop_values is None:
      initial_loop_values = {}
    initial_loop_values = nest.flatten(initial_loop_values)
    ctx = input_lib.MultiStepContext()

    def run_fn(inputs):
      """Single step on the TPU device."""
      fn_result = fn(ctx, inputs)
      flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
      if flat_last_step_outputs:
        # Make the last-step outputs depend on the step result so they are
        # not evaluated before the step's side effects have run.
        with ops.control_dependencies([fn_result]):
          return [array_ops.identity(f) for f in flat_last_step_outputs]
      else:
        return fn_result

    # We capture the control_flow_context at this point, before we run `fn`
    # inside a while_loop and TPU replicate context. This is useful in cases
    # where we might need to exit these contexts and get back to the outer
    # context to do some things, for e.g. create an op which should be
    # evaluated only once at the end of the loop on the host. One such usage
    # is in creating metrics' value op.
    self._outer_control_flow_context = (
        ops.get_default_graph()._get_control_flow_context())  # pylint: disable=protected-access

    def rewrite_fn(*args):
      """The rewritten step fn running on TPU."""
      del args

      per_replica_inputs = multi_worker_iterator.get_next()
      replicate_inputs = []
      for replica_id in range(self._num_replicas_in_sync):
        select_replica = lambda x: distribute_utils.select_replica(  # pylint: disable=g-long-lambda
            replica_id, x)  # pylint: disable=cell-var-from-loop
        replicate_inputs.append((nest.map_structure(
            select_replica, per_replica_inputs),))

      replicate_outputs = tpu.replicate(
          run_fn,
          replicate_inputs,
          device_assignment=self._device_assignment,
          xla_options=tpu.XLAOptions(use_spmd_for_xla_partitioning=self
                                     ._use_spmd_for_xla_partitioning))
      # If run_fn has tensor outputs, tpu.replicate returns a list of list. We
      # will flatten it in this case. If run_fn has no tensor outputs,
      # tpu.replicate returns a list of no_ops, we will keep the output as it
      # is.
      if isinstance(replicate_outputs[0], list):
        replicate_outputs = nest.flatten(replicate_outputs)

      return replicate_outputs

    # TODO(sourabhbajaj): The input to while loop should be based on the
    # output type of the step_fn
    assert isinstance(initial_loop_values, list)
    initial_loop_values = initial_loop_values * self._num_replicas_in_sync

    # Put the while loop op on TPU host 0.
    with ops.device(self._host_device):
      if self.steps_per_run == 1:
        replicate_outputs = rewrite_fn()
      else:
        replicate_outputs = training_loop.repeat(iterations, rewrite_fn,
                                                 initial_loop_values)

    del self._outer_control_flow_context
    ctx.run_op = control_flow_ops.group(replicate_outputs)

    if isinstance(replicate_outputs, list):
      # Filter out any ops from the outputs, typically this would be the case
      # when there were no tensor outputs.
      last_step_tensor_outputs = [
          x for x in replicate_outputs if not isinstance(x, ops.Operation)
      ]

      # Outputs are currently of the structure (flattened)
      # [output0_device0, output1_device0, output2_device0,
      #  output0_device1, output1_device1, output2_device1,
      #  ...]
      # Convert this to the following structure instead: (grouped by output)
      # [[output0_device0, output0_device1],
      #  [output1_device0, output1_device1],
      #  [output2_device0, output2_device1]]
      output_num = len(last_step_tensor_outputs) // self._num_replicas_in_sync
      last_step_tensor_outputs = [
          last_step_tensor_outputs[i::output_num] for i in range(output_num)
      ]
    else:
      # no tensors returned.
      last_step_tensor_outputs = []

    _set_last_step_outputs(ctx, last_step_tensor_outputs)
    return ctx
def _call_for_each_replica(self, fn, args, kwargs):
# TODO(jhseu): Consider making it so call_for_each_replica implies that
# we're in a tpu.rewrite(), and update TPUMirroredVariable accordingly.
with _TPUReplicaContext(self._container_strategy()):
return fn(*args, **kwargs)
  @contextlib.contextmanager
  def experimental_logical_device(self, logical_device_id):
    """Places variables and ops on the specified logical device.

    Args:
      logical_device_id: index of the logical core within a replica; must be
        smaller than the number of logical devices per replica.

    Yields:
      Nothing. While active, the chosen logical device is recorded on
      `self._logical_device_stack` (consulted during variable creation), and
      inside a TPU compilation context ops are placed on that core.

    Raises:
      ValueError: if `logical_device_id` is out of range.
    """
    num_logical_devices_per_replica = self._tpu_devices.shape[1]
    if logical_device_id >= num_logical_devices_per_replica:
      raise ValueError(
          "`logical_device_id` not in range (was {}, but there are only {} "
          "logical devices per replica).".format(
              logical_device_id, num_logical_devices_per_replica))
    self._logical_device_stack.append(logical_device_id)
    try:
      if tpu_util.enclosing_tpu_context() is None:
        # Outside a TPU compilation context there is no core to target; the
        # stack entry alone records the choice.
        yield
      else:
        with ops.device(tpu.core(logical_device_id)):
          yield
    finally:
      # Pop in `finally` so the stack stays balanced even if the body raises.
      self._logical_device_stack.pop()
  def _experimental_initialize_system(self):
    """Experimental method added to be used by Estimator.

    This is a private method only to be used by Estimator. Other frameworks
    should directly be calling `tf.tpu.experimental.initialize_tpu_system`
    """
    # Delegates to the public initializer using this strategy's resolver.
    tpu_cluster_resolver_lib.initialize_tpu_system(self._tpu_cluster_resolver)
  def _create_variable(self, next_creator, **kwargs):
    """Create a TPUMirroredVariable. See `DistributionStrategy.scope`.

    Depending on the kwargs and strategy configuration this builds either:
      * per-replica mirrored variables (one `tf.Variable` per replica), or
      * `TPUReplicatedVariable`s replicated across logical cores (XLA SPMD),
    and, when batch variable initialization is enabled, lazily initialized
    (`TPUUninitializedVariable`) flavors of both.

    Args:
      next_creator: the next variable creator in the chain.
      **kwargs: keyword arguments for variable creation. The extras
        `skip_mirrored_creator`, `custom_tpu_variable_creator` and
        `colocate_with` are consumed here.

    Returns:
      A mirrored variable wrapping the per-replica values.
    """
    # TODO(bfontain): Replace all uses of skip_mirrored_creator with
    # a trivial custom_tpu_variable_creator.
    if kwargs.pop("skip_mirrored_creator", False):
      return next_creator(**kwargs)

    custom_tpu_variable_creator = kwargs.pop(
        "custom_tpu_variable_creator", None
    )
    if custom_tpu_variable_creator is not None:
      return custom_tpu_variable_creator(next_creator, **kwargs)

    colocate_with = kwargs.pop("colocate_with", None)
    if colocate_with is None:
      # One component per replica, placed on the active logical device.
      devices = self._tpu_devices[:, self._logical_device_stack[-1]]
    elif isinstance(colocate_with, numpy_dataset.SingleDevice):
      with ops.device(colocate_with.device):
        return next_creator(**kwargs)
    else:
      devices = colocate_with._devices  # pylint: disable=protected-access

    num_replicas, num_cores_per_replica = self._tpu_devices.shape

    def _create_mirrored_tpu_variables(**kwargs):
      """Returns a list of `tf.Variable`s.

      The list contains `number_replicas` `tf.Variable`s and can be used to
      initialize a `TPUMirroredVariable`.

      Args:
        **kwargs: the keyword arguments for creating a variable
      """
      initial_value = None
      value_list = []
      for i, d in enumerate(devices):
        with ops.device(d):
          if i == 0:
            initial_value = kwargs["initial_value"]
            # Note: some v1 code expects variable initializer creation to happen
            # inside a init_scope.
            with maybe_init_scope():
              initial_value = initial_value() if callable(
                  initial_value) else initial_value

          if i > 0:
            # Give replicas meaningful distinct names:
            var0name = value_list[0].name.split(":")[0]
            # We append a / to variable names created on replicas with id > 0 to
            # ensure that we ignore the name scope and instead use the given
            # name as the absolute name of the variable.
            kwargs["name"] = "%s/replica_%d/" % (var0name, i)
          kwargs["initial_value"] = initial_value

          with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
            v = next_creator(**kwargs)

          assert not isinstance(v, tpu_values.TPUMirroredVariable)
          value_list.append(v)
      return value_list

    def _create_mirrored_tpu_replicated_variables(**kwargs):
      """Returns a list of `TPUReplicatedVariable`s.

      The list consists of `num_replicas` `TPUReplicatedVariable`s and can be
      used to initialize a `TPUMirroredVariable`. Each `TPUReplicatedVariable`
      contains a list of `tf.Variable`s which are replicated to
      `num_cores_per_replica` logical cores to enable XLA SPMD compilation.

      Args:
        **kwargs: the keyword arguments for creating a variable
      """
      initial_value = kwargs["initial_value"]
      # Note: some v1 code expects variable initializer creation to happen
      # inside a init_scope.
      with maybe_init_scope():
        initial_value = initial_value() if callable(
            initial_value) else initial_value

      mirrored_replicated_var_list = []
      for replica_id in range(num_replicas):
        replicated_var_list = []
        for logic_core_id in range(num_cores_per_replica):
          with ops.device(self._tpu_devices[replica_id][logic_core_id]):
            kwargs["initial_value"] = initial_value
            v = next_creator(**kwargs)
          replicated_var_list.append(v)
        replica_name = "{}/r:{}".format(kwargs["name"], replica_id)
        tpu_replicated_var = tpu_replicated_variable.TPUReplicatedVariable(
            variables=replicated_var_list, name=replica_name)
        mirrored_replicated_var_list.append(tpu_replicated_var)
      return mirrored_replicated_var_list

    # TODO(b/271767559): Consider either changing the innermost default_creator
    # to uninitialized_variable_creator or only swapping the next_creator with
    # uninitialized_variable_creator if the next_creator is the default_creator.
    def uninitialized_variable_creator(**kwargs):
      """Creates a lazily-initialized variable registered with the tracker."""
      uninitialized_variable = tpu_util.TPUUninitializedVariable(**kwargs)
      self.lazy_variable_tracker.add_uninitialized_var(
          uninitialized_variable
      )
      setattr(uninitialized_variable, "_lazy_scope", self.lazy_variable_tracker)
      return uninitialized_variable

    def _create_uninitialized_mirrored_tpu_variables(**kwargs):
      """Returns a list of `tf.Variable`s.

      The list contains `number_replicas` `tf.Variable`s and can be used to
      initialize a `TPUMirroredVariable`.

      Args:
        **kwargs: the keyword arguments for creating a variable
      """
      if kwargs.get("initial_value", None) is None:
        # Without an initial value there is nothing to defer; fall back to
        # the eager mirrored-variable path.
        return _create_mirrored_tpu_variables(**kwargs)
      value_list = []
      initial_value = None
      for i, d in enumerate(devices):
        with ops.device(d):
          if i == 0:
            initial_value = kwargs.get("initial_value", None)
            with maybe_init_scope():
              if initial_value is not None:
                if callable(initial_value):
                  initial_value = initial_value()
                initial_value = ops.convert_to_tensor(
                    initial_value, dtype=kwargs.get("dtype", None)
                )

          if i > 0:
            # Give replicas meaningful distinct names:
            var0name = value_list[0].name.split(":")[0]
            # We append a / to variable names created on replicas with id > 0 to
            # ensure that we ignore the name scope and instead use the given
            # name as the absolute name of the variable.
            kwargs["name"] = "%s/replica_%d/" % (var0name, i)
          kwargs["initial_value"] = initial_value
          if kwargs.get("dtype", None) is None:
            kwargs["dtype"] = kwargs["initial_value"].dtype
          if kwargs.get("shape", None) is None:
            kwargs["shape"] = kwargs["initial_value"].shape

          with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
            v = uninitialized_variable_creator(**kwargs)

          assert not isinstance(v, tpu_values.TPUMirroredVariable)
          value_list.append(v)
      return value_list

    def _create_uninitialized_mirrored_tpu_replicated_variables(**kwargs):
      """Returns a list of `TPUReplicatedVariable`s.

      The list consists of `num_replicas` `TPUReplicatedVariable`s and can be
      used to initialize a `TPUMirroredVariable`. Each `TPUReplicatedVariable`
      contains a list of `tf.Variable`s which are replicated to
      `num_cores_per_replica` logical cores to enable XLA SPMD compilation.

      Args:
        **kwargs: the keyword arguments for creating a variable
      """
      dtype = kwargs.get("dtype", None)
      shape = kwargs.get("shape", None)
      initial_value = kwargs.get("initial_value", None)
      if initial_value is None:
        # Without an initial value there is nothing to defer; fall back to
        # the eager replicated-variable path.
        return _create_mirrored_tpu_replicated_variables(**kwargs)
      with maybe_init_scope():
        if initial_value is not None:
          if callable(initial_value):
            initial_value = initial_value()
          initial_value = ops.convert_to_tensor(
              initial_value, dtype=dtype
          )
      kwargs["initial_value"] = initial_value
      if dtype is None:
        kwargs["dtype"] = kwargs["initial_value"].dtype
      if shape is None:
        kwargs["shape"] = kwargs["initial_value"].shape

      mirrored_replicated_var_list = []
      for replica_id in range(num_replicas):
        replicated_var_list = []
        for logic_core_id in range(num_cores_per_replica):
          with ops.device(self._tpu_devices[replica_id][logic_core_id]):
            v = uninitialized_variable_creator(**kwargs)
          replicated_var_list.append(v)
        replica_name = "{}/r:{}".format(kwargs["name"], replica_id)
        tpu_replicated_var = tpu_replicated_variable.TPUReplicatedVariable(
            variables=replicated_var_list, name=replica_name
        )
        mirrored_replicated_var_list.append(tpu_replicated_var)
      return mirrored_replicated_var_list

    # Select the creator: SPMD across logical cores needs replicated
    # variables; batch initialization swaps in the lazy creators.
    if not self._using_custom_device and enable_batch_variable_initialization():
      if self._use_spmd_for_xla_partitioning and num_cores_per_replica > 1:
        real_creator = _create_uninitialized_mirrored_tpu_replicated_variables
      else:
        real_creator = _create_uninitialized_mirrored_tpu_variables
      kwargs["experimental_batch_initialization"] = True
    else:
      if self._use_spmd_for_xla_partitioning and num_cores_per_replica > 1:
        real_creator = _create_mirrored_tpu_replicated_variables
      else:
        real_creator = _create_mirrored_tpu_variables
    mirrored_variable = distribute_utils.create_mirrored_variable(
        self._container_strategy(),
        real_creator,
        distribute_utils.TPU_VARIABLE_CLASS_MAPPING,
        distribute_utils.TPU_VARIABLE_POLICY_MAPPING,
        **kwargs,
    )
    if not self._using_custom_device and enable_batch_variable_initialization():
      setattr(mirrored_variable, "_lazy_scope", self.lazy_variable_tracker)
    return mirrored_variable
@property
def lazy_variable_tracker(self):
if not getattr(self, "_lazy_variable_tracker", None):
self._lazy_variable_tracker = tpu_util.LazyVariableTracker()
return self._lazy_variable_tracker
def _resource_creator_scope(self):
def lookup_creator(next_creator, *args, **kwargs):
host_to_table = collections.OrderedDict()
for host_device in self._device_input_worker_devices.keys():
with ops.device(host_device):
host_to_table[host_device] = next_creator(*args, **kwargs)
return values.PerWorkerResource(self._container_strategy(), host_to_table)
# TODO(b/194362531): Define creator(s) for other resources.
return ops.resource_creator_scope("StaticHashTable", lookup_creator)
  def _gather_to_implementation(self, value, destinations, axis, options):
    """Gathers `value` across replicas by concatenating along `axis`.

    Args:
      value: a `DistributedValues`; non-distributed values pass through.
      destinations: where the gathered result should be placed.
      axis: concatenation axis.
      options: unused here; part of the cross-replica API.

    Returns:
      The concatenated tensor, copied/broadcast to `destinations`.
    """
    if not isinstance(value, values.DistributedValues):
      return value

    value_list = list(value.values)
    # pylint: disable=protected-access
    if isinstance(
        value,
        values.DistributedVariable) and value._packed_variable is not None:
      value_list = list(
          value._packed_variable.on_device(d)
          for d in value._packed_variable.devices)
    # pylint: enable=protected-access

    # Currently XLA op by op mode has a limit for the number of inputs for a
    # single op, thus we break one `concat` op into a group of `concat` ops
    # to work around the constraint.
    if len(value.values) <= _XLA_OP_BY_OP_INPUTS_LIMIT:
      output = array_ops.concat(value_list, axis=axis)
    else:
      output = array_ops.concat(
          value_list[:_XLA_OP_BY_OP_INPUTS_LIMIT], axis=axis)
      # Each further chunk concatenates the running `output` with
      # (_XLA_OP_BY_OP_INPUTS_LIMIT - 1) more values, keeping every concat
      # call at or under the input limit.
      for i in range(_XLA_OP_BY_OP_INPUTS_LIMIT, len(value_list),
                     _XLA_OP_BY_OP_INPUTS_LIMIT - 1):
        output = array_ops.concat(
            [output] + value_list[i:i + _XLA_OP_BY_OP_INPUTS_LIMIT - 1],
            axis=axis)

    output = self._broadcast_output(destinations, output)
    return output
def _broadcast_output(self, destinations, output):
devices = cross_device_ops_lib.get_devices_from(destinations)
if len(devices) == 1:
# If necessary, copy to requested destination.
dest_canonical = device_util.canonicalize(devices[0])
host_canonical = device_util.canonicalize(self._host_device)
if dest_canonical != host_canonical:
with ops.device(dest_canonical):
output = array_ops.identity(output)
else:
output = cross_device_ops_lib.simple_broadcast(output, destinations)
return output
  def _reduce_to(self, reduce_op, value, destinations, options):
    """Reduces `value` across replicas (SUM or MEAN only).

    Inside a TPU compilation context this lowers to `cross_replica_sum`
    (pre-scaling by 1/num_replicas for MEAN). Outside it, the per-replica
    components are summed op-by-op on the host.

    Args:
      reduce_op: a `tf.distribute.ReduceOp`; only SUM and MEAN are supported
        inside the TPU context.
      value: a `DistributedValues`, tensor, or plain value.
      destinations: where the reduced result should be placed.
      options: unused here; part of the cross-replica API.

    Returns:
      The reduced value on `destinations`.

    Raises:
      NotImplementedError: for unsupported reduce ops inside the TPU context.
    """
    if (isinstance(value, values.DistributedValues) or
        tensor_util.is_tf_type(value)
       ) and tpu_util.enclosing_tpu_context() is not None:
      if reduce_op == reduce_util.ReduceOp.MEAN:
        # TODO(jhseu): Revisit once we support model-parallelism.
        # scalar_mul maintains the type of value: tensor or IndexedSlices.
        value = math_ops.scalar_mul((1./self._num_replicas_in_sync), value)
      elif reduce_op != reduce_util.ReduceOp.SUM:
        raise NotImplementedError(
            f"`reduce_op`={reduce_op} is not supported. Currently we only "
            "support ReduceOp.SUM and ReduceOp.MEAN in TPUStrategy.")
      return tpu_ops.cross_replica_sum(value)

    if not isinstance(value, values.DistributedValues):
      # This function handles reducing values that are not PerReplica or
      # Mirrored values. For example, the same value could be present on all
      # replicas in which case `value` would be a single value or value could
      # be 0.
      return cross_device_ops_lib.reduce_non_distributed_value(
          reduce_op, value, destinations, self._num_replicas_in_sync)

    value_list = value.values
    # pylint: disable=protected-access
    if isinstance(
        value,
        values.DistributedVariable) and value._packed_variable is not None:
      value_list = tuple(
          value._packed_variable.on_device(d)
          for d in value._packed_variable.devices)
    # pylint: enable=protected-access

    # Currently XLA op by op mode has a limit for the number of inputs for a
    # single op, thus we break one `add_n` op into a group of `add_n` ops to
    # work around the constraint.
    # TODO(cjfj): Detect when it is possible to use `cross_replica_sum`.
    if len(value.values) <= _XLA_OP_BY_OP_INPUTS_LIMIT:
      output = math_ops.add_n(value_list)
    else:
      # Accumulate in chunks so each add_n stays under the input limit.
      output = array_ops.zeros_like(value_list[0], dtype=value_list[0].dtype)
      for i in range(0, len(value_list), _XLA_OP_BY_OP_INPUTS_LIMIT):
        output += math_ops.add_n(value_list[i:i + _XLA_OP_BY_OP_INPUTS_LIMIT])

    if reduce_op == reduce_util.ReduceOp.MEAN:
      output *= (1. / len(value_list))

    output = self._broadcast_output(destinations, output)
    return output
  def _update(self, var, fn, args, kwargs, group):
    """Applies `fn` to `var` (or its per-replica components) as an update.

    Inside a TPU compilation context the update is a single call on `var`;
    in a `tf.function` the packed variable (when present) is used directly;
    otherwise the update is applied to each replica component in turn.

    Args:
      var: the TPU/resource variable being updated.
      fn: update function invoked as `fn(component, *args, **kwargs)`.
      args: positional args; per-replica values are selected per component.
      kwargs: keyword args; per-replica values are selected per component.
      group: whether to group per-replica updates into a single result.

    Returns:
      The (possibly grouped) update result(s).
    """
    assert isinstance(var, tpu_values.TPUVariableMixin) or isinstance(
        var, resource_variable_ops.BaseResourceVariable)
    if tpu_util.enclosing_tpu_context() is not None:
      if group:
        return fn(var, *args, **kwargs)
      else:
        return (fn(var, *args, **kwargs),)

    # Inside `tf.function`, we don't expand PackedVariable in python as it will
    # be expanded later during function instantiation in the runtime.
    packed_var = var._packed_variable  # pylint: disable=protected-access
    if packed_var is not None and not context.executing_eagerly():
      if group:
        return fn(packed_var, *args, **kwargs)
      else:
        return (fn(packed_var, *args, **kwargs),)

    # Otherwise, we revert to MirroredStrategy behavior and update the variable
    # on each replica directly.
    updates = []
    values_and_devices = []
    if packed_var is not None:
      for device in packed_var.devices:
        values_and_devices.append((packed_var, device))
    else:
      for value in var.values:
        values_and_devices.append((value, value.device))

    if (var.synchronization != variables_lib.VariableSynchronization.ON_READ and
        var.aggregation != variables_lib.VariableAggregation.NONE):
      distribute_utils.assert_mirrored(args)
      distribute_utils.assert_mirrored(kwargs)
    for i, value_and_device in enumerate(values_and_devices):
      value = value_and_device[0]
      device = value_and_device[1]
      name = "update_%d" % i
      with ops.device(device), \
           distribute_lib.UpdateContext(i), \
           ops.name_scope(name):
        # If args and kwargs are not mirrored, the value is returned as is.
        updates.append(
            fn(value, *distribute_utils.select_replica(i, args),
               **distribute_utils.select_replica(i, kwargs)))
    return distribute_utils.update_regroup(self, updates, group)
def read_var(self, var):
assert isinstance(var, tpu_values.TPUVariableMixin) or isinstance(
var, resource_variable_ops.BaseResourceVariable)
return var.read_value()
def value_container(self, value):
return value
def _broadcast_to(self, tensor, destinations):
del destinations
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
if tpu_util.enclosing_tpu_context() is not None:
broadcast_tensor = [tensor for _ in range(self._num_replicas_in_sync)]
result = tpu_ops.all_to_all(
broadcast_tensor,
concat_dimension=0,
split_dimension=0,
split_count=self._num_replicas_in_sync)
# This uses the broadcasted value from the first replica because the only
# caller of this is for ONLY_FIRST_REPLICA variables aggregation.
return result[0]
return tensor
@property
def num_hosts(self):
if self._device_assignment is None:
return self._tpu_metadata.num_hosts
return len(set([self._device_assignment.host_device(r)
for r in range(self._device_assignment.num_replicas)]))
@property
def num_replicas_per_host(self):
if self._device_assignment is None:
return self._tpu_metadata.num_of_cores_per_host
# TODO(sourabhbajaj): Remove this method we use inputs and remove infeed
# as the computation of num_replicas_per_host is not a constant
# when using device_assignment. This is a temporary workaround to support
# StatefulRNN as everything is 1 in that case.
# This method needs to take host_id as input for correct computation.
max_models_per_host = (self._tpu_metadata.num_of_cores_per_host //
self._device_assignment.num_cores_per_replica)
return min(self._device_assignment.num_replicas, max_models_per_host)
@property
def _num_replicas_in_sync(self):
if self._device_assignment is None:
return self._tpu_metadata.num_cores
return self._device_assignment.num_replicas
@property
def experimental_between_graph(self):
return False
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
@property
def worker_devices(self):
return tuple(self._tpu_devices[:, self._logical_device_stack[-1]])
@property
def parameter_devices(self):
return self.worker_devices
@property
def tpu_hardware_feature(self):
"""Return the `tf.tpu.experimental.HardwareFeature` class."""
return tpu_hardware_feature.HardwareFeature(
self._tpu_cluster_resolver.tpu_hardware_feature)
def non_slot_devices(self, var_list):
return self._host_device
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
with ops.device(self._host_device), distribute_lib.UpdateContext(None):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del cluster_spec, task_type, task_id
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
updated_config.isolate_session_state = True
cluster_spec = self._tpu_cluster_resolver.cluster_spec()
if cluster_spec:
updated_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
return updated_config
  # TODO(priyag): Delete this once all strategies use global batch size.
  @property
  def _global_batch_size(self):
    """`make_dataset_iterator` and `make_numpy_iterator` use global batch size.

    `make_input_fn_iterator` assumes per-replica batching.

    Returns:
      Boolean. Always True: this strategy batches globally.
    """
    return True
def tpu_run(self, fn, args, kwargs, options=None):
func = self._tpu_function_creator(fn, options)
return func(args, kwargs)
  def _tpu_function_creator(self, fn, options):
    """Returns a callable that runs `fn` replicated via `tpu.replicate`.

    In eager mode the result is wrapped in a `tf.function` and cached per
    `fn`, so repeated `run` calls reuse the traced computation.

    Args:
      fn: the per-replica user computation.
      options: run options controlling dynamic batch size, bucketizing
        padding and XLA options.

    Returns:
      A callable taking `(args, kwargs)` of distributed inputs.
    """
    if context.executing_eagerly() and fn in self._tpu_function_cache:
      return self._tpu_function_cache[fn]

    strategy = self._container_strategy()

    def tpu_function(args, kwargs):
      """TF Function used to replicate the user computation."""
      logging.vlog(1,
                   "`TPUStrategy.run` is called with [args: %s] [kwargs: %s]",
                   args, kwargs)

      if kwargs is None:
        kwargs = {}

      # Used to re-structure flattened output tensors from `tpu.replicate()`
      # into a structured format.
      result = [[]]

      def replicated_fn(replica_id, replica_args, replica_kwargs):
        """Wraps user function to provide replica ID and `Tensor` inputs."""
        with _TPUReplicaContext(strategy, replica_id_in_sync_group=replica_id):
          result[0] = fn(*replica_args, **replica_kwargs)
        return result[0]

      replicate_inputs = []  # By replica.
      for i in range(strategy.num_replicas_in_sync):
        replicate_inputs.append(
            [constant_op.constant(i, dtype=dtypes.int32),
             distribute_utils.select_replica(i, args),
             distribute_utils.select_replica(i, kwargs)])

      # Construct and pass `maximum_shapes` so that we could support dynamic
      # shapes using dynamic padder.
      if options.experimental_enable_dynamic_batch_size and replicate_inputs:
        maximum_shapes = []
        flattened_list = nest.flatten(replicate_inputs[0])
        for input_tensor in flattened_list:
          if tensor_util.is_tf_type(input_tensor):
            rank = input_tensor.shape.rank
          else:
            rank = np.ndim(input_tensor)
          if rank is None:
            raise ValueError(
                "input tensor {} to TPUStrategy.run() has unknown rank, "
                "which is not allowed".format(input_tensor))
          # Every dimension is treated as dynamic (None) for padding purposes.
          maximum_shape = tensor_shape.TensorShape([None] * rank)
          maximum_shapes.append(maximum_shape)
        maximum_shapes = nest.pack_sequence_as(replicate_inputs[0],
                                               maximum_shapes)
      else:
        maximum_shapes = None

      if options.experimental_bucketizing_dynamic_shape:
        padding_spec = tpu.PaddingSpec.POWER_OF_TWO
      else:
        padding_spec = None

      with strategy.scope():
        xla_options = options.experimental_xla_options or tpu.XLAOptions(
            use_spmd_for_xla_partitioning=self._use_spmd_for_xla_partitioning)
        replicate_outputs = tpu.replicate(
            replicated_fn,
            replicate_inputs,
            device_assignment=self._device_assignment,
            maximum_shapes=maximum_shapes,
            padding_spec=padding_spec,
            xla_options=xla_options)

      # Remove all no ops that may have been added during 'tpu.replicate()'
      filter_ops = lambda x: [o for o in x if not isinstance(o, ops.Operation)]
      if isinstance(result[0], list):
        result[0] = filter_ops(result[0])

      # Workaround for `tpu.replicate` behaviour when single `Tensor` returned.
      if result[0] is None or isinstance(result[0], ops.Operation):
        replicate_outputs = [None] * len(replicate_outputs)
      else:
        replicate_outputs = [
            nest.pack_sequence_as(result[0], filter_ops(nest.flatten(output)))
            for output in replicate_outputs
        ]

      return distribute_utils.regroup(replicate_outputs)

    if context.executing_eagerly():
      tpu_function = def_function.function(tpu_function)
      self._tpu_function_cache[fn] = tpu_function
    return tpu_function
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
# TPUStrategy has different distributed training structure that the whole
# cluster should be treated as single worker from higher-level (e.g. Keras)
# library's point of view.
# TODO(rchao): Revisit this as we design a fault-tolerance solution for
# TPUStrategy.
return False
def _get_local_replica_id(self, replica_id_in_sync_group):
return replica_id_in_sync_group
def _make_axis_nonnegative(axis, rank):
# Convert a potentially negative `axis` to a non-negative one.
if isinstance(axis, int):
if axis >= 0:
return axis
else:
return axis + rank
else:
return array_ops.where_v2(
math_ops.greater_equal(axis, 0),
axis,
axis + rank)
# List of Tensor dtypes supported by cross_replica_sum().
# NOTE(review): presumably consulted to validate inputs before lowering to
# `cross_replica_sum` — the consuming code is outside this chunk; confirm.
_DTYPES_SUPPORTED_BY_CROSS_REPLICA_SUM = (
    dtypes.bfloat16,
    dtypes.float16,
    dtypes.float32,
    dtypes.float64,
    dtypes.int32,
    dtypes.uint32,
)
| TPUExtended |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingTypeIs1.py | {
"start": 2294,
"end": 2475
} | class ____:
def __init__(self, x): ...
def func9[T: H](x: type[T], y: H) -> T:
if type(y) == x:
reveal_type(y, expected_text="H*")
return y
return x(y)
| H |
python | kamyu104__LeetCode-Solutions | Python/maximum-total-reward-using-operations-i.py | {
"start": 896,
"end": 1395
} | class ____(object):
def maxTotalReward(self, rewardValues):
"""
:type rewardValues: List[int]
:rtype: int
"""
mx = max(rewardValues)
dp = [False]*((mx-1)+1)
dp[0] = True
for v in sorted(set(rewardValues)):
for x in xrange(min(v, mx-v)):
dp[x+v] |= dp[x]
return mx+next(x for x in reversed(xrange(len(dp))) if dp[x])
# Time: O(nlogn + r^2), r = max(rewardValues)
# Space: O(r)
# sort, dp
| Solution3 |
python | sympy__sympy | sympy/logic/boolalg.py | {
"start": 17434,
"end": 23252
} | class ____(LatticeOp, BooleanFunction):
"""
Logical AND function.
It evaluates its arguments in order, returning false immediately
when an argument is false and true if they are all true.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import And
>>> x & y
x & y
Notes
=====
The ``&`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
and. Hence, ``And(a, b)`` and ``a & b`` will produce different results if
``a`` and ``b`` are integers.
>>> And(x, y).subs(x, 1)
y
"""
zero = false
identity = true
nargs = None
if TYPE_CHECKING:
def __new__(cls, *args: Boolean | bool, evaluate: bool | None = None) -> Boolean: # type: ignore
...
@property
def args(self) -> tuple[Boolean, ...]:
...
@classmethod
def _from_args(cls, args, is_commutative=None):
return super(AssocOp, cls).__new__(cls, *args)
@classmethod
def _new_args_filter(cls, args):
args = BooleanFunction.binary_check_and_simplify(*args)
args = LatticeOp._new_args_filter(args, And)
newargs = []
rel = set()
for x in ordered(args):
if x.is_Relational:
c = x.canonical
if c in rel:
continue
elif c.negated.canonical in rel:
return [false]
else:
rel.add(c)
newargs.append(x)
return newargs
def _eval_subs(self, old, new):
args = []
bad = None
for i in self.args:
try:
i = i.subs(old, new)
except TypeError:
# store TypeError
if bad is None:
bad = i
continue
if i == False:
return false
elif i != True:
args.append(i)
if bad is not None:
# let it raise
bad.subs(old, new)
# If old is And, replace the parts of the arguments with new if all
# are there
if isinstance(old, And):
old_set = set(old.args)
if old_set.issubset(args):
args = set(args) - old_set
args.add(new)
return self.func(*args)
def _eval_simplify(self, **kwargs):
from sympy.core.relational import Equality, Relational
from sympy.solvers.solveset import linear_coeffs
# standard simplify
rv = super()._eval_simplify(**kwargs)
if not isinstance(rv, And):
return rv
# simplify args that are equalities involving
# symbols so x == 0 & x == y -> x==0 & y == 0
Rel, nonRel = sift(rv.args, lambda i: isinstance(i, Relational),
binary=True)
if not Rel:
return rv
eqs, other = sift(Rel, lambda i: isinstance(i, Equality), binary=True)
measure = kwargs['measure']
if eqs:
ratio = kwargs['ratio']
reps = {}
sifted = {}
# group by length of free symbols
sifted = sift(ordered([
(i.free_symbols, i) for i in eqs]),
lambda x: len(x[0]))
eqs = []
nonlineqs = []
while 1 in sifted:
for free, e in sifted.pop(1):
x = free.pop()
if (e.lhs != x or x in e.rhs.free_symbols) and x not in reps:
try:
m, b = linear_coeffs(
Add(e.lhs, -e.rhs, evaluate=False), x)
enew = e.func(x, -b/m)
if measure(enew) <= ratio*measure(e):
e = enew
else:
eqs.append(e)
continue
except ValueError:
pass
if x in reps:
eqs.append(e.subs(x, reps[x]))
elif e.lhs == x and x not in e.rhs.free_symbols:
reps[x] = e.rhs
eqs.append(e)
else:
# x is not yet identified, but may be later
nonlineqs.append(e)
resifted = defaultdict(list)
for k in sifted:
for f, e in sifted[k]:
e = e.xreplace(reps)
f = e.free_symbols
resifted[len(f)].append((f, e))
sifted = resifted
for k in sifted:
eqs.extend([e for f, e in sifted[k]])
nonlineqs = [ei.subs(reps) for ei in nonlineqs]
other = [ei.subs(reps) for ei in other]
rv = rv.func(*([i.canonical for i in (eqs + nonlineqs + other)] + nonRel))
patterns = _simplify_patterns_and()
threeterm_patterns = _simplify_patterns_and3()
return _apply_patternbased_simplification(rv, patterns,
measure, false,
threeterm_patterns=threeterm_patterns)
def _eval_as_set(self):
from sympy.sets.sets import Intersection
return Intersection(*[arg.as_set() for arg in self.args])
def _eval_rewrite_as_Nor(self, *args, **kwargs):
return Nor(*[Not(arg) for arg in self.args])
def to_anf(self, deep=True):
if deep:
result = And._to_anf(*self.args, deep=deep)
return distribute_xor_over_and(result)
return self
| And |
python | pytorch__pytorch | test/cpp_extensions/open_registration_extension/torch_openreg/tests/test_device.py | {
"start": 156,
"end": 1290
} | class ____(TestCase):
def test_device_count(self):
count = torch.accelerator.device_count()
self.assertEqual(count, 2)
def test_device_switch(self):
torch.accelerator.set_device_index(1)
self.assertEqual(torch.accelerator.current_device_index(), 1)
torch.accelerator.set_device_index(0)
self.assertEqual(torch.accelerator.current_device_index(), 0)
def test_device_context(self):
device = torch.accelerator.current_device_index()
with torch.accelerator.device_index(None):
self.assertEqual(torch.accelerator.current_device_index(), device)
self.assertEqual(torch.accelerator.current_device_index(), device)
with torch.accelerator.device_index(1):
self.assertEqual(torch.accelerator.current_device_index(), 1)
self.assertEqual(torch.accelerator.current_device_index(), device)
def test_invalid_device_index(self):
with self.assertRaisesRegex(RuntimeError, "The device index is out of range"):
torch.accelerator.set_device_index(2)
if __name__ == "__main__":
run_tests()
| TestDevice |
python | sqlalchemy__sqlalchemy | examples/asyncio/async_orm_writeonly.py | {
"start": 1099,
"end": 2906
} | class ____(Base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
a_id: Mapped[int] = mapped_column(ForeignKey("a.id"))
data: Mapped[Optional[str]]
async def async_main():
"""Main program function."""
engine = create_async_engine(
"postgresql+asyncpg://scott:tiger@localhost/test",
echo=True,
)
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.create_all)
async_session = async_sessionmaker(engine, expire_on_commit=False)
async with async_session() as session:
async with session.begin():
# WriteOnlyMapped may be populated using any iterable,
# e.g. lists, sets, etc.
session.add_all(
[
A(bs=[B(), B()], data="a1"),
A(bs=[B()], data="a2"),
A(bs=[B(), B()], data="a3"),
]
)
stmt = select(A)
result = await session.scalars(stmt)
for a1 in result:
print(a1)
print(f"created at: {a1.create_date}")
# to iterate a collection, emit a SELECT statement
for b1 in await session.scalars(a1.bs.select()):
print(b1)
result = await session.stream(stmt)
async for a1 in result.scalars():
print(a1)
# similar using "streaming" (server side cursors)
async for b1 in (await session.stream(a1.bs.select())).scalars():
print(b1)
await session.commit()
result = await session.scalars(select(A).order_by(A.id))
a1 = result.first()
a1.data = "new data"
asyncio.run(async_main())
| B |
python | tiangolo__fastapi | fastapi/params.py | {
"start": 7879,
"end": 10929
} | class ____(Param): # type: ignore[misc]
in_ = ParamTypes.query
def __init__(
self,
default: Any = Undefined,
*,
default_factory: Union[Callable[[], Any], None] = _Unset,
annotation: Optional[Any] = None,
alias: Optional[str] = None,
alias_priority: Union[int, None] = _Unset,
# TODO: update when deprecating Pydantic v1, import these types
# validation_alias: str | AliasPath | AliasChoices | None
validation_alias: Union[str, None] = None,
serialization_alias: Union[str, None] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
pattern: Optional[str] = None,
regex: Annotated[
Optional[str],
deprecated(
"Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
),
] = None,
discriminator: Union[str, None] = None,
strict: Union[bool, None] = _Unset,
multiple_of: Union[float, None] = _Unset,
allow_inf_nan: Union[bool, None] = _Unset,
max_digits: Union[int, None] = _Unset,
decimal_places: Union[int, None] = _Unset,
examples: Optional[List[Any]] = None,
example: Annotated[
Optional[Any],
deprecated(
"Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
"although still supported. Use examples instead."
),
] = _Unset,
openapi_examples: Optional[Dict[str, Example]] = None,
deprecated: Union[deprecated, str, bool, None] = None,
include_in_schema: bool = True,
json_schema_extra: Union[Dict[str, Any], None] = None,
**extra: Any,
):
super().__init__(
default=default,
default_factory=default_factory,
annotation=annotation,
alias=alias,
alias_priority=alias_priority,
validation_alias=validation_alias,
serialization_alias=serialization_alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
pattern=pattern,
regex=regex,
discriminator=discriminator,
strict=strict,
multiple_of=multiple_of,
allow_inf_nan=allow_inf_nan,
max_digits=max_digits,
decimal_places=decimal_places,
deprecated=deprecated,
example=example,
examples=examples,
openapi_examples=openapi_examples,
include_in_schema=include_in_schema,
json_schema_extra=json_schema_extra,
**extra,
)
| Query |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP050.py | {
"start": 95,
"end": 142
} | class ____(
metaclass=type
#
):
...
| A |
python | dagster-io__dagster | python_modules/dagster/dagster/_grpc/types.py | {
"start": 1317,
"end": 3956
} | class ____(
NamedTuple(
"_ExecutionPlanSnapshotArgs",
[
("job_origin", RemoteJobOrigin),
("op_selection", Sequence[str]),
("run_config", Mapping[str, object]),
("step_keys_to_execute", Optional[Sequence[str]]),
("job_snapshot_id", str),
("known_state", Optional[KnownExecutionState]),
("instance_ref", Optional[InstanceRef]),
("asset_selection", Optional[AbstractSet[AssetKey]]),
("asset_check_selection", Optional[AbstractSet[AssetCheckKey]]),
("mode", str),
],
)
):
def __new__(
cls,
job_origin: RemoteJobOrigin,
op_selection: Sequence[str],
run_config: Mapping[str, object],
step_keys_to_execute: Optional[Sequence[str]],
job_snapshot_id: str,
known_state: Optional[KnownExecutionState] = None,
instance_ref: Optional[InstanceRef] = None,
asset_selection: Optional[AbstractSet[AssetKey]] = None,
asset_check_selection: Optional[AbstractSet[AssetCheckKey]] = None,
mode: str = DEFAULT_MODE_NAME,
):
return super().__new__(
cls,
job_origin=check.inst_param(job_origin, "job_origin", RemoteJobOrigin),
op_selection=check.opt_sequence_param(op_selection, "op_selection", of_type=str),
run_config=check.mapping_param(run_config, "run_config", key_type=str),
mode=check.str_param(mode, "mode"),
step_keys_to_execute=check.opt_nullable_sequence_param(
step_keys_to_execute, "step_keys_to_execute", of_type=str
),
job_snapshot_id=check.str_param(job_snapshot_id, "job_snapshot_id"),
known_state=check.opt_inst_param(known_state, "known_state", KnownExecutionState),
instance_ref=check.opt_inst_param(instance_ref, "instance_ref", InstanceRef),
asset_selection=check.opt_nullable_set_param(
asset_selection, "asset_selection", of_type=AssetKey
),
asset_check_selection=check.opt_nullable_set_param(
asset_check_selection, "asset_check_selection", of_type=AssetCheckKey
),
)
def _get_entry_point(origin: JobPythonOrigin):
return (
origin.repository_origin.entry_point
if origin.repository_origin.entry_point
else get_python_environment_entry_point(origin.executable_path)
)
@whitelist_for_serdes(
storage_field_names={
"job_origin": "pipeline_origin",
"run_id": "pipeline_run_id",
}
)
| ExecutionPlanSnapshotArgs |
python | scipy__scipy | scipy/stats/tests/test_sampling.py | {
"start": 23280,
"end": 27825
} | class ____:
# DAU fails on these probably because of large domains and small
# computation errors in PMF. Mean/SD match but chi-squared test fails.
basic_fail_dists = {
'nchypergeom_fisher', # numerical errors on tails
'nchypergeom_wallenius', # numerical errors on tails
'randint' # fails on 32-bit ubuntu
}
@pytest.mark.parametrize("distname, params", distdiscrete)
def test_basic(self, distname, params):
if distname in self.basic_fail_dists:
msg = ("DAU fails on these probably because of large domains "
"and small computation errors in PMF.")
pytest.skip(msg)
if not isinstance(distname, str):
dist = distname
else:
dist = getattr(stats, distname)
dist = dist(*params)
domain = dist.support()
if not np.isfinite(domain[1] - domain[0]):
# DAU only works with finite domain. So, skip the distributions
# with infinite tails.
pytest.skip("DAU only works with a finite domain.")
k = np.arange(domain[0], domain[1]+1)
pv = dist.pmf(k)
mv_ex = dist.stats('mv')
rng = DiscreteAliasUrn(dist, random_state=42)
check_discr_samples(rng, pv, mv_ex)
# Can't use bad_pmf_common here as we evaluate PMF early on to avoid
# unhelpful errors from UNU.RAN.
bad_pmf = [
# inf returned
(lambda x: np.inf, ValueError,
r"must contain only finite / non-nan values"),
# nan returned
(lambda x: np.nan, ValueError,
r"must contain only finite / non-nan values"),
# all zeros
(lambda x: 0.0, ValueError,
r"must contain at least one non-zero value"),
# Undefined name inside the function
(lambda x: foo, NameError, # type: ignore[name-defined] # noqa: F821
r"name 'foo' is not defined"),
# Returning wrong type.
(lambda x: [], ValueError,
r"setting an array element with a sequence."),
# probabilities < 0
(lambda x: -x, UNURANError,
r"50 : probability < 0"),
# signature of PMF wrong
(lambda: 1.0, TypeError,
r"takes 0 positional arguments but 1 was given")
]
@pytest.mark.parametrize("pmf, err, msg", bad_pmf)
def test_bad_pmf(self, pmf, err, msg):
class dist:
pass
dist.pmf = pmf
with pytest.raises(err, match=msg):
DiscreteAliasUrn(dist, domain=(1, 10))
@pytest.mark.parametrize("pv", [[0.18, 0.02, 0.8],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]])
def test_sampling_with_pv(self, pv):
pv = np.asarray(pv, dtype=np.float64)
rng = DiscreteAliasUrn(pv, random_state=123)
rng.rvs(100_000)
pv = pv / pv.sum()
variates = np.arange(0, len(pv))
# test if the first few moments match
m_expected = np.average(variates, weights=pv)
v_expected = np.average((variates - m_expected) ** 2, weights=pv)
mv_expected = m_expected, v_expected
check_discr_samples(rng, pv, mv_expected)
@pytest.mark.parametrize("pv, msg", bad_pv_common)
def test_bad_pv(self, pv, msg):
with pytest.raises(ValueError, match=msg):
DiscreteAliasUrn(pv)
# DAU doesn't support infinite tails. So, it should throw an error when
# inf is present in the domain.
inf_domain = [(-np.inf, np.inf), (np.inf, np.inf), (-np.inf, -np.inf),
(0, np.inf), (-np.inf, 0)]
@pytest.mark.parametrize("domain", inf_domain)
def test_inf_domain(self, domain):
with pytest.raises(ValueError, match=r"must be finite"):
DiscreteAliasUrn(stats.binom(10, 0.2), domain=domain)
def test_bad_urn_factor(self):
with pytest.warns(RuntimeWarning, match=r"relative urn size < 1."):
DiscreteAliasUrn([0.5, 0.5], urn_factor=-1)
def test_bad_args(self):
msg = (r"`domain` must be provided when the "
r"probability vector is not available.")
class dist:
def pmf(self, x):
return x
with pytest.raises(ValueError, match=msg):
DiscreteAliasUrn(dist)
def test_gh19359(self):
pv = special.softmax(np.ones((1533,)))
rng = DiscreteAliasUrn(pv, random_state=42)
# check the correctness
check_discr_samples(rng, pv, (1532 / 2, (1532**2 - 1) / 12),
rtol=5e-3)
| TestDiscreteAliasUrn |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/concrete_function_test.py | {
"start": 1370,
"end": 5501
} | class ____(test.TestCase, parameterized.TestCase):
def concrete_function_with_attrs(self, attrs):
func_graph = func_graph_module.FuncGraph("f")
return cf.ConcreteFunction.from_func_graph(func_graph, None, attrs=attrs)
@parameterized.parameters(
({"api_implements": True}, attr_value_pb2.AttrValue(b=True)),
({"api_implements": 1}, attr_value_pb2.AttrValue(i=1)),
({"api_implements": 1.0}, attr_value_pb2.AttrValue(f=1.0)),
(
{"api_implements": "test"},
attr_value_pb2.AttrValue(s=compat.as_bytes("test")),
),
)
def test_parses_func_attr_scalar_values(self, attrs, expected):
self.assertEqual(
self.concrete_function_with_attrs(attrs=attrs).function_def.attr[
"api_implements"
],
expected,
)
def test_parses_func_attr_list_values(self):
self.assertProtoEquals(
r"""
list {
s: 'test'
b: True
i: 1
f: 1.0
}
""",
self.concrete_function_with_attrs(
attrs={"api_implements": ["test", True, 1, 1.0]}
).function_def.attr["api_implements"],
)
def test_raises_value_error_for_invalid_attr(self):
with self.assertRaisesRegex(ValueError, "Attribute api_implements must be"):
self.concrete_function_with_attrs(attrs={"api_implements": None})
def test_generate_from_atomic(self):
@polymorphic_function.function
def add_dicts(dict_a, dict_b):
result = {}
for key in dict_a.keys():
result[key] = dict_a[key] + dict_b[key]
return result
dict_a = {
"tensor": constant_op.constant(1),
"variable": variables.Variable(2),
"ragged_tensor": ragged_tensor.RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]
),
"python_int": 4,
}
dict_b = {
"tensor": constant_op.constant(2),
"variable": variables.Variable(5),
"ragged_tensor": ragged_tensor.RaggedTensor.from_row_splits(
values=[4, 2, 4, 1, 6, 9, 3, 6], row_splits=[0, 4, 4, 7, 8, 8]
),
"python_int": 5,
}
original_concrete_fn = add_dicts.get_concrete_function(dict_a, dict_b)
# Get the atomic function and delete everything else.
atomic_fn = original_concrete_fn._inference_function
del add_dicts
del original_concrete_fn
# Regenerate the ConcreteFunction.
concrete_fn = cf.ConcreteFunction(atomic_fn)
result = concrete_fn(dict_a, dict_b)
# Call and check results.
self.assertEqual(result["tensor"].numpy(), 3)
self.assertEqual(result["variable"].numpy(), 7)
self.assertEqual(
result["ragged_tensor"].flat_values.numpy().tolist(),
[7, 3, 8, 2, 11, 18, 5, 12],
)
self.assertEqual(result["python_int"].numpy(), 9)
def test_generate_from_def(self):
@polymorphic_function.function
def add_dicts(dict_a, dict_b):
result = {}
for key in dict_a.keys():
result[key] = dict_a[key] + dict_b[key]
return result
dict_a = {
"tensor": constant_op.constant(1),
"variable": variables.Variable(2),
"python_int": 4,
}
dict_b = {
"tensor": constant_op.constant(2),
"variable": variables.Variable(5),
"python_int": 5,
}
original_concrete_fn = add_dicts.get_concrete_function(dict_a, dict_b)
# Get FunctionDef + FunctionType and delete everything else.
function_def = original_concrete_fn.function_def
function_type = original_concrete_fn.function_type
del add_dicts
del original_concrete_fn
# Regenerate the ConcreteFunction.
atomic_fn = atomic_function.from_function_def(function_def, function_type)
concrete_fn = cf.ConcreteFunction(atomic_fn)
result = concrete_fn(dict_a, dict_b)
# Call and check results.
self.assertEqual(result["tensor"].numpy(), 3)
self.assertEqual(result["variable"].numpy(), 7)
self.assertEqual(result["python_int"].numpy(), 9)
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
| ConcreteFunctionTest |
python | great-expectations__great_expectations | great_expectations/render/renderer/microsoft_teams_renderer.py | {
"start": 616,
"end": 8518
} | class ____(Renderer):
"""
Responsible for formatting validation results and data docs links into a Microsoft Teams webhook
message payload.
Relevant links/documentation:
* Payload schema: https://adaptivecards.io/explorer/
* Interactive UI editor: https://adaptivecards.io/designer/
"""
_MICROSOFT_TEAMS_CONTENT_TYPE = "application/vnd.microsoft.card.adaptive"
_MICROSOFT_TEAMS_SCHEMA_URL = "http://adaptivecards.io/schemas/adaptive-card.json"
_MICROSOFT_TEAMS_SCHEMA_VERSION = "1.5"
_GX_LOGO_URL = "https://www.greatexpectations.io/image/gx-logo-mark-400"
_SUCCESS_EMOJI = "✅"
_FAILURE_EMOJI = "❌"
_NO_VALUE_PLACEHOLDER = "--"
@override
def render(
self,
checkpoint_result: CheckpointResult,
data_docs_pages: dict[ValidationResultIdentifier, dict[str, str]] | None = None,
) -> dict:
blocks: list[dict] = []
blocks.append(
self._build_header_block(
checkpoint_name=checkpoint_result.name,
success=checkpoint_result.success or False,
run_time=checkpoint_result.run_id.run_time,
)
)
for idx, (validation_result_identifier, validation_result) in enumerate(
checkpoint_result.run_results.items(), start=1
):
validation_blocks = self._build_validation_result_blocks(
idx=idx,
total_validation_count=len(checkpoint_result.run_results),
validation_result_identifier=validation_result_identifier,
validation_result=validation_result,
)
blocks.extend(validation_blocks)
return self._build_payload(blocks=blocks, data_docs_pages=data_docs_pages)
def _build_header_block(
self, checkpoint_name: str, success: bool, run_time: dt.datetime
) -> dict:
success_text = (
f"Success {self._SUCCESS_EMOJI}" if success else f"Failure {self._FAILURE_EMOJI}"
)
return {
"type": "ColumnSet",
"columns": [
{
"type": "Column",
"items": [
{
"type": "Image",
"url": self._GX_LOGO_URL,
"altText": checkpoint_name,
"size": "small",
}
],
"width": "auto",
},
{
"type": "Column",
"items": [
{
"type": "TextBlock",
"weight": "bolder",
"text": f"{checkpoint_name} - {success_text}",
"wrap": True,
},
{
"type": "TextBlock",
"spacing": "None",
"text": f"Ran {run_time}",
"isSubtle": True,
"wrap": True,
},
],
"width": "stretch",
},
],
}
def _build_validation_result_blocks(
self,
idx: int,
total_validation_count: int,
validation_result_identifier: ValidationResultIdentifier,
validation_result: ExpectationSuiteValidationResult,
) -> list[dict]:
success = validation_result.success
success_text = self._SUCCESS_EMOJI if success else self._FAILURE_EMOJI
color = "Good" if success else "Attention"
blocks = [
{
"type": "TextBlock",
"text": f"Validation Result ({idx} of {total_validation_count}) {success_text}",
"wrap": True,
"weight": "Bolder",
"style": "columnHeader",
"isSubtle": True,
"color": color,
},
{
"type": "FactSet",
"facts": self._build_validation_result_facts(
validation_result_identifier=validation_result_identifier,
validation_result=validation_result,
),
"separator": True,
},
]
if validation_result.result_url:
blocks.append(
{
"type": "ActionSet",
"actions": [
{
"type": "Action.OpenUrl",
"title": "View Result",
"url": validation_result.result_url,
}
],
}
)
return blocks
def _build_validation_result_facts(
self,
validation_result_identifier: ValidationResultIdentifier,
validation_result: ExpectationSuiteValidationResult,
) -> list[dict]:
asset_name = validation_result.asset_name or self._NO_VALUE_PLACEHOLDER
suite_name = validation_result.suite_name
run_name = validation_result_identifier.run_id.run_name or self._NO_VALUE_PLACEHOLDER
n_checks_succeeded = validation_result.statistics["successful_expectations"]
n_checks = validation_result.statistics["evaluated_expectations"]
check_details_text = f"*{n_checks_succeeded}* of *{n_checks}* Expectations were met"
return [
{"title": "Data Asset name: ", "value": asset_name},
{"title": "Suite name: ", "value": suite_name},
{"title": "Run name: ", "value": run_name},
{"title": "Summary:", "value": check_details_text},
]
def _get_data_docs_page_links(
self, data_docs_pages: dict[ValidationResultIdentifier, dict[str, str]] | None
) -> list[str]:
links: list[str] = []
if not data_docs_pages:
return links
for data_docs_page in data_docs_pages.values():
for docs_link_key, docs_link in data_docs_page.items():
if docs_link_key == "class":
continue
links.append(docs_link)
return links
def _build_payload(
self,
blocks: list[dict],
data_docs_pages: dict[ValidationResultIdentifier, dict[str, str]] | None,
) -> dict:
data_docs_page_links = self._get_data_docs_page_links(data_docs_pages)
actions = [
# We would normally use Action.OpenUrl here, but Teams does not support
# non HTTP/HTTPS URI schemes. As Data Docs utilze file:///, we use Action.ShowCard
# to display the link in a card.
{
"type": "Action.ShowCard",
"title": "View Data Docs URL",
"card": {
"type": "AdaptiveCard",
"body": [
{
"type": "TextBlock",
"text": link,
"wrap": True,
},
],
"$schema": self._MICROSOFT_TEAMS_SCHEMA_URL,
},
}
for link in data_docs_page_links
]
return {
"type": "message",
"attachments": [
{
"contentType": self._MICROSOFT_TEAMS_CONTENT_TYPE,
"content": {
"type": "AdaptiveCard",
"$schema": self._MICROSOFT_TEAMS_SCHEMA_URL,
"version": self._MICROSOFT_TEAMS_SCHEMA_VERSION,
"body": blocks,
"actions": actions,
},
}
],
}
| MicrosoftTeamsRenderer |
python | wandb__wandb | wandb/vendor/pygments/lexers/capnproto.py | {
"start": 413,
"end": 2188
} | class ____(RegexLexer):
"""
For `Cap'n Proto <https://capnproto.org>`_ source.
.. versionadded:: 2.2
"""
name = 'Cap\'n Proto'
filenames = ['*.capnp']
aliases = ['capnp']
flags = re.MULTILINE | re.UNICODE
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'@[0-9a-zA-Z]*', Name.Decorator),
(r'=', Literal, 'expression'),
(r':', Name.Class, 'type'),
(r'\$', Name.Attribute, 'annotation'),
(r'(struct|enum|interface|union|import|using|const|annotation|'
r'extends|in|of|on|as|with|from|fixed)\b',
Keyword),
(r'[\w.]+', Name),
(r'[^#@=:$\w]+', Text),
],
'type': [
(r'[^][=;,(){}$]+', Name.Class),
(r'[[(]', Name.Class, 'parentype'),
default('#pop'),
],
'parentype': [
(r'[^][;()]+', Name.Class),
(r'[[(]', Name.Class, '#push'),
(r'[])]', Name.Class, '#pop'),
default('#pop'),
],
'expression': [
(r'[^][;,(){}$]+', Literal),
(r'[[(]', Literal, 'parenexp'),
default('#pop'),
],
'parenexp': [
(r'[^][;()]+', Literal),
(r'[[(]', Literal, '#push'),
(r'[])]', Literal, '#pop'),
default('#pop'),
],
'annotation': [
(r'[^][;,(){}=:]+', Name.Attribute),
(r'[[(]', Name.Attribute, 'annexp'),
default('#pop'),
],
'annexp': [
(r'[^][;()]+', Name.Attribute),
(r'[[(]', Name.Attribute, '#push'),
(r'[])]', Name.Attribute, '#pop'),
default('#pop'),
],
}
| CapnProtoLexer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/autoVariance1.py | {
"start": 3976,
"end": 4327
} | class ____[T]:
def __init__(self, value: T) -> None:
pass
def set_value(self, value: T) -> None:
pass
# This should generate an error based on variance.
vcontra1_1: ShouldBeContravariant1[float] = ShouldBeContravariant1[int](1)
vcontra1_2: ShouldBeContravariant1[int] = ShouldBeContravariant1[float](1.2)
| ShouldBeContravariant1 |
python | encode__django-rest-framework | rest_framework/exceptions.py | {
"start": 6087,
"end": 6449
} | class ____(APIException):
status_code = status.HTTP_406_NOT_ACCEPTABLE
default_detail = _('Could not satisfy the request Accept header.')
default_code = 'not_acceptable'
def __init__(self, detail=None, code=None, available_renderers=None):
self.available_renderers = available_renderers
super().__init__(detail, code)
| NotAcceptable |
python | django__django | tests/test_runner_apps/tagged/tests.py | {
"start": 74,
"end": 276
} | class ____(TestCase):
@tag("fast")
def test_single_tag(self):
self.assertEqual(1, 1)
@tag("fast", "core")
def test_multiple_tags(self):
self.assertEqual(1, 1)
| TaggedTestCase |
python | ApeWorX__ape | src/ape/cli/choices.py | {
"start": 13273,
"end": 14175
} | class ____(Enum):
"""
An enum representing output formats, such as ``TREE`` or ``YAML``.
Use this to select a subset of common output formats to use
when creating a :meth:`~ape.cli.choices.output_format_choice`.
"""
TREE = "TREE"
"""A rich text tree view of the data."""
YAML = "YAML"
"""A standard .yaml format of the data."""
def output_format_choice(options: Optional[list[OutputFormat]] = None) -> Choice:
"""
Returns a ``click.Choice()`` type for the given options.
Args:
options (list[:class:`~ape.choices.OutputFormat`], optional):
Limit the formats to accept. Defaults to allowing all formats.
Returns:
:class:`click.Choice`
"""
options = options or list(OutputFormat)
# Uses `str` form of enum for CLI choices.
return click.Choice([o.value for o in options], case_sensitive=False)
| OutputFormat |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_log.py | {
"start": 1561,
"end": 18805
} | class ____:
DAG_ID = "dag_for_testing_log_endpoint"
RUN_ID = "dag_run_id_for_testing_log_endpoint"
TASK_ID = "task_for_testing_log_endpoint"
MAPPED_TASK_ID = "mapped_task_for_testing_log_endpoint"
TRY_NUMBER = 1
default_time = "2020-06-10T20:00:00+00:00"
@pytest.fixture(autouse=True)
def setup_attrs(self, test_client, configure_loggers, dag_maker, session) -> None:
self.app = test_client.app
self.client = test_client
# Make sure that the configure_logging is not cached
self.old_modules = dict(sys.modules)
with dag_maker(self.DAG_ID, start_date=timezone.parse(self.default_time), session=session) as dag:
EmptyOperator(task_id=self.TASK_ID)
@task(task_id=self.MAPPED_TASK_ID)
def add_one(x: int):
return x + 1
add_one.expand(x=[1, 2, 3])
dr = dag_maker.create_dagrun(
run_id=self.RUN_ID,
run_type=DagRunType.SCHEDULED,
logical_date=timezone.parse(self.default_time),
start_date=timezone.parse(self.default_time),
)
for ti in dr.task_instances:
ti.try_number = 1
ti.hostname = "localhost"
session.merge(ti)
dag.clear()
for ti in dr.task_instances:
ti.try_number = 2
ti.id = str(uuid7())
ti.hostname = "localhost"
session.merge(ti)
# Commit changes to avoid locks
session.commit()
# Add dummy dag for checking picking correct log with same task_id and different dag_id case.
with dag_maker(
f"{self.DAG_ID}_copy", start_date=timezone.parse(self.default_time), session=session
) as dummy_dag:
EmptyOperator(task_id=self.TASK_ID)
dr2 = dag_maker.create_dagrun(
run_id=self.RUN_ID,
run_type=DagRunType.SCHEDULED,
logical_date=timezone.parse(self.default_time),
start_date=timezone.parse(self.default_time),
)
for ti in dr2.task_instances:
ti.try_number = 1
ti.hostname = "localhost"
session.merge(ti)
dummy_dag.clear()
for ti in dr2.task_instances:
ti.try_number = 2
ti.id = str(uuid7())
ti.hostname = "localhost"
session.merge(ti)
# Final commit to ensure all changes are persisted
session.commit()
dagbag = create_dag_bag()
test_client.app.dependency_overrides[dag_bag_from_app] = lambda: dagbag
@pytest.fixture
def configure_loggers(self, tmp_path, create_log_template):
self.log_dir = tmp_path
# TASK_ID
dir_path = tmp_path / f"dag_id={self.DAG_ID}" / f"run_id={self.RUN_ID}" / f"task_id={self.TASK_ID}"
dir_path.mkdir(parents=True)
log = dir_path / "attempt=1.log"
log.write_text("Log for testing.")
# try number 2
log = dir_path / "attempt=2.log"
log.write_text("Log for testing 2.")
# MAPPED_TASK_ID
for map_index in range(3):
dir_path = (
tmp_path
/ f"dag_id={self.DAG_ID}"
/ f"run_id={self.RUN_ID}"
/ f"task_id={self.MAPPED_TASK_ID}"
/ f"map_index={map_index}"
)
dir_path.mkdir(parents=True)
log = dir_path / "attempt=1.log"
log.write_text("Log for testing.")
# try number 2
log = dir_path / "attempt=2.log"
log.write_text("Log for testing 2.")
with mock.patch(
"airflow.utils.log.file_task_handler.FileTaskHandler.local_base",
new_callable=mock.PropertyMock,
create=True,
) as local_base:
local_base.return_value = self.log_dir
yield
def teardown_method(self):
clear_db_runs()
@pytest.mark.parametrize("try_number", [1, 2])
def test_should_respond_200_json(self, try_number):
key = self.app.state.secret_key
serializer = URLSafeSerializer(key)
token = serializer.dumps({"download_logs": False})
response = self.client.get(
f"/dags/{self.DAG_ID}/dagRuns/{self.RUN_ID}/taskInstances/{self.TASK_ID}/logs/{try_number}",
params={"token": token},
headers={"Accept": "application/json"},
)
expected_filename = f"{self.log_dir}/dag_id={self.DAG_ID}/run_id={self.RUN_ID}/task_id={self.TASK_ID}/attempt={try_number}.log"
log_content = "Log for testing." if try_number == 1 else "Log for testing 2."
assert response.status_code == 200, response.json()
resp_contnt = response.json()["content"]
assert expected_filename in resp_contnt[0]["sources"]
assert log_content in resp_contnt[2]["event"]
assert response.json()["continuation_token"] is None
assert response.status_code == 200
@pytest.mark.parametrize(
("request_url", "expected_filename", "extra_query_string", "try_number"),
[
(
f"/dags/{DAG_ID}/dagRuns/{RUN_ID}/taskInstances/{TASK_ID}/logs/1",
f"LOG_DIR/dag_id={DAG_ID}/run_id={RUN_ID}/task_id={TASK_ID}/attempt=1.log",
{},
1,
),
(
f"/dags/{DAG_ID}/dagRuns/{RUN_ID}/taskInstances/{MAPPED_TASK_ID}/logs/1",
f"LOG_DIR/dag_id={DAG_ID}/run_id={RUN_ID}/task_id={MAPPED_TASK_ID}/map_index=0/attempt=1.log",
{"map_index": 0},
1,
),
# try_number 2
(
f"/dags/{DAG_ID}/dagRuns/{RUN_ID}/taskInstances/{TASK_ID}/logs/2",
f"LOG_DIR/dag_id={DAG_ID}/run_id={RUN_ID}/task_id={TASK_ID}/attempt=2.log",
{},
2,
),
(
f"/dags/{DAG_ID}/dagRuns/{RUN_ID}/taskInstances/{MAPPED_TASK_ID}/logs/2",
f"LOG_DIR/dag_id={DAG_ID}/run_id={RUN_ID}/task_id={MAPPED_TASK_ID}/map_index=0/attempt=2.log",
{"map_index": 0},
2,
),
],
)
def test_should_respond_200_ndjson(self, request_url, expected_filename, extra_query_string, try_number):
expected_filename = expected_filename.replace("LOG_DIR", str(self.log_dir))
key = self.app.state.secret_key
serializer = URLSafeSerializer(key)
token = serializer.dumps({"download_logs": True})
response = self.client.get(
request_url,
params={"token": token, **extra_query_string},
headers={"Accept": "application/x-ndjson"},
)
assert response.status_code == 200
log_content = "Log for testing." if try_number == 1 else "Log for testing 2."
resp_content = response.content.decode("utf-8")
assert expected_filename in resp_content
assert log_content in resp_content
# check content is in ndjson format
for line in resp_content.splitlines():
log = json.loads(line)
assert "event" in log
assert "timestamp" in log
@pytest.mark.parametrize(
("request_url", "expected_filename", "extra_query_string", "try_number"),
[
(
f"/dags/{DAG_ID}/dagRuns/{RUN_ID}/taskInstances/{TASK_ID}/logs/1",
f"LOG_DIR/dag_id={DAG_ID}/run_id={RUN_ID}/task_id={TASK_ID}/attempt=1.log",
{},
1,
),
(
f"/dags/{DAG_ID}/dagRuns/{RUN_ID}/taskInstances/{MAPPED_TASK_ID}/logs/1",
f"LOG_DIR/dag_id={DAG_ID}/run_id={RUN_ID}/task_id={MAPPED_TASK_ID}/map_index=0/attempt=1.log",
{"map_index": 0},
1,
),
(
f"/dags/{DAG_ID}/dagRuns/{RUN_ID}/taskInstances/{TASK_ID}/logs/2",
f"LOG_DIR/dag_id={DAG_ID}/run_id={RUN_ID}/task_id={TASK_ID}/attempt=2.log",
{},
2,
),
(
f"/dags/{DAG_ID}/dagRuns/{RUN_ID}/taskInstances/{MAPPED_TASK_ID}/logs/2",
f"LOG_DIR/dag_id={DAG_ID}/run_id={RUN_ID}/task_id={MAPPED_TASK_ID}/map_index=0/attempt=2.log",
{"map_index": 0},
2,
),
],
)
def test_get_logs_of_removed_task(self, request_url, expected_filename, extra_query_string, try_number):
expected_filename = expected_filename.replace("LOG_DIR", str(self.log_dir))
# Recreate DAG without tasks
dagbag = create_dag_bag()
dag = DAG(self.DAG_ID, schedule=None, start_date=timezone.parse(self.default_time))
sync_dag_to_db(dag)
self.app.dependency_overrides[dag_bag_from_app] = lambda: dagbag
key = self.app.state.secret_key
serializer = URLSafeSerializer(key)
token = serializer.dumps({"download_logs": True})
response = self.client.get(
request_url,
params={"token": token, **extra_query_string},
headers={"Accept": "application/x-ndjson"},
)
assert response.status_code == 200
log_content = "Log for testing." if try_number == 1 else "Log for testing 2."
resp_content = response.content.decode("utf-8")
assert expected_filename in resp_content
assert log_content in resp_content
@pytest.mark.parametrize("try_number", [1, 2])
def test_get_logs_response_with_ti_equal_to_none(self, try_number):
key = self.app.state.secret_key
serializer = URLSafeSerializer(key)
token = serializer.dumps({"download_logs": True})
response = self.client.get(
f"/dags/{self.DAG_ID}/dagRuns/{self.RUN_ID}/taskInstances/Invalid-Task-ID/logs/{try_number}",
params={"token": token},
)
assert response.status_code == 404
assert response.json() == {"detail": "TaskInstance not found"}
@pytest.mark.parametrize("try_number", [1, 2])
def test_get_logs_with_metadata_as_download_large_file(self, try_number):
from airflow.utils.log.file_task_handler import StructuredLogMessage
with mock.patch("airflow.utils.log.file_task_handler.FileTaskHandler.read") as read_mock:
first_return = (convert_list_to_stream([StructuredLogMessage(event="", message="1st line")]), {})
second_return = (
convert_list_to_stream([StructuredLogMessage(event="", message="2nd line")]),
{"end_of_log": False},
)
third_return = (
convert_list_to_stream([StructuredLogMessage(event="", message="3rd line")]),
{"end_of_log": True},
)
fourth_return = (
convert_list_to_stream([StructuredLogMessage(event="", message="should never be read")]),
{"end_of_log": True},
)
read_mock.side_effect = [first_return, second_return, third_return, fourth_return]
response = self.client.get(
f"/dags/{self.DAG_ID}/dagRuns/{self.RUN_ID}/"
f"taskInstances/{self.TASK_ID}/logs/{try_number}?full_content=True",
headers={"Accept": "application/x-ndjson"},
)
assert "1st line" in response.content.decode("utf-8")
assert "2nd line" in response.content.decode("utf-8")
assert "3rd line" in response.content.decode("utf-8")
assert "should never be read" not in response.content.decode("utf-8")
@pytest.mark.parametrize("try_number", [1, 2])
@mock.patch("airflow.api_fastapi.core_api.routes.public.log.TaskLogReader")
def test_get_logs_for_handler_without_read_method(self, mock_log_reader, try_number):
type(mock_log_reader.return_value).supports_read = PropertyMock(return_value=False)
key = self.app.state.secret_key
serializer = URLSafeSerializer(key)
token = serializer.dumps({"download_logs": False})
# check guessing
response = self.client.get(
f"/dags/{self.DAG_ID}/dagRuns/{self.RUN_ID}/taskInstances/{self.TASK_ID}/logs/{try_number}",
params={"token": token},
headers={"Content-Type": "application/jso"},
)
assert response.status_code == 400
assert "Task log handler does not support read logs." in response.content.decode("utf-8")
def test_bad_signature_raises(self):
token = {"download_logs": False}
response = self.client.get(
f"/dags/{self.DAG_ID}/dagRuns/{self.RUN_ID}/taskInstances/{self.TASK_ID}/logs/1",
params={"token": token},
headers={"Accept": "application/json"},
)
# assert response.status_code == 400
assert response.json() == {"detail": "Bad Signature. Please use only the tokens provided by the API."}
def test_should_raises_401_unauthenticated(self, unauthenticated_test_client):
response = unauthenticated_test_client.get(
f"/dags/{self.DAG_ID}/dagRuns/{self.RUN_ID}/taskInstances/{self.TASK_ID}/logs/1",
headers={"Accept": "application/json"},
)
assert response.status_code == 401
def test_should_raises_403_unauthorized(self, unauthorized_test_client):
response = unauthorized_test_client.get(
f"/dags/{self.DAG_ID}/dagRuns/{self.RUN_ID}/taskInstances/{self.TASK_ID}/logs/1",
headers={"Accept": "application/json"},
)
assert response.status_code == 403
def test_raises_404_for_invalid_dag_run_id(self):
response = self.client.get(
f"/dags/{self.DAG_ID}/dagRuns/NO_DAG_RUN/" # invalid run_id
f"taskInstances/{self.TASK_ID}/logs/1?",
headers={"Accept": "application/json"},
)
assert response.status_code == 404
assert response.json() == {"detail": "TaskInstance not found"}
def test_should_raise_404_when_missing_map_index_param_for_mapped_task(self):
key = self.app.state.secret_key
serializer = URLSafeSerializer(key)
token = serializer.dumps({"download_logs": True})
response = self.client.get(
f"/dags/{self.DAG_ID}/dagRuns/{self.RUN_ID}/taskInstances/{self.MAPPED_TASK_ID}/logs/1",
params={"token": token},
headers={"Accept": "application/x-ndjson"},
)
assert response.status_code == 404
assert response.json()["detail"] == "TaskInstance not found"
def test_should_raise_404_when_filtering_on_map_index_for_unmapped_task(self):
key = self.app.state.secret_key
serializer = URLSafeSerializer(key)
token = serializer.dumps({"download_logs": True})
response = self.client.get(
f"/dags/{self.DAG_ID}/dagRuns/{self.RUN_ID}/taskInstances/{self.TASK_ID}/logs/1",
params={"token": token, "map_index": 0},
headers={"Accept": "application/x-ndjson"},
)
assert response.status_code == 404
assert response.json()["detail"] == "TaskInstance not found"
@pytest.mark.parametrize(
("supports_external_link", "task_id", "expected_status", "expected_response", "mock_external_url"),
[
(
True,
"task_for_testing_log_endpoint",
200,
{"url": "https://external-logs.example.com/log/123"},
True,
),
(
False,
"task_for_testing_log_endpoint",
400,
{"detail": "Task log handler does not support external logs."},
False,
),
(True, "INVALID_TASK", 404, {"detail": "TaskInstance not found"}, False),
],
ids=[
"external_links_supported_task_exists",
"external_links_not_supported",
"external_links_supported_task_not_found",
],
)
def test_get_external_log_url(
self, supports_external_link, task_id, expected_status, expected_response, mock_external_url
):
with (
mock.patch(
"airflow.utils.log.log_reader.TaskLogReader.supports_external_link",
new_callable=mock.PropertyMock,
return_value=supports_external_link,
),
mock.patch("airflow.utils.log.log_reader.TaskLogReader.log_handler") as mock_log_handler,
):
url = f"/dags/{self.DAG_ID}/dagRuns/{self.RUN_ID}/taskInstances/{task_id}/externalLogUrl/{self.TRY_NUMBER}"
if mock_external_url:
mock_log_handler.get_external_log_url.return_value = (
"https://external-logs.example.com/log/123"
)
response = self.client.get(url)
if expected_status == 200:
mock_log_handler.get_external_log_url.assert_called_once()
else:
mock_log_handler.get_external_log_url.assert_not_called()
assert response.status_code == expected_status
assert response.json() == expected_response
| TestTaskInstancesLog |
python | gevent__gevent | src/gevent/testing/testrunner.py | {
"start": 3220,
"end": 8497
} | class ____(object):
TIME_WAIT_REAP = 0.1
TIME_WAIT_SPAWN = 0.05
def __init__(self,
tests,
*,
allowed_return_codes=(),
configured_failing_tests=(),
failfast=False,
quiet=False,
configured_run_alone_tests=(),
worker_count=DEFAULT_NWORKERS,
second_chance=False):
"""
:keyword allowed_return_codes: Return codes other than
0 that are counted as a success. Needed because some versions
of Python give ``unittest`` weird return codes.
:keyword quiet: Set to True or False to explicitly choose. Set to
`None` to use the default, which may come from the environment variable
``GEVENTTEST_QUIET``.
"""
self._tests = tests
self._configured_failing_tests = configured_failing_tests
self._quiet = quiet
self._configured_run_alone_tests = configured_run_alone_tests
assert not (failfast and second_chance)
self._failfast = failfast
self._second_chance = second_chance
self.results = ResultCollector()
self.results.total = len(self._tests)
self._running_jobs = []
self._worker_count = min(len(tests), worker_count) or 1
self._allowed_return_codes = allowed_return_codes
def _run_one(self, cmd, **kwargs):
kwargs['allowed_return_codes'] = self._allowed_return_codes
if self._quiet is not None:
kwargs['quiet'] = self._quiet
result = util.run(cmd, **kwargs)
if not result and self._second_chance:
self.results <<= result
util.log("> %s", result.name, color='warning')
result = util.run(cmd, **kwargs)
if not result and self._failfast:
# Under Python 3.9 (maybe older versions?), raising the
# SystemExit here (a background thread belonging to the
# pool) doesn't seem to work well. It gets stuck waiting
# for a lock? The job never shows up as finished.
raise FailFast(cmd)
self.results += result
def _reap(self):
"Clean up the list of running jobs, returning how many are still outstanding."
for r in self._running_jobs[:]:
if not r.ready():
continue
if r.successful():
self._running_jobs.remove(r)
else:
r.get()
sys.exit('Internal error in testrunner.py: %r' % (r, ))
return len(self._running_jobs)
def _reap_all(self):
util.log("Reaping %d jobs", len(self._running_jobs), color="debug")
while self._running_jobs:
if not self._reap():
break
util.sleep(self.TIME_WAIT_REAP)
def _spawn(self, pool, cmd, options):
while True:
if self._reap() < self._worker_count:
job = pool.apply_async(self._run_one, (cmd, ), options or {})
self._running_jobs.append(job)
return
util.sleep(self.TIME_WAIT_SPAWN)
def __call__(self):
util.log("Running tests in parallel with concurrency %s %s." % (
self._worker_count,
util._colorize('number', '(concurrency available: %d)' % AVAIL_NWORKERS)
),)
# Setting global state, in theory we can be used multiple times.
# This is fine as long as we are single threaded and call these
# sequentially.
util.BUFFER_OUTPUT = self._worker_count > 1 or self._quiet
start = util.perf_counter()
try:
self._run_tests()
except KeyboardInterrupt:
self._report(util.perf_counter() - start, exit=False)
util.log('(partial results)\n')
raise
except:
traceback.print_exc()
raise
self._reap_all()
self._report(util.perf_counter() - start, exit=True)
def _run_tests(self):
"Runs the tests, produces no report."
run_alone = []
tests = self._tests
pool = ThreadPool(self._worker_count)
try:
for cmd, options in tests:
options = options or {}
if matches(self._configured_run_alone_tests, cmd):
run_alone.append((cmd, options))
else:
self._spawn(pool, cmd, options)
pool.close()
pool.join()
if run_alone:
util.log("Running tests marked standalone")
for cmd, options in run_alone:
self._run_one(cmd, **options)
except KeyboardInterrupt:
try:
util.log('Waiting for currently running to finish...')
self._reap_all()
except KeyboardInterrupt:
pool.terminate()
raise
except:
pool.terminate()
raise
def _report(self, elapsed_time, exit=False):
results = self.results
report(
results,
exit=exit,
took=elapsed_time,
configured_failing_tests=self._configured_failing_tests,
)
| Runner |
python | celery__celery | celery/app/task.py | {
"start": 2079,
"end": 5155
} | class ____:
"""Task request variables (Task.request)."""
_children = None # see property
_protected = 0
args = None
callbacks = None
called_directly = True
chain = None
chord = None
correlation_id = None
delivery_info = None
errbacks = None
eta = None
expires = None
group = None
group_index = None
headers = None
hostname = None
id = None
ignore_result = False
is_eager = False
kwargs = None
logfile = None
loglevel = None
origin = None
parent_id = None
properties = None
retries = 0
reply_to = None
replaced_task_nesting = 0
root_id = None
shadow = None
taskset = None # compat alias to group
timelimit = None
utc = None
stamped_headers = None
stamps = None
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
if self.headers is None:
self.headers = self._get_custom_headers(*args, **kwargs)
def _get_custom_headers(self, *args, **kwargs):
headers = {}
headers.update(*args, **kwargs)
celery_keys = {*Context.__dict__.keys(), 'lang', 'task', 'argsrepr', 'kwargsrepr', 'compression'}
for key in celery_keys:
headers.pop(key, None)
if not headers:
return None
return headers
def update(self, *args, **kwargs):
return self.__dict__.update(*args, **kwargs)
def clear(self):
return self.__dict__.clear()
def get(self, key, default=None):
return getattr(self, key, default)
def __repr__(self):
return f'<Context: {vars(self)!r}>'
def as_execution_options(self):
limit_hard, limit_soft = self.timelimit or (None, None)
execution_options = {
'task_id': self.id,
'root_id': self.root_id,
'parent_id': self.parent_id,
'group_id': self.group,
'group_index': self.group_index,
'shadow': self.shadow,
'chord': self.chord,
'chain': self.chain,
'link': self.callbacks,
'link_error': self.errbacks,
'expires': self.expires,
'soft_time_limit': limit_soft,
'time_limit': limit_hard,
'headers': self.headers,
'retries': self.retries,
'reply_to': self.reply_to,
'replaced_task_nesting': self.replaced_task_nesting,
'origin': self.origin,
}
if hasattr(self, 'stamps') and hasattr(self, 'stamped_headers'):
if self.stamps is not None and self.stamped_headers is not None:
execution_options['stamped_headers'] = self.stamped_headers
for k, v in self.stamps.items():
execution_options[k] = v
return execution_options
@property
def children(self):
# children must be an empty list for every thread
if self._children is None:
self._children = []
return self._children
@abstract.CallableTask.register
| Context |
python | apache__airflow | airflow-core/src/airflow/models/backfill.py | {
"start": 2186,
"end": 2346
} | class ____(AirflowException):
"""
Raised when attempting to create backfill for a DAG with no schedule.
:meta private:
"""
| DagNoScheduleException |
python | apache__airflow | airflow-core/src/airflow/models/dagbag.py | {
"start": 4432,
"end": 6686
} | class ____(Base):
"""Model to store the dag parsing requests that will be prioritized when parsing files."""
__tablename__ = "dag_priority_parsing_request"
# Adding a unique constraint to fileloc results in the creation of an index and we have a limitation
# on the size of the string we can use in the index for MySQL DB. We also have to keep the fileloc
# size consistent with other tables. This is a workaround to enforce the unique constraint.
id: Mapped[str] = mapped_column(
String(32), primary_key=True, default=generate_md5_hash, onupdate=generate_md5_hash
)
bundle_name: Mapped[str] = mapped_column(StringID(), nullable=False)
# The location of the file containing the DAG object
# Note: Do not depend on fileloc pointing to a file; in the case of a
# packaged DAG, it will point to the subpath of the DAG within the
# associated zip.
relative_fileloc: Mapped[str] = mapped_column(String(2000), nullable=False)
def __init__(self, bundle_name: str, relative_fileloc: str) -> None:
super().__init__()
self.bundle_name = bundle_name
self.relative_fileloc = relative_fileloc
def __repr__(self) -> str:
return f"<DagPriorityParsingRequest: bundle_name={self.bundle_name} relative_fileloc={self.relative_fileloc}>"
def __getattr__(name: str) -> Any:
"""
Backwards-compat shim: importing DagBag from airflow.models.dagbag is deprecated.
Emits DeprecationWarning and re-exports DagBag from airflow.dag_processing.dagbag
to preserve compatibility for external callers.
"""
if name in {"DagBag", "FileLoadStat", "timeout"}:
import warnings
from airflow.utils.deprecation_tools import DeprecatedImportWarning
warnings.warn(
f"Importing {name} from airflow.models.dagbag is deprecated and will be removed in a future "
"release. Please import from airflow.dag_processing.dagbag instead.",
DeprecatedImportWarning,
stacklevel=2,
)
# Import on demand to avoid import-time side effects
from airflow.dag_processing import dagbag as _dagbag
return getattr(_dagbag, name)
raise AttributeError(name)
| DagPriorityParsingRequest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 5496,
"end": 5682
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "ExecutionStepSkippedEvent"
| GrapheneExecutionStepSkippedEvent |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/ColorGradientPlots.py | {
"start": 220,
"end": 1622
} | class ____(object):
""" source of buffered demonstration data """
def __init__(self, sample_rate=200., signal_period=0.55, negative_period=None, max_length=300):
""" prepare, but don't start yet """
self.rate = sample_rate
self.period = signal_period
self.neg_period = negative_period
self.start_time = 0.
self.sample_idx = 0 # number of next sample to be taken
def start(self, timestamp):
""" start acquiring simulated data """
self.start_time = timestamp
self.sample_idx = 0
def get_data(self, timestamp, max_length=6000):
""" return all data acquired since last get_data call """
next_idx = int( (timestamp - self.start_time) * self.rate )
if next_idx - self.sample_idx > max_length:
self.sample_idx = next_idx - max_length # catch up if needed
# create some mildly intersting data:
sample_phases = np.arange( self.sample_idx, next_idx, dtype=np.float64 )
self.sample_idx = next_idx
sample_phase_pos = sample_phases / (self.period*self.rate)
sample_phase_pos %= 1.0
if self.neg_period is None:
return sample_phase_pos**4
sample_phase_neg = sample_phases / (self.neg_period*self.rate)
sample_phase_neg %= 1.0
return sample_phase_pos**4 - sample_phase_neg**4
| DataSource |
python | getsentry__sentry | tests/sentry/search/events/builder/test_span_metrics.py | {
"start": 12095,
"end": 12730
} | class ____(MetricsEnhancedPerformanceTestCase):
def test_split_granularity(self) -> None:
params: ParamsType = {
"organization_id": self.organization.id,
"project_id": [self.project.id],
"start": datetime.datetime(2015, 5, 18, 23, 3, 0, tzinfo=timezone.utc),
"end": datetime.datetime(2015, 5, 21, 1, 57, 0, tzinfo=timezone.utc),
}
query = TimeseriesSpansMetricsQueryBuilder(params, 86400)
condition, granularity = query.resolve_split_granularity()
assert granularity == query.granularity
assert condition == []
| TimeseriesMetricQueryBuilder |
python | milvus-io__pymilvus | pymilvus/exceptions.py | {
"start": 3629,
"end": 3729
} | class ____(MilvusException):
"""Raise when consistency level is invalid"""
| InvalidConsistencyLevel |
python | pytorch__pytorch | test/functorch/test_control_flow.py | {
"start": 366931,
"end": 370805
} | class ____(torch.nn.Module):
def forward(self, s17: "Sym(s17)", s94: "Sym(s94)", L_y_: "f32[s17, s94]", L_z_: "f32[s17, s94]", L_x_: "f32[s17, s94]"):
l_y_ = L_y_
l_z_ = L_z_
l_x_ = L_x_
sum_1: "f32[]" = l_x_.sum()
gt: "b8[]" = sum_1 > 0; sum_1 = None
cond_true_0 = self.cond_true_0
cond_false_0 = self.cond_false_0
cond = torch.ops.higher_order.cond(gt, cond_true_0, cond_false_0, (l_x_, s94, s17, s17, l_z_)); gt = cond_true_0 = cond_false_0 = l_x_ = s94 = s17 = l_z_ = None
getitem_5: "f32[u0, s94]" = cond[0]
sym_size_int: "Sym(u0)" = torch.ops.aten.sym_size.int(getitem_5, 0); getitem_5 = None
ge: "Sym(u0 >= 0)" = sym_size_int >= 0; sym_size_int = None
_assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 0 on node 'ge'"); ge = _assert_scalar_default = None
ret: "f32[u0, s94]" = cond[0]; cond = None
sum_2: "f32[]" = l_y_.sum(); l_y_ = None
sub: "f32[u0, s94]" = sum_2 - ret; sum_2 = ret = None
return (sub,)
class cond_true_0(torch.nn.Module):
def forward(self, l_x_: "f32[s17, s94]", s94: "Sym(s94)", s17_true_branch: "Sym(s17)", getitem_2_false_branch: "Sym(s17)", l_z__false_branch: "f32[s17, s94]"):
l_x__1 = l_x_
s94_1 = s94
add: "f32[s17, s94]" = l_x__1 + s17_true_branch; l_x__1 = s17_true_branch = None
getitem: "f32[s17 - 2, s94]" = add[slice(2, None, None)]; add = None
clone: "f32[s17 - 2, s94]" = getitem.clone(); getitem = None
return (clone,)
class cond_false_0(torch.nn.Module):
def forward(self, l_x_: "f32[s17, s94]", s94: "Sym(s94)", s17_true_branch: "Sym(s17)", getitem_2_false_branch: "Sym(s17)", l_z__false_branch: "f32[s17, s94]"):
l_x__1 = l_x_
s94_1 = s94
mul: "f32[s17, s94]" = getitem_2_false_branch * l_z__false_branch; getitem_2_false_branch = l_z__false_branch = None
add: "f32[s17, s94]" = l_x__1 + mul; l_x__1 = mul = None
getitem: "f32[2, s94]" = add[slice(None, 2, None)]; add = None
clone: "f32[2, s94]" = getitem.clone(); getitem = None
return (clone,)
""", # noqa: B950
)
@parametrize("dynamic", [True, False])
@parametrize("backend", ["eager", "aot_eager"])
def test_cond_mismatched_branch_strided_output(self, dynamic, backend):
class M(torch.nn.Module):
def forward(self, x, y):
def true_fn(x, y):
return (
(x.swapaxes(-1, 0) + 1)
.unsqueeze(1)
.expand(-1, 5, -1, -1, -1, -1, -1),
torch.empty_strided((3, 3), (0, 1)),
)
def false_fn(x, y):
return (
(y.swapaxes(-1, 0) + 1)
.unsqueeze(1)
.expand(-1, 4, -1, -1, -1, -1, -1),
torch.empty_strided((4, 5), (0, 1)),
)
ret = torch.cond(x.sum() > 0, true_fn, false_fn, (x, y))
return y.sum() + ret[0]
m = M()
x, y = torch.randn(1, 6, 1, 5, 4, 3), torch.randn(1, 4, 5, 1, 3, 8)
out = m(x, y)
compiled_out = torch.compile(
m, backend=backend, dynamic=dynamic, fullgraph=True
)(x, y)
self.assertEqual(compiled_out, out)
_hop_schema_test_schema_types = [
"bool",
"int",
"float",
"str",
"Tensor",
"SymInt",
"SymBool",
"GraphModule",
"ScriptObj",
]
@skipIfTorchDynamo("We don't expect users to torch.compile hop schema generation.")
@unittest.skipIf(IS_WINDOWS, "Windows not supported for this test")
| GraphModule |
python | pytest-dev__pytest-cov | src/pytest_cov/__init__.py | {
"start": 719,
"end": 908
} | class ____(Exception):
"""
Raised when dynamic_context is set to test_function and xdist is also used.
See: https://github.com/pytest-dev/pytest-cov/issues/604
"""
| DistCovError |
python | joblib__joblib | joblib/externals/loky/backend/context.py | {
"start": 11447,
"end": 13200
} | class ____(BaseContext):
"""Context relying on the LokyProcess."""
_name = "loky"
Process = LokyProcess
cpu_count = staticmethod(cpu_count)
def Queue(self, maxsize=0, reducers=None):
"""Returns a queue object"""
from .queues import Queue
return Queue(maxsize, reducers=reducers, ctx=self.get_context())
def SimpleQueue(self, reducers=None):
"""Returns a queue object"""
from .queues import SimpleQueue
return SimpleQueue(reducers=reducers, ctx=self.get_context())
if sys.platform != "win32":
"""For Unix platform, use our custom implementation of synchronize
ensuring that we use the loky.backend.resource_tracker to clean-up
the semaphores in case of a worker crash.
"""
def Semaphore(self, value=1):
"""Returns a semaphore object"""
from .synchronize import Semaphore
return Semaphore(value=value)
def BoundedSemaphore(self, value):
"""Returns a bounded semaphore object"""
from .synchronize import BoundedSemaphore
return BoundedSemaphore(value)
def Lock(self):
"""Returns a lock object"""
from .synchronize import Lock
return Lock()
def RLock(self):
"""Returns a recurrent lock object"""
from .synchronize import RLock
return RLock()
def Condition(self, lock=None):
"""Returns a condition object"""
from .synchronize import Condition
return Condition(lock)
def Event(self):
"""Returns an event object"""
from .synchronize import Event
return Event()
| LokyContext |
python | kamyu104__LeetCode-Solutions | Python/find-the-winner-of-the-circular-game.py | {
"start": 315,
"end": 618
} | class ____(object):
def findTheWinner(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
def f(idx, n, k):
if n == 1:
return 0
return (k+f((idx+k)%n, n-1, k))%n
return f(0, n, k)+1
| Solution2 |
python | Textualize__textual | docs/examples/app/question_title01.py | {
"start": 95,
"end": 645
} | class ____(App[str]):
CSS_PATH = "question02.tcss"
TITLE = "A Question App"
SUB_TITLE = "The most important question"
def compose(self) -> ComposeResult:
yield Header()
yield Label("Do you love Textual?", id="question")
yield Button("Yes", id="yes", variant="primary")
yield Button("No", id="no", variant="error")
def on_button_pressed(self, event: Button.Pressed) -> None:
self.exit(event.button.id)
if __name__ == "__main__":
app = MyApp()
reply = app.run()
print(reply)
| MyApp |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_filters.py | {
"start": 110,
"end": 2031
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-filters"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
def get_filter_spec(
self, response_data: Iterable[dict[str, Any]], spec_id: str
) -> dict[str, Any]:
"""
looks in a successful response data for the specified spec_id and returns it (if found)
"""
for spec in response_data:
if spec["id"] == spec_id:
return spec
raise AssertionError(f"spec not found: {spec_id}")
def test_get(self) -> None:
org = self.create_organization(name="baz", slug="1", owner=self.user)
team = self.create_team(organization=org, name="foo", slug="foo")
project = self.create_project(name="Bar", slug="bar", teams=[team])
project.update_option("filters:browser-extension", "0")
response = self.get_success_response(org.slug, project.slug)
self.insta_snapshot(response.data)
def test_health_check_filter(self) -> None:
"""
Tests setting health check filters ( aka filtered-transactions)
"""
org = self.create_organization(name="baz", slug="1", owner=self.user)
team = self.create_team(organization=org, name="foo", slug="foo")
project = self.create_project(name="Bar", slug="bar", teams=[team])
project.update_option("filters:filtered-transaction", "0")
response = self.get_success_response(org.slug, project.slug)
health_check = self.get_filter_spec(response.data, "filtered-transaction")
assert health_check["active"] is False
project.update_option("filters:filtered-transaction", "1")
response = self.get_success_response(org.slug, project.slug)
health_check = self.get_filter_spec(response.data, "filtered-transaction")
assert health_check["active"] is True
| ProjectFiltersTest |
python | huggingface__transformers | src/transformers/models/ernie/modular_ernie.py | {
"start": 19297,
"end": 22063
} | class ____(BertForMaskedLM):
_tied_weights_keys = {
"cls.predictions.decoder.bias": "cls.predictions.bias",
"cls.predictions.decoder.weight": "ernie.embeddings.word_embeddings.weight",
}
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
task_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
r"""
task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Task type embedding is a special embedding to represent the characteristic of different tasks, such as
word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
config.task_type_vocab_size-1]
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
outputs = self.ernie(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
task_type_ids=task_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| ErnieForMaskedLM |
python | dagster-io__dagster | python_modules/dagster/dagster/components/testing/test_cases.py | {
"start": 4751,
"end": 5812
} | class ____(TestTranslation):
"""This version of the TestTranslation class is used to test the translation of
asset attributes, applying all customizations in parallel to speed up tests for
components which might be expensive to construct.
"""
@pytest.fixture()
def translation_test_case(self, request):
deep_merge_all_attributes = {}
for case in test_cases:
deep_merge_all_attributes = deep_merge_dicts(deep_merge_all_attributes, case.attributes)
merged_assertion = lambda asset_spec: all(case.assertion(asset_spec) for case in test_cases)
# successively apply key modifiers
def _merged_key_modifier(key):
for case in test_cases:
if case.key_modifier:
key = case.key_modifier(key)
return key
return TranslationTestCase(
name="merged",
attributes=deep_merge_all_attributes,
assertion=merged_assertion,
key_modifier=_merged_key_modifier,
)
| TestTranslationBatched |
python | tensorflow__tensorflow | tensorflow/python/distribute/parallel_device/parallel_device_test.py | {
"start": 3399,
"end": 4538
} | class ____(test.TestCase):
def setUp(self):
super(_VirtualDeviceTestCase, self).setUp()
ctx = context.context()
if ctx.list_physical_devices("TPU"):
self.device_type = "TPU"
tpu_cluster_resolver.initialize_tpu_system()
elif ctx.list_physical_devices("GPU"):
self.device_type = "GPU"
gpus = ctx.list_physical_devices(self.device_type)
ctx.set_logical_device_configuration(gpus[0], [
context.LogicalDeviceConfiguration(memory_limit=100),
context.LogicalDeviceConfiguration(memory_limit=100),
])
else:
self.device_type = "CPU"
cpus = ctx.list_physical_devices("CPU")
ctx.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
])
self.device = parallel_device.ParallelDevice(components=[
"/job:localhost/device:{}:0".format(self.device_type),
self.device_type + ":1"
])
self.assertIn(self.device_type + ":0", self.device.components[0])
self.assertIn(self.device_type + ":1", self.device.components[1])
| _VirtualDeviceTestCase |
python | hyperopt__hyperopt | hyperopt/base.py | {
"start": 6945,
"end": 23275
} | class ____:
"""Database interface supporting data-driven model-based optimization.
The model-based optimization algorithms used by hyperopt's fmin function
work by analyzing samples of a response surface--a history of what points
in the search space were tested, and what was discovered by those tests.
A Trials instance stores that history and makes it available to fmin and
to the various optimization algorithms.
This class (`base.Trials`) is a pure-Python implementation of the database
in terms of lists of dictionaries. Subclass `mongoexp.MongoTrials`
implements the same API in terms of a mongodb database running in another
process. Other subclasses may be implemented in future.
The elements of `self.trials` represent all of the completed, in-progress,
and scheduled evaluation points from an e.g. `fmin` call.
Each element of `self.trials` is a dictionary with *at least* the following
keys:
* **tid**: a unique trial identification object within this Trials instance
usually it is an integer, but it isn't obvious that other sortable,
hashable objects couldn't be used at some point.
* **result**: a sub-dictionary representing what was returned by the fmin
evaluation function. This sub-dictionary has a key 'status' with a value
from `STATUS_STRINGS` and the status is `STATUS_OK`, then there should be
a 'loss' key as well with a floating-point value. Other special keys in
this sub-dictionary may be used by optimization algorithms (see them
for details). Other keys in this sub-dictionary can be used by the
evaluation function to store miscellaneous diagnostics and debugging
information.
* **misc**: despite generic name, this is currently where the trial's
hyperparameter assignments are stored. This sub-dictionary has two
elements: `'idxs'` and `'vals'`. The `vals` dictionary is
a sub-sub-dictionary mapping each hyperparameter to either `[]` (if the
hyperparameter is inactive in this trial), or `[<val>]` (if the
hyperparameter is active). The `idxs` dictionary is technically
redundant -- it is the same as `vals` but it maps hyperparameter names
to either `[]` or `[<tid>]`.
"""
asynchronous = False
def __init__(self, exp_key=None, refresh=True):
self._ids = set()
self._dynamic_trials = []
self._exp_key = exp_key
self.attachments = {}
if refresh:
self.refresh()
def view(self, exp_key=None, refresh=True):
rval = object.__new__(self.__class__)
rval._exp_key = exp_key
rval._ids = self._ids
rval._dynamic_trials = self._dynamic_trials
rval.attachments = self.attachments
if refresh:
rval.refresh()
return rval
def aname(self, trial, name):
return "ATTACH::{}::{}".format(trial["tid"], name)
def trial_attachments(self, trial):
"""
Support syntax for load: self.trial_attachments(doc)[name]
# -- does this work syntactically?
# (In any event a 2-stage store will work)
Support syntax for store: self.trial_attachments(doc)[name] = value
"""
# don't offer more here than in MongoCtrl
class Attachments:
def __contains__(_self, name):
return self.aname(trial, name) in self.attachments
def __getitem__(_self, name):
return self.attachments[self.aname(trial, name)]
def __setitem__(_self, name, value):
self.attachments[self.aname(trial, name)] = value
def __delitem__(_self, name):
del self.attachments[self.aname(trial, name)]
return Attachments()
def __iter__(self):
try:
return iter(self._trials)
except AttributeError:
print("You have to refresh before you iterate", file=sys.stderr)
raise
def __len__(self):
try:
return len(self._trials)
except AttributeError:
print("You have to refresh before you compute len", file=sys.stderr)
raise
def __getitem__(self, item):
# -- how to make it obvious whether indexing is by _trials position
# or by tid if both are integers?
raise NotImplementedError("")
def refresh(self):
# In MongoTrials, this method fetches from database
if self._exp_key is None:
self._trials = [
tt for tt in self._dynamic_trials if tt["state"] in JOB_VALID_STATES
]
else:
self._trials = [
tt
for tt in self._dynamic_trials
if (tt["state"] in JOB_VALID_STATES and tt["exp_key"] == self._exp_key)
]
self._ids.update([tt["tid"] for tt in self._trials])
@property
def trials(self):
    """List of currently-visible trial documents (set by refresh())."""
    return self._trials

@property
def tids(self):
    """Trial ids, in the same order as ``self.trials``."""
    return [tt["tid"] for tt in self._trials]

@property
def specs(self):
    """The ``spec`` sub-document of each visible trial."""
    return [tt["spec"] for tt in self._trials]

@property
def results(self):
    """The ``result`` sub-document of each visible trial."""
    return [tt["result"] for tt in self._trials]

@property
def miscs(self):
    """The ``misc`` sub-document of each visible trial."""
    return [tt["misc"] for tt in self._trials]

@property
def idxs_vals(self):
    """Pair derived from the misc documents via miscs_to_idxs_vals.

    Presumably (trial-index map, sampled-value map) per hyperparameter
    label -- confirm against miscs_to_idxs_vals.
    """
    return miscs_to_idxs_vals(self.miscs)

@property
def idxs(self):
    """First element of ``idxs_vals``."""
    return self.idxs_vals[0]

@property
def vals(self):
    """Second element of ``idxs_vals``."""
    return self.idxs_vals[1]
def assert_valid_trial(self, trial):
    """Validate that `trial` is a well-formed trial document.

    Checks dict-likeness, required top-level keys (TRIAL_KEYS) and
    ``misc`` keys (TRIAL_MISC_KEYS), tid consistency between root and
    misc, BSON-encodability when bson is available, and that the
    document carries this store's experiment key.

    :raises InvalidTrial: if any check fails.
    :returns: the (unmodified) trial document.
    """
    if not (hasattr(trial, "keys") and hasattr(trial, "values")):
        raise InvalidTrial("trial should be dict-like", trial)
    for key in TRIAL_KEYS:
        if key not in trial:
            # BUGFIX: the key was previously passed as a second exception
            # arg ("trial missing key %s", key) so the message never
            # actually named the missing key.
            raise InvalidTrial("trial missing key %s" % key)
    for key in TRIAL_MISC_KEYS:
        if key not in trial["misc"]:
            raise InvalidTrial('trial["misc"] missing key %s' % key)
    if trial["tid"] != trial["misc"]["tid"]:
        raise InvalidTrial("tid mismatch between root and misc", trial)
    # -- check for SON-encodable
    if have_bson:
        try:
            bson.BSON.encode(trial)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit and
            # KeyboardInterrupt are not intercepted before the re-raise.
            # TODO: save the trial object somewhere to inspect, fix, re-insert
            # so that precious data is not simply deallocated and lost.
            print("-" * 80)
            print("CAN'T ENCODE")
            print("-" * 80)
            raise
    if trial["exp_key"] != self._exp_key:
        raise InvalidTrial("wrong exp_key", (trial["exp_key"], self._exp_key))
    # XXX how to assert that tids are unique?
    return trial
def _insert_trial_docs(self, docs):
    """Append `docs` to the raw document list with no validation; return tids."""
    tids = [doc["tid"] for doc in docs]
    self._dynamic_trials.extend(docs)
    return tids
def insert_trial_doc(self, doc):
    """insert trial after error checking

    Does not refresh. Call self.refresh() for the trial to appear in
    self.specs, self.results, etc.
    """
    validated = self.assert_valid_trial(SONify(doc))
    (tid,) = self._insert_trial_docs([validated])
    return tid
# refreshing could be done fast in this base implementation, but with
# a real DB the steps should be separated.
def insert_trial_docs(self, docs):
    """trials - something like is returned by self.new_trial_docs()"""
    validated = [self.assert_valid_trial(SONify(doc)) for doc in docs]
    return self._insert_trial_docs(validated)
def new_trial_ids(self, n):
    """Reserve and return `n` fresh consecutive trial ids."""
    first = len(self._ids)
    fresh = list(range(first, first + n))
    self._ids.update(fresh)
    return fresh
def new_trial_docs(self, tids, specs, results, miscs):
    """Assemble fresh trial documents (state JOB_STATE_NEW) from parts.

    All four sequences must have equal length; one document is built
    per position, carrying this store's experiment key.
    """
    assert len(tids) == len(specs) == len(results) == len(miscs)
    return [
        {
            "state": JOB_STATE_NEW,
            "tid": tid,
            "spec": spec,
            "result": result,
            "misc": misc,
            "exp_key": self._exp_key,
            "owner": None,
            "version": 0,
            "book_time": None,
            "refresh_time": None,
        }
        for tid, spec, result, misc in zip(tids, specs, results, miscs)
    ]
def source_trial_docs(self, tids, specs, results, miscs, sources):
    """Assemble trial documents derived from existing `sources` documents.

    State, exp_key, owner and timing fields are copied from each source;
    each ``misc`` is patched with tid / cmd / from_tid bookkeeping.
    """
    assert len(tids) == len(specs) == len(results) == len(miscs) == len(sources)
    docs = []
    for tid, spec, result, misc, source in zip(
        tids, specs, results, miscs, sources
    ):
        doc = {
            "version": 0,
            "tid": tid,
            "spec": spec,
            "result": result,
            "misc": misc,
            "state": source["state"],
            "exp_key": source["exp_key"],
            "owner": source["owner"],
            "book_time": source["book_time"],
            "refresh_time": source["refresh_time"],
        }
        # -- ensure that misc has the following fields,
        #    some of which may already by set correctly.
        for field, value in (
            ("tid", tid),
            ("cmd", None),
            ("from_tid", source["tid"]),
        ):
            assert doc["misc"].setdefault(field, value) == value
        docs.append(doc)
    return docs
def delete_all(self):
    """Wipe every trial document and attachment, then rebuild the views."""
    self.attachments = {}
    self._dynamic_trials = []
    self.refresh()
def count_by_state_synced(self, arg, trials=None):
    """
    Return trial counts by looking at self._trials
    """
    if trials is None:
        trials = self._trials
    if arg in JOB_STATES:
        wanted = {arg}
    elif hasattr(arg, "__iter__"):
        wanted = set(arg)
        assert all(state in JOB_STATES for state in wanted)
    else:
        raise TypeError(arg)
    return sum(1 for doc in trials if doc["state"] in wanted)
def count_by_state_unsynced(self, arg):
    """
    Return trial counts that count_by_state_synced would return if we
    called refresh() first.
    """
    if self._exp_key is None:
        pending = self._dynamic_trials
    else:
        pending = [
            doc
            for doc in self._dynamic_trials
            if doc["exp_key"] == self._exp_key
        ]
    return self.count_by_state_synced(arg, trials=pending)
def losses(self, bandit=None):
    """Loss of every result; mapped through `bandit.loss` when given."""
    if bandit is not None:
        return list(map(bandit.loss, self.results, self.specs))
    return [result.get("loss") for result in self.results]
def statuses(self, bandit=None):
    """Status of every result; mapped through `bandit.status` when given."""
    if bandit is not None:
        return list(map(bandit.status, self.results, self.specs))
    return [result.get("status") for result in self.results]
def average_best_error(self, bandit=None):
    """Return the average best error of the experiment

    Average best error is defined as the average of bandit.true_loss,
    weighted by the probability that the corresponding bandit.loss is best.

    For domains with loss measurement variance of 0, this function simply
    returns the true_loss corresponding to the result with the lowest loss.

    :raises ValueError: if no result has STATUS_OK, or (with a bandit)
        if any mapped value is non-finite.
    """
    if bandit is None:
        # Pull loss / variance / true_loss straight out of the STATUS_OK
        # result documents.  Missing variance defaults to 0; missing
        # true_loss falls back to the observed loss.
        results = self.results
        loss = [r["loss"] for r in results if r["status"] == STATUS_OK]
        loss_v = [
            r.get("loss_variance", 0) for r in results if r["status"] == STATUS_OK
        ]
        true_loss = [
            r.get("true_loss", r["loss"])
            for r in results
            if r["status"] == STATUS_OK
        ]
    else:

        def fmap(f):
            # Map f over (result, spec) pairs of bandit-OK trials; every
            # value must be finite or the whole computation is rejected.
            rval = np.asarray(
                [
                    f(r, s)
                    for (r, s) in zip(self.results, self.specs)
                    if bandit.status(r) == STATUS_OK
                ]
            ).astype("float")
            if not np.all(np.isfinite(rval)):
                raise ValueError()
            return rval

        loss = fmap(bandit.loss)
        loss_v = fmap(bandit.loss_variance)
        true_loss = fmap(bandit.true_loss)
    loss3 = list(zip(loss, loss_v, true_loss))
    if not loss3:
        raise ValueError("Empty loss vector")
    # Sort lexicographically by (loss, variance, true_loss) so that
    # index 0 holds the best-observed trial.
    loss3.sort()
    loss3 = np.asarray(loss3)
    if np.all(loss3[:, 1] == 0):
        # No measurement noise anywhere: the lowest loss wins outright.
        best_idx = np.argmin(loss3[:, 0])
        return loss3[best_idx, 2]
    else:
        # Keep every trial whose loss lies within 3 sigma (sigma taken
        # from the best trial's variance) of the best loss, then weight
        # each true_loss by its sampled probability of being the minimum.
        cutoff = 0
        sigma = np.sqrt(loss3[0][1])
        while cutoff < len(loss3) and loss3[cutoff][0] < loss3[0][0] + 3 * sigma:
            cutoff += 1
        pmin = pmin_sampled(loss3[:cutoff, 0], loss3[:cutoff, 1])
        avg_true_loss = (pmin * loss3[:cutoff, 2]).sum()
        return avg_true_loss
@property
def best_trial(self):
    """
    Trial with lowest non-NaN loss and status=STATUS_OK.

    :raises AllTrialsFailed: if no trial has an ok status and a non-NaN
        loss.  (The previous docstring claimed ``None`` was returned in
        that case, which did not match the code's behavior.)
    """
    candidates = [
        t
        for t in self.trials
        if t["result"]["status"] == STATUS_OK and not np.isnan(t["result"]["loss"])
    ]
    if not candidates:
        raise AllTrialsFailed
    # `candidates` is non-empty here, so `losses` is too; the old
    # `if len(losses) == 0: return None` branch was unreachable dead code.
    losses = [float(t["result"]["loss"]) for t in candidates]
    best = np.nanargmin(losses)
    return candidates[best]
@property
def argmin(self):
    """Best trial's hyperparameter values, unpacked to scalars.

    One-element value lists become their single value; empty lists
    are skipped entirely.
    """
    vals = self.best_trial["misc"]["vals"]
    # unpack the one-element lists to values
    # and skip over the 0-element lists
    return {label: lst[0] for label, lst in vals.items() if lst}
def fmin(
    self,
    fn,
    space,
    algo=None,
    max_evals=None,
    timeout=None,
    loss_threshold=None,
    max_queue_len=1,
    rstate=None,
    verbose=False,
    pass_expr_memo_ctrl=None,
    catch_eval_exceptions=False,
    return_argmin=True,
    show_progressbar=True,
    early_stop_fn=None,
    trials_save_file="",
):
    """Minimize a function over a hyperparameter space.

    Convenience wrapper that runs `hyperopt.fmin.fmin` with this object
    as the trials store (``trials=self``).

    For most parameters, see `hyperopt.fmin.fmin`.

    Parameters
    ----------
    catch_eval_exceptions : bool, default False
        If set to True, exceptions raised by either the evaluation of the
        configuration space from hyperparameters or the execution of `fn`
        , will be caught by fmin, and recorded in self._dynamic_trials as
        error jobs (JOB_STATE_ERROR). If set to False, such exceptions
        will not be caught, and so they will propagate to calling code.

    show_progressbar : bool or context manager, default True.
        Show a progressbar. See `hyperopt.progress` for customizing progress reporting.
    """
    # -- Stop-gap implementation!
    #    fmin should have been a Trials method in the first place
    #    but for now it's still sitting in another file.
    # NOTE(review): imported inside the method, presumably to avoid a
    # circular import at module load time -- confirm before hoisting.
    from .fmin import fmin

    # allow_trials_fmin=False prevents fmin from calling back into this
    # method (infinite recursion).
    return fmin(
        fn,
        space,
        algo=algo,
        max_evals=max_evals,
        timeout=timeout,
        loss_threshold=loss_threshold,
        trials=self,
        rstate=rstate,
        verbose=verbose,
        max_queue_len=max_queue_len,
        allow_trials_fmin=False,  # -- prevent recursion
        pass_expr_memo_ctrl=pass_expr_memo_ctrl,
        catch_eval_exceptions=catch_eval_exceptions,
        return_argmin=return_argmin,
        show_progressbar=show_progressbar,
        early_stop_fn=early_stop_fn,
        trials_save_file=trials_save_file,
    )
def trials_from_docs(docs, validate=True, **kwargs):
    """Construct a Trials base class instance from a list of trials documents"""
    trials = Trials(**kwargs)
    inserter = trials.insert_trial_docs if validate else trials._insert_trial_docs
    inserter(docs)
    trials.refresh()
    return trials
| Trials |
python | ansible__ansible | test/units/module_utils/facts/test_collectors.py | {
"start": 13572,
"end": 16160
} | class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'service_mgr']
valid_subsets = ['service_mgr']
fact_namespace = 'ansible_service_mgr'
collector_class = ServiceMgrFactCollector
# TODO: dedupe some of this test code
@patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
def test_no_proc1_ps_random_init(self, mock_gfc):
# no /proc/1/comm, ps returns '/sbin/sys11' which we dont know
# should end up return 'sys11'
module = self._mock_module()
module.run_command = Mock(return_value=(0, '/sbin/sys11', ''))
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module)
self.assertIsInstance(facts_dict, dict)
self.assertEqual(facts_dict['service_mgr'], 'sys11')
@patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value='runit-init')
def test_service_mgr_runit(self, mock_gfc):
# /proc/1/comm contains 'runit-init', ps fails, service manager is runit
# should end up return 'runit'
module = self._mock_module()
module.run_command = Mock(return_value=(1, '', ''))
collected_facts = {'ansible_system': 'Linux'}
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module,
collected_facts=collected_facts)
self.assertIsInstance(facts_dict, dict)
self.assertEqual(facts_dict['service_mgr'], 'runit')
@patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
@patch('ansible.module_utils.facts.system.service_mgr.os.path.islink', side_effect=lambda x: x == '/sbin/init')
@patch('ansible.module_utils.facts.system.service_mgr.os.readlink', side_effect=lambda x: '/sbin/runit-init' if x == '/sbin/init' else '/bin/false')
def test_service_mgr_runit_no_comm(self, mock_gfc, mock_opl, mock_orl):
# no /proc/1/comm, ps returns 'COMMAND\n', service manager is runit
# should end up return 'runit'
module = self._mock_module()
module.run_command = Mock(return_value=(1, 'COMMAND\n', ''))
collected_facts = {'ansible_system': 'Linux'}
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module,
collected_facts=collected_facts)
self.assertIsInstance(facts_dict, dict)
self.assertEqual(facts_dict['service_mgr'], 'runit')
| TestServiceMgrFacts |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol5.py | {
"start": 968,
"end": 1209
} | class ____(Protocol):
__name__: str
__module__: str
__qualname__: str
__annotations__: dict[str, Any]
__slots__ = ()
def __call__(self) -> None: ...
def func2() -> None: ...
v: CallbackProto2 = func2
| CallbackProto2 |
python | realpython__materials | python-callable-instances/serializing.py | {
"start": 208,
"end": 408
} | class ____:
def __init__(self, serializer_strategy):
self.serializer_strategy = serializer_strategy
def serialize(self, data):
return self.serializer_strategy(data)
| DataSerializer |
python | ethereum__web3.py | web3/_utils/module_testing/go_ethereum_txpool_module.py | {
"start": 760,
"end": 1223
} | class ____:
def test_geth_txpool_inspect(self, w3: "Web3") -> None:
test_data = w3.geth.txpool.inspect()
assert "pending" in test_data
def test_geth_txpool_content(self, w3: "Web3") -> None:
test_data = w3.geth.txpool.content()
assert "pending" in test_data
def test_geth_txpool_status(self, w3: "Web3") -> None:
test_data = w3.geth.txpool.status()
assert "pending" in test_data
| GoEthereumTxPoolModuleTest |
python | kamyu104__LeetCode-Solutions | Python/wiggle-sort.py | {
"start": 502,
"end": 804
} | class ____(object):
def wiggleSort(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
nums.sort()
med = (len(nums) - 1) // 2
nums[::2], nums[1::2] = nums[med::-1], nums[:med:-1]
| Solution2 |
python | hynek__structlog | tests/test_config.py | {
"start": 2032,
"end": 4561
} | class ____:
def test_get_config_is_configured(self):
"""
Return value of structlog.get_config() works as input for
structlog.configure(). is_configured() reflects the state of
configuration.
"""
assert False is structlog.is_configured()
structlog.configure(**structlog.get_config())
assert True is structlog.is_configured()
structlog.reset_defaults()
assert False is structlog.is_configured()
def test_configure_all(self, proxy):
"""
All configurations are applied and land on the bound logger.
"""
x = stub()
configure(processors=[x], context_class=dict)
b = proxy.bind()
assert [x] == b._processors
assert dict is b._context.__class__
def test_reset(self, proxy):
"""
Reset resets all settings to their default values.
"""
x = stub()
configure(processors=[x], context_class=dict, wrapper_class=Wrapper)
structlog.reset_defaults()
b = proxy.bind()
assert [x] != b._processors
assert _BUILTIN_DEFAULT_PROCESSORS == b._processors
assert isinstance(b, _BUILTIN_DEFAULT_WRAPPER_CLASS)
assert _BUILTIN_DEFAULT_CONTEXT_CLASS == b._context.__class__
assert _BUILTIN_DEFAULT_LOGGER_FACTORY is _CONFIG.logger_factory
def test_just_processors(self, proxy):
"""
It's possible to only configure processors.
"""
x = stub()
configure(processors=[x])
b = proxy.bind()
assert [x] == b._processors
assert _BUILTIN_DEFAULT_PROCESSORS != b._processors
assert _BUILTIN_DEFAULT_CONTEXT_CLASS == b._context.__class__
def test_just_context_class(self, proxy):
"""
It's possible to only configure the context class.
"""
configure(context_class=dict)
b = proxy.bind()
assert dict is b._context.__class__
assert _BUILTIN_DEFAULT_PROCESSORS == b._processors
def test_configure_sets_is_configured(self):
"""
After configure() is_configured() returns True.
"""
assert False is _CONFIG.is_configured
configure()
assert True is _CONFIG.is_configured
def test_configures_logger_factory(self):
"""
It's possible to configure the logger factory.
"""
def f():
pass
configure(logger_factory=f)
assert f is _CONFIG.logger_factory
| TestConfigure |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 29455,
"end": 29591
} | class ____(BaseModel):
ti_id: UUID
type: Literal["ValidateInletsAndOutlets"] = "ValidateInletsAndOutlets"
| ValidateInletsAndOutlets |
python | numba__numba | numba/cuda/tests/nocuda/test_library_lookup.py | {
"start": 555,
"end": 2285
} | class ____(SerialMixin, unittest.TestCase):
def setUp(self):
ctx = mp.get_context('spawn')
qrecv = ctx.Queue()
qsend = ctx.Queue()
self.qsend = qsend
self.qrecv = qrecv
self.child_process = ctx.Process(
target=check_lib_lookup,
args=(qrecv, qsend),
daemon=True,
)
self.child_process.start()
def tearDown(self):
self.qsend.put(self.do_terminate)
self.child_process.join(3)
# Ensure the process is terminated
self.assertIsNotNone(self.child_process)
def remote_do(self, action):
self.qsend.put(action)
out = self.qrecv.get()
self.assertNotIsInstance(out, BaseException)
return out
@staticmethod
def do_terminate():
return False, None
def remove_env(name):
try:
del os.environ[name]
except KeyError:
return False
else:
return True
def check_lib_lookup(qout, qin):
status = True
while status:
try:
action = qin.get()
except Exception as e:
qout.put(e)
status = False
else:
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", NumbaWarning)
status, result = action()
qout.put(result + (w,))
except Exception as e:
qout.put(e)
status = False
@skip_on_cudasim('Library detection unsupported in the simulator')
@unittest.skipUnless(has_mp_get_context, 'mp.get_context not available')
@skip_unless_conda_cudatoolkit('test assumes conda installed cudatoolkit')
| LibraryLookupBase |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/dagster/asset_versioning_and_caching/observable_source_asset_path_with_io_managers.py | {
"start": 248,
"end": 2080
} | class ____(dg.IOManager):
def __init__(self, root_dir: str):
self.root_dir = root_dir
@staticmethod
def with_directory(root_dir: str):
mkdir_p(root_dir)
return NumberTextFileIOManager(root_dir=root_dir)
def load_input(self, context: "dg.InputContext") -> int:
asset_key_str = context.asset_key.to_user_string()
full_path = os.path.join(self.root_dir, f"{asset_key_str}.txt")
with open(full_path) as ff:
return int(ff.read())
def handle_output(self, context: "dg.OutputContext", obj: int) -> None:
# without writing a custom input manager and setting the key on if from the dg.asset in
# this function gets called by the observable source dg.asset
# dagster._core.errors.DagsterInvariantViolationError:
# Attempting to access asset_key, but it was not provided when constructing the dg.OutputContext
# Even when you override the io_manager_key on the source dg.asset, this is still called
# The other option is to use the default i/o manager for the source dg.asset
# to that it writes pickled None somewhere and then use a custom i/o
# manager key for all the other assets. Both are pretty bad.
if context.op_def.name == "input_number":
return
asset_key_str = context.asset_key.to_user_string()
full_path = os.path.join(self.root_dir, f"{asset_key_str}.txt")
with open(full_path, "w") as ff:
ff.write(str(obj))
def sha256_digest_from_str(string: str) -> str:
hash_sig = sha256()
hash_sig.update(bytearray(string, "utf8"))
return hash_sig.hexdigest()
FILE_PATH = dg.file_relative_path(__file__, "input_number.txt")
# knows how to load file that is dropped somewhere by an external process
| NumberTextFileIOManager |
python | python-openxml__python-docx | src/docx/image/tiff.py | {
"start": 8196,
"end": 8663
} | class ____(_IfdEntry):
"""IFD entry having the form of a NULL-terminated ASCII string."""
@classmethod
def _parse_value(cls, stream_rdr, offset, value_count, value_offset):
"""Return the ASCII string parsed from `stream_rdr` at `value_offset`.
The length of the string, including a terminating '\x00' (NUL) character, is in
`value_count`.
"""
return stream_rdr.read_str(value_count - 1, value_offset)
| _AsciiIfdEntry |
python | tornadoweb__tornado | tornado/routing.py | {
"start": 7247,
"end": 7788
} | class ____(Router):
"""Abstract router interface for routers that can handle named routes
and support reversing them to original urls.
"""
def reverse_url(self, name: str, *args: Any) -> Optional[str]:
"""Returns url string for a given route name and arguments
or ``None`` if no match is found.
:arg str name: route name.
:arg args: url parameters.
:returns: parametrized url string for a given route name (or ``None``).
"""
raise NotImplementedError()
| ReversibleRouter |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/buffer.py | {
"start": 9572,
"end": 20000
} | class ____(MutableMapping):
"""
AgentBuffer contains a dictionary of AgentBufferFields. Each agent has his own AgentBuffer.
The keys correspond to the name of the field. Example: state, action
"""
# Whether or not to validate the types of keys at runtime
# This should be off for training, but enabled for testing
CHECK_KEY_TYPES_AT_RUNTIME = False
def __init__(self):
self.last_brain_info = None
self.last_take_action_outputs = None
self._fields: DefaultDict[AgentBufferKey, AgentBufferField] = defaultdict(
AgentBufferField
)
def __str__(self):
return ", ".join([f"'{k}' : {str(self[k])}" for k in self._fields.keys()])
def reset_agent(self) -> None:
"""
Resets the AgentBuffer
"""
for f in self._fields.values():
f.reset_field()
self.last_brain_info = None
self.last_take_action_outputs = None
@staticmethod
def _check_key(key):
if isinstance(key, BufferKey):
return
if isinstance(key, tuple):
key0, key1 = key
if isinstance(key0, ObservationKeyPrefix):
if isinstance(key1, int):
return
raise KeyError(f"{key} has type ({type(key0)}, {type(key1)})")
if isinstance(key0, RewardSignalKeyPrefix):
if isinstance(key1, str):
return
raise KeyError(f"{key} has type ({type(key0)}, {type(key1)})")
raise KeyError(f"{key} is a {type(key)}")
@staticmethod
def _encode_key(key: AgentBufferKey) -> str:
"""
Convert the key to a string representation so that it can be used for serialization.
"""
if isinstance(key, BufferKey):
return key.value
prefix, suffix = key
return f"{prefix.value}:{suffix}"
@staticmethod
def _decode_key(encoded_key: str) -> AgentBufferKey:
"""
Convert the string representation back to a key after serialization.
"""
# Simple case: convert the string directly to a BufferKey
try:
return BufferKey(encoded_key)
except ValueError:
pass
# Not a simple key, so split into two parts
prefix_str, _, suffix_str = encoded_key.partition(":")
# See if it's an ObservationKeyPrefix first
try:
return ObservationKeyPrefix(prefix_str), int(suffix_str)
except ValueError:
pass
# If not, it had better be a RewardSignalKeyPrefix
try:
return RewardSignalKeyPrefix(prefix_str), suffix_str
except ValueError:
raise ValueError(f"Unable to convert {encoded_key} to an AgentBufferKey")
def __getitem__(self, key: AgentBufferKey) -> AgentBufferField:
if self.CHECK_KEY_TYPES_AT_RUNTIME:
self._check_key(key)
return self._fields[key]
def __setitem__(self, key: AgentBufferKey, value: AgentBufferField) -> None:
if self.CHECK_KEY_TYPES_AT_RUNTIME:
self._check_key(key)
self._fields[key] = value
def __delitem__(self, key: AgentBufferKey) -> None:
if self.CHECK_KEY_TYPES_AT_RUNTIME:
self._check_key(key)
self._fields.__delitem__(key)
def __iter__(self):
return self._fields.__iter__()
def __len__(self) -> int:
return self._fields.__len__()
def __contains__(self, key):
if self.CHECK_KEY_TYPES_AT_RUNTIME:
self._check_key(key)
return self._fields.__contains__(key)
def check_length(self, key_list: List[AgentBufferKey]) -> bool:
"""
Some methods will require that some fields have the same length.
check_length will return true if the fields in key_list
have the same length.
:param key_list: The fields which length will be compared
"""
if self.CHECK_KEY_TYPES_AT_RUNTIME:
for k in key_list:
self._check_key(k)
if len(key_list) < 2:
return True
length = None
for key in key_list:
if key not in self._fields:
return False
if (length is not None) and (length != len(self[key])):
return False
length = len(self[key])
return True
def shuffle(
self, sequence_length: int, key_list: List[AgentBufferKey] = None
) -> None:
"""
Shuffles the fields in key_list in a consistent way: The reordering will
be the same across fields.
:param key_list: The fields that must be shuffled.
"""
if key_list is None:
key_list = list(self._fields.keys())
if not self.check_length(key_list):
raise BufferException(
"Unable to shuffle if the fields are not of same length"
)
s = np.arange(len(self[key_list[0]]) // sequence_length)
np.random.shuffle(s)
for key in key_list:
buffer_field = self[key]
tmp: List[np.ndarray] = []
for i in s:
tmp += buffer_field[i * sequence_length : (i + 1) * sequence_length]
buffer_field.set(tmp)
def make_mini_batch(self, start: int, end: int) -> "AgentBuffer":
"""
Creates a mini-batch from buffer.
:param start: Starting index of buffer.
:param end: Ending index of buffer.
:return: Dict of mini batch.
"""
mini_batch = AgentBuffer()
for key, field in self._fields.items():
# slicing AgentBufferField returns a List[Any}
mini_batch[key] = field[start:end] # type: ignore
return mini_batch
def sample_mini_batch(
self, batch_size: int, sequence_length: int = 1
) -> "AgentBuffer":
"""
Creates a mini-batch from a random start and end.
:param batch_size: number of elements to withdraw.
:param sequence_length: Length of sequences to sample.
Number of sequences to sample will be batch_size/sequence_length.
"""
num_seq_to_sample = batch_size // sequence_length
mini_batch = AgentBuffer()
buff_len = self.num_experiences
num_sequences_in_buffer = buff_len // sequence_length
start_idxes = (
np.random.randint(num_sequences_in_buffer, size=num_seq_to_sample)
* sequence_length
) # Sample random sequence starts
for key in self:
buffer_field = self[key]
mb_list = (buffer_field[i : i + sequence_length] for i in start_idxes)
# See comparison of ways to make a list from a list of lists here:
# https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists
mini_batch[key].set(list(itertools.chain.from_iterable(mb_list)))
return mini_batch
def save_to_file(self, file_object: BinaryIO) -> None:
"""
Saves the AgentBuffer to a file-like object.
"""
with h5py.File(file_object, "w") as write_file:
for key, data in self.items():
write_file.create_dataset(
self._encode_key(key), data=data, dtype="f", compression="gzip"
)
def load_from_file(self, file_object: BinaryIO) -> None:
"""
Loads the AgentBuffer from a file-like object.
"""
with h5py.File(file_object, "r") as read_file:
for key in list(read_file.keys()):
decoded_key = self._decode_key(key)
self[decoded_key] = AgentBufferField()
# extend() will convert the numpy array's first dimension into list
self[decoded_key].extend(read_file[key][()])
def truncate(self, max_length: int, sequence_length: int = 1) -> None:
"""
Truncates the buffer to a certain length.
This can be slow for large buffers. We compensate by cutting further than we need to, so that
we're not truncating at each update. Note that we must truncate an integer number of sequence_lengths
param: max_length: The length at which to truncate the buffer.
"""
current_length = self.num_experiences
# make max_length an integer number of sequence_lengths
max_length -= max_length % sequence_length
if current_length > max_length:
for _key in self.keys():
self[_key][:] = self[_key][current_length - max_length :]
def resequence_and_append(
self,
target_buffer: "AgentBuffer",
key_list: List[AgentBufferKey] = None,
batch_size: int = None,
training_length: int = None,
) -> None:
"""
Takes in a batch size and training length (sequence length), and appends this AgentBuffer to target_buffer
properly padded for LSTM use. Optionally, use key_list to restrict which fields are inserted into the new
buffer.
:param target_buffer: The buffer which to append the samples to.
:param key_list: The fields that must be added. If None: all fields will be appended.
:param batch_size: The number of elements that must be appended. If None: All of them will be.
:param training_length: The length of the samples that must be appended. If None: only takes one element.
"""
if key_list is None:
key_list = list(self.keys())
if not self.check_length(key_list):
raise BufferException(
f"The length of the fields {key_list} were not of same length"
)
for field_key in key_list:
target_buffer[field_key].extend(
self[field_key].get_batch(
batch_size=batch_size, training_length=training_length
)
)
@property
def num_experiences(self) -> int:
"""
The number of agent experiences in the AgentBuffer, i.e. the length of the buffer.
An experience consists of one element across all of the fields of this AgentBuffer.
Note that these all have to be the same length, otherwise shuffle and append_to_update_buffer
will fail.
"""
if self.values():
return len(next(iter(self.values())))
else:
return 0
| AgentBuffer |
python | ray-project__ray | rllib/core/models/configs.py | {
"start": 16291,
"end": 27462
} | class ____(ModelConfig):
"""Configuration for a convolutional transpose head (decoder) network.
The configured Model transforms 1D-observations into an image space.
The stack of layers is composed of an initial Dense layer, followed by a sequence
of Conv2DTranspose layers.
`input_dims` describes the shape of the (1D) input tensor,
`initial_image_dims` describes the input into the first Conv2DTranspose
layer, where the translation from `input_dim` to `initial_image_dims` is done
via the initial Dense layer (w/o activation, w/o layer-norm, and w/ bias).
Beyond that, each layer specified by `cnn_transpose_filter_specifiers`
is followed by an activation function according to `cnn_transpose_activation`.
`output_dims` is reached after the final Conv2DTranspose layer.
Not that the last Conv2DTranspose layer is never activated and never layer-norm'd
regardless of the other settings.
An example for a single conv2d operation is as follows:
Input "image" is (4, 4, 24) (not yet strided), padding is "same", stride=2,
kernel=5.
First, the input "image" is strided (with stride=2):
Input image (4x4 (x24)):
A B C D
E F G H
I J K L
M N O P
Stride with stride=2 -> (7x7 (x24))
A 0 B 0 C 0 D
0 0 0 0 0 0 0
E 0 F 0 G 0 H
0 0 0 0 0 0 0
I 0 J 0 K 0 L
0 0 0 0 0 0 0
M 0 N 0 O 0 P
Then this strided "image" (strided_size=7x7) is padded (exact padding values will be
computed by the model):
Padding -> (left=3, right=2, top=3, bottom=2)
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 A 0 B 0 C 0 D 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 E 0 F 0 G 0 H 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 I 0 J 0 K 0 L 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 M 0 N 0 O 0 P 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
Then deconvolution with kernel=5 yields an output "image" of 8x8 (x num output
filters).
Attributes:
input_dims: The input dimensions of the network. This must be a 1D tensor.
initial_image_dims: The shape of the input to the first
Conv2DTranspose layer. We will make sure the input is transformed to
these dims via a preceding initial Dense layer, followed by a reshape,
before entering the Conv2DTranspose stack.
initial_dense_weights_initializer: The initializer function or class to use for
weight initialization in the initial dense layer. If `None` the default
initializer of the respective dense layer of a framework (`"torch"` or
`"tf2"`) is used. Note, all initializers defined in the framework `"tf2`)
are allowed. For `"torch"` only the in-place initializers, i.e. ending with
an underscore "_" are allowed.
initial_dense_weights_initializer_config: Configuration to pass into the
initializer defined in `initial_dense_weights_initializer`.
initial_dense_bias_initializer: The initializer function or class to use for
bias initialization in the initial dense layer. If `None` the default
initializer of the respective CNN layer of a framework (`"torch"` or `"tf2"`
) is used. For `"torch"` only the in-place initializers, i.e. ending with an
underscore "_" are allowed.
initial_dense_bias_initializer_config: Configuration to pass into the
initializer defined in `initial_dense_bias_initializer`.
cnn_transpose_filter_specifiers: A list of lists, where each element of an inner
list contains elements of the form
`[number of channels/filters, [kernel width, kernel height], stride]` to
specify a convolutional layer stacked in order of the outer list.
cnn_transpose_use_bias: Whether to use bias on all Conv2DTranspose layers.
cnn_transpose_activation: The activation function to use after each layer
(except for the output).
cnn_transpose_use_layernorm: Whether to insert a LayerNorm functionality
in between each Conv2DTranspose layer's output and its activation.
cnn_transpose_kernel_initializer: The initializer function or class to use for
kernel initialization in the CNN layers. If `None` the default initializer
of the respective CNN layer of a framework (`"torch"` or `"tf2"`) is used.
Note, all initializers defined in the framework `"tf2`) are allowed. For
`"torch"` only the in-place initializers, i.e. ending with an underscore "_"
are allowed.
cnn_transpose_kernel_initializer_config: Configuration to pass into the
initializer defined in `cnn_transpose_kernel_initializer`.
cnn_transpose_bias_initializer: The initializer function or class to use for
bias initialization in the CNN layers. If `None` the default initializer of
the respective CNN layer of a framework (`"torch"` or `"tf2"`) is used.
For `"torch"` only the in-place initializers, i.e. ending with an underscore
"_" are allowed.
cnn_transpose_bias_initializer_config: Configuration to pass into the
initializer defined in `cnn_transpose_bias_initializer`.
Example:
.. testcode::
:skipif: True
# Configuration:
config = CNNTransposeHeadConfig(
input_dims=[10], # 1D input vector (possibly coming from another NN)
initial_image_dims=[4, 4, 96], # first image input to deconv stack
# Initializer for TensorFlow.
initial_dense_weights_initializer="HeNormal",
initial_dense_weights_initializer={"seed": 334},
cnn_transpose_filter_specifiers=[
[48, [4, 4], 2],
[24, [4, 4], 2],
[3, [4, 4], 2],
],
cnn_transpose_activation="silu", # or "swish", which is the same
cnn_transpose_use_layernorm=False,
cnn_use_bias=True,
)
model = config.build(framework="torch)
# Resulting stack in pseudocode:
# Linear(10, 4*4*24)
# Conv2DTranspose(
# in_channels=96, out_channels=48,
# kernel_size=[4, 4], stride=2, bias=True,
# )
# Swish()
# Conv2DTranspose(
# in_channels=48, out_channels=24,
# kernel_size=[4, 4], stride=2, bias=True,
# )
# Swish()
# Conv2DTranspose(
# in_channels=24, out_channels=3,
# kernel_size=[4, 4], stride=2, bias=True,
# )
Example:
.. testcode::
:skipif: True
# Configuration:
config = CNNTransposeHeadConfig(
input_dims=[128], # 1D input vector (possibly coming from another NN)
initial_image_dims=[4, 4, 32], # first image input to deconv stack
cnn_transpose_filter_specifiers=[
[16, 4, 2],
[3, 4, 2],
],
cnn_transpose_activation="relu",
cnn_transpose_use_layernorm=True,
cnn_use_bias=False,
# Initializer for `framework="tf2"`.
# Note, for Torch only in-place initializers are allowed.
cnn_transpose_kernel_initializer="xavier_normal_",
cnn_transpose_kernel_initializer_config={"gain": 0.8},
)
model = config.build(framework="torch)
# Resulting stack in pseudocode:
# Linear(128, 4*4*32, bias=True) # bias always True for initial dense layer
# Conv2DTranspose(
# in_channels=32, out_channels=16,
# kernel_size=[4, 4], stride=2, bias=False,
# )
# LayerNorm((-3, -2, -1)) # layer normalize over last 3 axes
# ReLU()
# Conv2DTranspose(
# in_channels=16, out_channels=3,
# kernel_size=[4, 4], stride=2, bias=False,
# )
"""
input_dims: Union[List[int], Tuple[int, ...]] = None
initial_image_dims: Union[List[int], Tuple[int, ...]] = field(
default_factory=lambda: [4, 4, 96]
)
initial_dense_weights_initializer: Optional[Union[str, Callable]] = None
initial_dense_weights_initializer_config: Optional[Dict] = None
initial_dense_bias_initializer: Optional[Union[str, Callable]] = None
initial_dense_bias_initializer_config: Optional[Dict] = None
cnn_transpose_filter_specifiers: List[List[Union[int, List[int]]]] = field(
default_factory=lambda: [[48, [4, 4], 2], [24, [4, 4], 2], [3, [4, 4], 2]]
)
cnn_transpose_use_bias: bool = True
cnn_transpose_activation: str = "relu"
cnn_transpose_use_layernorm: bool = False
cnn_transpose_kernel_initializer: Optional[Union[str, Callable]] = None
cnn_transpose_kernel_initializer_config: Optional[Dict] = None
cnn_transpose_bias_initializer: Optional[Union[str, Callable]] = None
cnn_transpose_bias_initializer_config: Optional[Dict] = None
@property
def output_dims(self):
# Infer output dims, layer by layer.
dims = self.initial_image_dims
for filter_spec in self.cnn_transpose_filter_specifiers:
# Same padding.
num_filters, kernel, stride = filter_spec
# Compute stride output size first (striding is performed first in a
# conv transpose layer.
stride_w, stride_h = (stride, stride) if isinstance(stride, int) else stride
dims = [
dims[0] * stride_w - (stride_w - 1),
dims[1] * stride_h - (stride_h - 1),
num_filters,
]
# TODO (Sven): Support "valid" padding for Conv2DTranspose layers, too.
# Analogous to Conv2D Layers in a CNNEncoder.
# Apply the correct padding. Note that this might be asymetrical, meaning
# left padding might be != right padding, same for top/bottom.
_, padding_out_size = same_padding_transpose_after_stride(
(dims[0], dims[1]), kernel, stride
)
# Perform conv transpose operation with the kernel.
kernel_w, kernel_h = (kernel, kernel) if isinstance(kernel, int) else kernel
dims = [
padding_out_size[0] - (kernel_w - 1),
padding_out_size[1] - (kernel_h - 1),
num_filters,
]
return tuple(dims)
def _validate(self, framework: str = "torch"):
if len(self.input_dims) != 1:
raise ValueError(
f"`input_dims` ({self.input_dims}) of CNNTransposeHeadConfig must be a "
"3D tensor (image-like) with the dimensions meaning: width x height x "
"num_filters, e.g. `[4, 4, 92]`!"
)
@_framework_implemented()
def build(self, framework: str = "torch") -> "Model":
self._validate(framework)
if framework == "torch":
from ray.rllib.core.models.torch.heads import TorchCNNTransposeHead
return TorchCNNTransposeHead(self)
@ExperimentalAPI
@dataclass
| CNNTransposeHeadConfig |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 77908,
"end": 78052
} | class ____:
xlOLEControl = 2 # from enum XlOLEType
xlOLEEmbed = 1 # from enum XlOLEType
xlOLELink = 0 # from enum XlOLEType
| OLEType |
python | plotly__plotly.py | plotly/graph_objs/scatter/hoverlabel/_font.py | {
"start": 233,
"end": 17143
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter.hoverlabel"
_path_str = "scatter.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | doocs__leetcode | solution/2700-2799/2748.Number of Beautiful Pairs/Solution.py | {
"start": 0,
"end": 310
} | class ____:
def countBeautifulPairs(self, nums: List[int]) -> int:
cnt = [0] * 10
ans = 0
for x in nums:
for y in range(10):
if cnt[y] and gcd(x % 10, y) == 1:
ans += cnt[y]
cnt[int(str(x)[0])] += 1
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 146717,
"end": 147701
} | class ____(nn.Module):
def __init__(self, ratio=2, kernel_size=None):
super().__init__()
cutoff = 0.5 / ratio
half_width = 0.6 / ratio
if cutoff < 0.0:
raise ValueError("Minimum cutoff must be larger than zero.")
if cutoff > 0.5:
raise ValueError("A cutoff above 0.5 does not make sense.")
self.even = kernel_size % 2 == 0
self.pad_left = kernel_size // 2 - int(self.even)
self.pad_right = kernel_size // 2
self.stride = ratio
filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
self.register_buffer("filter", filter, persistent=False)
def forward(self, hidden_states):
channels = hidden_states.shape[1]
hidden_states = F.pad(hidden_states, (self.pad_left, self.pad_right), mode="replicate")
out = F.conv1d(hidden_states, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels)
return out
| DownSample1d |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_match_xml_schema.py | {
"start": 2520,
"end": 8008
} | class ____(ColumnMapExpectation):
"""Expect column entries to be XML documents matching a given [XMLSchema](https://en.wikipedia.org/wiki/XML_schema).
expect_column_values_to_match_xml_schema is a \
[Column Map Expectation](https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations).
Args:
column (str): \
The column name.
xml_schema (str): \
The XMLSchema name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Successful if at least mostly fraction of values match the expectation. \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[expect_column_values_to_be_xml_parseable](https://greatexpectations.io/expectations/expect_column_values_to_be_xml_parseable)
[The XMLSchema docs](https://www.w3.org/XML/Schema)
"""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [{"data": {}, "tests": []}]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": ["xml", "glam"],
"contributors": ["@mielvds"],
"requirements": ["lxml"],
}
map_metric = "column_values.match_xml_schema"
success_keys = (
"xml_schema",
"mostly",
)
default_kwarg_values = {
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"result_format": "BASIC",
"catch_exceptions": True,
}
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
_ = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "xml_schema", "row_condition", "condition_parser"],
)
if not params.get("xml_schema"):
template_str = "values must match a XML Schema but none was specified."
else:
params["formatted_xml"] = (
"<pre>"
+ etree.tostring(params.get("xml_schema"), pretty_print=True)
+ "</pre>" # TODO:
)
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
template_str = "values must match the following XML Schema, at least $mostly_pct % of the time: $formatted_xml"
else:
template_str = "values must match the following XML Schema: $formatted_xml"
if include_column_name:
template_str = "$column " + template_str
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": {"params": {"formatted_xml": {"classes": []}}},
},
}
)
]
if __name__ == "__main__":
ExpectColumnValuesToMatchXmlSchema().print_diagnostic_checklist()
| ExpectColumnValuesToMatchXmlSchema |
python | mlflow__mlflow | mlflow/gateway/providers/palm.py | {
"start": 369,
"end": 7990
} | class ____(BaseProvider):
NAME = "PaLM"
CONFIG_TYPE = PaLMConfig
def __init__(self, config: EndpointConfig) -> None:
super().__init__(config)
warnings.warn(
"PaLM provider is deprecated and will be removed in a future MLflow version.",
category=FutureWarning,
stacklevel=2,
)
if config.model.config is None or not isinstance(config.model.config, PaLMConfig):
raise TypeError(f"Unexpected config type {config.model.config}")
self.palm_config: PaLMConfig = config.model.config
async def _request(self, path: str, payload: dict[str, Any]) -> dict[str, Any]:
headers = {"x-goog-api-key": self.palm_config.palm_api_key}
return await send_request(
headers=headers,
base_url="https://generativelanguage.googleapis.com/v1beta3/models/",
path=path,
payload=payload,
)
async def chat(self, payload: chat.RequestPayload) -> chat.ResponsePayload:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
if "max_tokens" in payload or "maxOutputTokens" in payload:
raise AIGatewayException(
status_code=422, detail="Max tokens is not supported for PaLM chat."
)
key_mapping = {
"stop": "stopSequences",
"n": "candidateCount",
}
for k1, k2 in key_mapping.items():
if k2 in payload:
raise AIGatewayException(
status_code=422, detail=f"Invalid parameter {k2}. Use {k1} instead."
)
payload = rename_payload_keys(payload, key_mapping)
# The range of PaLM's temperature is 0-1, but ours is 0-2, so we halve it
payload["temperature"] = 0.5 * payload["temperature"]
# Replace 'role' with 'author' in payload
for m in payload["messages"]:
m["author"] = m.pop("role")
# Map 'messages', 'examples, and 'context' to 'prompt'
prompt = {"messages": payload.pop("messages")}
if "examples" in payload:
prompt["examples"] = payload.pop("examples")
if "context" in payload:
prompt["context"] = payload.pop("context")
payload["prompt"] = prompt
resp = await self._request(
f"{self.config.model.name}:generateMessage",
payload,
)
# Response example
# (https://developers.generativeai.google/api/rest/generativelanguage/models/generateMessage)
# ```
# {
# "candidates": [
# {
# "author": "1",
# "content": "Hi there! How can I help you today?"
# }
# ],
# "messages": [
# {
# "author": "0",
# "content": "hi"
# }
# ]
# }
# ```
return chat.ResponsePayload(
created=int(time.time()),
model=self.config.model.name,
choices=[
chat.Choice(
index=idx,
message=chat.ResponseMessage(role=c["author"], content=c["content"]),
finish_reason=None,
)
for idx, c in enumerate(resp["candidates"])
],
usage=chat.ChatUsage(
prompt_tokens=None,
completion_tokens=None,
total_tokens=None,
),
)
async def completions(self, payload: completions.RequestPayload) -> completions.ResponsePayload:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
key_mapping = {
"stop": "stopSequences",
"n": "candidateCount",
"max_tokens": "maxOutputTokens",
}
for k1, k2 in key_mapping.items():
if k2 in payload:
raise AIGatewayException(
status_code=422, detail=f"Invalid parameter {k2}. Use {k1} instead."
)
payload = rename_payload_keys(payload, key_mapping)
# The range of PaLM's temperature is 0-1, but ours is 0-2, so we halve it
payload["temperature"] = 0.5 * payload["temperature"]
payload["prompt"] = {"text": payload["prompt"]}
resp = await self._request(
f"{self.config.model.name}:generateText",
payload,
)
# Response example (https://developers.generativeai.google/api/rest/generativelanguage/models/generateText)
# ```
# {
# "candidates": [
# {
# "output": "Once upon a time, there was a young girl named Lily...",
# "safetyRatings": [
# {
# "category": "HARM_CATEGORY_DEROGATORY",
# "probability": "NEGLIGIBLE"
# }, ...
# ]
# {
# "output": "Once upon a time, there was a young boy named Billy...",
# "safetyRatings": [
# ...
# ]
# }
# ]
# }
# ```
return completions.ResponsePayload(
created=int(time.time()),
object="text_completion",
model=self.config.model.name,
choices=[
completions.Choice(
index=idx,
text=c["output"],
finish_reason=None,
)
for idx, c in enumerate(resp["candidates"])
],
usage=completions.CompletionsUsage(
prompt_tokens=None,
completion_tokens=None,
total_tokens=None,
),
)
async def embeddings(self, payload: embeddings.RequestPayload) -> embeddings.ResponsePayload:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
key_mapping = {
"input": "texts",
}
for k1, k2 in key_mapping.items():
if k2 in payload:
raise AIGatewayException(
status_code=422, detail=f"Invalid parameter {k2}. Use {k1} instead."
)
payload = rename_payload_keys(payload, key_mapping)
resp = await self._request(
f"{self.config.model.name}:batchEmbedText",
payload,
)
# Batch-text response example (https://developers.generativeai.google/api/rest/generativelanguage/models/batchEmbedText):
# ```
# {
# "embeddings": [
# {
# "value": [
# 3.25,
# 0.7685547,
# 2.65625,
# ...
# -0.30126953,
# -2.3554688,
# 1.2597656
# ]
# }
# ]
# }
# ```
return embeddings.ResponsePayload(
data=[
embeddings.EmbeddingObject(
embedding=embedding["value"],
index=idx,
)
for idx, embedding in enumerate(resp["embeddings"])
],
model=self.config.model.name,
usage=embeddings.EmbeddingsUsage(
prompt_tokens=None,
total_tokens=None,
),
)
| PaLMProvider |
python | automl__auto-sklearn | autosklearn/data/abstract_data_manager.py | {
"start": 189,
"end": 1974
} | class ____:
__metaclass__ = abc.ABCMeta
def __init__(self, name: str):
self._data = dict() # type: Dict
self._info = dict() # type: Dict
self._name = name
@property
def name(self) -> str:
return self._name
@property
def data(self) -> Dict[str, np.ndarray]:
return self._data
@property
def info(self) -> Dict[str, Any]:
return self._info
@property
def feat_type(self) -> Dict[Union[str, int], str]:
return self._feat_type
@feat_type.setter
def feat_type(self, value: Dict[Union[str, int], str]) -> None:
self._feat_type = value
@property
def encoder(self) -> FeatTypeSplit:
return self._encoder
@encoder.setter
def encoder(self, value: FeatTypeSplit) -> FeatTypeSplit:
self._encoder = value
def __repr__(self) -> str:
return "DataManager : " + self.name
def __str__(self) -> str:
val = "DataManager : " + self.name + "\ninfo:\n"
for item in self.info:
val = val + "\t" + item + " = " + str(self.info[item]) + "\n"
val = val + "data:\n"
for subset in self.data:
val = val + "\t%s = %s %s %s\n" % (
subset,
type(self.data[subset]),
str(self.data[subset].shape),
str(self.data[subset].dtype),
)
if isinstance(self.data[subset], scipy.sparse.spmatrix):
val = val + "\tdensity: %f\n" % (
float(len(self.data[subset].data))
/ self.data[subset].shape[0]
/ self.data[subset].shape[1]
)
val = val + "feat_type:\t" + str(self.feat_type) + "\n"
return val
| AbstractDataManager |
python | pytorch__pytorch | test/inductor/test_config.py | {
"start": 500,
"end": 12187
} | class ____(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._saved_config = config.save_config()
def tearDown(self):
super().tearDown()
config.load_config(self._saved_config)
def test_set(self):
config.max_fusion_size = 13337
self.assertEqual(config.max_fusion_size, 13337)
self.assertEqual(config.get_config_copy()["max_fusion_size"], 13337)
config.max_fusion_size = 32
self.assertEqual(config.max_fusion_size, 32)
# a nested config
prior = config.triton.cudagraphs
config.triton.cudagraphs = not prior
self.assertEqual(config.triton.cudagraphs, not prior)
self.assertEqual(config.get_config_copy()["triton.cudagraphs"], not prior)
def test_save_load(self):
config.max_fusion_size = 123
config.triton.cudagraphs = True
saved1 = config.save_config()
config.max_fusion_size = 321
config.triton.cudagraphs = False
saved2 = config.save_config()
self.assertEqual(config.max_fusion_size, 321)
self.assertEqual(config.triton.cudagraphs, False)
config.load_config(saved1)
self.assertEqual(config.max_fusion_size, 123)
self.assertEqual(config.triton.cudagraphs, True)
config.load_config(saved2)
self.assertEqual(config.max_fusion_size, 321)
self.assertEqual(config.triton.cudagraphs, False)
def test_hasattr(self):
self.assertTrue(hasattr(config, "max_fusion_size"))
self.assertFalse(hasattr(config, "missing_name"))
def test_invalid_names(self):
self.assertRaises(AttributeError, lambda: config.does_not_exist)
self.assertRaises(AttributeError, lambda: config.triton.does_not_exist)
def store1():
config.does_not_exist = True
def store2():
config.triton.does_not_exist = True
self.assertRaises(AttributeError, store1)
self.assertRaises(AttributeError, store2)
def test_patch(self):
with config.patch(max_fusion_size=456):
self.assertEqual(config.max_fusion_size, 456)
with config.patch(max_fusion_size=789):
self.assertEqual(config.max_fusion_size, 789)
self.assertEqual(config.max_fusion_size, 456)
with config.patch({"cpp.threads": 9000, "max_fusion_size": 9001}):
self.assertEqual(config.cpp.threads, 9000)
self.assertEqual(config.max_fusion_size, 9001)
with config.patch("cpp.threads", 8999):
self.assertEqual(config.cpp.threads, 8999)
self.assertEqual(config.cpp.threads, 9000)
@unittest.skipIf(not HAS_CPU, "requires C++ compiler")
def test_compile_api(self):
# these are mostly checking config processing doesn't blow up with exceptions
x = torch.randn(8)
y = dummy_fn(x)
checks = [
{},
{"mode": "default"},
{"mode": "reduce-overhead"},
{"mode": "max-autotune"},
{
"options": {
"max-fusion-size": 128,
"unroll_reductions_threshold": 32,
"triton.cudagraphs": False,
}
},
{"dynamic": True},
{"fullgraph": True, "backend": "inductor"},
{"disable": True},
]
for kwargs in checks:
torch._dynamo.reset()
opt_fn = torch.compile(dummy_fn, **kwargs)
torch.testing.assert_close(
opt_fn(x), y, msg=f"torch.compile(..., **{kwargs!r}) failed"
)
def test_get_compiler_config(self):
from torch._inductor import config as inductor_default_config
default_cudagraphs = inductor_default_config.triton.cudagraphs
# nn.Module: should update default config with a new value
model = DummyModule()
optimized_module = torch.compile(
model, options={"triton.cudagraphs": not default_cudagraphs}
)
compiler_config = optimized_module.get_compiler_config()
self.assertEqual(compiler_config["triton.cudagraphs"], not default_cudagraphs)
# nn.Module: keep default config
model = DummyModule()
optimized_module = torch.compile(model)
compiler_config = optimized_module.get_compiler_config()
self.assertEqual(
compiler_config["triton.cudagraphs"],
default_cudagraphs,
)
# compile user func: should update default config with a new value
optimized_module = torch.compile(
dummy_fn, options={"triton.cudagraphs": not default_cudagraphs}
)
compiler_config = optimized_module.get_compiler_config()
self.assertEqual(compiler_config["triton.cudagraphs"], not default_cudagraphs)
# compile user func: keep default config
optimized_module = torch.compile(dummy_fn)
compiler_config = optimized_module.get_compiler_config()
self.assertEqual(
compiler_config["triton.cudagraphs"],
default_cudagraphs,
)
# backend=eager: expect None
optimized_module = torch.compile(dummy_fn, backend="eager")
compiler_config = optimized_module.get_compiler_config()
self.assertTrue(compiler_config is None)
def test_compile_api_passes_config(self):
# ensure configs are actually passed down to inductor
self.assertRaises(
torch._dynamo.exc.BackendCompilerFailed,
lambda: torch.compile(dummy_fn, options={"_raise_error_for_testing": True})(
torch.randn(10)
),
)
def test_api_options(self):
reduce_overhead_opts = torch._inductor.list_mode_options("reduce-overhead")
self.assertEqual(reduce_overhead_opts["triton.cudagraphs"], True)
self.assertEqual(reduce_overhead_opts.get("max_autotune", False), False)
max_autotune_opts = torch._inductor.list_mode_options("max-autotune")
self.assertEqual(max_autotune_opts["max_autotune"], True)
self.assertEqual(max_autotune_opts["triton.cudagraphs"], True)
max_autotune_opts = torch._inductor.list_mode_options(
"max-autotune", dynamic=True
)
self.assertEqual(max_autotune_opts["max_autotune"], True)
self.assertEqual(max_autotune_opts["triton.cudagraphs"], True)
max_autotune_no_cudagraphs_opts = torch._inductor.list_mode_options(
"max-autotune-no-cudagraphs"
)
self.assertEqual(max_autotune_no_cudagraphs_opts["max_autotune"], True)
self.assertEqual(
max_autotune_no_cudagraphs_opts.get("triton.cudagraphs", False), False
)
def test_invalid_backend(self):
self.assertRaises(
torch._dynamo.exc.InvalidBackend,
lambda: torch.compile(dummy_fn, backend="does_not_exist")(torch.randn(10)),
)
def test_non_inductor_backend(self):
def assert_options(expected_mode=None, expected_options=None):
def backend(gm, _, *, mode=None, options=None):
nonlocal call_count
self.assertEqual(mode, expected_mode)
self.assertEqual(options, expected_options)
call_count += 1
return gm
return backend
inp = torch.randn(8)
def fn(x):
return x + 1
for mode, options in [
(None, None),
("fast-mode", None),
(None, {"foo": "bar"}),
]:
call_count = 0
torch.compile(
fn, backend=assert_options(mode, options), mode=mode, options=options
)(inp)
torch._dynamo.reset()
self.assertEqual(call_count, 1)
def test_codegen_skips_custom_passes(self):
class _CustomPass(PatternMatcherPass):
def __init__(self) -> None:
super().__init__()
def __call__(self, g: torch.fx.Graph):
self.apply(g)
g = _CustomPass()
with torch._inductor.config.patch(
post_grad_custom_post_pass=g,
post_grad_custom_pre_pass=g,
):
code = torch._inductor.config.codegen_config()
self.assertNotIn("post_grad_custom", code)
def test_select_decomp_table_fallback_embedding_bag_byte_unpack(self):
"""Test that select_decomp_table removes embedding_bag_byte_unpack when fallback is enabled"""
from torch._inductor.decomposition import select_decomp_table
# Test with fallback_embedding_bag_byte_unpack = False (default)
with config.patch(fallback_embedding_bag_byte_unpack=False):
decomp_table = select_decomp_table()
# The operation should be in decompositions when fallback is False
# Note: We check if it's in the fast_random_decomps() or decompositions table
self.assertTrue(
torch.ops.quantized.embedding_bag_byte_unpack.default in decomp_table
or len(decomp_table)
> 0 # fast_random_decomps() is used when fallback is False
)
# Test with fallback_embedding_bag_byte_unpack = True
with config.patch(fallback_embedding_bag_byte_unpack=True):
decomp_table = select_decomp_table()
# The operation should NOT be in decompositions when fallback is True
self.assertNotIn(
torch.ops.quantized.embedding_bag_byte_unpack.default, decomp_table
)
@unittest.skipIf(not HAS_TRITON, "requires triton")
def test_options_do_something(self):
"""
Verify that we can populate and load functions from the cache.
"""
counters.clear()
def fn(x, y):
yy = y @ y
return x * 2 + yy.view(25)
def fn2(x, y):
yy = y @ y
return x * 2 + yy.view(25)
a_orig = torch.rand(25, dtype=torch.float32, device="cpu")
b_orig = torch.rand(5, 5, dtype=torch.float32, device="cpu")
compiled_fn = torch.compile(
fn,
options={
"fx_graph_cache": True,
"fx_graph_remote_cache": False,
"bundle_triton_into_fx_graph_cache": True,
},
)
a1 = a_orig.clone()
b1 = b_orig.clone()
a2 = a_orig.clone()
b2 = b_orig.clone()
# A first call should miss in the cache.
eager_result = fn(a1, b1)
compiled_result = compiled_fn(a2, b2)
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 0)
counters.clear()
compiled_fn2 = torch.compile(
fn2,
options={
"fx_graph_cache": False,
"fx_graph_remote_cache": False,
"bundle_triton_into_fx_graph_cache": False,
},
)
# A first call should do nothing since cache is disabled
eager_result = fn2(a1, b1)
compiled_result = compiled_fn2(a2, b2)
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 0)
if __name__ == "__main__":
run_tests()
| TestInductorConfig |
python | celery__celery | celery/canvas.py | {
"start": 3743,
"end": 7653
} | class ____(metaclass=ABCMeta):
"""Stamping API. A class that provides a stamping API possibility for
canvas primitives. If you want to implement stamping behavior for
a canvas primitive override method that represents it.
"""
def on_group_start(self, group, **headers) -> dict:
"""Method that is called on group stamping start.
Arguments:
group (group): Group that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
"""
return {}
def on_group_end(self, group, **headers) -> None:
"""Method that is called on group stamping end.
Arguments:
group (group): Group that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
"""
pass
def on_chain_start(self, chain, **headers) -> dict:
"""Method that is called on chain stamping start.
Arguments:
chain (chain): Chain that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
"""
return {}
def on_chain_end(self, chain, **headers) -> None:
"""Method that is called on chain stamping end.
Arguments:
chain (chain): Chain that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
"""
pass
@abstractmethod
def on_signature(self, sig, **headers) -> dict:
"""Method that is called on signature stamping.
Arguments:
sig (Signature): Signature that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
"""
def on_chord_header_start(self, sig, **header) -> dict:
"""Method that is called on сhord header stamping start.
Arguments:
sig (chord): chord that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
"""
if not isinstance(sig.tasks, group):
sig.tasks = group(sig.tasks)
return self.on_group_start(sig.tasks, **header)
def on_chord_header_end(self, sig, **header) -> None:
"""Method that is called on сhord header stamping end.
Arguments:
sig (chord): chord that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
"""
self.on_group_end(sig.tasks, **header)
def on_chord_body(self, sig, **header) -> dict:
"""Method that is called on chord body stamping.
Arguments:
sig (chord): chord that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
"""
return {}
def on_callback(self, callback, **header) -> dict:
"""Method that is called on callback stamping.
Arguments:
callback (Signature): callback that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
"""
return {}
def on_errback(self, errback, **header) -> dict:
"""Method that is called on errback stamping.
Arguments:
errback (Signature): errback that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
"""
return {}
@abstract.CallableSignature.register
| StampingVisitor |
python | bokeh__bokeh | src/bokeh/models/annotations/labels.py | {
"start": 9667,
"end": 11330
} | class ____(TextAnnotation):
''' Render a single title box as an annotation.
See :ref:`ug_basic_annotations_titles` for information on plotting titles.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
vertical_align = Enum(VerticalAlign, default='bottom', help="""
Alignment of the text in its enclosing space, *across* the direction of the text.
""")
align = Enum(TextAlign, default='left', help="""
Alignment of the text in its enclosing space, *along* the direction of the text.
""")
offset = Float(default=0, help="""
Offset the text by a number of pixels (can be positive or negative). Shifts the text in
different directions based on the location of the title:
* above: shifts title right
* right: shifts title down
* below: shifts title right
* left: shifts title up
""")
standoff = Float(default=10, help="""
""")
text_font_size = Override(default="13px")
text_font_style = Override(default="bold")
text_line_height = Override(default=1.0)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Title |
python | scipy__scipy | benchmarks/benchmarks/test_functions.py | {
"start": 8038,
"end": 8342
} | class ____:
target_E = 0.292579
solution = [0, 1.253131828927371]
xmin = np.array([-100., -100])
xmax = np.array([100., 100])
def fun(self, x):
num = cos(sin(abs(x[0]**2 - x[1]**2)))**2 - 0.5
den = (1+0.001*(x[0]**2 + x[1]**2))**2
return 0.5 + num / den
| Schaffer4 |
python | dask__dask | dask/dataframe/dask_expr/_collection.py | {
"start": 8656,
"end": 95143
} | class ____(DaskMethodsMixin):
"""Base class for Expr-backed Collections"""
__dask_scheduler__ = staticmethod(
named_schedulers.get("threads", named_schedulers["sync"])
)
__dask_optimize__ = staticmethod(lambda dsk, keys, **kwargs: dsk)
def __init__(self, expr):
global _WARN_ANNOTATIONS
if _WARN_ANNOTATIONS and (annot := get_annotations()):
_WARN_ANNOTATIONS = False
warnings.warn(
f"Dask annotations {annot} detected. Annotations will be ignored when using query-planning."
)
self._expr = expr
@property
def expr(self) -> expr.Expr:
return self._expr
@property
def _meta(self):
return self.expr._meta
@functools.cached_property
def _meta_nonempty(self):
return meta_nonempty(self._meta)
@property
def divisions(self):
"""
Tuple of ``npartitions + 1`` values, in ascending order, marking the
lower/upper bounds of each partition's index. Divisions allow Dask
to know which partition will contain a given value, significantly
speeding up operations like `loc`, `merge`, and `groupby` by not
having to search the full dataset.
Example: for ``divisions = (0, 10, 50, 100)``, there are three partitions,
where the index in each partition contains values [0, 10), [10, 50),
and [50, 100], respectively. Dask therefore knows ``df.loc[45]``
will be in the second partition.
When every item in ``divisions`` is ``None``, the divisions are unknown.
Most operations can still be performed, but some will be much slower,
and a few may fail.
It is not supported to set ``divisions`` directly. Instead, use ``set_index``,
which sorts and splits the data as needed.
See https://docs.dask.org/en/latest/dataframe-design.html#partitions.
"""
return self.expr.divisions
@property
def npartitions(self):
"""Return number of partitions"""
return self.expr.npartitions
@property
def dtypes(self):
"""Return data types"""
return self.expr._meta.dtypes
@property
def size(self):
"""Size of the Series or DataFrame as a Delayed object.
Examples
--------
>>> series.size # doctest: +SKIP
<dask_expr.expr.Scalar: expr=df.size(), dtype=int64>
"""
return new_collection(self.expr.size)
@property
def columns(self):
return self._meta.columns
@columns.setter
def columns(self, columns):
if len(columns) != len(self.columns):
# surface pandas error
self._expr._meta.columns = columns
self._expr = expr.ColumnsSetter(self, columns)
def clear_divisions(self):
"""Forget division information.
This is useful if the divisions are no longer meaningful.
"""
return new_collection(expr.ClearDivisions(self))
def __len__(self):
return new_collection(Len(self)).compute()
@property
def nbytes(self):
raise NotImplementedError("nbytes is not implemented on DataFrame")
def __reduce__(self):
return new_collection, (self._expr,)
def __getitem__(self, other):
if isinstance(other, FrameBase):
if not expr.are_co_aligned(self.expr, other.expr):
return new_collection(expr.FilterAlign(self, other))
return new_collection(self.expr.__getitem__(other.expr))
elif isinstance(other, slice):
from pandas.api.types import is_float_dtype
is_integer_slice = any(
isinstance(i, Integral) for i in (other.start, other.step, other.stop)
)
if (
self.ndim == 2
and is_integer_slice
and (not is_float_dtype(self.index.dtype) or PANDAS_GE_300)
):
return self.iloc[other]
else:
return self.loc[other]
if isinstance(other, np.ndarray) or is_series_like(other):
other = list(other)
elif isinstance(other, list):
other = other.copy()
elif isinstance(other, np.generic):
other = other.item()
return new_collection(self.expr.__getitem__(other))
def __dask_tokenize__(self):
return type(self).__name__, self._expr._name
def __repr__(self):
data = self._repr_data().to_string(max_rows=5)
_str_fmt = """Dask {klass} Structure:
{data}
Dask Name: {name}, {n_expr}
Expr={expr}"""
if not isinstance(self, Series) and not len(self.columns):
data = data.partition("\n")[-1].replace("Index", "Divisions")
_str_fmt = f"Empty {_str_fmt}"
n_expr = len({e._name for e in self.expr.walk()})
return _str_fmt.format(
klass=self.__class__.__name__,
data=data,
name=key_split(self._name),
n_expr=maybe_pluralize(n_expr, "expression"),
expr=self.expr,
)
def __bool__(self):
raise ValueError(
f"The truth value of a {self.__class__.__name__} is ambiguous. "
"Use a.any() or a.all()."
)
def __array__(self, dtype=None, **kwargs):
return np.array(self.compute())
def persist(self, fuse=True, **kwargs):
out = self.optimize(fuse=fuse)
return DaskMethodsMixin.persist(out, **kwargs)
def analyze(self, filename: str | None = None, format: str | None = None) -> None:
"""Outputs statistics about every node in the expression.
analyze optimizes the expression and triggers a computation. It records statistics
like memory usage per partition to analyze how data flow through the graph.
.. warning::
analyze adds plugins to the scheduler and the workers that have a non-trivial
cost. This method should not be used in production workflows.
Parameters
----------
filename: str, None
File to store the graph representation.
format: str, default is png
File format for the graph representation.
Returns
-------
None, but writes a graph representation of the expression enriched with
statistics to disk.
"""
out = self
if not isinstance(out, Scalar):
out = out.repartition(npartitions=1)
return out.expr.analyze(filename=filename, format=format)
def explain(self, stage: OptimizerStage = "fused", format: str | None = None):
"""Create a graph representation of the Expression.
explain runs the optimizer and creates a graph of the optimized expression
with graphviz. No computation is triggered.
Parameters
----------
stage: {"logical", "simplified-logical", "tuned-logical", "physical", "simplified-physical", "fused"}
The optimizer stage that is returned. Default is "fused".
- logical: outputs the expression as is
- simplified-logical: simplifies the expression which includes predicate
pushdown and column projection.
- tuned-logical: applies additional optimizations like partition squashing
- physical: outputs the physical expression; this expression can actually
be computed
- simplified-physical: runs another simplification after the physical
plan is generated
- fused: fuses the physical expression to reduce the nodes in thr graph.
.. warning::
The optimizer stages are subject to change.
format: str, default None
The format of the output. Default is "png".
Returns
-------
None, but opens a new window with the graph visualization and outputs
a file with the graph representation.
"""
out = self
if not isinstance(out, Scalar):
out = out.repartition(npartitions=1)
return out.expr.explain(stage, format)
def pprint(self):
"""Outputs a string representation of the DataFrame.
The expression is returned as is. Please run optimize manually if necessary.
Returns
-------
None, the representation is put into stdout.
"""
return self.expr.pprint()
@property
def dask(self):
return self.__dask_graph__()
def __dask_graph__(self):
out = self.expr
out = out.lower_completely()
return out.__dask_graph__()
def __dask_keys__(self):
out = self.expr
out = out.lower_completely()
return out.__dask_keys__()
def simplify(self):
return new_collection(self.expr.simplify())
def lower_once(self):
return new_collection(self.expr.lower_once({}))
def optimize(self, fuse: bool = True):
"""Optimizes the DataFrame.
Runs the optimizer with all steps over the DataFrame and wraps the result in a
new DataFrame collection. Only use this method if you want to analyze the
optimized expression.
Parameters
----------
fuse: bool, default True
Whether to fuse the expression tree after running the optimizer.
It is often easier to look at the non-fused expression when analyzing
the result.
Returns
-------
The optimized Dask Dataframe
"""
return new_collection(self.expr.optimize(fuse=fuse))
def __dask_postcompute__(self):
state = new_collection(self.expr.lower_completely())
if type(self) != type(state):
return state.__dask_postcompute__()
return _concat, ()
@staticmethod
def _postpersist(futures, meta, divisions, name):
return from_graph(
futures,
meta,
divisions,
sorted(futures),
name,
)
def __dask_postpersist__(self):
return FrameBase._postpersist, (
self._meta,
self.divisions,
# Note: This prefix is wrong since optimization may actually yield a
# different one. That's should only be an issue for visualization.
key_split(self._name),
)
def __getattr__(self, key):
try:
# Prioritize `FrameBase` attributes
return object.__getattribute__(self, key)
except AttributeError as err:
try:
# Fall back to `expr` API
# (Making sure to convert to/from Expr)
val = getattr(self.expr, key)
if callable(val):
return functools.partial(_wrap_expr_api, wrap_api=val)
return val
except AttributeError:
# Raise original error
raise err
def visualize(self, tasks: bool = False, **kwargs): # type: ignore[override]
"""Visualize the expression or task graph
Parameters
----------
tasks:
Whether to visualize the task graph. By default
the expression graph will be visualized instead.
"""
if tasks:
return super().visualize(**kwargs)
return self.expr.visualize(**kwargs)
@property
def known_divisions(self):
"""Whether the divisions are known.
This check can be expensive if the division calculation is expensive.
DataFrame.set_index is a good example where the calculation needs an
inspection of the data.
"""
return self.expr.known_divisions
@property
def index(self):
"""Return dask Index instance"""
return new_collection(self.expr.index)
@index.setter
def index(self, value):
assert expr.are_co_aligned(
self.expr, value.expr
), "value needs to be aligned with the index"
_expr = expr.AssignIndex(self, value)
self._expr = _expr
def reset_index(self, drop: bool = False):
"""Reset the index to the default index.
Note that unlike in ``pandas``, the reset index for a Dask DataFrame will
not be monotonically increasing from 0. Instead, it will restart at 0
for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).
This is due to the inability to statically know the full length of the
index.
For DataFrame with multi-level index, returns a new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
drop : boolean, default False
Do not try to insert index into dataframe columns.
"""
return new_collection(expr.ResetIndex(self, drop))
def head(self, n: int = 5, npartitions=1, compute: bool = True):
"""First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
"""
out = new_collection(expr.Head(self, n=n, npartitions=npartitions))
if compute:
out = out.compute()
return out
def tail(self, n: int = 5, compute: bool = True):
"""Last n rows of the dataset
Caveat, the only checks the last n rows of the last partition.
"""
out = new_collection(expr.Tail(self, n=n))
if compute:
out = out.compute()
return out
def copy(self, deep: bool = False):
"""Make a copy of the dataframe
This is strictly a shallow copy of the underlying computational graph.
It does not affect the underlying data
Parameters
----------
deep : boolean, default False
The deep value must be `False` and it is declared as a parameter just for
compatibility with third-party libraries like cuDF and pandas
"""
if deep is not False:
raise ValueError(
"The `deep` value must be False. This is strictly a shallow copy "
"of the underlying computational graph."
)
return new_collection(self.expr)
@derived_from(pd.DataFrame)
def isin(self, values):
if isinstance(self, DataFrame):
# DataFrame.isin does weird alignment stuff
bad_types = (FrameBase, pd.Series, pd.DataFrame)
else:
bad_types = (FrameBase,)
if isinstance(values, bad_types):
if (
isinstance(values, FrameBase)
and values.ndim == 1
and values.npartitions == 1
):
# Can broadcast
return new_collection(expr.Isin(self, values=values))
raise NotImplementedError(f"Passing a {typename(type(values))!r} to `isin`")
# We wrap values in a delayed for two reasons:
# - avoid serializing data in every task
# - avoid cost of traversal of large list in optimizations
if isinstance(values, list):
# Motivated by https://github.com/dask/dask/issues/9411. This appears to be
# caused by https://github.com/dask/distributed/issues/6368, and further
# exacerbated by the fact that the list contains duplicates. This is a patch until
# we can create a better fix for Serialization.
try:
values = list(set(values))
except TypeError:
pass
if not any(is_dask_collection(v) for v in values):
# Avoid always passing a numpy array of object dtype
inferred_type = pd.api.types.infer_dtype(values, skipna=False)
object_like = {
"mixed-integer",
"decimal",
"categorical",
"time",
"period",
"mixed",
"unknown-array",
}
if inferred_type in object_like:
values = np.fromiter(values, dtype=object, count=len(values))
else:
values = np.asarray(values)
return new_collection(
expr.Isin(
self,
values=expr._DelayedExpr(
delayed(values, name="delayed-" + _tokenize_deterministic(values))
),
)
)
def _partitions(self, index):
# Used by `partitions` for partition-wise slicing
# Convert index to list
if isinstance(index, int):
index = [index]
index = np.arange(self.npartitions, dtype=object)[index].tolist()
# Check that selection makes sense
assert set(index).issubset(range(self.npartitions))
# Return selected partitions
return new_collection(expr.Partitions(self, index))
@property
def partitions(self):
"""Slice dataframe by partitions
This allows partitionwise slicing of a Dask Dataframe. You can perform normal
Numpy-style slicing, but now rather than slice elements of the array you
slice along partitions so, for example, ``df.partitions[:5]`` produces a new
Dask Dataframe of the first five partitions. Valid indexers are integers, sequences
of integers, slices, or boolean masks.
Examples
--------
>>> df.partitions[0] # doctest: +SKIP
>>> df.partitions[:3] # doctest: +SKIP
>>> df.partitions[::10] # doctest: +SKIP
Returns
-------
A Dask DataFrame
"""
return IndexCallable(self._partitions)
def get_partition(self, n):
"""
Get a dask DataFrame/Series representing the `nth` partition.
Parameters
----------
n : int
The 0-indexed partition number to select.
Returns
-------
Dask DataFrame or Series
The same type as the original object.
See Also
--------
DataFrame.partitions
"""
if not 0 <= n < self.npartitions:
msg = f"n must be 0 <= n < {self.npartitions}"
raise ValueError(msg)
return self.partitions[n]
def shuffle(
self,
on: str | list | no_default = no_default, # type: ignore[valid-type]
ignore_index: bool = False,
npartitions: int | None = None,
shuffle_method: str | None = None,
on_index: bool = False,
force: bool = False,
**options,
):
"""Rearrange DataFrame into new partitions
Uses hashing of `on` to map rows to output partitions. After this
operation, rows with the same value of `on` will be in the same
partition.
Parameters
----------
on : str, list of str, or Series, Index, or DataFrame
Column names to shuffle by.
ignore_index : optional
Whether to ignore the index. Default is ``False``.
npartitions : optional
Number of output partitions. The partition count will
be preserved by default.
shuffle_method : optional
Desired shuffle method. Default chosen at optimization time.
on_index : bool, default False
Whether to shuffle on the index. Mutually exclusive with 'on'.
Set this to ``True`` if 'on' is not provided.
force : bool, default False
This forces the optimizer to keep the shuffle even if the final
expression could be further simplified.
**options : optional
Algorithm-specific options.
Notes
-----
This does not preserve a meaningful index/partitioning scheme. This
is not deterministic if done in parallel.
Examples
--------
>>> df = df.shuffle(df.columns[0]) # doctest: +SKIP
"""
if on is no_default and not on_index: # type: ignore[unreachable]
raise TypeError(
"Must shuffle on either columns or the index; currently shuffling on "
"neither. Pass column(s) to 'on' or set 'on_index' to True."
)
elif on is not no_default and on_index:
raise TypeError(
"Cannot shuffle on both columns and the index. Do not pass column(s) "
"to 'on' or set 'on_index' to False."
)
# Preserve partition count by default
npartitions = npartitions or self.npartitions
if isinstance(on, FrameBase):
if not expr.are_co_aligned(self.expr, on.expr):
raise TypeError(
"index must be aligned with the DataFrame to use as shuffle index."
)
else:
if pd.api.types.is_list_like(on) and not is_dask_collection(on):
on = list(on)
elif isinstance(on, (str, int)):
on = [on]
elif on_index:
on = [] # type: ignore[unreachable]
bad_cols = [
index_col
for index_col in on
if (index_col not in self.columns) and (index_col != self.index.name)
]
if bad_cols:
raise KeyError(
f"Cannot shuffle on {bad_cols}, column(s) not in dataframe to shuffle"
)
if (shuffle_method or get_default_shuffle_method()) == "p2p":
from distributed.shuffle._arrow import check_dtype_support
check_dtype_support(self._meta)
if any(not isinstance(c, str) for c in self._meta.columns):
unsupported = {
c: type(c) for c in self._meta.columns if not isinstance(c, str)
}
raise TypeError(
f"p2p requires all column names to be str, found: {unsupported}",
)
# Returned shuffled result
res = new_collection(
RearrangeByColumn(
self,
on,
npartitions,
ignore_index,
get_specified_shuffle(shuffle_method),
options,
index_shuffle=on_index,
)
)
if force:
# TODO: This forces the optimizer to not remove the shuffle It would
# be nice to teach the optimizer directly (e.g. to avoid key
# renames, etc.)
return res.map_partitions(lambda x: x, meta=res._meta)
return res
@derived_from(pd.DataFrame)
def resample(self, rule, closed=None, label=None):
from dask.dataframe.tseries.resample import Resampler
return Resampler(self, rule, closed=closed, label=label)
def rolling(self, window, **kwargs):
"""Provides rolling transformations.
Parameters
----------
window : int, str, offset
Size of the moving window. This is the number of observations used
for calculating the statistic. When not using a ``DatetimeIndex``,
the window size must not be so large as to span more than one
adjacent partition. If using an offset or offset alias like '5D',
the data must have a ``DatetimeIndex``
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to pandas.
axis : int, str, None, default 0
This parameter is deprecated with ``pandas>=2.1``.
Returns
-------
a Rolling object on which to call a method to compute a statistic
"""
from dask.dataframe.dask_expr._rolling import Rolling
return Rolling(self, window, **kwargs)
@insert_meta_param_description(pad=12)
def map_partitions(
self,
func,
*args,
meta=no_default,
enforce_metadata=True,
transform_divisions=True,
clear_divisions=False,
align_dataframes=False,
parent_meta=None,
required_columns=None,
**kwargs,
):
"""Apply a Python function to each partition
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. Arguments and
keywords may contain ``FrameBase`` or regular python objects.
DataFrame-like args (both dask and pandas) must have the same
number of partitions as ``self`` or comprise a single partition.
Key-word arguments, Single-partition arguments, and general
python-object arguments will be broadcasted to all partitions.
enforce_metadata : bool, default True
Whether to enforce at runtime that the structure of the DataFrame
produced by ``func`` actually matches the structure of ``meta``.
This will rename and reorder columns for each partition, and will
raise an error if this doesn't work, but it won't raise if dtypes
don't match.
transform_divisions : bool, default True
Whether to apply the function onto the divisions and apply those
transformed divisions to the output.
clear_divisions : bool, default False
Whether divisions should be cleared. If True, `transform_divisions`
will be ignored.
required_columns : list or None, default None
List of columns that ``func`` requires for execution. These columns
must belong to the first DataFrame argument (in ``args``). If None
is specified (the default), the query optimizer will assume that
all input columns are required.
$META
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
One can use ``map_partitions`` to apply a function on each partition.
Extra arguments and keywords can optionally be provided, and will be
passed to the function after the partition.
Here we apply a function with arguments and keywords to a DataFrame,
resulting in a Series:
>>> def myadd(df, a, b=1):
... return df.x + df.y + a + b
>>> res = ddf.map_partitions(myadd, 1, b=2)
>>> res.dtype
dtype('float64')
Here we apply a function to a Series resulting in a Series:
>>> res = ddf.x.map_partitions(lambda x: len(x)) # ddf.x is a Dask Series Structure
>>> res.dtype
dtype('int64')
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with no name, and dtype
``float64``:
>>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))
Here we map a function that takes in a DataFrame, and returns a
DataFrame with a new column:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
>>> res.dtypes
x int64
y float64
z float64
dtype: object
As before, the output metadata can also be specified manually. This
time we pass in a ``dict``, as the output is a DataFrame:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.map_partitions(lambda df: df.head(), meta=ddf)
Also note that the index and divisions are assumed to remain unchanged.
If the function you're mapping changes the index/divisions, you'll need
to pass ``clear_divisions=True``.
>>> ddf.map_partitions(func, clear_divisions=True) # doctest: +SKIP
Your map function gets information about where it is in the dataframe by
accepting a special ``partition_info`` keyword argument.
>>> def func(partition, partition_info=None):
... pass
This will receive the following information:
>>> partition_info # doctest: +SKIP
{'number': 1, 'division': 3}
For each argument and keyword arguments that are dask dataframes you will
receive the number (n) which represents the nth partition of the dataframe
and the division (the first index value in the partition). If divisions
are not known (for instance if the index is not sorted) then you will get
None as the division.
"""
return map_partitions(
func,
self,
*args,
meta=meta,
enforce_metadata=enforce_metadata,
transform_divisions=transform_divisions,
clear_divisions=clear_divisions,
align_dataframes=align_dataframes,
parent_meta=parent_meta,
required_columns=required_columns,
**kwargs,
)
@insert_meta_param_description(pad=12)
def map_overlap(
    self,
    func,
    before,
    after,
    *args,
    meta=no_default,
    enforce_metadata=True,
    transform_divisions=True,
    clear_divisions=False,
    align_dataframes=False,
    **kwargs,
):
    """Apply a function to each partition, sharing rows with adjacent partitions.

    This can be useful for implementing windowing functions such as
    ``df.rolling(...).mean()`` or ``df.diff()``.

    Parameters
    ----------
    func : function
        Function applied to each partition.
    before : int, timedelta or string timedelta
        The rows to prepend to partition ``i`` from the end of
        partition ``i - 1``.
    after : int, timedelta or string timedelta
        The rows to append to partition ``i`` from the beginning
        of partition ``i + 1``.
    args, kwargs :
        Positional and keyword arguments to pass to the function.
        Positional arguments are computed on a per-partition basis, while
        keyword arguments are shared across all partitions. The partition
        itself will be the first positional argument, with all other
        arguments passed *after*. Arguments can be ``Scalar``, ``Delayed``,
        or regular Python objects. DataFrame-like args (both dask and
        pandas) will be repartitioned to align (if necessary) before
        applying the function; see ``align_dataframes`` to control this
        behavior.
    enforce_metadata : bool, default True
        Whether to enforce at runtime that the structure of the DataFrame
        produced by ``func`` actually matches the structure of ``meta``.
        This will rename and reorder columns for each partition,
        and will raise an error if this doesn't work,
        but it won't raise if dtypes don't match.
    transform_divisions : bool, default True
        Whether to apply the function onto the divisions and apply those
        transformed divisions to the output.
    clear_divisions : bool, default False
        Whether to drop the divisions of the output entirely.
    align_dataframes : bool, default False
        Whether to repartition DataFrame- or Series-like args
        (both dask and pandas) so their divisions align before applying
        the function. This requires all inputs to have known divisions.
        Single-partition inputs will be split into multiple partitions.

        If False, all inputs must have either the same number of partitions
        or a single partition. Single-partition inputs will be broadcast to
        every partition of multi-partition inputs.
    $META

    Notes
    -----
    Given positive integers ``before`` and ``after``, and a function
    ``func``, ``map_overlap`` does the following:

    1. Prepend ``before`` rows to each partition ``i`` from the end of
       partition ``i - 1``. The first partition has no rows prepended.
    2. Append ``after`` rows to each partition ``i`` from the beginning of
       partition ``i + 1``. The last partition has no rows appended.
    3. Apply ``func`` to each partition, passing in any extra ``args`` and
       ``kwargs`` if provided.
    4. Trim ``before`` rows from the beginning of all but the first
       partition.
    5. Trim ``after`` rows from the end of all but the last partition.

    Examples
    --------
    Given a DataFrame, Series, or Index, such as:

    >>> import pandas as pd
    >>> import dask.dataframe as dd
    >>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],
    ...                    'y': [1., 2., 3., 4., 5.]})
    >>> ddf = dd.from_pandas(df, npartitions=2)

    A rolling sum with a trailing moving window of size 2 can be computed by
    overlapping 2 rows before each partition, and then mapping calls to
    ``df.rolling(2).sum()``:

    >>> ddf.compute()
        x    y
    0   1  1.0
    1   2  2.0
    2   4  3.0
    3   7  4.0
    4  11  5.0
    >>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()
          x    y
    0   NaN  NaN
    1   3.0  3.0
    2   6.0  5.0
    3  11.0  7.0
    4  18.0  9.0

    The pandas ``diff`` method computes a discrete difference shifted by a
    number of periods (can be positive or negative). This can be
    implemented by mapping calls to ``df.diff`` to each partition after
    prepending/appending that many rows, depending on sign:

    >>> def diff(df, periods=1):
    ...     before, after = (periods, 0) if periods > 0 else (0, -periods)
    ...     return df.map_overlap(lambda df, periods=1: df.diff(periods),
    ...                           periods, 0, periods=periods)
    >>> diff(ddf, 1).compute()
         x    y
    0  NaN  NaN
    1  1.0  1.0
    2  2.0  1.0
    3  3.0  1.0
    4  4.0  1.0

    If you have a ``DatetimeIndex``, you can use a ``pd.Timedelta`` for time-
    based windows or any ``pd.Timedelta`` convertible string:

    >>> ts = pd.Series(range(10), index=pd.date_range('2017', periods=10))
    >>> dts = dd.from_pandas(ts, npartitions=2)
    >>> dts.map_overlap(lambda df: df.rolling('2D').sum(),
    ...                 pd.Timedelta('2D'), 0).compute()
    2017-01-01     0.0
    2017-01-02     1.0
    2017-01-03     3.0
    2017-01-04     5.0
    2017-01-05     7.0
    2017-01-06     9.0
    2017-01-07    11.0
    2017-01-08    13.0
    2017-01-09    15.0
    2017-01-10    17.0
    Freq: D, dtype: float64
    """
    # Thin wrapper: the heavy lifting (overlap construction, trimming)
    # lives in the module-level ``map_overlap`` helper.
    return map_overlap(
        func,
        self,
        before,
        after,
        *args,
        meta=meta,
        enforce_metadata=enforce_metadata,
        transform_divisions=transform_divisions,
        clear_divisions=clear_divisions,
        align_dataframes=align_dataframes,
        **kwargs,
    )
def repartition(
    self,
    divisions: tuple | None = None,
    npartitions: int | None = None,
    partition_size: str | None = None,
    freq=None,
    force: bool = False,
):
    """Repartition a collection

    Exactly one of `divisions`, `npartitions`, `partition_size`, or `freq`
    should be specified. A ``ValueError`` will be raised when that is not
    the case.

    Parameters
    ----------
    divisions : list, optional
        The "dividing lines" used to split the dataframe into partitions.
        For ``divisions=[0, 10, 50, 100]``, there would be three output partitions,
        where the new index contained [0, 10), [10, 50), and [50, 100), respectively.
        See https://docs.dask.org/en/latest/dataframe-design.html#partitions.
    npartitions : int, Callable, optional
        Approximate number of partitions of output. The number of
        partitions used may be slightly lower than npartitions depending
        on data distribution, but will never be higher.
        The Callable gets the number of partitions of the input as an argument
        and should return an int.
    partition_size : str, optional
        Max number of bytes of memory for each partition. Use numbers or strings
        like 5MB. If specified npartitions and divisions will be ignored. Note that
        the size reflects the number of bytes used as computed by
        pandas.DataFrame.memory_usage, which will not necessarily match the size
        when storing to disk.

        .. warning::

            This keyword argument triggers computation to determine
            the memory size of each partition, which may be expensive.
    freq : str, pd.Timedelta
        A period on which to partition timeseries data like ``'7D'`` or
        ``'12h'`` or ``pd.Timedelta(hours=12)``. Assumes a datetime index.
    force : bool, default False
        Allows the expansion of the existing divisions.
        If False then the new divisions' lower and upper bounds must be
        the same as the old divisions'.

    Notes
    -----
    Also note that ``len(divisions)`` is equal to ``npartitions + 1``. This is because ``divisions``
    represents the upper and lower bounds of each partition. The first item is the
    lower bound of the first partition, the second item is the lower bound of the
    second partition and the upper bound of the first partition, and so on.
    The second-to-last item is the lower bound of the last partition, and the last
    (extra) item is the upper bound of the last partition.

    Examples
    --------
    >>> df = df.repartition(npartitions=10)  # doctest: +SKIP
    >>> df = df.repartition(divisions=[0, 5, 10, 20])  # doctest: +SKIP
    >>> df = df.repartition(freq='7d')  # doctest: +SKIP

    See Also
    --------
    DataFrame.memory_usage_per_partition
    pandas.DataFrame.memory_usage
    """
    # Exactly one repartitioning strategy may be selected.
    selected = [
        divisions is not None,
        npartitions is not None,
        partition_size is not None,
        freq is not None,
    ]
    if sum(selected) != 1:
        # BUG FIX: the previous message only named two of the four mutually
        # exclusive keyword arguments, contradicting the docstring.
        raise ValueError(
            "Please provide exactly one of the ``divisions=``, "
            "``npartitions=``, ``partition_size=`` or ``freq=`` "
            "keyword arguments."
        )
    if divisions is not None:
        # Validate monotonicity/shape of user-supplied divisions up front.
        check_divisions(divisions)
    if freq is not None:
        # Frequency-based repartitioning requires a datetime-like index.
        if not isinstance(self.divisions[0], pd.Timestamp):
            raise TypeError("Can only repartition on frequency for timeseries")
        return new_collection(RepartitionFreq(self, freq))
    return new_collection(
        Repartition(self, npartitions, divisions, force, partition_size, freq)
    )
def to_dask_array(
    self, lengths=None, meta=None, optimize: bool = True, **optimize_kwargs
) -> Array:
    """Convert a dask DataFrame to a dask array.

    Parameters
    ----------
    lengths : bool or Sequence of ints, optional
        How the chunk sizes along the first axis are determined. By
        default they are unknown, which can make some downstream
        operations fail.

        * True : compute the length of every partition immediately
        * Sequence : explicit chunk sizes for the first axis; only the
          count of entries is checked against the number of partitions,
          the values themselves are trusted as-is.
    meta : object, optional
        Overrides the default metadata of the resulting dask array.
    optimize : bool
        Whether to optimize the expression before converting to an Array.

    Returns
    -------
    A Dask Array
    """
    if lengths is True:
        # Materialize per-partition lengths with a cheap len() pass.
        lengths = tuple(self.map_partitions(len).compute())
    arr = self.values
    arr._chunks = self._validate_chunks(arr, lengths)
    if meta is not None:
        arr._meta = meta
    return arr
@property
def values(self):
    """Return a dask.array of the values of this dataframe

    Warning: the resulting array carries no precise shape information, so
    shape-dependent operations (slicing, reshaping, ...) will not work.
    """
    meta_values = self._meta.values
    if is_extension_array_dtype(meta_values):
        # Extension dtypes cannot round-trip through ndarray faithfully.
        warnings.warn(
            "Dask currently has limited support for converting pandas extension dtypes "
            f"to arrays. Converting {meta_values.dtype} to object dtype.",
            UserWarning,
        )
    return self.map_partitions(methods.values)
def __divmod__(self, other):
    # divmod yields a (quotient, remainder) pair; wrap both as collections.
    quotient, remainder = self.expr.__divmod__(other)
    return new_collection(quotient), new_collection(remainder)
def __rdivmod__(self, other):
    # Reflected divmod: ``divmod(other, self)``; wrap both outputs.
    quotient, remainder = self.expr.__rdivmod__(other)
    return new_collection(quotient), new_collection(remainder)
def __abs__(self):
    # Make the builtin ``abs(df)`` delegate to the public ``abs`` method.
    return self.abs()
@derived_from(pd.DataFrame)
def sum(
    self,
    axis=0,
    skipna=True,
    numeric_only=False,
    min_count=0,
    split_every=False,
    **kwargs,
):
    axis = self._validate_axis(axis)
    if axis != 1:
        # Reduce along the index via the expression layer, then apply the
        # min_count mask.
        summed = new_collection(
            self.expr.sum(skipna, numeric_only, split_every, axis)
        )
        return self._apply_min_count(summed, min_count)
    # axis=1: each partition can be reduced independently.
    return self.map_partitions(
        M.sum,
        skipna=skipna,
        numeric_only=numeric_only,
        axis=axis,
        min_count=min_count,
    )
def _apply_min_count(self, result, min_count):
    # Mask out reduction results computed from fewer than ``min_count``
    # non-NA values (mirrors pandas' min_count semantics).
    if not min_count:
        return result
    enough = self.notnull().sum() >= min_count
    scalar_shaped = not is_series_like(enough._meta)
    if scalar_shaped:
        # Promote scalars to length-1 series so ``where`` applies.
        result = result.to_series()
        enough = enough.to_series()
    masked = result.where(enough, other=np.nan)
    # Collapse back down to a scalar when we promoted above.
    return masked.min() if scalar_shaped else masked
@derived_from(pd.DataFrame)
def prod(
    self,
    axis=0,
    skipna=True,
    numeric_only=False,
    min_count=0,
    split_every=False,
    **kwargs,
):
    axis = self._validate_axis(axis)
    if axis != 1:
        # Index-wise product through the expression layer + min_count mask.
        multiplied = new_collection(
            self.expr.prod(skipna, numeric_only, split_every, axis)
        )
        return self._apply_min_count(multiplied, min_count)
    # axis=1: row-wise product, independent per partition.
    return self.map_partitions(
        M.prod,
        skipna=skipna,
        numeric_only=numeric_only,
        axis=axis,
        min_count=min_count,
    )

# pandas-compatible alias
product = prod
@derived_from(pd.DataFrame)
def var(
    self,
    axis=0,
    skipna=True,
    ddof=1,
    numeric_only=False,
    split_every=False,
    **kwargs,
):
    _raise_if_object_series(self, "var")
    axis = self._validate_axis(axis)
    # Let pandas surface argument errors eagerly on the metadata.
    self._meta.var(axis=axis, skipna=skipna, numeric_only=numeric_only)
    frame = self
    if numeric_only and is_dataframe_like(self._meta):
        # Restrict to the columns pandas itself would keep.
        numeric_columns = list(self._meta.var(numeric_only=True).index)
        frame = frame[numeric_columns]
    return new_collection(
        frame.expr.var(axis, skipna, ddof, numeric_only, split_every=split_every)
    )
@derived_from(pd.DataFrame)
def std(
    self,
    axis=0,
    skipna=True,
    ddof=1,
    numeric_only=False,
    split_every=False,
    **kwargs,
):
    _raise_if_object_series(self, "std")
    axis = self._validate_axis(axis)
    numeric_dd = self
    # Use nonempty meta so pandas computes a realistic result dtype/index.
    meta = meta_nonempty(self._meta).std(
        axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only
    )
    # Datetime/timedelta columns must be converted to numeric before the
    # variance computation, then converted back afterwards.
    needs_time_conversion, time_cols = False, None
    if is_dataframe_like(self._meta):
        if axis == 0:
            numeric_dd = numeric_dd[list(meta.index)]
        else:
            numeric_dd = numeric_dd.copy()
        if numeric_only is True:
            _meta = numeric_dd._meta.select_dtypes(include=[np.number])
        else:
            _meta = numeric_dd._meta
        time_cols = _meta.select_dtypes(include=["datetime", "timedelta"]).columns
        if len(time_cols) > 0:
            if axis == 1 and len(time_cols) != len(self.columns):
                # Mixed datetime-like and other dtypes across a row are not
                # supported: substitute an all-NaN single-column frame.
                numeric_dd = from_pandas(
                    meta_frame_constructor(self)(
                        {"_": meta_series_constructor(self)([np.nan])},
                        index=self.index,
                    ),
                    npartitions=self.npartitions,
                )
            else:
                needs_time_conversion = True
                if axis == 1:
                    numeric_dd = numeric_dd.astype(f"datetime64[{meta.array.unit}]")
                for col in time_cols:
                    numeric_dd[col] = _convert_to_numeric(numeric_dd[col], skipna)
    else:
        # Series case: only a datetime64 series needs conversion.
        needs_time_conversion = is_datetime64_any_dtype(self._meta)
        if needs_time_conversion:
            numeric_dd = _convert_to_numeric(self, skipna)
    units = None
    if needs_time_conversion and time_cols is not None:
        # Remember each column's datetime unit for the reverse conversion.
        units = [getattr(self._meta[c].array, "unit", None) for c in time_cols]
    if axis == 1:
        # Row-wise std is fully partition-local.
        _kwargs = (
            {}
            if not needs_time_conversion
            else {"unit": meta.array.unit, "dtype": meta.dtype}
        )
        return numeric_dd.map_partitions(
            M.std if not needs_time_conversion else _sqrt_and_convert_to_timedelta,
            meta=meta,
            axis=axis,
            skipna=skipna,
            ddof=ddof,
            enforce_metadata=False,
            numeric_only=numeric_only,
            **_kwargs,
        )
    # std = sqrt(var): compute the variance, then take an elementwise sqrt
    # (converting back to timedelta when we converted above).
    result = numeric_dd.var(
        skipna=skipna, ddof=ddof, numeric_only=numeric_only, split_every=split_every
    )
    if needs_time_conversion:
        sqrt_func_kwargs = {
            "is_df_like": is_dataframe_like(self._meta),
            "time_cols": time_cols,
            "axis": axis,
            "dtype": getattr(meta, "dtype", None),
            "unit": getattr(meta, "unit", None),
            "units": units,
        }
        sqrt_func = _sqrt_and_convert_to_timedelta
    else:
        sqrt_func_kwargs = {}
        sqrt_func = np.sqrt
    result = result.map_partitions(
        sqrt_func,
        meta=meta,
        enforce_metadata=False,
        parent_meta=self._meta,
        **sqrt_func_kwargs,
    )
    return result
def enforce_runtime_divisions(self):
    """Enforce the current divisions at runtime.

    Injects a layer into the Task Graph that checks that the current divisions
    match the expected divisions at runtime.
    """
    # Without known divisions there is nothing to validate against.
    if not self.known_divisions:
        raise ValueError("No known divisions to enforce!")
    return new_collection(expr.EnforceRuntimeDivisions(self))
@derived_from(pd.DataFrame)
def skew(
    self,
    axis=0,
    bias=True,
    nan_policy="propagate",
    numeric_only=False,
):
    """
    .. note::

       This implementation follows the dask.array.stats implementation
       of skewness and calculates skewness without taking into account
       a bias term for finite sample size, which corresponds to the
       default settings of the scipy.stats skewness calculation. However,
       Pandas corrects for this, so the values differ by a factor of
       (n * (n - 1)) ** 0.5 / (n - 2), where n is the number of samples.

       Further, this method currently does not support filtering out NaN
       values, which is again a difference to Pandas.
    """
    _raise_if_object_series(self, "skew")
    if axis is None:
        raise ValueError("`axis=None` isn't currently supported for `skew`")
    axis = self._validate_axis(axis)
    if is_dataframe_like(self):
        # Let pandas raise errors if necessary
        meta = self._meta_nonempty.skew(axis=axis, numeric_only=numeric_only)
    else:
        meta = self._meta_nonempty.skew()
    if axis == 1:
        # Row-wise skew is partition-local.
        return self.map_partitions(
            M.skew,
            meta=meta,
            axis=axis,
            enforce_metadata=False,
        )
    # Only the scipy-style (biased, NaN-propagating) variant is implemented.
    if not bias:
        raise NotImplementedError("bias=False is not implemented.")
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    frame = self
    if frame.ndim > 1:
        frame = frame.select_dtypes(
            include=["number", "bool"], exclude=[np.timedelta64]
        )
    # Biased sample skewness: g1 = m3 / m2 ** 1.5 (central moments).
    m2 = new_collection(Moment(frame, order=2))
    m3 = new_collection(Moment(frame, order=3))
    result = m3 / m2**1.5
    if result.ndim == 1:
        result = result.fillna(0.0)
    return result
@derived_from(pd.DataFrame)
def kurtosis(
    self,
    axis=0,
    fisher=True,
    bias=True,
    nan_policy="propagate",
    numeric_only=False,
):
    """
    .. note::

       This implementation follows the dask.array.stats implementation
       of kurtosis and calculates kurtosis without taking into account
       a bias term for finite sample size, which corresponds to the
       default settings of the scipy.stats kurtosis calculation. This differs
       from pandas.

       Further, this method currently does not support filtering out NaN
       values, which is again a difference to Pandas.
    """
    _raise_if_object_series(self, "kurtosis")
    if axis is None:
        # BUG FIX: message previously named `skew` (copy-paste from skew()).
        raise ValueError("`axis=None` isn't currently supported for `kurtosis`")
    axis = self._validate_axis(axis)
    if is_dataframe_like(self):
        # Let pandas raise errors if necessary
        meta = self._meta_nonempty.kurtosis(axis=axis, numeric_only=numeric_only)
    else:
        meta = self._meta_nonempty.kurtosis()
    if axis == 1:
        # Row-wise kurtosis is partition-local.
        return map_partitions(
            M.kurtosis,
            self,
            meta=meta,
            token=f"{self._token_prefix}kurtosis",
            axis=axis,
            enforce_metadata=False,
        )
    # Only the scipy-style (biased, NaN-propagating) variant is implemented.
    if not bias:
        raise NotImplementedError("bias=False is not implemented.")
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    frame = self
    if frame.ndim > 1:
        frame = frame.select_dtypes(
            include=["number", "bool"], exclude=[np.timedelta64]
        )
    # Biased sample kurtosis: g2 = m4 / m2 ** 2 (central moments);
    # Fisher's definition subtracts 3 so a normal distribution scores 0.
    m2 = new_collection(Moment(frame, order=2))
    m4 = new_collection(Moment(frame, order=4))
    result = m4 / m2**2.0
    if result.ndim == 1:
        result = result.fillna(0.0)
    if fisher:
        return result - 3
    else:
        return result

# pandas-compatible alias
kurt = kurtosis
@derived_from(pd.DataFrame)
def sem(
    self, axis=None, skipna=True, ddof=1, split_every=False, numeric_only=False
):
    axis = self._validate_axis(axis)
    _raise_if_object_series(self, "sem")
    if axis == 1:
        # Row-wise standard error is partition-local.
        return self.map_partitions(
            M.sem,
            axis=axis,
            skipna=skipna,
            ddof=ddof,
            numeric_only=numeric_only,
        )
    meta = self._meta.sem(skipna=skipna, ddof=ddof, numeric_only=numeric_only)
    # Restrict a DataFrame to the columns pandas would keep.
    frame = self[list(meta.index)] if self.ndim == 2 else self
    variance = frame.var(skipna=skipna, ddof=ddof, split_every=split_every)
    sample_count = frame.count(split_every=split_every)
    # sem = sqrt(var / n)
    return map_partitions(
        np.sqrt,
        variance / sample_count,
        meta=meta,
        enforce_metadata=False,
        parent_meta=self._meta,
    )
def _prepare_cov_corr(self, min_periods, numeric_only):
    # Shared argument validation/column pruning for cov() and corr().
    if min_periods is None:
        min_periods = 2
    if min_periods < 2:
        raise ValueError("min_periods must be >= 2")
    # Let pandas raise eagerly on invalid argument combinations.
    self._meta.cov(numeric_only=numeric_only, min_periods=min_periods)
    frame = self
    if numeric_only:
        numeric_meta = self._meta._get_numeric_data()
        if len(numeric_meta.columns) != len(self.columns):
            frame = frame[list(numeric_meta.columns)]
    return frame, min_periods
def _cov(
    self, min_periods=None, numeric_only=False, split_every=False, scalar=False
):
    # Shared covariance implementation behind the public cov() entry points.
    frame, min_periods = self._prepare_cov_corr(min_periods, numeric_only)
    return new_collection(Cov(frame, min_periods, split_every, scalar))
def _corr(
    self,
    method="pearson",
    min_periods=None,
    numeric_only=False,
    split_every=False,
    scalar=False,
):
    # Shared correlation implementation; only Pearson is supported.
    if method != "pearson":
        raise NotImplementedError("Only Pearson correlation has been implemented")
    frame, min_periods = self._prepare_cov_corr(min_periods, numeric_only)
    return new_collection(Corr(frame, min_periods, split_every, scalar))
@derived_from(pd.DataFrame)
def mean(
    self, axis=0, skipna=True, numeric_only=False, split_every=False, **kwargs
):
    _raise_if_object_series(self, "mean")
    axis = self._validate_axis(axis)
    if axis != 1:
        reduced = self.expr.mean(
            skipna, numeric_only, split_every=split_every, axis=axis
        )
        return new_collection(reduced)
    # axis=1: row-wise mean, independent per partition.
    return self.map_partitions(
        M.mean, skipna=skipna, numeric_only=numeric_only, axis=axis
    )
@derived_from(pd.DataFrame)
def max(self, axis=0, skipna=True, numeric_only=False, split_every=False, **kwargs):
    axis = self._validate_axis(axis)
    if axis != 1:
        reduced = self.expr.max(skipna, numeric_only, split_every, axis)
        return new_collection(reduced)
    # axis=1: row-wise maximum, independent per partition.
    return self.map_partitions(
        M.max, skipna=skipna, numeric_only=numeric_only, axis=axis
    )
@derived_from(pd.DataFrame)
def any(self, axis=0, skipna=True, split_every=False, **kwargs):
    axis = self._validate_axis(axis)
    if axis != 1:
        return new_collection(self.expr.any(skipna, split_every))
    # axis=1: row-wise reduction, independent per partition.
    return self.map_partitions(M.any, skipna=skipna, axis=axis)
@derived_from(pd.DataFrame)
def all(self, axis=0, skipna=True, split_every=False, **kwargs):
    axis = self._validate_axis(axis)
    if axis != 1:
        return new_collection(self.expr.all(skipna, split_every))
    # axis=1: row-wise reduction, independent per partition.
    return self.map_partitions(M.all, skipna=skipna, axis=axis)
@derived_from(pd.DataFrame)
def idxmin(self, axis=0, skipna=True, numeric_only=False, split_every=False):
    axis = self._validate_axis(axis)
    if axis != 1:
        return new_collection(self.expr.idxmin(skipna, numeric_only, split_every))
    # axis=1: each row's argmin lives entirely within one partition.
    return self.map_partitions(
        M.idxmin, skipna=skipna, numeric_only=numeric_only, axis=axis
    )
@derived_from(pd.DataFrame)
def idxmax(self, axis=0, skipna=True, numeric_only=False, split_every=False):
    axis = self._validate_axis(axis)
    if axis != 1:
        return new_collection(self.expr.idxmax(skipna, numeric_only, split_every))
    # axis=1: each row's argmax lives entirely within one partition.
    return self.map_partitions(
        M.idxmax, skipna=skipna, numeric_only=numeric_only, axis=axis
    )
@derived_from(pd.DataFrame)
def min(self, axis=0, skipna=True, numeric_only=False, split_every=False, **kwargs):
    axis = self._validate_axis(axis)
    if axis != 1:
        reduced = self.expr.min(skipna, numeric_only, split_every, axis)
        return new_collection(reduced)
    # axis=1: row-wise minimum, independent per partition.
    return self.map_partitions(
        M.min, skipna=skipna, numeric_only=numeric_only, axis=axis
    )
@derived_from(pd.DataFrame)
def count(self, axis=0, numeric_only=False, split_every=False):
    axis = self._validate_axis(axis)
    if axis != 1:
        return new_collection(self.expr.count(numeric_only, split_every))
    # axis=1: per-row non-NA counts, independent per partition.
    return self.map_partitions(M.count, numeric_only=numeric_only, axis=axis)
@derived_from(pd.DataFrame)
def abs(self):
    # Raise pandas errors
    _raise_if_object_series(self, "abs")
    # Validate against nonempty meta so dtype errors surface eagerly.
    meta_nonempty(self._meta).abs()
    return new_collection(self.expr.abs())
@derived_from(pd.DataFrame)
def astype(self, dtypes):
    # Lazy cast; ``dtypes`` may be a single dtype or a column->dtype mapping.
    return new_collection(self.expr.astype(dtypes))
@derived_from(pd.DataFrame)
def combine_first(self, other):
    # Outer-align ``other`` first so plain pandas inputs get partitioned.
    other = self._create_alignable_frame(other, "outer").expr
    return new_collection(self.expr.combine_first(other))
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how="start"):
    # Convert a PeriodIndex-backed collection to timestamps (lazy).
    return new_collection(self.expr.to_timestamp(freq, how))
@derived_from(pd.DataFrame)
def isna(self):
    # Elementwise missing-value mask (lazy).
    return new_collection(self.expr.isna())
def random_split(self, frac, random_state=None, shuffle=False):
    """Pseudorandomly split dataframe into different pieces row-wise

    Parameters
    ----------
    frac : list
        List of floats that should sum to one.
    random_state : int or np.random.RandomState
        If int or None create a new RandomState with this as the seed.
        Otherwise draw from the passed RandomState.
    shuffle : bool, default False
        If set to True, the dataframe is shuffled (within partition)
        before the split.

    Examples
    --------
    50/50 split

    >>> a, b = df.random_split([0.5, 0.5])  # doctest: +SKIP

    80/10/10 split, consistent random_state

    >>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123)  # doctest: +SKIP

    See Also
    --------
    dask.DataFrame.sample
    """
    if not np.allclose(sum(frac), 1):
        raise ValueError("frac should sum to 1")
    # One shared Split expression; each output piece selects its slot.
    splitter = expr.Split(self, frac, random_state, shuffle)
    return [
        new_collection(expr.SplitTake(splitter, piece, self.ndim))
        for piece in range(len(frac))
    ]
@derived_from(pd.DataFrame)
def round(self, decimals=0):
    # Elementwise rounding to ``decimals`` places (lazy).
    return new_collection(self.expr.round(decimals))
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
    # Align plain pandas inputs, then unwrap collections to expressions.
    aligned_cond = self._create_alignable_frame(cond)
    aligned_other = self._create_alignable_frame(other)
    if isinstance(aligned_cond, FrameBase):
        aligned_cond = aligned_cond.expr
    if isinstance(aligned_other, FrameBase):
        aligned_other = aligned_other.expr
    return new_collection(self.expr.where(aligned_cond, aligned_other))
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
    # Align plain pandas inputs, then unwrap collections to expressions.
    aligned_cond = self._create_alignable_frame(cond)
    aligned_other = self._create_alignable_frame(other)
    if isinstance(aligned_cond, FrameBase):
        aligned_cond = aligned_cond.expr
    if isinstance(aligned_other, FrameBase):
        aligned_other = aligned_other.expr
    return new_collection(self.expr.mask(aligned_cond, aligned_other))
@derived_from(pd.DataFrame)
def replace(self, to_replace=None, value=no_default, regex=False):
    # Lazy value replacement; semantics follow pandas.DataFrame.replace.
    return new_collection(self.expr.replace(to_replace, value, regex))
@derived_from(pd.DataFrame)
def ffill(self, axis=0, limit=None):
    axis = _validate_axis(axis)
    if axis == 1:
        # Filling across columns never crosses partition boundaries.
        return self.map_partitions(M.ffill, axis=axis, limit=limit)
    # Unlimited fill may pull values across partitions, so verify
    # partition 0 via FillnaCheck first.
    source = FillnaCheck(self, "ffill", lambda x: 0) if limit is None else self
    return new_collection(FFill(source, limit))
@derived_from(pd.DataFrame)
def bfill(self, axis=0, limit=None):
    axis = _validate_axis(axis)
    if axis == 1:
        # Filling across columns never crosses partition boundaries.
        return self.map_partitions(M.bfill, axis=axis, limit=limit)
    # Unlimited fill may pull values across partitions, so verify the
    # last partition via FillnaCheck first.
    source = (
        FillnaCheck(self, "bfill", lambda x: x.npartitions - 1)
        if limit is None
        else self
    )
    return new_collection(BFill(source, limit))
@derived_from(pd.DataFrame)
def fillna(self, value=None, axis=None):
    axis = self._validate_axis(axis)
    if axis == 1:
        return self.map_partitions(M.fillna, value, axis=axis)
    # A dask collection used as the fill value participates lazily.
    fill_value = value.expr if isinstance(value, FrameBase) else value
    return new_collection(self.expr.fillna(fill_value))
@derived_from(pd.DataFrame)
def shift(self, periods=1, freq=None, axis=0):
    if not isinstance(periods, Integral):
        raise TypeError("periods must be an integer")
    axis = _validate_axis(axis)
    if axis != 0:
        # Column-wise shift happens independently within each partition.
        return self.map_partitions(
            func=Shift.func,
            enforce_metadata=False,
            transform_divisions=False,
            periods=periods,
            axis=axis,
            freq=freq,
        )
    # Index-wise shift may move rows across partition boundaries.
    return new_collection(Shift(self, periods, freq))
@derived_from(pd.DataFrame)
def diff(self, periods=1, axis=0):
    """
    .. note::

        Pandas currently uses an ``object``-dtype column to represent
        boolean data with missing values. This can cause issues for
        boolean-specific operations, like ``|``. To enable boolean-
        specific operations, at the cost of metadata that doesn't match
        pandas, use ``.astype(bool)`` after the ``shift``.
    """
    axis = _validate_axis(axis)
    if axis != 0:
        # Column-wise differences stay within each partition.
        return self.map_partitions(
            func=Diff.func,
            enforce_metadata=False,
            transform_divisions=False,
            clear_divisions=False,
            periods=periods,
            axis=axis,
        )
    # Index-wise differences may reach across partition boundaries.
    return new_collection(Diff(self, periods))
@derived_from(pd.DataFrame)
def rename_axis(
    self, mapper=no_default, index=no_default, columns=no_default, axis=0
):
    # Lazily rename the index/columns axis labels (pandas semantics).
    return new_collection(self.expr.rename_axis(mapper, index, columns, axis))
def _create_alignable_frame(self, other, join="outer"):
    # Wrap a plain pandas Series/DataFrame as a small dask collection so
    # it can participate in alignment; anything else passes through.
    looks_pandas = is_series_like(other) or is_dataframe_like(other)
    if looks_pandas and not is_dask_collection(other):
        if join in ("inner", "left"):
            partition_count = 1
        else:
            # We have to trigger alignment, otherwise pandas will add
            # the same values to every partition
            partition_count = 2
        other = from_pandas(other, npartitions=partition_count)
    return other
@derived_from(pd.DataFrame)
def align(self, other, join="outer", axis=None, fill_value=None):
    # Convert plain pandas inputs to collections, then delegate.
    # NOTE(review): this returns self.expr.align(...) directly rather than
    # wrapping with new_collection — presumably the expression-level align
    # already returns collections; confirm against the expr implementation.
    other = self._create_alignable_frame(other, join)
    return self.expr.align(other.expr, join, axis, fill_value)
def nunique_approx(self, split_every=None):
    """Approximate number of unique rows.

    This method uses the HyperLogLog algorithm for cardinality
    estimation to compute the approximate number of unique rows.
    The approximate error is 0.406%.

    Parameters
    ----------
    split_every : int, optional
        Group partitions into groups of this size while performing a
        tree-reduction. If set to False, no tree-reduction will be used.
        Default is 8.

    Returns
    -------
    a float representing the approximate number of elements
    """
    return new_collection(self.expr.nunique_approx(split_every=split_every))
@derived_from(pd.DataFrame)
def cumsum(self, axis=0, skipna=True, **kwargs):
    if axis != 1:
        return new_collection(self.expr.cumsum(skipna=skipna))
    # axis=1: row-wise cumulation stays inside each partition.
    return self.map_partitions(M.cumsum, axis=axis, skipna=skipna)
@derived_from(pd.DataFrame)
def cumprod(self, axis=0, skipna=True, **kwargs):
    if axis != 1:
        return new_collection(self.expr.cumprod(skipna=skipna))
    # axis=1: row-wise cumulation stays inside each partition.
    return self.map_partitions(M.cumprod, axis=axis, skipna=skipna)
@derived_from(pd.DataFrame)
def cummax(self, axis=0, skipna=True):
    if axis != 1:
        return new_collection(self.expr.cummax(skipna=skipna))
    # axis=1: row-wise cumulation stays inside each partition.
    return self.map_partitions(M.cummax, axis=axis, skipna=skipna)
@derived_from(pd.DataFrame)
def cummin(self, axis=0, skipna=True):
    if axis != 1:
        return new_collection(self.expr.cummin(skipna=skipna))
    # axis=1: row-wise cumulation stays inside each partition.
    return self.map_partitions(M.cummin, axis=axis, skipna=skipna)
def reduction(
    self,
    chunk,
    aggregate=None,
    combine=None,
    meta=no_default,
    token=None,
    split_every=None,
    chunk_kwargs=None,
    aggregate_kwargs=None,
    combine_kwargs=None,
    **kwargs,
):
    """Generic row-wise reductions.

    Parameters
    ----------
    chunk : callable
        Function to operate on each partition. Should return a
        ``pandas.DataFrame``, ``pandas.Series``, or a scalar.
    aggregate : callable, optional
        Function to operate on the concatenated result of ``chunk``. If not
        specified, defaults to ``chunk``. Used to do the final aggregation
        in a tree reduction.

        The input to ``aggregate`` depends on the output of ``chunk``.
        If the output of ``chunk`` is a:

        - scalar: Input is a Series, with one row per partition.
        - Series: Input is a DataFrame, with one row per partition. Columns
          are the rows in the output series.
        - DataFrame: Input is a DataFrame, with one row per partition.
          Columns are the columns in the output dataframes.

        Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
        scalar.
    combine : callable, optional
        Function to operate on intermediate concatenated results of
        ``chunk`` in a tree-reduction. If not provided, defaults to
        ``aggregate``. The input/output requirements should match that of
        ``aggregate`` described above.
    $META
    token : str, optional
        The name to use for the output keys.
    split_every : int, optional
        Group partitions into groups of this size while performing a
        tree-reduction. If set to False, no tree-reduction will be used,
        and all intermediates will be concatenated and passed to
        ``aggregate``. Default is 8.
    chunk_kwargs : dict, optional
        Keyword arguments to pass on to ``chunk`` only.
    aggregate_kwargs : dict, optional
        Keyword arguments to pass on to ``aggregate`` only.
    combine_kwargs : dict, optional
        Keyword arguments to pass on to ``combine`` only.
    kwargs :
        All remaining keywords will be passed to ``chunk``, ``combine``,
        and ``aggregate``.

    Examples
    --------
    >>> import pandas as pd
    >>> import dask.dataframe as dd
    >>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
    >>> ddf = dd.from_pandas(df, npartitions=4)

    Count the number of rows in a DataFrame. To do this, count the number
    of rows in each partition, then sum the results:

    >>> res = ddf.reduction(lambda x: x.count(),
    ...                     aggregate=lambda x: x.sum())
    >>> res.compute()
    x    50
    y    50
    dtype: int64

    Count the number of rows in a Series with elements greater than or
    equal to a value (provided via a keyword).

    >>> def count_greater(x, value=0):
    ...     return (x >= value).sum()
    >>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
    ...                       chunk_kwargs={'value': 25})
    >>> res.compute()
    np.int64(25)

    Aggregate both the sum and count of a Series at the same time:

    >>> def sum_and_count(x):
    ...     return pd.Series({'count': x.count(), 'sum': x.sum()},
    ...                      index=['count', 'sum'])
    >>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
    >>> res.compute()
    count      50
    sum      1225
    dtype: int64

    Doing the same, but for a DataFrame. Here ``chunk`` returns a
    DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
    index with non-unique entries for both 'x' and 'y'. We groupby the
    index, and sum each group to get the final result.

    >>> def sum_and_count(x):
    ...     return pd.DataFrame({'count': x.count(), 'sum': x.sum()},
    ...                        columns=['count', 'sum'])
    >>> res = ddf.reduction(sum_and_count,
    ...                     aggregate=lambda x: x.groupby(level=0).sum())
    >>> res.compute()
       count   sum
    x     50  1225
    y     50  3725
    """
    if split_every is not None and split_every < 2 and split_every is not False:
        raise ValueError("split_every must be at least 2")
    if combine is None:
        if combine_kwargs:
            raise ValueError("`combine_kwargs` provided with no `combine`")
    # Copy the per-stage kwarg dicts (never mutate caller-owned dicts),
    # merge in the shared **kwargs, and stash each stage's callable under
    # the "func" key for CustomReduction to pick up.
    chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
    chunk_kwargs.update(kwargs)
    chunk_kwargs["func"] = chunk
    combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
    combine_kwargs.update(kwargs)
    # combine falls back to aggregate, which falls back to chunk.
    combine_kwargs["func"] = combine or aggregate or chunk
    aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
    aggregate_kwargs.update(kwargs)
    aggregate_kwargs["func"] = aggregate or chunk
    return new_collection(
        CustomReduction(
            self,
            meta,
            chunk_kwargs,
            aggregate_kwargs,
            combine_kwargs,
            split_every,
            token,
        )
    )
def memory_usage_per_partition(self, index: bool = True, deep: bool = False):
"""Return the memory usage of each partition
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the index in
returned Series.
deep : bool, default False
If True, introspect the data deeply by interrogating
``object`` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the partition number and whose values
are the memory usage of each partition in bytes.
"""
return new_collection(self.expr.memory_usage_per_partition(index, deep))
@property
def loc(self):
"""Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP
"""
from dask.dataframe.dask_expr._indexing import LocIndexer
return LocIndexer(self)
@derived_from(pd.DataFrame)
def notnull(self):
return new_collection(expr.NotNull(self))
@derived_from(pd.DataFrame)
def isnull(self):
return ~self.notnull()
def compute_current_divisions(self, col=None, set_divisions: bool = False):
"""Compute the current divisions of the DataFrame.
This method triggers immediate computation. If you find yourself running this command
repeatedly for the same dataframe, we recommend storing the result
so you don't have to rerun it.
If the column or index values overlap between partitions, raises ``ValueError``.
To prevent this, make sure the data are sorted by the column or index.
Parameters
----------
col : string, optional
Calculate the divisions for a non-index column by passing in the name of the column.
If col is not specified, the index will be used to calculate divisions.
In this case, if the divisions are already known, they will be returned
immediately without computing.
set_divisions : bool, default False
Whether to set the computed divisions into the DataFrame. If False, the divisions
of the DataFrame are unchanged.
Examples
--------
>>> import dask
>>> ddf = dask.datasets.timeseries(start="2021-01-01", end="2021-01-07", freq="1h").clear_divisions()
>>> divisions = ddf.compute_current_divisions()
>>> print(divisions) # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2021-01-01 00:00:00'),
Timestamp('2021-01-02 00:00:00'),
Timestamp('2021-01-03 00:00:00'),
Timestamp('2021-01-04 00:00:00'),
Timestamp('2021-01-05 00:00:00'),
Timestamp('2021-01-06 00:00:00'),
Timestamp('2021-01-06 23:00:00'))
>>> ddf = ddf.reset_index().clear_divisions()
>>> divisions = ddf.compute_current_divisions("timestamp")
>>> print(divisions) # doctest: +NORMALIZE_WHITESPACE
(Timestamp('2021-01-01 00:00:00'),
Timestamp('2021-01-02 00:00:00'),
Timestamp('2021-01-03 00:00:00'),
Timestamp('2021-01-04 00:00:00'),
Timestamp('2021-01-05 00:00:00'),
Timestamp('2021-01-06 00:00:00'),
Timestamp('2021-01-06 23:00:00'))
>>> ddf = ddf.set_index("timestamp", divisions=divisions, sorted=True)
"""
if col is None and self.known_divisions:
if set_divisions:
return self
return self.divisions
if col is not None and set_divisions:
raise NotImplementedError(
"Can't set divisions of non-index, call set_index instead."
)
if col is not None:
frame = self[col]
else:
frame = self.index
mins, maxes, lens = _compute_partition_stats(frame, allow_overlap=set_divisions)
divisions = tuple(mins) + (maxes[-1],)
if not set_divisions:
return divisions
if len(mins) == len(self.divisions) - 1:
if not any(mins[i] >= maxes[i - 1] for i in range(1, len(mins))):
return new_collection(expr.SetDivisions(self, divisions))
return new_collection(expr.ResolveOverlappingDivisions(self, mins, maxes, lens))
@classmethod
def from_dict(
cls, data, *, npartitions=1, orient="columns", dtype=None, columns=None
):
"""
Construct a Dask DataFrame from a Python Dictionary
See Also
--------
dask.dataframe.from_dict
"""
return from_dict(data, npartitions, orient, dtype=dtype, columns=columns)
def to_json(self, filename, *args, **kwargs):
"""See dd.to_json docstring for more information"""
from dask.dataframe.io import to_json
return to_json(self, filename, *args, **kwargs)
def to_sql(
self,
name: str,
uri: str,
schema=None,
if_exists: str = "fail",
index: bool = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
compute=True,
parallel=False,
engine_kwargs=None,
):
from dask.dataframe.io.sql import to_sql
return to_sql(
self,
name,
uri,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
compute=compute,
parallel=parallel,
engine_kwargs=engine_kwargs,
)
def to_orc(self, path, *args, **kwargs):
"""See dd.to_orc docstring for more information"""
from dask.dataframe.io.orc import to_orc
return to_orc(self, path, *args, **kwargs)
def to_csv(self, filename, **kwargs):
"""See dd.to_csv docstring for more information"""
from dask.dataframe.io.csv import to_csv
return to_csv(self, filename, **kwargs)
def to_records(self, index=False, lengths=None):
from dask.dataframe.dask_expr.io.records import to_records
if lengths is True:
lengths = tuple(self.map_partitions(len).compute())
records = to_records(self)
chunks = self._validate_chunks(records, lengths)
records._chunks = (chunks[0],)
return records
def _validate_chunks(self, arr, lengths):
from collections.abc import Sequence
from dask.array.core import normalize_chunks
if isinstance(lengths, Sequence):
lengths = tuple(lengths)
if len(lengths) != self.npartitions:
raise ValueError(
"The number of items in 'lengths' does not match the number of "
f"partitions. {len(lengths)} != {self.npartitions}"
)
if self.ndim == 1:
chunks = normalize_chunks((lengths,))
else:
chunks = normalize_chunks((lengths, (len(self.columns),)))
return chunks
elif lengths is not None:
raise ValueError(f"Unexpected value for 'lengths': '{lengths}'")
return arr._chunks
def to_bag(self, index=False, format="tuple"):
"""Create a Dask Bag from a Series"""
from dask.dataframe.dask_expr.io.bag import to_bag
return to_bag(self, index, format=format)
def to_hdf(self, path_or_buf, key, mode="a", append=False, **kwargs):
"""See dd.to_hdf docstring for more information"""
from dask.dataframe.io.hdf import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, **kwargs)
def to_delayed(self, optimize_graph=True):
"""Convert into a list of ``dask.delayed`` objects, one per partition.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
Examples
--------
>>> partitions = df.to_delayed() # doctest: +SKIP
See Also
--------
dask_expr.from_delayed
"""
from dask.highlevelgraph import HighLevelGraph
if optimize_graph:
frame = self.optimize()
else:
frame = self
keys = frame.__dask_keys__()
graph = frame.__dask_graph__()
layer = f"delayed-{frame._name}"
graph = HighLevelGraph.from_collections(layer, graph, dependencies=())
return [Delayed(k, graph, layer=layer) for k in keys]
def to_backend(self, backend: str | None = None, **kwargs):
"""Move to a new DataFrame backend
Parameters
----------
backend : str, Optional
The name of the new backend to move to. The default
is the current "dataframe.backend" configuration.
Returns
-------
DataFrame, Series or Index
"""
from dask.dataframe.dask_expr._backends import dataframe_creation_dispatch
# Get desired backend
backend = backend or dataframe_creation_dispatch.backend
# Check that "backend" has a registered entrypoint
backend_entrypoint = dataframe_creation_dispatch.dispatch(backend)
# Call `DataFrameBackendEntrypoint.to_backend`
return backend_entrypoint.to_backend(self, **kwargs)
@derived_from(pd.Series)
def dot(self, other, meta=no_default):
if not isinstance(other, FrameBase):
raise TypeError("The second operand must be a dask dataframe")
if isinstance(other, DataFrame):
s = self.map_partitions(M.dot, other, meta=meta)
return s.groupby(by=s.index).apply(
lambda x: x.sum(skipna=False), meta=s._meta_nonempty
)
return self.map_partitions(_dot_series, other, meta=meta).sum(skipna=False)
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError(
f"{target} is both the pipe target and a keyword argument"
)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def sample(self, n=None, frac=None, replace=False, random_state=None):
"""Random sample of items
Parameters
----------
n : int, optional
Number of items to return is not supported by dask. Use frac
instead.
frac : float, optional
Approximate fraction of items to return. This sampling fraction is
applied to all partitions equally. Note that this is an
**approximate fraction**. You should not expect exactly ``len(df) * frac``
items to be returned, as the exact number of elements selected will
depend on how your data is partitioned (but should be pretty close
in practice).
replace : boolean, optional
Sample with or without replacement. Default = False.
random_state : int or ``np.random.RandomState``
If an int, we create a new RandomState with this as the seed;
Otherwise we draw from the passed RandomState.
See Also
--------
DataFrame.random_split
pandas.DataFrame.sample
"""
if n is not None:
msg = (
"sample does not support the number of sampled items "
"parameter, 'n'. Please use the 'frac' parameter instead."
)
if isinstance(n, Number) and 0 <= n <= 1:
warnings.warn(msg)
frac = n
else:
raise ValueError(msg)
if frac is None:
raise ValueError("frac must not be None")
if random_state is None:
random_state = np.random.RandomState()
state_data = random_state_data(self.npartitions, random_state)
return new_collection(
expr.Sample(self, state_data=state_data, frac=frac, replace=replace)
)
def _repr_data(self):
raise NotImplementedError
@property
def _repr_divisions(self):
name = f"npartitions={self.npartitions}"
if self.known_divisions:
divisions = pd.Index(self.divisions, name=name)
else:
# avoid to be converted to NaN
divisions = pd.Index([""] * (self.npartitions + 1), name=name)
return divisions
def _dot_series(*args, **kwargs):
# .sum() is invoked on each partition before being applied to all
# partitions. The return type is expected to be a series, not a numpy object
return meta_series_constructor(args[0])(M.dot(*args, **kwargs))
# Add operator attributes
for op in [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__mod__",
"__rmod__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__lt__",
"__rlt__",
"__gt__",
"__rgt__",
"__le__",
"__rle__",
"__ge__",
"__rge__",
"__eq__",
"__ne__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
]:
setattr(FrameBase, op, functools.partialmethod(_wrap_expr_op, op=op))
for op in [
"__invert__",
"__neg__",
"__pos__",
]:
setattr(FrameBase, op, functools.partialmethod(_wrap_unary_expr_op, op=op))
| FrameBase |
python | coleifer__peewee | tests/keys.py | {
"start": 1143,
"end": 1312
} | class ____(TestModel):
f1 = CharField()
f2 = IntegerField()
f3 = FloatField()
class Meta:
primary_key = CompositeKey('f1', 'f2')
| CompositeKeyModel |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_facets_performance.py | {
"start": 2662,
"end": 5006
} | class ____(OrganizationEventsFacetsPerformanceEndpointBase):
def get(self, request: Request, organization: Organization) -> Response:
try:
snuba_params, aggregate_column, filter_query = self._setup(request, organization)
except NoProjects:
return Response([])
all_tag_keys = bool(request.GET.get("allTagKeys"))
tag_key = request.GET.get("tagKey")
if tag_key in TAG_ALIASES:
tag_key = TAG_ALIASES.get(tag_key)
def data_fn(offset, limit: int):
with sentry_sdk.start_span(op="discover.endpoint", name="discover_query"):
referrer = "api.organization-events-facets-performance.top-tags"
tag_data = query_tag_data(
filter_query=filter_query,
aggregate_column=aggregate_column,
referrer=referrer,
snuba_params=snuba_params,
)
if not tag_data:
return {"data": []}
results = query_facet_performance(
tag_data=tag_data,
filter_query=filter_query,
aggregate_column=aggregate_column,
referrer=referrer,
orderby=self.get_orderby(request),
limit=limit,
offset=offset,
snuba_params=snuba_params,
all_tag_keys=all_tag_keys,
tag_key=tag_key,
)
for row in results["data"]:
row["tags_value"] = tagstore.backend.get_tag_value_label(
row["tags_key"], row["tags_value"]
)
row["tags_key"] = tagstore.backend.get_standardized_key(row["tags_key"])
return results
with handle_query_errors():
return self.paginate(
request=request,
paginator=GenericOffsetPaginator(data_fn=data_fn),
on_results=lambda results: self.handle_results_with_meta(
request, organization, snuba_params.project_ids, results
),
default_per_page=5,
max_per_page=20,
)
@region_silo_endpoint
| OrganizationEventsFacetsPerformanceEndpoint |
python | keras-team__keras | keras/src/models/model.py | {
"start": 1301,
"end": 36664
} | class ____(Trainer, base_trainer.Trainer, Layer):
"""A model grouping layers into an object with training/inference features.
There are three ways to instantiate a `Model`:
## With the "Functional API"
You start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally, you create your model from inputs and outputs:
```python
inputs = keras.Input(shape=(37,))
x = keras.layers.Dense(32, activation="relu")(inputs)
outputs = keras.layers.Dense(5, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
```
Note: Only dicts, lists, and tuples of input tensors are supported. Nested
inputs are not supported (e.g. lists of list or dicts of dict).
A new Functional API model can also be created by using the
intermediate tensors. This enables you to quickly extract sub-components
of the model.
Example:
```python
inputs = keras.Input(shape=(None, None, 3))
processed = keras.layers.RandomCrop(width=128, height=128)(inputs)
conv = keras.layers.Conv2D(filters=32, kernel_size=3)(processed)
pooling = keras.layers.GlobalAveragePooling2D()(conv)
feature = keras.layers.Dense(10)(pooling)
full_model = keras.Model(inputs, feature)
backbone = keras.Model(processed, conv)
activations = keras.Model(conv, feature)
```
Note that the `backbone` and `activations` models are not
created with `keras.Input` objects, but with the tensors that originate
from `keras.Input` objects. Under the hood, the layers and weights will
be shared across these models, so that user can train the `full_model`, and
use `backbone` or `activations` to do feature extraction.
The inputs and outputs of the model can be nested structures of tensors as
well, and the created models are standard Functional API models that support
all the existing APIs.
## By subclassing the `Model` class
In that case, you should define your
layers in `__init__()` and you should implement the model's forward pass
in `call()`.
```python
class MyModel(keras.Model):
def __init__(self):
super().__init__()
self.dense1 = keras.layers.Dense(32, activation="relu")
self.dense2 = keras.layers.Dense(5, activation="softmax")
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call()`, which you can use to specify
a different behavior in training and inference:
```python
class MyModel(keras.Model):
def __init__(self):
super().__init__()
self.dense1 = keras.layers.Dense(32, activation="relu")
self.dense2 = keras.layers.Dense(5, activation="softmax")
self.dropout = keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
Once the model is created, you can config the model with losses and metrics
with `model.compile()`, train the model with `model.fit()`, or use the model
to do prediction with `model.predict()`.
## With the `Sequential` class
In addition, `keras.Sequential` is a special case of model where
the model is purely a stack of single-input, single-output layers.
```python
model = keras.Sequential([
keras.Input(shape=(None, None, 3)),
keras.layers.Conv2D(filters=32, kernel_size=3),
])
```
"""
def __new__(cls, *args, **kwargs):
# Signature detection for usage of `Model` as a `Functional`
if functional_init_arguments(args, kwargs) and cls == Model:
from keras.src.models.functional import Functional
return Functional.__new__(Functional, *args, **kwargs)
return typing.cast(cls, super().__new__(cls))
def __init__(self, *args, **kwargs):
Trainer.__init__(self)
from keras.src.models import functional
# Signature detection for usage of a `Model` subclass
# as a `Functional` subclass
if functional_init_arguments(args, kwargs):
inject_functional_model_class(self.__class__)
functional.Functional.__init__(self, *args, **kwargs)
else:
Layer.__init__(self, *args, **kwargs)
def call(self, *args, **kwargs):
raise NotImplementedError(
f"Model {self.__class__.__name__} does not have a `call()` "
"method implemented."
)
@property
def layers(self):
return list(self._flatten_layers(include_self=False, recursive=False))
@layers.setter
def layers(self, _):
raise AttributeError(
"`Model.layers` attribute is reserved and should not be used. "
"Please use another name."
)
@traceback_utils.filter_traceback
def get_layer(self, name=None, index=None):
"""Retrieves a layer based on either its name (unique) or index.
If `name` and `index` are both provided, `index` will take precedence.
Indices are based on order of horizontal graph traversal (bottom-up).
Args:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
"""
if index is not None and name is not None:
raise ValueError(
"Provide only a layer name or a layer index. Received: "
f"index={index}, name={name}."
)
if index is not None:
if len(self.layers) <= index:
raise ValueError(
f"Was asked to retrieve layer at index {index}"
f" but model only has {len(self.layers)}"
" layers."
)
else:
return self.layers[index]
if name is not None:
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError(
f"No such layer: {name}. Existing layers are: "
f"{list(layer.name for layer in self.layers)}."
)
raise ValueError(
"Provide either a layer name or layer index at `get_layer`."
)
@traceback_utils.filter_traceback
def summary(
self,
line_length=None,
positions=None,
print_fn=None,
expand_nested=False,
show_trainable=False,
layer_range=None,
):
"""Prints a string summary of the network.
Args:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided, becomes
`[0.3, 0.6, 0.70, 1.]`. Defaults to `None`.
print_fn: Print function to use. By default, prints to `stdout`.
If `stdout` doesn't work in your environment, change to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
expand_nested: Whether to expand the nested models.
Defaults to `False`.
show_trainable: Whether to show if a layer is trainable.
Defaults to `False`.
layer_range: a list or tuple of 2 strings,
which is the starting layer name and ending layer name
(both inclusive) indicating the range of layers to be printed
in summary. It also accepts regex patterns instead of exact
names. In this case, the start predicate will be
the first element that matches `layer_range[0]`
and the end predicate will be the last element
that matches `layer_range[1]`.
By default `None` considers all layers of the model.
Raises:
ValueError: if `summary()` is called before the model is built.
"""
summary_utils.print_summary(
self,
line_length=line_length,
positions=positions,
print_fn=print_fn,
expand_nested=expand_nested,
show_trainable=show_trainable,
layer_range=layer_range,
)
@traceback_utils.filter_traceback
def save(self, filepath, overwrite=True, zipped=None, **kwargs):
"""Saves a model as a `.keras` file.
Note that `model.save()` is an alias for `keras.saving.save_model()`.
The saved `.keras` file contains:
- The model's configuration (architecture)
- The model's weights
- The model's optimizer's state (if any)
Thus models can be reinstantiated in the exact same state.
Args:
filepath: `str` or `pathlib.Path` object.
The path where to save the model. Must end in `.keras`
(unless saving the model as an unzipped directory
via `zipped=False`).
overwrite: Whether we should overwrite any existing model at
the target location, or instead ask the user via
an interactive prompt.
zipped: Whether to save the model as a zipped `.keras`
archive (default when saving locally), or as an
unzipped directory (default when saving on the
Hugging Face Hub).
Example:
```python
model = keras.Sequential(
[
keras.layers.Dense(5, input_shape=(3,)),
keras.layers.Softmax(),
],
)
model.save("model.keras")
loaded_model = keras.saving.load_model("model.keras")
x = keras.random.uniform((10, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
```
"""
return saving_api.save_model(
self, filepath, overwrite=overwrite, zipped=zipped, **kwargs
)
@traceback_utils.filter_traceback
def save_weights(self, filepath, overwrite=True, max_shard_size=None):
"""Saves all weights to a single file or sharded files.
By default, the weights will be saved in a single `.weights.h5` file.
If sharding is enabled (`max_shard_size` is not `None`), the weights
will be saved in multiple files, each with a size at most
`max_shard_size` (in GB). Additionally, a configuration file
`.weights.json` will contain the metadata for the sharded files.
The saved sharded files contain:
- `*.weights.json`: The configuration file containing 'metadata' and
'weight_map'.
- `*_xxxxxx.weights.h5`: The sharded files containing only the
weights.
Args:
filepath: `str` or `pathlib.Path` object. Path where the weights
will be saved. When sharding, the filepath must end in
`.weights.json`. If `.weights.h5` is provided, it will be
overridden.
overwrite: Whether to overwrite any existing weights at the target
location or instead ask the user via an interactive prompt.
max_shard_size: `int` or `float`. Maximum size in GB for each
sharded file. If `None`, no sharding will be done. Defaults to
`None`.
Example:
```python
# Instantiate a EfficientNetV2L model with about 454MB of weights.
model = keras.applications.EfficientNetV2L(weights=None)
# Save the weights in a single file.
model.save_weights("model.weights.h5")
# Save the weights in sharded files. Use `max_shard_size=0.25` means
# each sharded file will be at most ~250MB.
model.save_weights("model.weights.json", max_shard_size=0.25)
# Load the weights in a new model with the same architecture.
loaded_model = keras.applications.EfficientNetV2L(weights=None)
loaded_model.load_weights("model.weights.h5")
x = keras.random.uniform((1, 480, 480, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
# Load the sharded weights in a new model with the same architecture.
loaded_model = keras.applications.EfficientNetV2L(weights=None)
loaded_model.load_weights("model.weights.json")
x = keras.random.uniform((1, 480, 480, 3))
assert np.allclose(model.predict(x), loaded_model.predict(x))
```
"""
return saving_api.save_weights(
self, filepath, overwrite=overwrite, max_shard_size=max_shard_size
)
@traceback_utils.filter_traceback
def load_weights(self, filepath, skip_mismatch=False, **kwargs):
"""Load the weights from a single file or sharded files.
Weights are loaded based on the network's topology. This means the
architecture should be the same as when the weights were saved. Note
that layers that don't have weights are not taken into account in the
topological ordering, so adding or removing layers is fine as long as
they don't have weights.
**Partial weight loading**
If you have modified your model, for instance by adding a new layer
(with weights) or by changing the shape of the weights of a layer, you
can choose to ignore errors and continue loading by setting
`skip_mismatch=True`. In this case any layer with mismatching weights
will be skipped. A warning will be displayed for each skipped layer.
**Sharding**
When loading sharded weights, it is important to specify `filepath` that
ends with `*.weights.json` which is used as the configuration file.
Additionally, the sharded files `*_xxxxx.weights.h5` must be in the same
directory as the configuration file.
Args:
filepath: `str` or `pathlib.Path` object. Path where the weights
will be saved. When sharding, the filepath must end in
`.weights.json`.
skip_mismatch: Boolean, whether to skip loading of layers where
there is a mismatch in the number of weights, or a mismatch in
the shape of the weights.
Example:
```python
# Load the weights in a single file.
model.load_weights("model.weights.h5")
# Load the weights in sharded files.
model.load_weights("model.weights.json")
```
"""
saving_api.load_weights(
self,
filepath,
skip_mismatch=skip_mismatch,
**kwargs,
)
def quantize(self, mode, config=None, **kwargs):
"""Quantize the weights of the model.
Note that the model must be built first before calling this method.
`quantize` will recursively call `quantize(mode)` in all layers and
will be skipped if the layer doesn't implement the function.
Args:
mode: The mode of the quantization. Only 'int8' is supported at this
time.
"""
from keras.src.dtype_policies import QUANTIZATION_MODES
# Validate inputs.
type_check = kwargs.pop("type_check", True)
if kwargs:
raise ValueError(
"Unrecognized keyword arguments "
f"passed to {self.__class__.__name__}: {kwargs}"
)
if mode not in QUANTIZATION_MODES:
raise ValueError(
"Invalid quantization mode. "
f"Expected one of {QUANTIZATION_MODES}. Received: mode={mode}"
)
if mode == "gptq":
if not isinstance(config, GPTQConfig):
raise ValueError(
"Mode 'gptq' requires a valid `config` argument of type "
f"`GPTQConfig`. Received: {type(config)}"
)
elif config is not None:
# All other modes must not receive a config
raise ValueError(
f"The `config` argument is only supported for 'gptq' mode, "
f"but received mode='{mode}' and a non-None config."
)
graph_modified = False
for layer in self._flatten_layers():
if len(list(layer._flatten_layers())) == 1:
try:
layer.quantize(mode, type_check=type_check, config=config)
graph_modified = True
except NotImplementedError as e:
warnings.warn(str(e))
except AttributeError:
pass
if mode == "gptq":
gptq_quantize(self, config)
# If any layer was changed, we must rebuild the execution functions.
if graph_modified:
self.train_function = None
self.test_function = None
self.predict_function = None
self._post_quantize(mode, **kwargs)
def _post_quantize(self, mode, **kwargs):
if backend.backend() == "torch":
# We need to manually retrack `torch_params`.
# The reason is that after quantization, the removed variables are
# still referenced by `torch_params` and cannot be gc.
for layer in self._flatten_layers():
layer._track_variables()
def build_from_config(self, config):
if not config:
return
status = False
if "input_shape" in config:
# Case: all inputs are in the first arg (possibly nested).
if utils.is_default(self.build):
status = self._build_by_run_for_single_pos_arg(
config["input_shape"]
)
else:
try:
self.build(config["input_shape"])
status = True
except:
pass
self._build_shapes_dict = config
elif "shapes_dict" in config:
# Case: inputs were recorded as multiple keyword arguments.
if utils.is_default(self.build):
status = self._build_by_run_for_kwargs(config["shapes_dict"])
else:
try:
self.build(**config["shapes_dict"])
status = True
except:
pass
self._build_shapes_dict = config["shapes_dict"]
if not status:
warnings.warn(
f"Model '{self.name}' had a build config, but the model "
"cannot be built automatically in "
"`build_from_config(config)`. "
"You should implement "
"`def build_from_config(self, config)`, "
"and you might also want to implement the method "
" that generates the config at saving time, "
"`def get_build_config(self)`. "
"The method `build_from_config()` is meant to "
"create the state of the model (i.e. its variables) "
"upon deserialization.",
stacklevel=2,
)
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={...})`.
Args:
**kwargs: Additional keyword arguments to be passed to
`json.dumps()`.
Returns:
A JSON string.
"""
from keras.src.saving import serialization_lib
model_config = serialization_lib.serialize_keras_object(self)
return json.dumps(model_config, **kwargs)
def export(
self,
filepath,
format="tf_saved_model",
verbose=None,
input_signature=None,
**kwargs,
):
"""Export the model as an artifact for inference.
Args:
filepath: `str` or `pathlib.Path` object. The path to save the
artifact.
format: `str`. The export format. Supported values:
`"tf_saved_model"` and `"onnx"`. Defaults to
`"tf_saved_model"`.
verbose: `bool`. Whether to print a message during export. Defaults
to `None`, which uses the default value set by different
backends and formats.
input_signature: Optional. Specifies the shape and dtype of the
model inputs. Can be a structure of `keras.InputSpec`,
`tf.TensorSpec`, `backend.KerasTensor`, or backend tensor. If
not provided, it will be automatically computed. Defaults to
`None`.
**kwargs: Additional keyword arguments.
- `is_static`: Optional `bool`. Specific to the JAX backend and
`format="tf_saved_model"`. Indicates whether `fn` is static.
Set to `False` if `fn` involves state updates (e.g., RNG
seeds and counters).
- `jax2tf_kwargs`: Optional `dict`. Specific to the JAX backend
and `format="tf_saved_model"`. Arguments for
`jax2tf.convert`. See the documentation for
[`jax2tf.convert`](
https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md).
If `native_serialization` and `polymorphic_shapes` are not
provided, they will be automatically computed.
- `opset_version`: Optional `int`. Specific to `format="onnx"`.
An integer value that specifies the ONNX opset version.
**Note:** This feature is currently supported only with TensorFlow, JAX
and Torch backends.
**Note:** Be aware that the exported artifact may contain information
from the local file system when using `format="onnx"`, `verbose=True`
and Torch backend.
Examples:
Here's how to export a TensorFlow SavedModel for inference.
```python
# Export the model as a TensorFlow SavedModel artifact
model.export("path/to/location", format="tf_saved_model")
# Load the artifact in a different process/environment
reloaded_artifact = tf.saved_model.load("path/to/location")
predictions = reloaded_artifact.serve(input_data)
```
Here's how to export an ONNX for inference.
```python
# Export the model as a ONNX artifact
model.export("path/to/location", format="onnx")
# Load the artifact in a different process/environment
ort_session = onnxruntime.InferenceSession("path/to/location")
ort_inputs = {
k.name: v for k, v in zip(ort_session.get_inputs(), input_data)
}
predictions = ort_session.run(None, ort_inputs)
```
"""
from keras.src.export import export_onnx
from keras.src.export import export_openvino
from keras.src.export import export_saved_model
available_formats = ("tf_saved_model", "onnx", "openvino")
if format not in available_formats:
raise ValueError(
f"Unrecognized format={format}. Supported formats are: "
f"{list(available_formats)}."
)
if format == "tf_saved_model":
export_saved_model(
self,
filepath,
verbose,
input_signature=input_signature,
**kwargs,
)
elif format == "onnx":
export_onnx(
self,
filepath,
verbose,
input_signature=input_signature,
**kwargs,
)
elif format == "openvino":
export_openvino(
self,
filepath,
verbose,
input_signature=input_signature,
**kwargs,
)
@classmethod
def from_config(cls, config, custom_objects=None):
from keras.src.models.functional import Functional
functional_config_keys = [
"name",
"layers",
"input_layers",
"output_layers",
]
is_functional_config = all(
key in config for key in functional_config_keys
)
argspec = inspect.getfullargspec(cls.__init__)
functional_init_args = inspect.getfullargspec(Functional.__init__).args[
1:
]
revivable_as_functional = (
cls in {Functional, Model}
or argspec.args[1:] == functional_init_args
or (argspec.varargs == "args" and argspec.varkw == "kwargs")
)
if is_functional_config and revivable_as_functional:
# Revive Functional model
# (but not Functional subclasses with a custom __init__)
from keras.src.models.functional import functional_from_config
return functional_from_config(
cls, config, custom_objects=custom_objects
)
# Either the model has a custom __init__, or the config
# does not contain all the information necessary to
# revive a Functional model. This happens when the user creates
# subclassed models where `get_config()` is returning
# insufficient information to be considered a Functional model.
# In this case, we fall back to provide all config into the
# constructor of the class.
try:
return cls(**config)
except TypeError as e:
raise TypeError(
"Unable to revive model from config. When overriding "
"the `get_config()` method, make sure that the "
"returned config contains all items used as arguments "
f"in the constructor to {cls}, "
"which is the default behavior. "
"You can override this default behavior by defining a "
"`from_config(cls, config)` class method to specify "
"how to create an "
f"instance of {cls.__name__} from its config.\n\n"
f"Received config={config}\n\n"
f"Error encountered during deserialization: {e}"
)
def _get_variable_map(self):
store = {}
map_saveable_variables(self, store=store, visited_saveables=set())
return store
def get_state_tree(self, value_format="backend_tensor"):
"""Retrieves tree-like structure of model variables.
This method allows retrieval of different model variables (trainable,
non-trainable, optimizer, and metrics). The variables are returned in a
nested dictionary format, where the keys correspond to the variable
names and the values are the nested representations of the variables.
Returns:
dict: A dictionary containing the nested representations of the
requested variables. The keys are the variable names, and the
values are the corresponding nested dictionaries.
value_format: One of `"backend_tensor"`, `"numpy_array"`.
The kind of array to return as the leaves of the nested
state tree.
Example:
```python
model = keras.Sequential([
keras.Input(shape=(1,), name="my_input"),
keras.layers.Dense(1, activation="sigmoid", name="my_dense"),
], name="my_sequential")
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
model.fit(np.array([[1.0]]), np.array([[1.0]]))
state_tree = model.get_state_tree()
```
The `state_tree` dictionary returned looks like:
```
{
'metrics_variables': {
'loss': {
'count': ...,
'total': ...,
},
'mean_absolute_error': {
'count': ...,
'total': ...,
}
},
'trainable_variables': {
'my_sequential': {
'my_dense': {
'bias': ...,
'kernel': ...,
}
}
},
'non_trainable_variables': {},
'optimizer_variables': {
'adam': {
'iteration': ...,
'learning_rate': ...,
'my_sequential_my_dense_bias_momentum': ...,
'my_sequential_my_dense_bias_velocity': ...,
'my_sequential_my_dense_kernel_momentum': ...,
'my_sequential_my_dense_kernel_velocity': ...,
}
}
}
}
```
"""
variables = {}
variables["trainable_variables"] = self._create_nested_dict(
self.trainable_variables, value_format
)
variables["non_trainable_variables"] = self._create_nested_dict(
self.non_trainable_variables, value_format
)
variables["optimizer_variables"] = self._create_nested_dict(
self.optimizer.variables, value_format
)
variables["metrics_variables"] = self._create_nested_dict(
self.metrics_variables, value_format
)
return variables
def _create_nested_dict(self, variables, value_format):
flat_dict = {}
for v in variables:
if v.path in flat_dict:
raise ValueError(
"The following variable path is found twice in the model: "
f"'{v.path}'. `get_state_tree()` can only be called when "
"all variable paths are unique. Make sure to give unique "
"names to your layers (and other objects)."
)
if value_format == "backend_tensor":
flat_dict[v.path] = v.value
elif value_format == "numpy_array":
flat_dict[v.path] = v.numpy()
else:
raise ValueError(
"Invalid `value_format` argument. Expected one of "
"{'numpy_array', 'backend_tensor'}. Received: "
f"value_format={value_format}"
)
nested_dict = {}
for path, value in flat_dict.items():
parts = path.split("/")
current_dict = nested_dict
for part in parts[:-1]:
if part not in current_dict:
current_dict[part] = {}
current_dict = current_dict[part]
current_dict[parts[-1]] = value
return nested_dict
def set_state_tree(self, state_tree):
"""Assigns values to variables of the model.
This method takes a dictionary of nested variable values, which
represents the state tree of the model, and assigns them to the
corresponding variables of the model. The dictionary keys represent the
variable names (e.g., `'trainable_variables'`, `'optimizer_variables'`),
and the values are nested dictionaries containing the variable
paths and their corresponding values.
Args:
state_tree: A dictionary representing the state tree of the model.
The keys are the variable names, and the values are nested
dictionaries representing the variable paths and their values.
"""
for k, v in state_tree.items():
path_value_dict = self._flatten_nested_dict(v)
if k == "trainable_variables":
self._assign_variable_values(
self.trainable_variables, path_value_dict
)
elif k == "non_trainable_variables":
self._assign_variable_values(
self.non_trainable_variables, path_value_dict
)
elif k == "optimizer_variables":
self._assign_variable_values(
self.optimizer.variables, path_value_dict
)
elif k == "metrics_variables":
self._assign_variable_values(
self.metrics_variables, path_value_dict
)
else:
raise ValueError(f"Unknown variable name: {k}")
def _assign_variable_values(self, variables, path_value_dict):
for path, value in path_value_dict.items():
for variable in variables:
if variable.path == path:
variable.assign(value)
def _flatten_nested_dict(self, nested_dict):
flat_dict = {}
def _flatten(current_dict, prefix=""):
for key, value in current_dict.items():
if isinstance(value, dict):
_flatten(value, f"{prefix}{key}/")
else:
flat_dict[f"{prefix}{key}"] = value
_flatten(nested_dict)
return flat_dict
@keras_export("keras.models.model_from_json")
def model_from_json(json_string, custom_objects=None):
"""Parses a JSON model configuration string and returns a model instance.
Example:
>>> model = keras.Sequential([
... keras.layers.Dense(5, input_shape=(3,)),
... keras.layers.Softmax()])
>>> config = model.to_json()
>>> loaded_model = keras.models.model_from_json(config)
Args:
json_string: JSON string encoding a model configuration.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
"""
from keras.src.saving import serialization_lib
model_config = json.loads(json_string)
return serialization_lib.deserialize_keras_object(
model_config, custom_objects=custom_objects
)
def functional_init_arguments(args, kwargs):
return (
(len(args) == 2)
or (len(args) == 1 and "outputs" in kwargs)
or ("inputs" in kwargs and "outputs" in kwargs)
)
def inject_functional_model_class(cls):
"""Inject `Functional` into the hierarchy of this class if needed."""
from keras.src.models import functional
if cls is Model:
return functional.Functional
# In case there is any multiple inheritance, we stop injecting the
# class if keras model is not in its class hierarchy.
if cls is object:
return object
cls.__bases__ = tuple(
inject_functional_model_class(base) for base in cls.__bases__
)
# Trigger any `__new__` class swapping that needed to happen on `Functional`
# but did not because functional was not in the class hierarchy.
cls.__new__(cls)
return cls
| Model |
python | vyperlang__vyper | vyper/exceptions.py | {
"start": 7986,
"end": 8074
} | class ____(VyperException):
"""Invalid event declaration."""
| EventDeclarationException |
python | spack__spack | lib/spack/spack/fetch_strategy.py | {
"start": 66930,
"end": 67026
} | class ____(spack.error.FetchError):
"""Raised when archive fails to checksum."""
| ChecksumError |
python | celery__celery | t/unit/worker/test_loops.py | {
"start": 4481,
"end": 15399
} | class ____:
def setup_method(self):
@self.app.task(shared=False)
def add(x, y):
return x + y
self.add = add
def test_drain_after_consume(self):
x, _ = get_task_callback(self.app, transport_driver_type='amqp')
assert _quick_drain in [p.fun for p in x.hub._ready]
def test_pool_did_not_start_at_startup(self):
x = X(self.app)
x.obj.restart_count = 0
x.obj.pool.did_start_ok.return_value = False
with pytest.raises(WorkerLostError):
asynloop(*x.args)
def test_setup_heartbeat(self):
x = X(self.app, heartbeat=10)
x.hub.timer.call_repeatedly = Mock(name='x.hub.call_repeatedly()')
x.blueprint.state = CLOSE
asynloop(*x.args)
x.consumer.consume.assert_called_with()
x.obj.on_ready.assert_called_with()
last_call_args, _ = x.hub.timer.call_repeatedly.call_args
assert last_call_args[0] == 10 / 2.0
assert last_call_args[2] == (2.0,)
def task_context(self, sig, **kwargs):
x, on_task = get_task_callback(self.app, **kwargs)
message = self.task_message_from_sig(self.app, sig)
strategy = x.obj.strategies[sig.task] = Mock(name='strategy')
return x, on_task, message, strategy
def test_on_task_received(self):
x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
on_task(msg)
strategy.assert_called_with(
msg, None,
PromiseEqual(x._consumer.call_soon, msg.ack_log_error),
PromiseEqual(x._consumer.call_soon, msg.reject_log_error), [],
)
def test_on_task_received_executes_on_task_message(self):
cbs = [Mock(), Mock(), Mock()]
x, on_task, msg, strategy = self.task_context(
self.add.s(2, 2), on_task_message=cbs,
)
on_task(msg)
strategy.assert_called_with(
msg, None,
PromiseEqual(x._consumer.call_soon, msg.ack_log_error),
PromiseEqual(x._consumer.call_soon, msg.reject_log_error),
cbs,
)
def test_on_task_message_missing_name(self):
x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
msg.headers.pop('task')
on_task(msg)
x.on_unknown_message.assert_called_with(msg.decode(), msg)
def test_on_task_pool_raises(self):
x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
strategy.side_effect = ValueError()
with pytest.raises(ValueError):
on_task(msg)
def test_on_task_InvalidTaskError(self):
x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
exc = strategy.side_effect = InvalidTaskError()
on_task(msg)
x.on_invalid_task.assert_called_with(None, msg, exc)
def test_on_task_DecodeError(self):
x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
exc = strategy.side_effect = DecodeError()
on_task(msg)
x.on_decode_error.assert_called_with(msg, exc)
@pytest.mark.parametrize('should_stop', (None, False, True, EX_OK))
def test_should_terminate(self, should_stop):
x = X(self.app)
state.should_stop = should_stop
state.should_terminate = True
try:
with pytest.raises(WorkerTerminate):
asynloop(*x.args)
finally:
state.should_stop = None
state.should_terminate = None
def test_should_terminate_hub_close_raises(self):
x = X(self.app)
# XXX why aren't the errors propagated?!?
state.should_terminate = EX_FAILURE
x.hub.close.side_effect = MemoryError()
try:
with pytest.raises(WorkerTerminate):
asynloop(*x.args)
finally:
state.should_terminate = None
def test_should_stop(self):
x = X(self.app)
state.should_stop = 303
try:
with pytest.raises(WorkerShutdown):
asynloop(*x.args)
finally:
state.should_stop = None
def test_updates_qos(self):
x = X(self.app)
x.qos.prev = 3
x.qos.value = 3
x.hub.on_tick.add(x.closer(mod=2))
x.hub.timer._queue = [1]
asynloop(*x.args)
x.qos.update.assert_not_called()
x = X(self.app)
x.qos.prev = 1
x.qos.value = 6
x.hub.on_tick.add(x.closer(mod=2))
asynloop(*x.args)
x.qos.update.assert_called_with()
x.hub.fire_timers.assert_called_with(propagate=(socket.error,))
def test_poll_empty(self):
x = X(self.app)
x.hub.readers = {6: Mock()}
x.hub.timer._queue = [1]
x.close_then_error(x.hub.poller.poll)
x.hub.fire_timers.return_value = 33.37
poller = x.hub.poller
poller.poll.return_value = []
with pytest.raises(socket.error):
asynloop(*x.args)
poller.poll.assert_called_with(33.37)
def test_poll_readable(self):
x = X(self.app)
reader = Mock(name='reader')
x.hub.add_reader(6, reader, 6)
x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), mod=4))
poller = x.hub.poller
poller.poll.return_value = [(6, READ)]
with pytest.raises(socket.error):
asynloop(*x.args)
reader.assert_called_with(6)
poller.poll.assert_called()
def test_poll_readable_raises_Empty(self):
x = X(self.app)
reader = Mock(name='reader')
x.hub.add_reader(6, reader, 6)
x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
poller = x.hub.poller
poller.poll.return_value = [(6, READ)]
reader.side_effect = Empty()
with pytest.raises(socket.error):
asynloop(*x.args)
reader.assert_called_with(6)
poller.poll.assert_called()
def test_poll_writable(self):
x = X(self.app)
writer = Mock(name='writer')
x.hub.add_writer(6, writer, 6)
x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
poller = x.hub.poller
poller.poll.return_value = [(6, WRITE)]
with pytest.raises(socket.error):
asynloop(*x.args)
writer.assert_called_with(6)
poller.poll.assert_called()
def test_poll_writable_none_registered(self):
x = X(self.app)
writer = Mock(name='writer')
x.hub.add_writer(6, writer, 6)
x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
poller = x.hub.poller
poller.poll.return_value = [(7, WRITE)]
with pytest.raises(socket.error):
asynloop(*x.args)
poller.poll.assert_called()
def test_poll_unknown_event(self):
x = X(self.app)
writer = Mock(name='reader')
x.hub.add_writer(6, writer, 6)
x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
poller = x.hub.poller
poller.poll.return_value = [(6, 0)]
with pytest.raises(socket.error):
asynloop(*x.args)
poller.poll.assert_called()
def test_poll_keep_draining_disabled(self):
x = X(self.app)
x.hub.writers = {6: Mock()}
poll = x.hub.poller.poll
def se(*args, **kwargs):
poll.side_effect = socket.error()
poll.side_effect = se
poller = x.hub.poller
poll.return_value = [(6, 0)]
with pytest.raises(socket.error):
asynloop(*x.args)
poller.poll.assert_called()
def test_poll_err_writable(self):
x = X(self.app)
writer = Mock(name='writer')
x.hub.add_writer(6, writer, 6, 48)
x.hub.on_tick.add(x.close_then_error(Mock(), 2))
poller = x.hub.poller
poller.poll.return_value = [(6, ERR)]
with pytest.raises(socket.error):
asynloop(*x.args)
writer.assert_called_with(6, 48)
poller.poll.assert_called()
def test_poll_write_generator(self):
x = X(self.app)
x.hub.remove_writer = Mock(name='hub.remove_writer()')
def Gen():
yield 1
yield 2
gen = Gen()
x.hub.add_writer(6, gen)
x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
x.hub.poller.poll.return_value = [(6, WRITE)]
with pytest.raises(socket.error):
asynloop(*x.args)
assert gen.gi_frame.f_lasti != -1
x.hub.remove_writer.assert_not_called()
def test_poll_write_generator_stopped(self):
x = X(self.app)
def Gen():
if 0:
yield
gen = Gen()
x.hub.add_writer(6, gen)
x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
x.hub.poller.poll.return_value = [(6, WRITE)]
x.hub.remove_writer = Mock(name='hub.remove_writer()')
with pytest.raises(socket.error):
asynloop(*x.args)
assert gen.gi_frame is None
def test_poll_write_generator_raises(self):
x = X(self.app)
def Gen():
raise ValueError('foo')
yield
gen = Gen()
x.hub.add_writer(6, gen)
x.hub.remove = Mock(name='hub.remove()')
x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2))
x.hub.poller.poll.return_value = [(6, WRITE)]
with pytest.raises(ValueError):
asynloop(*x.args)
assert gen.gi_frame is None
x.hub.remove.assert_called_with(6)
def test_poll_err_readable(self):
x = X(self.app)
reader = Mock(name='reader')
x.hub.add_reader(6, reader, 6, 24)
x.hub.on_tick.add(x.close_then_error(Mock(), 2))
poller = x.hub.poller
poller.poll.return_value = [(6, ERR)]
with pytest.raises(socket.error):
asynloop(*x.args)
reader.assert_called_with(6, 24)
poller.poll.assert_called()
def test_poll_raises_ValueError(self):
x = X(self.app)
x.hub.readers = {6: Mock()}
poller = x.hub.poller
x.close_then_error(poller.poll, exc=ValueError)
asynloop(*x.args)
poller.poll.assert_called()
def test_heartbeat_error(self):
x = X(self.app, heartbeat=10)
x.connection.heartbeat_check = Mock(
side_effect=RuntimeError("Heartbeat error")
)
def call_repeatedly(rate, fn, args):
fn(*args)
x.hub.timer.call_repeatedly = call_repeatedly
with pytest.raises(RuntimeError):
asynloop(*x.args)
def test_no_heartbeat_support(self):
x = X(self.app)
x.connection.supports_heartbeats = False
x.hub.timer.call_repeatedly = Mock(
name='x.hub.timer.call_repeatedly()'
)
x.hub.on_tick.add(x.closer(mod=2))
asynloop(*x.args)
x.hub.timer.call_repeatedly.assert_not_called()
| test_asynloop |
python | walkccc__LeetCode | solutions/1800. Maximum Ascending Subarray Sum/1800.py | {
"start": 0,
"end": 277
} | class ____:
def maxAscendingSum(self, nums: list[int]) -> int:
ans = 0
sum = nums[0]
for i in range(1, len(nums)):
if nums[i] > nums[i - 1]:
sum += nums[i]
else:
ans = max(ans, sum)
sum = nums[i]
return max(ans, sum)
| Solution |
python | arrow-py__arrow | arrow/locales.py | {
"start": 109003,
"end": 110564
} | class ____(Locale):
names = ["la", "la-va"]
past = "ante {0}"
future = "in {0}"
and_word = "et"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "nunc",
"second": "secundum",
"seconds": "{0} secundis",
"minute": "minutam",
"minutes": "{0} minutis",
"hour": "horam",
"hours": "{0} horas",
"day": "diem",
"days": "{0} dies",
"week": "hebdomadem",
"weeks": "{0} hebdomades",
"month": "mensem",
"months": "{0} mensis",
"year": "annum",
"years": "{0} annos",
}
month_names = [
"",
"Ianuarius",
"Februarius",
"Martius",
"Aprilis",
"Maius",
"Iunius",
"Iulius",
"Augustus",
"September",
"October",
"November",
"December",
]
month_abbreviations = [
"",
"Ian",
"Febr",
"Mart",
"Apr",
"Mai",
"Iun",
"Iul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
]
day_names = [
"",
"dies Lunae",
"dies Martis",
"dies Mercurii",
"dies Iovis",
"dies Veneris",
"dies Saturni",
"dies Solis",
]
day_abbreviations = [
"",
"dies Lunae",
"dies Martis",
"dies Mercurii",
"dies Iovis",
"dies Veneris",
"dies Saturni",
"dies Solis",
]
| LatinLocale |
python | run-llama__llama_index | llama-index-core/llama_index/core/schema.py | {
"start": 19328,
"end": 22105
} | class ____(BaseNode):
text_resource: MediaResource | None = Field(
default=None, description="Text content of the node."
)
image_resource: MediaResource | None = Field(
default=None, description="Image content of the node."
)
audio_resource: MediaResource | None = Field(
default=None, description="Audio content of the node."
)
video_resource: MediaResource | None = Field(
default=None, description="Video content of the node."
)
text_template: str = Field(
default=DEFAULT_TEXT_NODE_TMPL,
description=(
"Template for how text_resource is formatted, with {content} and "
"{metadata_str} placeholders."
),
)
@classmethod
def class_name(cls) -> str:
return "Node"
@classmethod
def get_type(cls) -> str:
"""Get Object type."""
return ObjectType.MULTIMODAL
def get_content(self, metadata_mode: MetadataMode = MetadataMode.NONE) -> str:
"""
Get the text content for the node if available.
Provided for backward compatibility, use self.text_resource directly instead.
"""
if self.text_resource:
metadata_str = self.get_metadata_str(metadata_mode)
if metadata_mode == MetadataMode.NONE or not metadata_str:
return self.text_resource.text or ""
return self.text_template.format(
content=self.text_resource.text or "",
metadata_str=metadata_str,
).strip()
return ""
def set_content(self, value: str) -> None:
"""
Set the text content of the node.
Provided for backward compatibility, set self.text_resource instead.
"""
self.text_resource = MediaResource(text=value)
@property
def hash(self) -> str:
"""
Generate a hash representing the state of the node.
The hash is generated based on the available resources (audio, image, text or video) and its metadata.
"""
doc_identities = []
metadata_str = self.get_metadata_str(mode=MetadataMode.ALL)
if metadata_str:
doc_identities.append(metadata_str)
if self.audio_resource is not None:
doc_identities.append(self.audio_resource.hash)
if self.image_resource is not None:
doc_identities.append(self.image_resource.hash)
if self.text_resource is not None:
doc_identities.append(self.text_resource.hash)
if self.video_resource is not None:
doc_identities.append(self.video_resource.hash)
doc_identity = "-".join(doc_identities)
return str(sha256(doc_identity.encode("utf-8", "surrogatepass")).hexdigest())
| Node |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 457420,
"end": 457926
} | class ____(AtomicExprNode):
type = dict_type
is_temp = 1
def analyse_types(self, env):
env.use_utility_code(Builtin.globals_utility_code)
return self
gil_message = "Constructing globals dict"
def may_be_none(self):
return False
def generate_result_code(self, code):
code.putln('%s = __Pyx_Globals(); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
| GlobalsExprNode |
python | PrefectHQ__prefect | src/prefect/server/concurrency/lease_storage/__init__.py | {
"start": 433,
"end": 541
} | class ____(Protocol):
ConcurrencyLeaseStorage: type[ConcurrencyLeaseStorage]
| ConcurrencyLeaseStorageModule |
python | scipy__scipy | scipy/spatial/tests/test_kdtree.py | {
"start": 9107,
"end": 10217
} | class ____:
tol = 0.0
def distance(self, a, b, p):
return minkowski_distance(a * 1.0, b * 1.0, p)
def test_in_ball(self):
x = np.atleast_2d(self.x)
d = np.broadcast_to(self.d, x.shape[:-1])
l = self.T.query_ball_point(x, self.d, p=self.p, eps=self.eps)
for i, ind in enumerate(l):
dist = self.distance(self.data[ind], x[i], self.p) - d[i]*(1.+self.eps)
norm = self.distance(self.data[ind], x[i], self.p) + d[i]*(1.+self.eps)
assert_array_equal(dist < self.tol * norm, True)
def test_found_all(self):
x = np.atleast_2d(self.x)
d = np.broadcast_to(self.d, x.shape[:-1])
l = self.T.query_ball_point(x, self.d, p=self.p, eps=self.eps)
for i, ind in enumerate(l):
c = np.ones(self.T.n, dtype=bool)
c[ind] = False
dist = self.distance(self.data[c], x[i], self.p) - d[i]/(1.+self.eps)
norm = self.distance(self.data[c], x[i], self.p) + d[i]/(1.+self.eps)
assert_array_equal(dist > -self.tol * norm, True)
@KDTreeTest
| ball_consistency |
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 2484,
"end": 2783
} | class ____(BaseStringType):
_members: Tuple[str, ...]
@classmethod
def validate(cls, value: Any) -> None:
cls.validate_string(value)
if value not in cls._members:
raise ValueError("must be one of %s, got '%s'" % (cls._members, value))
| BaseStringEnumerationType |
python | viewflow__viewflow | viewflow/contrib/plotly/views.py | {
"start": 911,
"end": 2557
} | class ____(TemplateView):
template_name = "viewflow/contrib/plotly.html"
viewset = None
def get_context_data(self, **kwargs):
return super().get_context_data(
dash_scripts_urls=_extract_urls(
self.viewset.dash_app._generate_scripts_html()
),
dash_config_html=self.viewset.dash_app._generate_config_html(),
)
def layout_endpoint(request, viewset):
dash_response = viewset.dash_app.serve_layout()
return HttpResponse(dash_response.data, content_type="application/json")
def dependencies_endpoint(request, viewset):
return JsonResponse(viewset.dash_app._callback_list, safe=False)
@csrf_exempt
@require_POST
def update_component_endpoint(request, viewset):
try:
request_body = json.loads(request.body.decode())
output = request_body["output"]
outputs_list = request_body["outputs"]
inputs = request_body.get("inputs", [])
state = request_body.get("state", [])
callback_data = viewset.dash_app.callback_map[output]
except (json.JSONDecodeError, KeyError) as e:
return JsonResponse(
{"code": 400, "message": f"Request body error: {e}"}, status=400
)
else:
callback_args = [
[item.get("value") for item in items]
if isinstance(items, list)
else items.get("value")
for items in inputs + state
]
callback_func = callback_data["callback"]
result = callback_func(*callback_args, outputs_list=outputs_list)
return HttpResponse(result, content_type="application/json")
| DashboardView |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 45647,
"end": 46962
} | class ____(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the plane in X
slope_y : float
Slope of the plane in Y
intercept : float
Z-intercept of the plane
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
"""
slope_x = Parameter(default=1, description="Slope of the plane in X")
slope_y = Parameter(default=1, description="Slope of the plane in Y")
intercept = Parameter(default=0, description="Z-intercept of the plane")
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function."""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, *params):
"""Two dimensional Plane model derivative with respect to parameters."""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"intercept": outputs_unit["z"],
"slope_x": outputs_unit["z"] / inputs_unit["x"],
"slope_y": outputs_unit["z"] / inputs_unit["y"],
}
| Planar2D |
python | pennersr__django-allauth | allauth/socialaccount/providers/openid/utils.py | {
"start": 1953,
"end": 2074
} | class ____:
EMAIL = "email"
NAME = "fullname"
SRegFields = [
SRegField.EMAIL,
SRegField.NAME,
]
| SRegField |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_set_value.py | {
"start": 138,
"end": 2492
} | class ____:
def test_set_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
float_frame._set_value(idx, col, 1)
assert float_frame[col][idx] == 1
def test_set_value_resize(self, float_frame, using_infer_string):
res = float_frame._set_value("foobar", "B", 0)
assert res is None
assert float_frame.index[-1] == "foobar"
assert float_frame._get_value("foobar", "B") == 0
float_frame.loc["foobar", "qux"] = 0
assert float_frame._get_value("foobar", "qux") == 0
res = float_frame.copy()
res._set_value("foobar", "baz", "sam")
if using_infer_string:
assert res["baz"].dtype == "str"
else:
assert res["baz"].dtype == np.object_
res = float_frame.copy()
res._set_value("foobar", "baz", True)
assert res["baz"].dtype == np.object_
res = float_frame.copy()
res._set_value("foobar", "baz", 5)
assert is_float_dtype(res["baz"])
assert isna(res["baz"].drop(["foobar"])).all()
with pytest.raises(TypeError, match="Invalid value"):
res._set_value("foobar", "baz", "sam")
def test_set_value_with_index_dtype_change(self):
df_orig = DataFrame(
np.random.default_rng(2).standard_normal((3, 3)),
index=range(3),
columns=list("ABC"),
)
# this is actually ambiguous as the 2 is interpreted as a positional
# so column is not created
df = df_orig.copy()
df._set_value("C", 2, 1.0)
assert list(df.index) == list(df_orig.index) + ["C"]
# assert list(df.columns) == list(df_orig.columns) + [2]
df = df_orig.copy()
df.loc["C", 2] = 1.0
assert list(df.index) == list(df_orig.index) + ["C"]
# assert list(df.columns) == list(df_orig.columns) + [2]
# create both new
df = df_orig.copy()
df._set_value("C", "D", 1.0)
assert list(df.index) == list(df_orig.index) + ["C"]
assert list(df.columns) == list(df_orig.columns) + ["D"]
df = df_orig.copy()
df.loc["C", "D"] = 1.0
assert list(df.index) == list(df_orig.index) + ["C"]
assert list(df.columns) == list(df_orig.columns) + ["D"]
| TestSetValue |
python | kamyu104__LeetCode-Solutions | Python/the-k-strongest-values-in-an-array.py | {
"start": 915,
"end": 2353
} | class ____(object):
def getStrongest(self, arr, k):
"""
:type arr: List[int]
:type k: int
:rtype: List[int]
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def partition_around_pivot(left, right, pivot_idx, nums, compare):
new_pivot_idx = left
nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
for i in xrange(left, right):
if compare(nums[i], nums[right]):
nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
new_pivot_idx += 1
nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
return new_pivot_idx
left, right = 0, len(nums) - 1
while left <= right:
pivot_idx = random.randint(left, right)
new_pivot_idx = partition_around_pivot(left, right, pivot_idx, nums, compare)
if new_pivot_idx == n:
return
elif new_pivot_idx > n:
right = new_pivot_idx - 1
else: # new_pivot_idx < n
left = new_pivot_idx + 1
nth_element(arr, (len(arr)-1)//2)
m = arr[(len(arr)-1)//2]
nth_element(arr, k, lambda a, b: abs(a-m) > abs(b-m) if abs(a-m) != abs(b-m) else a > b)
return arr[:k]
| Solution_TLE |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 11087,
"end": 13923
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.is_causal = False
self.attention_dropout = config.attention_dropout
# small tweak here compared to CLIP, no bias here
self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
if config.qkv_bias:
q_bias = nn.Parameter(torch.zeros(self.embed_dim))
v_bias = nn.Parameter(torch.zeros(self.embed_dim))
else:
q_bias = None
v_bias = None
if q_bias is not None:
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
self.qkv.bias = nn.Parameter(qkv_bias)
self.projection = nn.Linear(self.embed_dim, self.embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
mixed_qkv = self.qkv(hidden_states)
mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(
2, 0, 3, 1, 4
)
query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2]
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask=None,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scale,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.projection(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.blip.modeling_blip.BlipMLP
| Blip2Attention |
python | django__django | django/contrib/sessions/middleware.py | {
"start": 355,
"end": 3483
} | class ____(MiddlewareMixin):
def __init__(self, get_response):
super().__init__(get_response)
engine = import_module(settings.SESSION_ENGINE)
self.SessionStore = engine.SessionStore
def process_request(self, request):
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
request.session = self.SessionStore(session_key)
def process_response(self, request, response):
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie or delete
the session cookie if the session has been emptied.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
empty = request.session.is_empty()
except AttributeError:
return response
# First check if we need to delete this cookie.
# The session should be deleted only if the session is entirely empty.
if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
response.delete_cookie(
settings.SESSION_COOKIE_NAME,
path=settings.SESSION_COOKIE_PATH,
domain=settings.SESSION_COOKIE_DOMAIN,
samesite=settings.SESSION_COOKIE_SAMESITE,
)
patch_vary_headers(response, ("Cookie",))
else:
if accessed:
patch_vary_headers(response, ("Cookie",))
if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = http_date(expires_time)
# Save the session data and refresh the client cookie.
# Skip session save for 5xx responses.
if response.status_code < 500:
try:
request.session.save()
except UpdateError:
raise SessionInterrupted(
"The request's session was deleted before the "
"request completed. The user may have logged "
"out in a concurrent request, for example."
)
response.set_cookie(
settings.SESSION_COOKIE_NAME,
request.session.session_key,
max_age=max_age,
expires=expires,
domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None,
samesite=settings.SESSION_COOKIE_SAMESITE,
)
return response
| SessionMiddleware |
python | getsentry__sentry | src/sentry/workflow_engine/models/alertrule_workflow.py | {
"start": 280,
"end": 1422
} | class ____(DefaultFieldsModel):
"""
A lookup model for rules and workflows.
"""
__relocation_scope__ = RelocationScope.Organization
alert_rule_id = BoundedBigIntegerField(null=True, db_index=True)
rule_id = BoundedBigIntegerField(null=True, db_index=True)
workflow = FlexibleForeignKey("workflow_engine.Workflow")
class Meta:
db_table = "workflow_engine_alertruleworkflow"
app_label = "workflow_engine"
unique_together = (
("workflow", "rule_id"),
("workflow", "alert_rule_id"),
)
constraints = [
CheckConstraint(
condition=Q(rule_id__isnull=False, alert_rule_id__isnull=True)
| Q(rule_id__isnull=True, alert_rule_id__isnull=False),
name="rule_or_alert_rule_workflow",
),
]
indexes = [
models.Index(
fields=["rule_id"],
name="idx_arw_rule_id",
),
models.Index(
fields=["alert_rule_id"],
name="idx_arw_alert_rule_id",
),
]
| AlertRuleWorkflow |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_web_fetch_tool_result_error_block_param.py | {
"start": 321,
"end": 508
} | class ____(TypedDict, total=False):
error_code: Required[BetaWebFetchToolResultErrorCode]
type: Required[Literal["web_fetch_tool_result_error"]]
| BetaWebFetchToolResultErrorBlockParam |
python | gevent__gevent | src/greentest/3.13/test_threading.py | {
"start": 66185,
"end": 70382
} | class ____(BaseTestCase):
def setUp(self):
restore_default_excepthook(self)
super().setUp()
@force_not_colorized
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
@force_not_colorized
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
def test_original_excepthook(self):
def run_thread():
with support.captured_output("stderr") as output:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
return output.getvalue()
def threading_hook(args):
print("Running a thread failed", file=sys.stderr)
default_output = run_thread()
with support.swap_attr(threading, 'excepthook', threading_hook):
custom_hook_output = run_thread()
threading.excepthook = threading.__excepthook__
recovered_output = run_thread()
self.assertEqual(default_output, recovered_output)
self.assertNotEqual(default_output, custom_hook_output)
self.assertEqual(custom_hook_output, "Running a thread failed\n")
| ExceptHookTests |
python | run-llama__llama_index | llama-index-finetuning/llama_index/finetuning/types.py | {
"start": 922,
"end": 1305
} | class ____(ABC):
"""Base Cross Encoder Finetuning Engine."""
@abstractmethod
def finetune(self) -> None:
"""Goes off and does stuff."""
@abstractmethod
def get_finetuned_model(
self, model_name: str, top_n: int = 3
) -> SentenceTransformerRerank:
"""Gets fine-tuned Cross-Encoder model as re-ranker."""
| BaseCrossEncoderFinetuningEngine |
python | spack__spack | lib/spack/spack/llnl/util/link_tree.py | {
"start": 20372,
"end": 20504
} | class ____(MergeConflictError):
def __init__(self, spec_1, spec_2):
super().__init__(spec_1, spec_2)
| ConflictingSpecsError |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/shortcuts/progress_bar/formatters.py | {
"start": 3370,
"end": 3857
} | class ____(Formatter):
"""
Display the progress as a percentage.
"""
template = HTML("<percentage>{percentage:>5}%</percentage>")
def format(
self,
progress_bar: ProgressBar,
progress: ProgressBarCounter[object],
width: int,
) -> AnyFormattedText:
return self.template.format(percentage=round(progress.percentage, 1))
def get_width(self, progress_bar: ProgressBar) -> AnyDimension:
return D.exact(6)
| Percentage |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 461023,
"end": 461472
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of AcceptTopicSuggestion"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "topic")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
topic = sgqlc.types.Field("Topic", graphql_name="topic")
"""The accepted topic."""
| AcceptTopicSuggestionPayload |
python | openai__gym | gym/error.py | {
"start": 4929,
"end": 5269
} | class ____(Exception):
"""Raised when an asynchronous `reset`, or `step` is not running, but `reset_wait`, or `step_wait` (respectively) is called."""
def __init__(self, message: str, name: str):
"""Initialises the exception with name attributes."""
super().__init__(message)
self.name = name
| NoAsyncCallError |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.