language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | RaRe-Technologies__gensim | gensim/test/test_fasttext.py | {
"start": 69951,
"end": 71742
} | class ____(unittest.TestCase):
"""
This class containts tests that check the following scenario:
+ create binary fastText file model1.bin using facebook_binary (FT)
+ load file model1.bin to variable `model`
+ save `model` to model2.bin using gensim
+ check if files model1.bin and model2.bin are byte-identical
"""
def _check_roundtrip_file_file(self, sg):
model_params = {"vector_size": 10, "sg": sg, "seed": 42}
# fasttext tool creates both *vec and *bin files, so we have to remove both, even thought *vec is unused
with temporary_file("m1.bin") as m1, temporary_file("m2.bin") as m2, temporary_file("m1.vec"):
m1_basename = m1[:-4]
_save_test_model(m1_basename, model_params)
model = gensim.models.fasttext.load_facebook_model(m1)
gensim.models.fasttext.save_facebook_model(model, m2)
bin1 = _read_binary_file(m1)
bin2 = _read_binary_file(m2)
self.assertEqual(bin1, bin2)
def test_skipgram(self):
self._check_roundtrip_file_file(sg=1)
def test_cbow(self):
self._check_roundtrip_file_file(sg=0)
def _read_wordvectors_using_fasttext(fasttext_fname, words):
def line_to_array(line):
return np.array([float(s) for s in line.split()[1:]], dtype=np.float32)
cmd = [FT_CMD, "print-word-vectors", fasttext_fname]
process = subprocess.Popen(
cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
words_str = '\n'.join(words)
out, _ = process.communicate(input=words_str.encode("utf-8"))
return np.array([line_to_array(line) for line in out.splitlines()], dtype=np.float32)
@unittest.skipIf(not FT_CMD, "fasttext not in FT_HOME or PATH, skipping test")
| SaveFacebookByteIdentityTest |
python | nedbat__coveragepy | tests/test_data.py | {
"start": 38294,
"end": 38740
} | class ____(CoverageTest):
"""Tests of in-memory CoverageData."""
run_in_temp_dir = False
def test_updating(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/1323
a = CoverageData(no_disk=True)
a.add_lines({"foo.py": [10, 20, 30]})
assert a.measured_files() == {"foo.py"}
b = CoverageData(no_disk=True)
b.update(a)
assert b.measured_files() == {"foo.py"}
| NoDiskTest |
python | qdrant__qdrant-client | tests/congruence_tests/test_sparse_search.py | {
"start": 633,
"end": 13959
} | class ____:
__test__ = False
def __init__(self):
self.query_text = generate_random_sparse_vector(sparse_text_vector_size, density=0.3)
self.query_image = generate_random_sparse_vector(sparse_image_vector_size, density=0.2)
self.query_code = generate_random_sparse_vector(sparse_code_vector_size, density=0.1)
def simple_search_text(self, client: QdrantBase) -> list[models.ScoredPoint]:
return client.query_points(
collection_name=COLLECTION_NAME,
using="sparse-text",
query=self.query_text,
with_payload=True,
with_vectors=["sparse-text"],
limit=10,
).points
def simple_search_image(self, client: QdrantBase) -> list[models.ScoredPoint]:
return client.query_points(
collection_name=COLLECTION_NAME,
query=self.query_image,
using="sparse-image",
with_payload=True,
with_vectors=["sparse-image"],
limit=10,
).points
def simple_search_code(self, client: QdrantBase) -> list[models.ScoredPoint]:
return client.query_points(
collection_name=COLLECTION_NAME,
using="sparse-code",
query=self.query_code,
with_payload=True,
with_vectors=True,
limit=10,
).points
def simple_search_text_offset(self, client: QdrantBase) -> list[models.ScoredPoint]:
return client.query_points(
collection_name=COLLECTION_NAME,
query=self.query_text,
using="sparse-text",
with_payload=True,
limit=10,
offset=10,
).points
def search_score_threshold(self, client: QdrantBase) -> list[models.ScoredPoint]:
res1 = client.query_points(
collection_name=COLLECTION_NAME,
using="sparse-text",
query=self.query_text,
with_payload=True,
limit=10,
score_threshold=0.9,
).points
res2 = client.query_points(
collection_name=COLLECTION_NAME,
using="sparse-text",
query=self.query_text,
with_payload=True,
limit=10,
score_threshold=0.95,
).points
res3 = client.query_points(
collection_name=COLLECTION_NAME,
using="sparse-text",
query=self.query_text,
with_payload=True,
limit=10,
score_threshold=0.1,
).points
return res1 + res2 + res3
def simple_search_text_select_payload(self, client: QdrantBase) -> list[models.ScoredPoint]:
return client.query_points(
collection_name=COLLECTION_NAME,
using="sparse-text",
query=self.query_text,
with_payload=["text_array", "nested.id"],
limit=10,
).points
def search_payload_exclude(self, client: QdrantBase) -> list[models.ScoredPoint]:
return client.query_points(
collection_name=COLLECTION_NAME,
using="sparse-text",
query=self.query_text,
with_payload=models.PayloadSelectorExclude(exclude=["text_array", "nested.id"]),
limit=10,
).points
def simple_search_image_select_vector(self, client: QdrantBase) -> list[models.ScoredPoint]:
return client.query_points(
collection_name=COLLECTION_NAME,
using="sparse-image",
query=self.query_image,
with_payload=False,
with_vectors=["sparse-image", "sparse-code"],
limit=10,
).points
def filter_search_text(
self, client: QdrantBase, query_filter: models.Filter
) -> list[models.ScoredPoint]:
return client.query_points(
collection_name=COLLECTION_NAME,
using="sparse-text",
query=self.query_text,
query_filter=query_filter,
with_payload=True,
limit=10,
).points
def default_mmr_query(self, client: QdrantBase) -> models.QueryResponse:
return client.query_points(
collection_name=COLLECTION_NAME,
query=models.NearestQuery(
nearest=self.query_text,
mmr=models.Mmr(),
),
using="sparse-text",
limit=10,
)
def mmr_query_parametrized(self, client: QdrantBase) -> models.QueryResponse:
return client.query_points(
collection_name=COLLECTION_NAME,
query=models.NearestQuery(
nearest=self.query_text,
mmr=models.Mmr(diversity=0.3, candidates_limit=30),
),
using="sparse-text",
limit=10,
)
def mmr_query_parametrized_score_threshold(self, client: QdrantBase) -> models.QueryResponse:
return client.query_points(
collection_name=COLLECTION_NAME,
query=models.NearestQuery(
nearest=self.query_text,
mmr=models.Mmr(diversity=0.3, candidates_limit=30),
),
score_threshold=3.3,
using="sparse-text",
limit=10,
)
def test_simple_search():
fixture_points = generate_sparse_fixtures()
searcher = TestSimpleSparseSearcher()
local_client = init_local()
init_client(local_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
remote_client = init_remote()
init_client(remote_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
compare_client_results(local_client, remote_client, searcher.simple_search_text)
compare_client_results(local_client, remote_client, searcher.simple_search_image)
compare_client_results(local_client, remote_client, searcher.simple_search_code)
compare_client_results(local_client, remote_client, searcher.simple_search_text_offset)
compare_client_results(local_client, remote_client, searcher.search_score_threshold)
compare_client_results(local_client, remote_client, searcher.simple_search_text_select_payload)
compare_client_results(local_client, remote_client, searcher.simple_search_image_select_vector)
compare_client_results(local_client, remote_client, searcher.search_payload_exclude)
for i in range(100):
query_filter = one_random_filter_please()
try:
compare_client_results(
local_client, remote_client, searcher.filter_search_text, query_filter=query_filter
)
except AssertionError as e:
print(f"\nFailed with filter {query_filter}")
raise e
def test_mmr():
fixture_points = generate_sparse_fixtures(num=100)
searcher = TestSimpleSparseSearcher()
local_client = init_local()
init_client(local_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
remote_client = init_remote()
init_client(remote_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
compare_client_results(local_client, remote_client, searcher.default_mmr_query)
compare_client_results(local_client, remote_client, searcher.mmr_query_parametrized)
compare_client_results(
local_client, remote_client, searcher.mmr_query_parametrized_score_threshold
)
def test_simple_opt_vectors_search():
fixture_points = generate_sparse_fixtures(skip_vectors=True)
searcher = TestSimpleSparseSearcher()
local_client = init_local()
init_client(local_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
remote_client = init_remote()
init_client(remote_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
compare_client_results(local_client, remote_client, searcher.simple_search_text)
compare_client_results(local_client, remote_client, searcher.simple_search_image)
compare_client_results(local_client, remote_client, searcher.simple_search_code)
compare_client_results(local_client, remote_client, searcher.simple_search_text_offset)
compare_client_results(local_client, remote_client, searcher.search_score_threshold)
compare_client_results(local_client, remote_client, searcher.simple_search_text_select_payload)
compare_client_results(local_client, remote_client, searcher.simple_search_image_select_vector)
compare_client_results(local_client, remote_client, searcher.search_payload_exclude)
for i in range(100):
query_filter = one_random_filter_please()
try:
compare_client_results(
local_client, remote_client, searcher.filter_search_text, query_filter=query_filter
)
except AssertionError as e:
print(f"\nFailed with filter {query_filter}")
raise e
def test_search_with_persistence():
import tempfile
fixture_points = generate_sparse_fixtures()
searcher = TestSimpleSparseSearcher()
with tempfile.TemporaryDirectory() as tmpdir:
local_client = init_local(tmpdir)
init_client(local_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
payload_update_filter = one_random_filter_please()
local_client.set_payload(COLLECTION_NAME, {"test": f"test"}, payload_update_filter)
del local_client
local_client_2 = init_local(tmpdir)
remote_client = init_remote()
init_client(remote_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
remote_client.set_payload(COLLECTION_NAME, {"test": f"test"}, payload_update_filter)
payload_update_filter = one_random_filter_please()
local_client_2.set_payload(COLLECTION_NAME, {"test": "test2"}, payload_update_filter)
remote_client.set_payload(COLLECTION_NAME, {"test": "test2"}, payload_update_filter)
for i in range(10):
query_filter = one_random_filter_please()
try:
compare_client_results(
local_client_2,
remote_client,
searcher.filter_search_text,
query_filter=query_filter,
)
except AssertionError as e:
print(f"\nFailed with filter {query_filter}")
raise e
def test_search_with_persistence_and_skipped_vectors():
import tempfile
fixture_points = generate_sparse_fixtures(skip_vectors=True)
searcher = TestSimpleSparseSearcher()
with tempfile.TemporaryDirectory() as tmpdir:
local_client = init_local(tmpdir)
init_client(local_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
payload_update_filter = one_random_filter_please()
local_client.set_payload(COLLECTION_NAME, {"test": f"test"}, payload_update_filter)
count_before_load = local_client.count(COLLECTION_NAME)
del local_client
local_client_2 = init_local(tmpdir)
count_after_load = local_client_2.count(COLLECTION_NAME)
assert count_after_load == count_before_load
remote_client = init_remote()
init_client(remote_client, fixture_points, sparse_vectors_config=sparse_vectors_config)
remote_client.set_payload(COLLECTION_NAME, {"test": f"test"}, payload_update_filter)
payload_update_filter = one_random_filter_please()
local_client_2.set_payload(COLLECTION_NAME, {"test": "test2"}, payload_update_filter)
remote_client.set_payload(COLLECTION_NAME, {"test": "test2"}, payload_update_filter)
for i in range(10):
query_filter = one_random_filter_please()
try:
compare_client_results(
local_client_2,
remote_client,
searcher.filter_search_text,
query_filter=query_filter,
)
except AssertionError as e:
print(f"\nFailed with filter {query_filter}")
raise e
def test_query_with_nan():
local_client = init_local()
remote_client = init_remote()
fixture_points = generate_sparse_fixtures()
sparse_vector = random_sparse_vectors({"sparse-text": sparse_text_vector_size})
sparse_vector["sparse-text"].values[0] = np.nan
local_client.create_collection(
COLLECTION_NAME, vectors_config={}, sparse_vectors_config=sparse_vectors_config
)
if remote_client.collection_exists(COLLECTION_NAME):
remote_client.delete_collection(COLLECTION_NAME)
remote_client.create_collection(
COLLECTION_NAME, vectors_config={}, sparse_vectors_config=sparse_vectors_config
)
init_client(
local_client,
fixture_points,
vectors_config={},
sparse_vectors_config=sparse_vectors_config,
)
init_client(
remote_client,
fixture_points,
vectors_config={},
sparse_vectors_config=sparse_vectors_config,
)
with pytest.raises(AssertionError):
local_client.query_points(
COLLECTION_NAME, sparse_vector["sparse-text"], using="sparse-text"
)
with pytest.raises(UnexpectedResponse):
remote_client.query_points(
COLLECTION_NAME, sparse_vector["sparse-text"], using="sparse-text"
)
| TestSimpleSparseSearcher |
python | getsentry__sentry | src/sentry/snuba/metrics/fields/base.py | {
"start": 9547,
"end": 10137
} | class ____(MetricObject):
"""
Represents a class where the metric object just encapsulates a string name identifier for a
metric
"""
def generate_metric_ids(self, projects: Sequence[Project], use_case_id: UseCaseID) -> set[int]:
return {resolve_weak(use_case_id, org_id_from_projects(projects), self.metric_mri)}
def generate_filter_snql_conditions(self, org_id: int, use_case_id: UseCaseID) -> Function:
return Function(
"equals",
[Column("metric_id"), resolve_weak(use_case_id, org_id, self.metric_mri)],
)
| RawMetric |
python | joke2k__faker | faker/providers/date_time/da_DK/__init__.py | {
"start": 46,
"end": 771
} | class ____(DateTimeProvider):
DAY_NAMES = {
"0": "mandag",
"1": "tirsdag",
"2": "onsdag",
"3": "torsdag",
"4": "fredag",
"5": "lørdag",
"6": "søndag",
}
MONTH_NAMES = {
"01": "januar",
"02": "februar",
"03": "marts",
"04": "april",
"05": "maj",
"06": "juni",
"07": "juli",
"08": "august",
"09": "september",
"10": "oktober",
"11": "november",
"12": "decembder",
}
def day_of_week(self):
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self):
month = self.month()
return self.MONTH_NAMES[month]
| Provider |
python | huggingface__transformers | src/transformers/models/roberta/modeling_roberta.py | {
"start": 24313,
"end": 29744
} | class ____(RobertaPreTrainedModel):
_no_split_modules = ["RobertaEmbeddings", "RobertaLayer"]
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if input_ids is not None:
device = input_ids.device
seq_length = input_ids.shape[1]
else:
device = inputs_embeds.device
seq_length = inputs_embeds.shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
attention_mask, encoder_attention_mask = self._create_attention_masks(
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
embedding_output=embedding_output,
encoder_hidden_states=encoder_hidden_states,
cache_position=cache_position,
past_key_values=past_key_values,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_ids=position_ids,
**kwargs,
)
sequence_output = encoder_outputs.last_hidden_state
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
)
def _create_attention_masks(
self,
attention_mask,
encoder_attention_mask,
embedding_output,
encoder_hidden_states,
cache_position,
past_key_values,
):
if self.config.is_decoder:
attention_mask = create_causal_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
)
else:
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
)
if encoder_attention_mask is not None:
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
return attention_mask, encoder_attention_mask
@auto_docstring(
custom_intro="""
RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.
"""
)
| RobertaModel |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis03.py | {
"start": 315,
"end": 2173
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis03.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "stock"})
date_format = workbook.add_format({"num_format": 14})
chart.axis_ids = [65514112, 65556864]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column("A:D", 11)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$D$1:$D$5",
}
)
chart.set_title({"name": "Title"})
chart.set_x_axis({"name": "XXX"})
chart.set_y_axis({"name": "YYY"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | google__jax | jax/experimental/roofline/roofline.py | {
"start": 2681,
"end": 4336
} | class ____:
flops: int = 0
unfused_flops: int = 0
ici_bytes: dict[str, int] = field(default_factory=dict)
ici_latency: dict[str, int] = field(default_factory=dict)
hbm_bytes: int = 0
peak_hbm_bytes: int = 0
unfused_hbm_bytes: int = 0
@classmethod
def zeros(cls) -> RooflineResult:
return cls()
def __add__(self, other: RooflineResult) -> RooflineResult:
def merge_ici_dicts(d1: dict[str, int], d2: dict[str, int]) -> dict[str, int]:
return {k: d1.get(k, 0) + d2.get(k, 0) for k in set(d1) | set(d2)}
return RooflineResult(
flops=self.flops + other.flops,
unfused_flops=self.unfused_flops + other.unfused_flops,
ici_bytes=merge_ici_dicts(self.ici_bytes, other.ici_bytes),
ici_latency=merge_ici_dicts(self.ici_latency, other.ici_latency),
hbm_bytes=self.hbm_bytes + other.hbm_bytes,
peak_hbm_bytes=max(self.peak_hbm_bytes, other.peak_hbm_bytes),
unfused_hbm_bytes=self.unfused_hbm_bytes + other.unfused_hbm_bytes,
)
def __mul__(self, constant: int | float) -> RooflineResult:
return RooflineResult(
flops=int(self.flops * constant),
unfused_flops=int(self.unfused_flops * constant),
ici_bytes={k: int(v * constant) for k, v in self.ici_bytes.items()},
ici_latency={k: int(v * constant) for k, v in self.ici_latency.items()},
hbm_bytes=int(self.hbm_bytes * constant),
peak_hbm_bytes=int(self.peak_hbm_bytes * constant),
unfused_hbm_bytes=int(self.unfused_hbm_bytes * constant),
)
def __rmul__(self, constant: int | float) -> RooflineResult:
return self.__mul__(constant)
| RooflineResult |
python | django__django | tests/forms_tests/field_tests/test_slugfield.py | {
"start": 76,
"end": 981
} | class ____(SimpleTestCase):
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(" aa-bb-cc "), "aa-bb-cc")
def test_slugfield_unicode_normalization(self):
f = SlugField(allow_unicode=True)
self.assertEqual(f.clean("a"), "a")
self.assertEqual(f.clean("1"), "1")
self.assertEqual(f.clean("a1"), "a1")
self.assertEqual(f.clean("你好"), "你好")
self.assertEqual(f.clean(" 你-好 "), "你-好")
self.assertEqual(f.clean("ıçğüş"), "ıçğüş")
self.assertEqual(f.clean("foo-ıç-bar"), "foo-ıç-bar")
def test_empty_value(self):
f = SlugField(required=False)
self.assertEqual(f.clean(""), "")
self.assertEqual(f.clean(None), "")
f = SlugField(required=False, empty_value=None)
self.assertIsNone(f.clean(""))
self.assertIsNone(f.clean(None))
| SlugFieldTest |
python | google__pytype | pytype/abstract/_classes.py | {
"start": 13323,
"end": 25828
} | class ____(
_instance_base.SimpleValue, class_mixin.Class, mixin.LazyMembers
):
"""An abstract wrapper for PyTD class objects.
These are the abstract values for class objects that are described in PyTD.
Attributes:
cls: A pytd.Class
mro: Method resolution order. An iterable of BaseValue.
"""
def __init__(
self, name: str, pytd_cls: pytd.Class, ctx: "context.Context"
) -> None:
# Apply decorators first, in case they set any properties that later
# initialization code needs to read.
self.has_explicit_init = any(x.name == "__init__" for x in pytd_cls.methods)
pytd_cls = decorate.process_class(pytd_cls)
self.pytd_cls = pytd_cls
super().__init__(name, ctx)
if decorate.has_decorator(
pytd_cls, ("typing.final", "typing_extensions.final")
):
self.final = True
# Keep track of the names of final methods and instance variables.
self.final_members = {}
mm = {}
for val in pytd_cls.constants:
if isinstance(val.type, pytd.Annotated):
mm[val.name] = val.Replace(type=val.type.base_type)
elif (
isinstance(val.type, pytd.GenericType)
and val.type.base_type.name == "typing.Final"
):
self.final_members[val.name] = val
mm[val.name] = val.Replace(type=val.type.parameters[0])
else:
mm[val.name] = val
for val in pytd_cls.methods:
mm[val.name] = val
if val.is_final:
self.final_members[val.name] = val
for val in pytd_cls.classes:
mm[val.name.rsplit(".", 1)[-1]] = val
if pytd_cls.metaclass is None:
metaclass = None
else:
metaclass = self.ctx.convert.constant_to_value(
pytd_cls.metaclass,
subst=datatypes.AliasingDict(),
node=self.ctx.root_node,
)
self.slots = pytd_cls.slots
mixin.LazyMembers.init_mixin(self, mm)
self.is_dynamic = self.compute_is_dynamic()
class_mixin.Class.init_mixin(self, metaclass)
self.decorators = [x.type.name for x in pytd_cls.decorators]
if self.decorators:
self._populate_decorator_metadata()
if "__dataclass_fields__" in self.metadata:
self.match_args = tuple(
attr.name
for attr in self.metadata["__dataclass_fields__"]
if not attr.kw_only
)
elif self.load_lazy_attribute("__match_args__"):
self.match_args = self._convert_str_tuple("__match_args__") or ()
else:
for base in self.mro[1:]:
if isinstance(base, class_mixin.Class) and hasattr(base, "match_args"):
self.match_args = base.match_args
break
else:
self.match_args = ()
@classmethod
def make(
cls, name: str, pytd_cls: pytd.Class, ctx: "context.Context"
) -> "PyTDClass":
# See if any of the special classes can be built directly from the pytd
# class or its list of direct base classes.
ret = _special_classes.maybe_build_from_pytd(name, pytd_cls, ctx)
if ret:
return ret
# Now construct the PyTDClass, since we need a fully constructed class to
# check the MRO. If the MRO does match a special class we build it and
# discard the class constructed here.
c = cls(name, pytd_cls, ctx)
ret = _special_classes.maybe_build_from_mro(c, name, pytd_cls, ctx)
if ret:
return ret
# If none of the special classes have matched, return the PyTDClass
return c
def _populate_decorator_metadata(self) -> None:
"""Fill in class attribute metadata for decorators like @dataclass."""
keyed_decorators = {}
for decorator in self.decorators:
key = class_mixin.get_metadata_key(decorator)
if key:
keyed_decorators[decorator] = key
# Because dataclass() can be used to implement dataclass_transform() at
# runtime, a class may be decorated with both.
if (
"typing.dataclass_transform" in keyed_decorators
and "dataclasses.dataclass" in keyed_decorators
):
del keyed_decorators["dataclasses.dataclass"]
if not keyed_decorators:
return
elif len(keyed_decorators) > 1:
decorator1, decorator2, *_ = sorted(keyed_decorators)
error = f"Cannot apply both @{decorator1} and @{decorator2}."
self.ctx.errorlog.invalid_annotation(self.ctx.vm.frames, self, error)
return
((decorator, key),) = keyed_decorators.items() # pylint: disable=unbalanced-dict-unpacking
if key == "__dataclass_transform__":
# TODO(mdemello): Fix how we handle metadata keys; we have been
# assuming that they always contain __init__ fields.
self.metadata[key] = True
else:
self._init_attr_metadata_from_pytd(decorator)
self._recompute_init_from_metadata(key)
def _init_attr_metadata_from_pytd(self, decorator: str) -> None:
"""Initialise metadata[key] with a list of Attributes."""
# Use the __init__ function as the source of truth for dataclass fields; if
# this is a generated module we will have already processed ClassVar and
# InitVar attributes to generate __init__, so the fields we want to add to
# the subclass __init__ are the init params rather than the full list of
# class attributes.
init = next(x for x in self.pytd_cls.methods if x.name == "__init__")
# attr strips the leading underscores off of fields when generating the
# __init__ argument for fields. This behavior may not be shared by other
# libraries, such as dataclasses.
if decorator.startswith("attr."):
protected = {
x.name[1:]: x.name
for x in self.pytd_cls.constants
if x.name.startswith("_")
}
else:
protected = {}
params = []
for p in init.signatures[0].params[1:]:
if p.name in protected:
params.append(p.Replace(name=protected[p.name]))
else:
params.append(p)
with self.ctx.allow_recursive_convert():
own_attrs = [
class_mixin.Attribute.from_param(p, self.ctx) for p in params
]
self.compute_attr_metadata(own_attrs, decorator)
def _recompute_init_from_metadata(self, key: str) -> None:
# Some decorated classes (dataclasses e.g.) have their __init__ function
# set via traversing the MRO to collect initializers from decorated parent
# classes as well. Since we don't have access to the MRO when initially
# decorating the class, we recalculate the __init__ signature from the
# combined attribute list in the metadata.
if self.has_explicit_init:
# Do not override an __init__ from the pyi file
return
attributes = self.metadata[key]
fields = [x.to_pytd_constant() for x in attributes]
self.pytd_cls = decorate.add_init_from_fields(self.pytd_cls, fields)
init = self.pytd_cls.Lookup("__init__")
self._member_map["__init__"] = init
def get_own_attributes(self) -> set[str]:
return {name for name, _ in self._member_map.items()}
def get_own_abstract_methods(self) -> set[str]:
return {
name
for name, member in self._member_map.items()
if isinstance(member, pytd.Function) and member.is_abstract
}
# TODO: b/350643999 - Type mismatch due to superclass not having type
# annotation. Remove the suppression once type is on the superclass method.
def bases(self) -> list[cfg.Variable]: # pytype: disable=signature-mismatch
convert = self.ctx.convert
converted_bases = []
for base in self.pytd_cls.bases:
converted_base_options = []
stack = [base]
while stack:
option = stack.pop()
if isinstance(option, pytd.UnionType):
stack.extend(option.type_list)
continue
converted_option = convert.constant_to_var(
option, subst=datatypes.AliasingDict(), node=self.ctx.root_node
)
converted_base_options.append(converted_option)
if len(converted_base_options) > 1:
converted_base = self.ctx.program.NewVariable()
for converted_option in converted_base_options:
converted_base.PasteVariable(converted_option)
converted_bases.append(converted_base)
else:
converted_bases.append(converted_base_options[0])
return converted_bases
def load_lazy_attribute(
self, name: str, subst: str | None = None, store: bool = True
) -> cfg.Variable | None:
try:
return super().load_lazy_attribute(name, subst, store)
except self.ctx.convert.TypeParameterError as e:
self.ctx.errorlog.unbound_type_param(
self.ctx.vm.frames, self, name, e.type_param_name
)
member = self.ctx.new_unsolvable(self.ctx.root_node)
if store:
self.members[name] = member
return member
def _convert_member(
self,
name: str,
member: pytd.Node,
subst: datatypes.AliasingDict[str, cfg.Variable] | None = None,
) -> cfg.Variable:
"""Convert a member as a variable. For lazy lookup."""
subst = subst or datatypes.AliasingDict()
node = self.ctx.root_node
if isinstance(member, pytd.Constant):
return self.ctx.convert.pytd_cls_to_instance_var(member.type, subst, node)
elif isinstance(member, pytd.Function):
c = self.ctx.convert.constant_to_value(member, subst=subst, node=node)
c.parent = self
return c.to_variable(node)
elif isinstance(member, pytd.Class):
return self.ctx.convert.constant_to_var(member, subst=subst, node=node)
else:
raise AssertionError(f"Invalid class member {pytd_utils.Print(member)}")
def _new_instance(
self,
container: (
_instance_base.SimpleValue | abstract_utils.DummyContainer | None
),
node: cfg.CFGNode,
args: function.Args | None,
) -> _instance_base.Instance | _instances.Tuple:
if self.full_name == "builtins.tuple" and args.is_empty(): # pytype: disable=attribute-error
value = _instances.Tuple((), self.ctx)
else:
value = _instance_base.Instance(
self.ctx.convert.constant_to_value(self.pytd_cls), self.ctx
)
for type_param in self.template:
name = type_param.full_name
if name not in value.instance_type_parameters:
value.instance_type_parameters[name] = self.ctx.program.NewVariable()
return value
def instantiate(
self,
node: cfg.CFGNode,
container: (
_instance_base.SimpleValue | abstract_utils.DummyContainer | None
) = None,
) -> cfg.Variable:
return self.ctx.convert.pytd_cls_to_instance_var(self.pytd_cls, {}, node)
def __repr__(self) -> str:
return f"PyTDClass({self.name})"
def __contains__(self, name: str) -> bool:
    # Membership test against the raw (lazily converted) member map.
    return name in self._member_map
def convert_as_instance_attribute(
    self, name: str, instance: "PyTDClass"
) -> cfg.Variable | None:
    """Convert `name` as an instance attribute.

    This method is used by attribute.py to lazily load attributes on instances
    of this PyTDClass. Calling this method directly should be avoided. Doing so
    will create multiple copies of the same attribute, leading to subtle bugs.

    Args:
      name: The attribute name.
      instance: An instance of this PyTDClass.

    Returns:
      The converted attribute, or None when the attribute is absent, is not a
      constant, or converts cleanly without instance-specific substitutions.
    """
    if name not in self.pytd_cls:
        return None
    c = self.pytd_cls.Lookup(name)
    if not isinstance(c, pytd.Constant):
        return None
    try:
        # Probe: succeeds when no instance-specific substitution is needed.
        self._convert_member(name, c)
    except self.ctx.convert.TypeParameterError:
        # Add type parameter substitutions for instance attributes.
        subst = datatypes.AliasingDict()
        for itm in self.pytd_cls.template:
            subst[itm.full_name] = self.ctx.convert.constant_to_value(
                itm.type_param, {}
            ).instantiate(self.ctx.root_node, container=instance)
        subst[f"{self.full_name}.Self"] = instance.to_variable(
            self.ctx.root_node
        )
        # Set all other type parameters to Any. See
        # test_recursive_types:PyiTest.test_callable for a case in which it is
        # not an error to have an unsubstituted type parameter here.
        collector = visitors.CollectTypeParameters()
        c.Visit(collector)
        for type_param in collector.params:
            # Bug fix: the original reused `name` as the loop variable, which
            # clobbered the attribute name before the _convert_member call
            # below (harmless only because _convert_member ignores `name`).
            param_name = type_param.full_name
            if param_name not in subst:
                subst[param_name] = self.ctx.new_unsolvable(self.ctx.root_node)
        return self._convert_member(name, c, subst)
    return None
def has_protocol_base(self) -> bool:
    """Return True if typing.Protocol appears among the pytd class's bases."""
    return any(base.name == "typing.Protocol" for base in self.pytd_cls.bases)
| PyTDClass |
python | numba__numba | numba/cuda/tests/doc_examples/test_matmul.py | {
"start": 475,
"end": 6135
class ____(CUDATestCase):
    """
    Test matrix multiplication using simple, shared memory/square, and shared
    memory/nonsquare cases.
    """

    def setUp(self):
        # Prevent output from this test showing up when running the test suite
        self._captured_stdout = captured_stdout()
        self._captured_stdout.__enter__()
        super().setUp()

    def tearDown(self):
        # No exception type, value, or traceback
        self._captured_stdout.__exit__(None, None, None)
        super().tearDown()

    def test_ex_matmul(self):
        """Test of matrix multiplication on various cases."""
        # NOTE: the magictoken comments delimit regions extracted verbatim
        # into the documentation; keep their contents unchanged.
        # magictoken.ex_import.begin
        from numba import cuda, float32
        import numpy as np
        import math
        # magictoken.ex_import.end

        # magictoken.ex_matmul.begin
        @cuda.jit
        def matmul(A, B, C):
            """Perform square matrix multiplication of C = A * B."""
            i, j = cuda.grid(2)
            if i < C.shape[0] and j < C.shape[1]:
                tmp = 0.
                for k in range(A.shape[1]):
                    tmp += A[i, k] * B[k, j]
                C[i, j] = tmp
        # magictoken.ex_matmul.end

        # magictoken.ex_run_matmul.begin
        x_h = np.arange(16).reshape([4, 4])
        y_h = np.ones([4, 4])
        z_h = np.zeros([4, 4])

        x_d = cuda.to_device(x_h)
        y_d = cuda.to_device(y_h)
        z_d = cuda.to_device(z_h)

        threadsperblock = (16, 16)
        blockspergrid_x = math.ceil(z_h.shape[0] / threadsperblock[0])
        blockspergrid_y = math.ceil(z_h.shape[1] / threadsperblock[1])
        blockspergrid = (blockspergrid_x, blockspergrid_y)

        matmul[blockspergrid, threadsperblock](x_d, y_d, z_d)
        z_h = z_d.copy_to_host()
        print(z_h)
        print(x_h @ y_h)
        # magictoken.ex_run_matmul.end

        # magictoken.ex_fast_matmul.begin
        # Controls threads per block and shared memory usage.
        # The computation will be done on blocks of TPBxTPB elements.
        # TPB should not be larger than 32 in this example
        TPB = 16

        @cuda.jit
        def fast_matmul(A, B, C):
            """
            Perform matrix multiplication of C = A * B using CUDA shared memory.

            Reference: https://stackoverflow.com/a/64198479/13697228 by @RobertCrovella
            """
            # Define an array in the shared memory
            # The size and type of the arrays must be known at compile time
            sA = cuda.shared.array(shape=(TPB, TPB), dtype=float32)
            sB = cuda.shared.array(shape=(TPB, TPB), dtype=float32)

            x, y = cuda.grid(2)

            tx = cuda.threadIdx.x
            ty = cuda.threadIdx.y
            bpg = cuda.gridDim.x  # blocks per grid

            # Each thread computes one element in the result matrix.
            # The dot product is chunked into dot products of TPB-long vectors.
            tmp = float32(0.)
            for i in range(bpg):
                # Preload data into shared memory
                sA[ty, tx] = 0
                sB[ty, tx] = 0
                if y < A.shape[0] and (tx + i * TPB) < A.shape[1]:
                    sA[ty, tx] = A[y, tx + i * TPB]
                if x < B.shape[1] and (ty + i * TPB) < B.shape[0]:
                    sB[ty, tx] = B[ty + i * TPB, x]

                # Wait until all threads finish preloading
                cuda.syncthreads()

                # Computes partial product on the shared memory
                for j in range(TPB):
                    tmp += sA[ty, j] * sB[j, tx]

                # Wait until all threads finish computing
                cuda.syncthreads()
            if y < C.shape[0] and x < C.shape[1]:
                C[y, x] = tmp
        # magictoken.ex_fast_matmul.end

        # magictoken.ex_run_fast_matmul.begin
        x_h = np.arange(16).reshape([4, 4])
        y_h = np.ones([4, 4])
        z_h = np.zeros([4, 4])

        x_d = cuda.to_device(x_h)
        y_d = cuda.to_device(y_h)
        z_d = cuda.to_device(z_h)

        threadsperblock = (TPB, TPB)
        blockspergrid_x = math.ceil(z_h.shape[0] / threadsperblock[0])
        blockspergrid_y = math.ceil(z_h.shape[1] / threadsperblock[1])
        blockspergrid = (blockspergrid_x, blockspergrid_y)

        fast_matmul[blockspergrid, threadsperblock](x_d, y_d, z_d)
        z_h = z_d.copy_to_host()
        print(z_h)
        print(x_h @ y_h)
        # magictoken.ex_run_fast_matmul.end

        # fast_matmul test(s)
        msg = "fast_matmul incorrect for shared memory, square case."
        self.assertTrue(np.all(z_h == x_h @ y_h), msg=msg)

        # magictoken.ex_run_nonsquare.begin
        x_h = np.arange(115).reshape([5, 23])
        y_h = np.ones([23, 7])
        z_h = np.zeros([5, 7])

        x_d = cuda.to_device(x_h)
        y_d = cuda.to_device(y_h)
        z_d = cuda.to_device(z_h)

        threadsperblock = (TPB, TPB)
        grid_y_max = max(x_h.shape[0], y_h.shape[0])
        grid_x_max = max(x_h.shape[1], y_h.shape[1])
        blockspergrid_x = math.ceil(grid_x_max / threadsperblock[0])
        blockspergrid_y = math.ceil(grid_y_max / threadsperblock[1])
        blockspergrid = (blockspergrid_x, blockspergrid_y)

        fast_matmul[blockspergrid, threadsperblock](x_d, y_d, z_d)
        z_h = z_d.copy_to_host()
        print(z_h)
        print(x_h @ y_h)
        # magictoken.ex_run_nonsquare.end

        # nonsquare fast_matmul test(s)
        msg = "fast_matmul incorrect for shared memory, non-square case."
        self.assertTrue(np.all(z_h == x_h @ y_h), msg=msg)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| TestMatMul |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 12740,
"end": 12856
class ____(OpcodeWithArg):  # Acts as jump
    # Absolute-jump opcode (HAS_JABS) carrying an argument; control never
    # falls through to the following instruction (NO_NEXT).
    _FLAGS = HAS_JABS | HAS_ARGUMENT | NO_NEXT
    __slots__ = ()
| CONTINUE_LOOP |
python | crytic__slither | slither/core/declarations/function.py | {
"start": 3279,
"end": 73993
} | class ____(SourceMapping, metaclass=ABCMeta): # pylint: disable=too-many-public-methods
"""
Function class
"""
def __init__(self, compilation_unit: "SlitherCompilationUnit") -> None:
    """Initialize an empty Function: raw metadata fields plus the many
    lazily-computed caches (the `_all_*` / memoized attributes start as
    None and are filled on first access by their properties)."""
    super().__init__()
    self._internal_scope: List[str] = []
    self._name: Optional[str] = None
    self._view: bool = False
    self._pure: bool = False
    self._payable: bool = False
    self._visibility: Optional[str] = None
    self._virtual: bool = False
    self._overrides: List["FunctionContract"] = []
    self._overridden_by: List["FunctionContract"] = []
    self._is_implemented: Optional[bool] = None
    self._is_empty: Optional[bool] = None
    self._entry_point: Optional["Node"] = None
    self._nodes: List["Node"] = []
    self._variables: Dict[str, "LocalVariable"] = {}
    # slithir Temporary and references variables (but not SSA)
    self._slithir_variables: Set["SlithIRVariable"] = set()
    self._parameters: List["LocalVariable"] = []
    self._parameters_ssa: List["LocalIRVariable"] = []
    self._parameters_src: SourceMapping = SourceMapping()
    # This is used for vyper calls with default arguments
    self._default_args_as_expressions: List["Expression"] = []
    self._returns: List["LocalVariable"] = []
    self._returns_ssa: List["LocalIRVariable"] = []
    self._returns_src: SourceMapping = SourceMapping()
    self._return_values: Optional[List["SlithIRVariable"]] = None
    self._return_values_ssa: Optional[List["SlithIRVariable"]] = None
    # Variables/calls touched directly by this function's body.
    self._vars_read: List["Variable"] = []
    self._vars_written: List["Variable"] = []
    self._state_vars_read: List["StateVariable"] = []
    self._vars_read_or_written: List["Variable"] = []
    self._solidity_vars_read: List["SolidityVariable"] = []
    self._state_vars_written: List["StateVariable"] = []
    self._internal_calls: List["InternalCall"] = []
    self._solidity_calls: List["SolidityCall"] = []
    self._low_level_calls: List["LowLevelCall"] = []
    self._high_level_calls: List[Tuple["Contract", "HighLevelCall"]] = []
    self._library_calls: List["LibraryCall"] = []
    self._external_calls_as_expressions: List["Expression"] = []
    self._expression_vars_read: List["Expression"] = []
    self._expression_vars_written: List["Expression"] = []
    self._expression_calls: List["Expression"] = []
    # self._expression_modifiers: List["Expression"] = []
    self._modifiers: List[ModifierStatements] = []
    self._explicit_base_constructor_calls: List[ModifierStatements] = []
    self._contains_assembly: bool = False
    self._expressions: Optional[List["Expression"]] = None
    self._slithir_operations: Optional[List["Operation"]] = None
    self._slithir_ssa_operations: Optional[List["Operation"]] = None
    # "_all_*" caches aggregate over this function plus everything it calls.
    self._all_expressions: Optional[List["Expression"]] = None
    self._all_slithir_operations: Optional[List["Operation"]] = None
    self._all_internals_calls: Optional[List["InternalCall"]] = None
    self._all_high_level_calls: Optional[List[Tuple["Contract", "HighLevelCall"]]] = None
    self._all_library_calls: Optional[List["LibraryCall"]] = None
    self._all_low_level_calls: Optional[List["LowLevelCall"]] = None
    self._all_solidity_calls: Optional[List["SolidityCall"]] = None
    self._all_variables_read: Optional[List["Variable"]] = None
    self._all_variables_written: Optional[List["Variable"]] = None
    self._all_state_variables_read: Optional[List["StateVariable"]] = None
    self._all_solidity_variables_read: Optional[List["SolidityVariable"]] = None
    self._all_state_variables_written: Optional[List["StateVariable"]] = None
    self._all_slithir_variables: Optional[List["SlithIRVariable"]] = None
    self._all_nodes: Optional[List["Node"]] = None
    self._all_conditional_state_variables_read: Optional[List["StateVariable"]] = None
    self._all_conditional_state_variables_read_with_loop: Optional[List["StateVariable"]] = None
    self._all_conditional_solidity_variables_read: Optional[List["SolidityVariable"]] = None
    self._all_conditional_solidity_variables_read_with_loop: Optional[
        List["SolidityVariable"]
    ] = None
    self._all_solidity_variables_used_as_args: Optional[List["SolidityVariable"]] = None
    self._is_shadowed: bool = False
    self._shadows: bool = False
    # set(ReacheableNode)
    self._reachable_from_nodes: Set[ReacheableNode] = set()
    self._reachable_from_functions: Set[Function] = set()
    self._all_reachable_from_functions: Optional[Set[Function]] = None
    # Constructor, fallback, State variable constructor
    self._function_type: Optional[FunctionType] = None
    self._is_constructor: Optional[bool] = None
    # Computed on the fly, can be True or False
    self._can_reenter: Optional[bool] = None
    self._can_send_eth: Optional[bool] = None
    self._nodes_ordered_dominators: Optional[List["Node"]] = None
    self._counter_nodes = 0
    # Memoize parameters:
    # TODO: identify all the memoize parameters and add a way to undo the memoization
    self._full_name: Optional[str] = None
    self._signature: Optional[Tuple[str, List[str], List[str]]] = None
    self._solidity_signature: Optional[str] = None
    self._signature_str: Optional[str] = None
    self._canonical_name: Optional[str] = None
    self._is_protected: Optional[bool] = None
    self.compilation_unit: "SlitherCompilationUnit" = compilation_unit
    self.function_language: FunctionLanguage = (
        FunctionLanguage.Solidity if compilation_unit.is_solidity else FunctionLanguage.Vyper
    )
    self._id: Optional[str] = None
    # To be improved with a parsing of the documentation
    self.has_documentation: bool = False
###################################################################################
###################################################################################
# region General properties
###################################################################################
###################################################################################
@property
def name(self) -> str:
    """
    str: function name

    Special function kinds (constructor, fallback, receive, and the
    synthetic slither constructors for state-variable initialization)
    are reported under a canonical name instead of the raw parsed name.
    """
    if self._name == "" and self._function_type == FunctionType.CONSTRUCTOR:
        return "constructor"
    if self._name == "" and self._function_type == FunctionType.FALLBACK:
        return "fallback"
    # NOTE(review): unlike constructor/fallback above, RECEIVE does not
    # also require an empty parsed name — confirm that asymmetry is intended.
    if self._function_type == FunctionType.RECEIVE:
        return "receive"
    if self._function_type == FunctionType.CONSTRUCTOR_VARIABLES:
        return "slitherConstructorVariables"
    if self._function_type == FunctionType.CONSTRUCTOR_CONSTANT_VARIABLES:
        return "slitherConstructorConstantVariables"
    return self._name
@name.setter
def name(self, new_name: str):
self._name = new_name
@property
def internal_scope(self) -> List[str]:
"""
Return a list of name representing the scope of the function
This is used to model nested functions declared in YUL
:return:
"""
return self._internal_scope
@internal_scope.setter
def internal_scope(self, new_scope: List[str]):
self._internal_scope = new_scope
@property
def full_name(self) -> str:
"""
str: func_name(type1,type2)
Return the function signature without the return values
The difference between this function and solidity_function is that full_name does not translate the underlying
type (ex: structure, contract to address, ...)
"""
if self._full_name is None:
name, parameters, _ = self.signature
full_name = ".".join(self._internal_scope + [name]) + "(" + ",".join(parameters) + ")"
self._full_name = full_name
return self._full_name
@property
@abstractmethod
def canonical_name(self) -> str:
"""
str: contract.func_name(type1,type2)
Return the function signature without the return values
"""
return ""
@property
def contains_assembly(self) -> bool:
return self._contains_assembly
@contains_assembly.setter
def contains_assembly(self, c: bool):
self._contains_assembly = c
def can_reenter(self, callstack: Optional[List[Union["Function", "Variable"]]] = None) -> bool:
    """
    Check if the function can re-enter
    Follow internal calls.
    Do not consider CREATE as potential re-enter, but check if the
    destination's constructor can contain a call (recurs. follow nested CREATE)
    For Solidity > 0.5, filter access to public variables and constant/pure/view
    For call to this. check if the destination can re-enter
    Do not consider Send/Transfer as there is not enough gas
    :param callstack: used internally to check for recursion
    :return bool:
    """
    from slither.slithir.operations import Call

    if self._can_reenter is None:
        # Seed the memo with False before scanning: if the scan recurses
        # back into this function, the cached False terminates the recursion.
        self._can_reenter = False
        for ir in self.all_slithir_operations():
            if isinstance(ir, Call) and ir.can_reenter(callstack):
                self._can_reenter = True
                return True
    # NOTE(review): the memoized result ignores `callstack`; later calls
    # with a different callstack get the first call's answer — confirm
    # this is intended.
    return self._can_reenter
def can_send_eth(self) -> bool:
    """
    Check if the function or any internal (not external) functions called by it can send eth
    :return bool:
    """
    from slither.slithir.operations import Call

    if self._can_send_eth is None:
        # Seed the memo with False first so recursive exploration through
        # all_slithir_operations() terminates.
        self._can_send_eth = False
        for ir in self.all_slithir_operations():
            if isinstance(ir, Call) and ir.can_send_eth():
                self._can_send_eth = True
                return True
    return self._can_send_eth
@property
def is_checked(self) -> bool:
"""
Return true if the overflow are enabled by default
:return:
"""
return self.compilation_unit.solc_version >= "0.8.0"
@property
def id(self) -> Optional[str]:
"""
Return the reference ID of the function, if available.
:return:
:rtype:
"""
return self._id
@id.setter
def id(self, new_id: str):
self._id = new_id
@property
@abstractmethod
def file_scope(self) -> "FileScope":
pass
# endregion
###################################################################################
###################################################################################
# region Type (FunctionType)
###################################################################################
###################################################################################
def set_function_type(self, t: FunctionType) -> None:
assert isinstance(t, FunctionType)
self._function_type = t
@property
def function_type(self) -> Optional[FunctionType]:
return self._function_type
@function_type.setter
def function_type(self, t: FunctionType):
self._function_type = t
@property
def is_constructor(self) -> bool:
"""
bool: True if the function is the constructor
"""
return self._function_type == FunctionType.CONSTRUCTOR
@property
def is_constructor_variables(self) -> bool:
"""
bool: True if the function is the constructor of the variables
Slither has inbuilt functions to hold the state variables initialization
"""
return self._function_type in [
FunctionType.CONSTRUCTOR_VARIABLES,
FunctionType.CONSTRUCTOR_CONSTANT_VARIABLES,
]
@property
def is_fallback(self) -> bool:
"""
Determine if the function is the fallback function for the contract
Returns
(bool)
"""
return self._function_type == FunctionType.FALLBACK
@property
def is_receive(self) -> bool:
"""
Determine if the function is the receive function for the contract
Returns
(bool)
"""
return self._function_type == FunctionType.RECEIVE
# endregion
###################################################################################
###################################################################################
# region Payable
###################################################################################
###################################################################################
@property
def payable(self) -> bool:
"""
bool: True if the function is payable
"""
return self._payable
@payable.setter
def payable(self, p: bool):
self._payable = p
# endregion
###################################################################################
###################################################################################
# region Virtual
###################################################################################
###################################################################################
@property
def is_virtual(self) -> bool:
"""
Note for Solidity < 0.6.0 it will always be false
bool: True if the function is virtual
"""
return self._virtual
@is_virtual.setter
def is_virtual(self, v: bool):
self._virtual = v
@property
def is_override(self) -> bool:
"""
Note for Solidity < 0.6.0 it will always be false
bool: True if the function overrides a base function
"""
return len(self._overrides) > 0
@property
def overridden_by(self) -> List["FunctionContract"]:
"""
List["FunctionContract"]: List of functions in child contracts that override this function
This may include distinct instances of the same function due to inheritance
"""
return self._overridden_by
@property
def overrides(self) -> List["FunctionContract"]:
"""
List["FunctionContract"]: List of functions in parent contracts that this function overrides
This may include distinct instances of the same function due to inheritance
"""
return self._overrides
# endregion
###################################################################################
###################################################################################
# region Visibility
###################################################################################
###################################################################################
@property
def visibility(self) -> str:
"""
str: Function visibility
"""
assert self._visibility is not None
return self._visibility
@visibility.setter
def visibility(self, v: str):
self._visibility = v
def set_visibility(self, v: str) -> None:
self._visibility = v
@property
def view(self) -> bool:
"""
bool: True if the function is declared as view
"""
return self._view
@view.setter
def view(self, v: bool):
self._view = v
@property
def pure(self) -> bool:
"""
bool: True if the function is declared as pure
"""
return self._pure
@pure.setter
def pure(self, p: bool):
self._pure = p
@property
def is_shadowed(self) -> bool:
return self._is_shadowed
@is_shadowed.setter
def is_shadowed(self, is_shadowed):
self._is_shadowed = is_shadowed
@property
def shadows(self) -> bool:
return self._shadows
@shadows.setter
def shadows(self, _shadows: bool):
self._shadows = _shadows
# endregion
###################################################################################
###################################################################################
# region Function's body
###################################################################################
###################################################################################
@property
def is_implemented(self) -> bool:
"""
bool: True if the function is implemented
"""
return self._is_implemented
@is_implemented.setter
def is_implemented(self, is_impl: bool):
self._is_implemented = is_impl
@property
def is_empty(self) -> bool:
"""
bool: True if the function is empty, None if the function is an interface
"""
return self._is_empty
@is_empty.setter
def is_empty(self, empty: bool):
self._is_empty = empty
# endregion
###################################################################################
###################################################################################
# region Nodes
###################################################################################
###################################################################################
@property
def nodes(self) -> List["Node"]:
"""
list(Node): List of the nodes
"""
return list(self._nodes)
@nodes.setter
def nodes(self, nodes: List["Node"]):
self._nodes = nodes
@property
def entry_point(self) -> Optional["Node"]:
"""
Node: Entry point of the function
"""
return self._entry_point
@entry_point.setter
def entry_point(self, node: "Node"):
self._entry_point = node
def add_node(self, node: "Node") -> None:
if not self._entry_point:
self._entry_point = node
self._nodes.append(node)
@property
def nodes_ordered_dominators(self) -> List["Node"]:
# TODO: does not work properly; most likely due to modifier call
# This will not work for modifier call that lead to multiple nodes
# from slither.core.cfg.node import NodeType
if self._nodes_ordered_dominators is None:
self._nodes_ordered_dominators = []
if self.entry_point:
self._compute_nodes_ordered_dominators(self.entry_point)
for node in self.nodes:
# if node.type == NodeType.OTHER_ENTRYPOINT:
if not node in self._nodes_ordered_dominators:
self._compute_nodes_ordered_dominators(node)
return self._nodes_ordered_dominators
def _compute_nodes_ordered_dominators(self, node: "Node"):
assert self._nodes_ordered_dominators is not None
if node in self._nodes_ordered_dominators:
return
self._nodes_ordered_dominators.append(node)
for dom in node.dominance_exploration_ordered:
self._compute_nodes_ordered_dominators(dom)
# endregion
###################################################################################
###################################################################################
# region Parameters
###################################################################################
###################################################################################
@property
def parameters(self) -> List["LocalVariable"]:
"""
list(LocalVariable): List of the parameters
"""
return list(self._parameters)
def add_parameters(self, p: "LocalVariable") -> None:
self._parameters.append(p)
@property
def parameters_ssa(self) -> List["LocalIRVariable"]:
"""
list(LocalIRVariable): List of the parameters (SSA form)
"""
return list(self._parameters_ssa)
def add_parameter_ssa(self, var: "LocalIRVariable") -> None:
self._parameters_ssa.append(var)
def parameters_src(self) -> SourceMapping:
return self._parameters_src
# endregion
###################################################################################
###################################################################################
# region Return values
###################################################################################
###################################################################################
@property
def return_type(self) -> Optional[List[Type]]:
"""
Return the list of return type
If no return, return None
"""
returns = self.returns
if returns:
return [r.type for r in returns]
return None
def returns_src(self) -> SourceMapping:
return self._returns_src
@property
def type(self) -> Optional[List[Type]]:
"""
Return the list of return type
If no return, return None
Alias of return_type
"""
return self.return_type
@property
def returns(self) -> List["LocalVariable"]:
"""
list(LocalVariable): List of the return variables
"""
return list(self._returns)
def add_return(self, r: "LocalVariable") -> None:
self._returns.append(r)
@property
def returns_ssa(self) -> List["LocalIRVariable"]:
"""
list(LocalIRVariable): List of the return variables (SSA form)
"""
return list(self._returns_ssa)
def add_return_ssa(self, var: "LocalIRVariable") -> None:
self._returns_ssa.append(var)
# endregion
###################################################################################
###################################################################################
# region Modifiers
###################################################################################
###################################################################################
@property
def modifiers(self) -> List[Union["Contract", "Function"]]:
"""
list(Modifier): List of the modifiers
Can be contract for constructor's calls
"""
return [c.modifier for c in self._modifiers]
def add_modifier(self, modif: "ModifierStatements") -> None:
self._modifiers.append(modif)
@property
def modifiers_statements(self) -> List[ModifierStatements]:
"""
list(ModifierCall): List of the modifiers call (include expression and irs)
"""
return list(self._modifiers)
@property
def explicit_base_constructor_calls(self) -> List["Function"]:
"""
list(Function): List of the base constructors called explicitly by this presumed constructor definition.
Base constructors implicitly or explicitly called by the contract definition will not be
included.
"""
# This is a list of contracts internally, so we convert it to a list of constructor functions.
return [
c.modifier.constructors_declared
for c in self._explicit_base_constructor_calls
if c.modifier.constructors_declared
]
@property
def explicit_base_constructor_calls_statements(self) -> List[ModifierStatements]:
"""
list(ModifierCall): List of the base constructors called explicitly by this presumed constructor definition.
"""
# This is a list of contracts internally, so we convert it to a list of constructor functions.
return list(self._explicit_base_constructor_calls)
def add_explicit_base_constructor_calls_statements(self, modif: ModifierStatements) -> None:
self._explicit_base_constructor_calls.append(modif)
# endregion
###################################################################################
###################################################################################
# region Variables
###################################################################################
###################################################################################
@property
def variables(self) -> List[LocalVariable]:
"""
Return all local variables
Include parameters and return values
"""
return list(self._variables.values())
@property
def local_variables(self) -> List[LocalVariable]:
    """
    Return all local variables (don't include parameters and return values)

    NOTE(review): built via set difference, so the returned order is not
    deterministic across runs — confirm no caller relies on ordering.
    """
    return list(set(self.variables) - set(self.returns) - set(self.parameters))
@property
def variables_as_dict(self) -> Dict[str, LocalVariable]:
return self._variables
@property
def variables_read(self) -> List["Variable"]:
"""
list(Variable): Variables read (local/state/solidity)
"""
return list(self._vars_read)
@property
def variables_written(self) -> List["Variable"]:
"""
list(Variable): Variables written (local/state/solidity)
"""
return list(self._vars_written)
@property
def state_variables_read(self) -> List["StateVariable"]:
"""
list(StateVariable): State variables read
"""
return list(self._state_vars_read)
@property
def solidity_variables_read(self) -> List["SolidityVariable"]:
"""
list(SolidityVariable): Solidity variables read
"""
return list(self._solidity_vars_read)
@property
def state_variables_written(self) -> List["StateVariable"]:
"""
list(StateVariable): State variables written
"""
return list(self._state_vars_written)
@property
def variables_read_or_written(self) -> List["Variable"]:
"""
list(Variable): Variables read or written (local/state/solidity)
"""
return list(self._vars_read_or_written)
@property
def variables_read_as_expression(self) -> List["Expression"]:
return self._expression_vars_read
@property
def variables_written_as_expression(self) -> List["Expression"]:
return self._expression_vars_written
@property
def slithir_variables(self) -> List["SlithIRVariable"]:
"""
Temporary and Reference Variables (not SSA form)
"""
return list(self._slithir_variables)
# endregion
###################################################################################
###################################################################################
# region Calls
###################################################################################
###################################################################################
@property
def internal_calls(self) -> List["InternalCall"]:
"""
list(InternalCall): List of IR operations for internal calls
"""
return list(self._internal_calls)
@property
def solidity_calls(self) -> List["SolidityCall"]:
"""
list(SolidityCall): List of IR operations for Solidity calls
"""
return list(self._solidity_calls)
@property
def high_level_calls(self) -> List[Tuple["Contract", "HighLevelCall"]]:
"""
list(Tuple(Contract, "HighLevelCall")): List of call target contract and IR of the high level call
A variable is called in case of call to a public state variable
Include library calls
"""
return list(self._high_level_calls)
@property
def library_calls(self) -> List["LibraryCall"]:
"""
list(LibraryCall): List of IR operations for library calls
"""
return list(self._library_calls)
@property
def low_level_calls(self) -> List["LowLevelCall"]:
"""
list(LowLevelCall): List of IR operations for low level calls
A low level call is defined by
- the variable called
- the name of the function (call/delegatecall/callcode)
"""
return list(self._low_level_calls)
@property
def external_calls_as_expressions(self) -> List["Expression"]:
"""
list(ExpressionCall): List of message calls (that creates a transaction)
"""
return list(self._external_calls_as_expressions)
# endregion
###################################################################################
###################################################################################
# region Expressions
###################################################################################
###################################################################################
@property
def calls_as_expressions(self) -> List["Expression"]:
return self._expression_calls
@property
def expressions(self) -> List["Expression"]:
    """
    list(Expression): List of the expressions

    The list is built lazily on first access (nodes without an expression
    are skipped) and cached in self._expressions.
    """
    if self._expressions is None:
        # Single pass instead of the original two intermediate lists;
        # truthiness filter (`if n.expression`) is preserved.
        self._expressions = [n.expression for n in self.nodes if n.expression]
    return self._expressions
@property
def return_values(self) -> List["SlithIRVariable"]:
    """
    list(Return Values): List of the return values

    Collects every value returned by a RETURN node's IR, deduplicates,
    and drops constants. Built lazily and cached in self._return_values.
    """
    from slither.core.cfg.node import NodeType
    from slither.slithir.operations import Return
    from slither.slithir.variables import Constant

    if self._return_values is None:
        return_values = []
        # Plain loop replaces the original side-effecting list comprehension
        # (which needed a pylint disable).
        for node in self.nodes:
            if node.type != NodeType.RETURN:
                continue
            for ir in node.irs:
                if isinstance(ir, Return):
                    return_values.extend(ir.values)
        self._return_values = list(
            {x for x in return_values if not isinstance(x, Constant)}
        )
    return self._return_values
@property
def return_values_ssa(self) -> List["SlithIRVariable"]:
    """
    list(Return Values in SSA form): List of the return values in ssa form

    Same as `return_values` but over the SSA IR. Built lazily and cached
    in self._return_values_ssa.
    """
    from slither.core.cfg.node import NodeType
    from slither.slithir.operations import Return
    from slither.slithir.variables import Constant

    if self._return_values_ssa is None:
        return_values_ssa = []
        # Plain loop replaces the original side-effecting list comprehension
        # (which needed a pylint disable).
        for node in self.nodes:
            if node.type != NodeType.RETURN:
                continue
            for ir in node.irs_ssa:
                if isinstance(ir, Return):
                    return_values_ssa.extend(ir.values)
        self._return_values_ssa = list(
            {x for x in return_values_ssa if not isinstance(x, Constant)}
        )
    return self._return_values_ssa
# endregion
###################################################################################
###################################################################################
# region SlithIR
###################################################################################
###################################################################################
@property
def slithir_operations(self) -> List["Operation"]:
"""
list(Operation): List of the slithir operations
"""
if self._slithir_operations is None:
operationss = [n.irs for n in self.nodes]
operations = [item for sublist in operationss for item in sublist if item]
self._slithir_operations = operations
return self._slithir_operations
@property
def slithir_ssa_operations(self) -> List["Operation"]:
"""
list(Operation): List of the slithir operations (SSA)
"""
if self._slithir_ssa_operations is None:
operationss = [n.irs_ssa for n in self.nodes]
operations = [item for sublist in operationss for item in sublist if item]
self._slithir_ssa_operations = operations
return self._slithir_ssa_operations
# endregion
###################################################################################
###################################################################################
# region Signature
###################################################################################
###################################################################################
@property
def solidity_signature(self) -> str:
"""
Return a signature following the Solidity Standard
Contract and converted into address
It might still keep internal types (ex: structure name) for internal functions.
The reason is that internal functions allows recursive structure definition, which
can't be converted following the Solidity stand ard
:return: the solidity signature
"""
if self._solidity_signature is None:
parameters = [
convert_type_for_solidity_signature_to_string(x.type) for x in self.parameters
]
self._solidity_signature = self.name + "(" + ",".join(parameters) + ")"
return self._solidity_signature
@property
def signature(self) -> Tuple[str, List[str], List[str]]:
"""
(str, list(str), list(str)): Function signature as
(name, list parameters type, list return values type)
"""
# FIXME memoizing this function is not working properly for vyper
# if self._signature is None:
return (
self.name,
[str(x.type) for x in self.parameters],
[str(x.type) for x in self.returns],
)
# self._signature = signature
# return self._signature
@property
def signature_str(self) -> str:
"""
str: func_name(type1,type2) returns (type3)
Return the function signature as a str (contains the return values)
"""
if self._signature_str is None:
name, parameters, returnVars = self.signature
self._signature_str = (
name + "(" + ",".join(parameters) + ") returns(" + ",".join(returnVars) + ")"
)
return self._signature_str
# endregion
###################################################################################
###################################################################################
# region Functions
###################################################################################
###################################################################################
@property
@abstractmethod
def functions_shadowed(self) -> List["Function"]:
pass
# endregion
###################################################################################
###################################################################################
# region Reachable
###################################################################################
###################################################################################
@property
def reachable_from_nodes(self) -> Set[ReacheableNode]:
"""
Return
ReacheableNode
"""
return self._reachable_from_nodes
@property
def reachable_from_functions(self) -> Set["Function"]:
return self._reachable_from_functions
@property
def all_reachable_from_functions(self) -> Set["Function"]:
"""
Give the recursive version of reachable_from_functions (all the functions that lead to call self in the CFG)
"""
if self._all_reachable_from_functions is None:
functions: Set["Function"] = set()
new_functions = self.reachable_from_functions
# iterate until we have are finding new functions
while new_functions and not new_functions.issubset(functions):
functions = functions.union(new_functions)
# Use a temporary set, because we iterate over new_functions
new_functionss: Set["Function"] = set()
for f in new_functions:
new_functionss = new_functionss.union(f.reachable_from_functions)
new_functions = new_functionss - functions
self._all_reachable_from_functions = functions
return self._all_reachable_from_functions
def add_reachable_from_node(self, n: "Node", ir: "Operation") -> None:
self._reachable_from_nodes.add(ReacheableNode(n, ir))
self._reachable_from_functions.add(n.function)
# endregion
###################################################################################
###################################################################################
# region Recursive getters
###################################################################################
###################################################################################
def _explore_functions(self, f_new_values: Callable[["Function"], List]) -> List[Any]:
values = f_new_values(self)
explored = [self]
to_explore = [
ir.function
for ir in self.internal_calls
if isinstance(ir.function, Function) and ir.function not in explored
]
to_explore += [
ir.function
for ir in self.library_calls
if isinstance(ir.function, Function) and ir.function not in explored
]
to_explore += [m for m in self.modifiers if m not in explored]
while to_explore:
f = to_explore[0]
to_explore = to_explore[1:]
if f in explored:
continue
explored.append(f)
values += f_new_values(f)
to_explore += [
ir.function
for ir in f.internal_calls
if isinstance(ir.function, Function)
and ir.function not in explored
and ir.function not in to_explore
]
to_explore += [
ir.function
for ir in f.library_calls
if isinstance(ir.function, Function)
and ir.function not in explored
and ir.function not in to_explore
]
to_explore += [m for m in f.modifiers if m not in explored and m not in to_explore]
return list(set(values))
def all_variables_read(self) -> List["Variable"]:
"""recursive version of variables_read"""
if self._all_variables_read is None:
self._all_variables_read = self._explore_functions(lambda x: x.variables_read)
return self._all_variables_read
def all_variables_written(self) -> List["Variable"]:
"""recursive version of variables_written"""
if self._all_variables_written is None:
self._all_variables_written = self._explore_functions(lambda x: x.variables_written)
return self._all_variables_written
def all_state_variables_read(self) -> List["StateVariable"]:
"""recursive version of variables_read"""
if self._all_state_variables_read is None:
self._all_state_variables_read = self._explore_functions(
lambda x: x.state_variables_read
)
return self._all_state_variables_read
def all_solidity_variables_read(self) -> List[SolidityVariable]:
"""recursive version of solidity_read"""
if self._all_solidity_variables_read is None:
self._all_solidity_variables_read = self._explore_functions(
lambda x: x.solidity_variables_read
)
return self._all_solidity_variables_read
def all_slithir_variables(self) -> List["SlithIRVariable"]:
"""recursive version of slithir_variables"""
if self._all_slithir_variables is None:
self._all_slithir_variables = self._explore_functions(lambda x: x.slithir_variables)
return self._all_slithir_variables
def all_nodes(self) -> List["Node"]:
"""recursive version of nodes"""
if self._all_nodes is None:
self._all_nodes = self._explore_functions(lambda x: x.nodes)
return self._all_nodes
def all_expressions(self) -> List["Expression"]:
"""recursive version of variables_read"""
if self._all_expressions is None:
self._all_expressions = self._explore_functions(lambda x: x.expressions)
return self._all_expressions
def all_slithir_operations(self) -> List["Operation"]:
if self._all_slithir_operations is None:
self._all_slithir_operations = self._explore_functions(lambda x: x.slithir_operations)
return self._all_slithir_operations
def all_state_variables_written(self) -> List[StateVariable]:
"""recursive version of variables_written"""
if self._all_state_variables_written is None:
self._all_state_variables_written = self._explore_functions(
lambda x: x.state_variables_written
)
return self._all_state_variables_written
def all_internal_calls(self) -> List["InternalCall"]:
"""recursive version of internal_calls"""
if self._all_internals_calls is None:
self._all_internals_calls = self._explore_functions(lambda x: x.internal_calls)
return self._all_internals_calls
def all_low_level_calls(self) -> List["LowLevelCall"]:
"""recursive version of low_level calls"""
if self._all_low_level_calls is None:
self._all_low_level_calls = self._explore_functions(lambda x: x.low_level_calls)
return self._all_low_level_calls
def all_high_level_calls(self) -> List[Tuple["Contract", "HighLevelCall"]]:
"""recursive version of high_level calls"""
if self._all_high_level_calls is None:
self._all_high_level_calls = self._explore_functions(lambda x: x.high_level_calls)
return self._all_high_level_calls
def all_library_calls(self) -> List["LibraryCall"]:
"""recursive version of library calls"""
if self._all_library_calls is None:
self._all_library_calls = self._explore_functions(lambda x: x.library_calls)
return self._all_library_calls
def all_solidity_calls(self) -> List["SolidityCall"]:
"""recursive version of solidity calls"""
if self._all_solidity_calls is None:
self._all_solidity_calls = self._explore_functions(lambda x: x.solidity_calls)
return self._all_solidity_calls
@staticmethod
def _explore_func_cond_read(func: "Function", include_loop: bool) -> List["StateVariable"]:
ret = [n.state_variables_read for n in func.nodes if n.is_conditional(include_loop)]
return [item for sublist in ret for item in sublist]
def all_conditional_state_variables_read(self, include_loop=True) -> List["StateVariable"]:
"""
Return the state variable used in a condition
Over approximate and also return index access
It won't work if the variable is assigned to a temp variable
"""
if include_loop:
if self._all_conditional_state_variables_read_with_loop is None:
self._all_conditional_state_variables_read_with_loop = self._explore_functions(
lambda x: self._explore_func_cond_read(x, include_loop)
)
return self._all_conditional_state_variables_read_with_loop
if self._all_conditional_state_variables_read is None:
self._all_conditional_state_variables_read = self._explore_functions(
lambda x: self._explore_func_cond_read(x, include_loop)
)
return self._all_conditional_state_variables_read
@staticmethod
def _solidity_variable_in_binary(node: "Node") -> List[SolidityVariable]:
from slither.slithir.operations.binary import Binary
ret = []
for ir in node.irs:
if isinstance(ir, Binary):
ret += ir.read
return [var for var in ret if isinstance(var, SolidityVariable)]
@staticmethod
def _explore_func_conditional(
func: "Function",
f: Callable[["Node"], List[SolidityVariable]],
include_loop: bool,
) -> List[Any]:
ret = [f(n) for n in func.nodes if n.is_conditional(include_loop)]
return [item for sublist in ret for item in sublist]
def all_conditional_solidity_variables_read(
self, include_loop: bool = True
) -> List[SolidityVariable]:
"""
Return the Soldiity variables directly used in a condtion
Use of the IR to filter index access
Assumption: the solidity vars are used directly in the conditional node
It won't work if the variable is assigned to a temp variable
"""
if include_loop:
if self._all_conditional_solidity_variables_read_with_loop is None:
self._all_conditional_solidity_variables_read_with_loop = self._explore_functions(
lambda x: self._explore_func_conditional(
x, self._solidity_variable_in_binary, include_loop
)
)
return self._all_conditional_solidity_variables_read_with_loop
if self._all_conditional_solidity_variables_read is None:
self._all_conditional_solidity_variables_read = self._explore_functions(
lambda x: self._explore_func_conditional(
x, self._solidity_variable_in_binary, include_loop
)
)
return self._all_conditional_solidity_variables_read
@staticmethod
def _solidity_variable_in_internal_calls(node: "Node") -> List[SolidityVariable]:
from slither.slithir.operations.internal_call import InternalCall
ret = []
for ir in node.irs:
if isinstance(ir, InternalCall):
ret += ir.read
return [var for var in ret if isinstance(var, SolidityVariable)]
@staticmethod
def _explore_func_nodes(
func: "Function", f: Callable[["Node"], List[SolidityVariable]]
) -> List[Union[Any, SolidityVariableComposed]]:
ret = [f(n) for n in func.nodes]
return [item for sublist in ret for item in sublist]
def all_solidity_variables_used_as_args(self) -> List[SolidityVariable]:
"""
Return the Soldiity variables directly used in a call
Use of the IR to filter index access
Used to catch check(msg.sender)
"""
if self._all_solidity_variables_used_as_args is None:
self._all_solidity_variables_used_as_args = self._explore_functions(
lambda x: self._explore_func_nodes(x, self._solidity_variable_in_internal_calls)
)
return self._all_solidity_variables_used_as_args
# endregion
###################################################################################
###################################################################################
# region Visitor
###################################################################################
###################################################################################
def apply_visitor(self, Visitor: Callable) -> List:
"""
Apply a visitor to all the function expressions
Args:
Visitor: slither.visitors
Returns
list(): results of the visit
"""
expressions = self.expressions
v = [Visitor(e).result() for e in expressions]
return [item for sublist in v for item in sublist]
# endregion
###################################################################################
###################################################################################
# region Getters from/to object
###################################################################################
###################################################################################
def get_local_variable_from_name(self, variable_name: str) -> Optional[LocalVariable]:
"""
Return a local variable from a name
Args:
variable_name (str): name of the variable
Returns:
LocalVariable
"""
return next((v for v in self.variables if v.name == variable_name), None)
# endregion
###################################################################################
###################################################################################
# region Export
###################################################################################
###################################################################################
def cfg_to_dot(self, filename: str):
"""
Export the function to a dot file
Args:
filename (str)
"""
with open(filename, "w", encoding="utf8") as f:
f.write("digraph{\n")
for node in self.nodes:
f.write(f'{node.node_id}[label="{str(node)}"];\n')
for son in node.sons:
f.write(f"{node.node_id}->{son.node_id};\n")
f.write("}\n")
def dominator_tree_to_dot(self, filename: str):
"""
Export the dominator tree of the function to a dot file
Args:
filename (str)
"""
def description(node):
desc = f"{node}\n"
desc += f"id: {node.node_id}"
if node.dominance_frontier:
desc += f"\ndominance frontier: {[n.node_id for n in node.dominance_frontier]}"
return desc
with open(filename, "w", encoding="utf8") as f:
f.write("digraph{\n")
for node in self.nodes:
f.write(f'{node.node_id}[label="{description(node)}"];\n')
if node.immediate_dominator:
f.write(f"{node.immediate_dominator.node_id}->{node.node_id};\n")
f.write("}\n")
def slithir_cfg_to_dot(self, filename: str):
"""
Export the CFG to a DOT file. The nodes includes the Solidity expressions and the IRs
:param filename:
:return:
"""
content = self.slithir_cfg_to_dot_str()
with open(filename, "w", encoding="utf8") as f:
f.write(content)
def slithir_cfg_to_dot_str(self, skip_expressions: bool = False) -> str:
"""
Export the CFG to a DOT format. The nodes includes the Solidity expressions and the IRs
:return: the DOT content
:rtype: str
"""
from slither.core.cfg.node import NodeType
content = ""
content += "digraph{\n"
for node in self.nodes:
label = f"Node Type: {node.type.value} {node.node_id}\n"
if node.expression and not skip_expressions:
label += f"\nEXPRESSION:\n{node.expression}\n"
if node.irs and not skip_expressions:
label += "\nIRs:\n" + "\n".join([str(ir) for ir in node.irs])
content += f'{node.node_id}[label="{label}"];\n'
if node.type in [NodeType.IF, NodeType.IFLOOP]:
true_node = node.son_true
if true_node:
content += f'{node.node_id}->{true_node.node_id}[label="True"];\n'
false_node = node.son_false
if false_node:
content += f'{node.node_id}->{false_node.node_id}[label="False"];\n'
else:
for son in node.sons:
content += f"{node.node_id}->{son.node_id};\n"
content += "}\n"
return content
# endregion
###################################################################################
###################################################################################
# region Summary information
###################################################################################
###################################################################################
def is_reading(self, variable: "Variable") -> bool:
"""
Check if the function reads the variable
Args:
variable (Variable):
Returns:
bool: True if the variable is read
"""
return variable in self.variables_read
def is_reading_in_conditional_node(self, variable: "Variable") -> bool:
"""
Check if the function reads the variable in a IF node
Args:
variable (Variable):
Returns:
bool: True if the variable is read
"""
variables_reads = [n.variables_read for n in self.nodes if n.contains_if()]
variables_read = [item for sublist in variables_reads for item in sublist]
return variable in variables_read
def is_reading_in_require_or_assert(self, variable: "Variable") -> bool:
"""
Check if the function reads the variable in an require or assert
Args:
variable (Variable):
Returns:
bool: True if the variable is read
"""
variables_reads = [n.variables_read for n in self.nodes if n.contains_require_or_assert()]
variables_read = [item for sublist in variables_reads for item in sublist]
return variable in variables_read
def is_writing(self, variable: "Variable") -> bool:
"""
Check if the function writes the variable
Args:
variable (Variable):
Returns:
bool: True if the variable is written
"""
return variable in self.variables_written
@abstractmethod
def get_summary(
self,
) -> Tuple[str, str, str, List[str], List[str], List[str], List[str], List[str]]:
pass
def is_protected(self) -> bool:
"""
Determine if the function is protected using a check on msg.sender
Consider onlyOwner as a safe modifier.
If the owner functionality is incorrectly implemented, this will lead to incorrectly
classify the function as protected
Otherwise only detects if msg.sender is directly used in a condition
For example, it wont work for:
address a = msg.sender
require(a == owner)
Returns
(bool)
"""
if self._is_protected is None:
if self.is_constructor:
self._is_protected = True
return True
if "onlyOwner" in [m.name for m in self.modifiers]:
self._is_protected = True
return True
conditional_vars = self.all_conditional_solidity_variables_read(include_loop=False)
args_vars = self.all_solidity_variables_used_as_args()
self._is_protected = (
SolidityVariableComposed("msg.sender") in conditional_vars + args_vars
)
return self._is_protected
@property
def is_reentrant(self) -> bool:
"""
Determine if the function can be re-entered
"""
reentrancy_modifier = "nonReentrant"
if self.function_language == FunctionLanguage.Vyper:
reentrancy_modifier = "nonreentrant(lock)"
# TODO: compare with hash of known nonReentrant modifier instead of the name
if reentrancy_modifier in [m.name for m in self.modifiers]:
return False
if self.visibility in ["public", "external"]:
return True
# If it's an internal function, check if all its entry points have the nonReentrant modifier
all_entry_points = [
f for f in self.all_reachable_from_functions if f.visibility in ["public", "external"]
]
if not all_entry_points:
return True
return not all(
(reentrancy_modifier in [m.name for m in f.modifiers] for f in all_entry_points)
)
# endregion
###################################################################################
###################################################################################
# region Analyses
###################################################################################
###################################################################################
def _analyze_read_write(self) -> None:
"""Compute variables read/written/..."""
write_var = [x.variables_written_as_expression for x in self.nodes]
write_var = [x for x in write_var if x]
write_var = [item for sublist in write_var for item in sublist]
write_var = list(set(write_var))
# Remove duplicate if they share the same string representation
write_var = [
next(obj)
for i, obj in groupby(sorted(write_var, key=lambda x: str(x)), lambda x: str(x))
]
self._expression_vars_written = write_var
write_var = [x.variables_written for x in self.nodes]
write_var = [x for x in write_var if x]
write_var = [item for sublist in write_var for item in sublist]
write_var = list(set(write_var))
# Remove duplicate if they share the same string representation
write_var = [
next(obj)
for i, obj in groupby(sorted(write_var, key=lambda x: str(x)), lambda x: str(x))
]
self._vars_written = write_var
read_var = [x.variables_read_as_expression for x in self.nodes]
read_var = [x for x in read_var if x]
read_var = [item for sublist in read_var for item in sublist]
# Remove duplicate if they share the same string representation
read_var = [
next(obj)
for i, obj in groupby(sorted(read_var, key=lambda x: str(x)), lambda x: str(x))
]
self._expression_vars_read = read_var
read_var = [x.variables_read for x in self.nodes]
read_var = [x for x in read_var if x]
read_var = [item for sublist in read_var for item in sublist]
# Remove duplicate if they share the same string representation
read_var = [
next(obj)
for i, obj in groupby(sorted(read_var, key=lambda x: str(x)), lambda x: str(x))
]
self._vars_read = read_var
self._state_vars_written = [
x for x in self.variables_written if isinstance(x, StateVariable)
]
self._state_vars_read = [x for x in self.variables_read if isinstance(x, StateVariable)]
self._solidity_vars_read = [
x for x in self.variables_read if isinstance(x, SolidityVariable)
]
self._vars_read_or_written = self._vars_written + self._vars_read
slithir_variables = [x.slithir_variables for x in self.nodes]
slithir_variables = [x for x in slithir_variables if x]
self._slithir_variables = [item for sublist in slithir_variables for item in sublist]
def _analyze_calls(self) -> None:
calls = [x.calls_as_expression for x in self.nodes]
calls = [x for x in calls if x]
calls = [item for sublist in calls for item in sublist]
self._expression_calls = list(set(calls))
internal_calls = [x.internal_calls for x in self.nodes]
internal_calls = [x for x in internal_calls if x]
internal_calls = [item for sublist in internal_calls for item in sublist]
self._internal_calls = list(set(internal_calls))
self._solidity_calls = [
ir for ir in internal_calls if isinstance(ir.function, SolidityFunction)
]
low_level_calls = [x.low_level_calls for x in self.nodes]
low_level_calls = [x for x in low_level_calls if x]
low_level_calls = [item for sublist in low_level_calls for item in sublist]
self._low_level_calls = list(set(low_level_calls))
high_level_calls = [x.high_level_calls for x in self.nodes]
high_level_calls = [x for x in high_level_calls if x]
high_level_calls = [item for sublist in high_level_calls for item in sublist]
self._high_level_calls = list(set(high_level_calls))
library_calls = [x.library_calls for x in self.nodes]
library_calls = [x for x in library_calls if x]
library_calls = [item for sublist in library_calls for item in sublist]
self._library_calls = list(set(library_calls))
external_calls_as_expressions = [x.external_calls_as_expressions for x in self.nodes]
external_calls_as_expressions = [x for x in external_calls_as_expressions if x]
external_calls_as_expressions = [
item for sublist in external_calls_as_expressions for item in sublist
]
self._external_calls_as_expressions = list(set(external_calls_as_expressions))
# endregion
###################################################################################
###################################################################################
# region Nodes
###################################################################################
###################################################################################
def new_node(
self, node_type: "NodeType", src: Union[str, Dict], scope: Union[Scope, "Function"]
) -> "Node":
from slither.core.cfg.node import Node
node = Node(node_type, self._counter_nodes, scope, self.file_scope)
node.set_offset(src, self.compilation_unit)
self._counter_nodes += 1
node.set_function(self)
self._nodes.append(node)
return node
# endregion
###################################################################################
###################################################################################
# region SlithIr and SSA
###################################################################################
###################################################################################
def _get_last_ssa_variable_instances(
self, target_state: bool, target_local: bool
) -> Dict[str, Set["SlithIRVariable"]]:
# pylint: disable=too-many-locals,too-many-branches
from slither.slithir.variables import ReferenceVariable
from slither.slithir.operations import OperationWithLValue
from slither.core.cfg.node import NodeType
if not self.is_implemented:
return {}
if self._entry_point is None:
return {}
# node, values
to_explore: List[Tuple["Node", Dict]] = [(self._entry_point, {})]
# node -> values
explored: Dict = {}
# name -> instances
ret: Dict = {}
while to_explore:
node, values = to_explore[0]
to_explore = to_explore[1::]
if node.type != NodeType.ENTRYPOINT:
for ir_ssa in node.irs_ssa:
if isinstance(ir_ssa, OperationWithLValue):
lvalue = ir_ssa.lvalue
if isinstance(lvalue, ReferenceVariable):
lvalue = lvalue.points_to_origin
if isinstance(lvalue, StateVariable) and target_state:
values[lvalue.canonical_name] = {lvalue}
if isinstance(lvalue, LocalVariable) and target_local:
values[lvalue.canonical_name] = {lvalue}
# Check for fixpoint
if node in explored:
if values == explored[node]:
continue
for k, instances in values.items():
if k not in explored[node]:
explored[node][k] = set()
explored[node][k] |= instances
values = explored[node]
else:
explored[node] = values
# Return condition
if node.will_return:
for name, instances in values.items():
if name not in ret:
ret[name] = set()
ret[name] |= instances
for son in node.sons:
to_explore.append((son, dict(values)))
return ret
def get_last_ssa_state_variables_instances(
self,
) -> Dict[str, Set["SlithIRVariable"]]:
return self._get_last_ssa_variable_instances(target_state=True, target_local=False)
def get_last_ssa_local_variables_instances(
self,
) -> Dict[str, Set["SlithIRVariable"]]:
return self._get_last_ssa_variable_instances(target_state=False, target_local=True)
@staticmethod
def _unchange_phi(ir: "Operation") -> bool:
from slither.slithir.operations import Phi, PhiCallback
if not isinstance(ir, (Phi, PhiCallback)) or len(ir.rvalues) > 1:
return False
if not ir.rvalues:
return True
return ir.rvalues[0] == ir.lvalue
def _fix_phi_entry(
self,
node: "Node",
last_state_variables_instances: Dict[str, List["StateVariable"]],
initial_state_variables_instances: Dict[str, "StateVariable"],
) -> None:
from slither.slithir.variables import Constant, StateIRVariable, LocalIRVariable
for ir in node.irs_ssa:
if isinstance(ir.lvalue, StateIRVariable):
additional = [initial_state_variables_instances[ir.lvalue.canonical_name]]
additional += last_state_variables_instances[ir.lvalue.canonical_name]
ir.rvalues = list(set(additional + ir.rvalues))
# function parameter that are storage pointer
else:
# find index of the parameter
idx = self.parameters.index(ir.lvalue.non_ssa_version)
# find non ssa version of that index
additional = [n.ir.arguments[idx] for n in self.reachable_from_nodes]
additional = unroll(additional)
additional = [a for a in additional if not isinstance(a, Constant)]
ir.rvalues = list(set(additional + ir.rvalues))
if isinstance(ir.lvalue, LocalIRVariable) and ir.lvalue.is_storage:
# Update the refers_to to point to the phi rvalues
# This basically means that the local variable is a storage that point to any
# state variable that the storage pointer alias analysis found
ir.lvalue.refers_to = [
rvalue for rvalue in ir.rvalues if isinstance(rvalue, StateIRVariable)
]
def fix_phi(
self,
last_state_variables_instances: Dict[str, List["StateVariable"]],
initial_state_variables_instances: Dict[str, "StateVariable"],
) -> None:
from slither.slithir.operations import InternalCall, PhiCallback, Phi
from slither.slithir.variables import StateIRVariable, LocalIRVariable
for node in self.nodes:
if node == self.entry_point:
self._fix_phi_entry(
node, last_state_variables_instances, initial_state_variables_instances
)
for ir in node.irs_ssa:
if isinstance(ir, PhiCallback):
callee_ir = ir.callee_ir
if isinstance(callee_ir, InternalCall):
last_ssa = callee_ir.function.get_last_ssa_state_variables_instances()
if ir.lvalue.canonical_name in last_ssa:
ir.rvalues = list(last_ssa[ir.lvalue.canonical_name])
else:
ir.rvalues = [ir.lvalue]
else:
additional = last_state_variables_instances[ir.lvalue.canonical_name]
ir.rvalues = list(set(additional + ir.rvalues))
# Propage storage ref information if it does not exist
# This can happen if the refers_to variable was discovered through the phi operator on function parameter
# aka you have storage pointer as function parameter
# instead of having a storage pointer for which the aliases belong to the function body
if (
isinstance(ir, Phi)
and isinstance(ir.lvalue, LocalIRVariable)
and ir.lvalue.is_storage
and not ir.lvalue.refers_to
):
refers_to = []
for candidate in ir.rvalues:
if isinstance(candidate, StateIRVariable):
refers_to.append(candidate)
if isinstance(candidate, LocalIRVariable) and candidate.is_storage:
refers_to += candidate.refers_to
ir.lvalue.refers_to = refers_to
node.irs_ssa = [ir for ir in node.irs_ssa if not self._unchange_phi(ir)]
def generate_slithir_and_analyze(self) -> None:
for node in self.nodes:
node.slithir_generation()
self._analyze_read_write()
self._analyze_calls()
@abstractmethod
def generate_slithir_ssa(self, all_ssa_state_variables_instances):
pass
def update_read_write_using_ssa(self) -> None:
for node in self.nodes:
node.update_read_write_using_ssa()
self._analyze_read_write()
###################################################################################
###################################################################################
# region Built in definitions
###################################################################################
###################################################################################
def __str__(self) -> str:
return self.name
# endregion
| Function |
python | doocs__leetcode | solution/2400-2499/2448.Minimum Cost to Make Array Equal/Solution2.py | {
"start": 0,
"end": 288
} | class ____:
def minCost(self, nums: List[int], cost: List[int]) -> int:
arr = sorted(zip(nums, cost))
mid = sum(cost) // 2
s = 0
for x, c in arr:
s += c
if s > mid:
return sum(abs(v - x) * c for v, c in arr)
| Solution |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/logging_/test_logger_connector.py | {
"start": 6080,
"end": 25161
} | class ____(BoringModel):
def __init__(self, not_supported):
super().__init__()
pl_module_hooks = get_members(LightningModule)
pl_module_hooks.difference_update({"log", "log_dict"})
pl_module_hooks.discard("configure_sharded_model")
# remove `nn.Module` hooks
module_hooks = get_members(torch.nn.Module)
pl_module_hooks.difference_update(module_hooks)
def call(hook, fn, *args, **kwargs):
out = fn(*args, **kwargs)
if hook in not_supported:
with pytest.raises(MisconfigurationException, match=not_supported[hook]):
self.log("anything", 1)
else:
self.log(hook, 1)
return out
for h in pl_module_hooks:
attr = getattr(self, h)
setattr(self, h, partial(call, h, attr))
def test_fx_validator_integration(tmp_path):
"""Tries to log inside all `LightningModule` and `Callback` hooks to check any expected errors."""
not_supported = {
None: "`self.trainer` reference is not registered",
"setup": "You can't",
"configure_model": "You can't",
"configure_optimizers": "You can't",
"on_fit_start": "You can't",
"train_dataloader": "You can't",
"val_dataloader": "You can't",
"on_before_batch_transfer": "You can't",
"transfer_batch_to_device": "You can't",
"on_after_batch_transfer": "You can't",
"on_validation_end": "You can't",
"on_train_end": "You can't",
"on_fit_end": "You can't",
"teardown": "You can't",
"on_sanity_check_start": "You can't",
"on_sanity_check_end": "You can't",
"prepare_data": "You can't",
"configure_callbacks": "You can't",
"on_validation_model_zero_grad": "You can't",
"on_validation_model_eval": "You can't",
"on_validation_model_train": "You can't",
"lr_scheduler_step": "You can't",
"on_save_checkpoint": "You can't",
"on_load_checkpoint": "You can't",
"on_exception": "You can't",
}
model = HookedModel(not_supported)
with pytest.warns(UserWarning, match=not_supported[None]):
model.log("foo", 1)
callback = HookedCallback(not_supported)
trainer = Trainer(
default_root_dir=tmp_path,
max_epochs=2,
limit_train_batches=1,
limit_val_batches=1,
limit_test_batches=1,
limit_predict_batches=1,
callbacks=callback,
)
trainer.fit(model)
not_supported.update({
# `lightning_module` ref is now present from the `fit` call
"test_dataloader": "You can't",
"on_test_model_eval": "You can't",
"on_test_model_train": "You can't",
"on_test_end": "You can't",
})
trainer.test(model, verbose=False)
not_supported.update(dict.fromkeys(not_supported, "result collection is not registered yet"))
not_supported.update({
"predict_dataloader": "result collection is not registered yet",
"on_predict_model_eval": "result collection is not registered yet",
"on_predict_start": "result collection is not registered yet",
"on_predict_epoch_start": "result collection is not registered yet",
"on_predict_batch_start": "result collection is not registered yet",
"predict_step": "result collection is not registered yet",
"on_predict_batch_end": "result collection is not registered yet",
"on_predict_epoch_end": "result collection is not registered yet",
"on_predict_end": "result collection is not registered yet",
})
trainer.predict(model)
@pytest.mark.parametrize("add_dataloader_idx", [False, True])
def test_auto_add_dataloader_idx(tmp_path, add_dataloader_idx):
"""Test that auto_add_dataloader_idx argument works."""
class TestModel(BoringModel):
def val_dataloader(self):
dl = super().val_dataloader()
return [dl, dl]
def validation_step(self, *args, **kwargs):
output = super().validation_step(*args[:-1], **kwargs)
name = "val_loss" if add_dataloader_idx else f"val_loss_custom_naming_{args[-1]}"
self.log(name, output["x"], add_dataloader_idx=add_dataloader_idx)
return output
model = TestModel()
trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=2)
trainer.fit(model)
logged = trainer.logged_metrics
# Check that the correct keys exist
if add_dataloader_idx:
assert "val_loss/dataloader_idx_0" in logged
assert "val_loss/dataloader_idx_1" in logged
else:
assert "val_loss_custom_naming_0" in logged
assert "val_loss_custom_naming_1" in logged
def test_metrics_reset(tmp_path):
"""Tests that metrics are reset correctly after the end of the train/val/test epoch."""
class TestModel(LightningModule):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(32, 1)
def _create_metrics(self):
acc = Accuracy(task="binary") if _TM_GE_0_11 else Accuracy()
acc.reset = mock.Mock(side_effect=acc.reset)
ap = AvgPre(task="binary") if _TM_GE_0_11 else AvgPre(num_classes=1, pos_label=1)
ap.reset = mock.Mock(side_effect=ap.reset)
return acc, ap
def setup(self, stage):
fn = stage.value
if fn == "fit":
for stage in ("train", "validate"):
acc, ap = self._create_metrics()
self.add_module(f"acc_{fn}_{stage}", acc)
self.add_module(f"ap_{fn}_{stage}", ap)
else:
acc, ap = self._create_metrics()
stage = self.trainer.state.stage.value
self.add_module(f"acc_{fn}_{stage}", acc)
self.add_module(f"ap_{fn}_{stage}", ap)
def forward(self, x):
return self.layer(x)
def _step(self, batch):
fn, stage = self.trainer.state.fn.value, self.trainer.state.stage.value
logits = self(batch)
loss = logits.sum()
self.log(f"loss/{fn}_{stage}", loss)
acc = self._modules[f"acc_{fn}_{stage}"]
ap = self._modules[f"ap_{fn}_{stage}"]
preds = torch.rand(len(batch)) # Fake preds
labels = torch.randint(0, 1, [len(batch)]) # Fake targets
acc(preds, labels)
ap(preds, labels)
# Metric.forward calls reset so reset the mocks here
acc.reset.reset_mock()
ap.reset.reset_mock()
self.log(f"acc/{fn}_{stage}", acc)
self.log(f"ap/{fn}_{stage}", ap)
return loss
def training_step(self, batch, batch_idx, *args, **kwargs):
return self._step(batch)
def validation_step(self, batch, batch_idx, *args, **kwargs):
if self.trainer.sanity_checking:
return None
return self._step(batch)
def test_step(self, batch, batch_idx, *args, **kwargs):
return self._step(batch)
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def _assert_called(model, fn, stage):
acc = model._modules[f"acc_{fn}_{stage}"]
ap = model._modules[f"ap_{fn}_{stage}"]
acc.reset.assert_called_once()
ap.reset.assert_called_once()
model = TestModel()
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
enable_progress_bar=False,
num_sanity_val_steps=2,
enable_checkpointing=False,
)
trainer.fit(model)
_assert_called(model, "fit", "train")
_assert_called(model, "fit", "validate")
trainer.validate(model)
_assert_called(model, "validate", "validate")
trainer.test(model)
_assert_called(model, "test", "test")
@pytest.mark.skipif(
compare_version("torchmetrics", operator.lt, "0.8.0"), reason="torchmetrics>=0.8.0 required for compute groups"
)
@pytest.mark.parametrize("compute_groups", [True, False])
def test_metriccollection_compute_groups(tmp_path, compute_groups):
def assertion_calls(keep_base: bool, copy_state: bool):
if _TORCHMETRICS_GREATER_EQUAL_0_9_1:
assert copy_state != compute_groups
assert not keep_base
class CustomMetricsCollection(MetricCollection):
wrapped_assertion_calls = Mock(wraps=assertion_calls)
def items(self, keep_base: bool = False, copy_state: bool = True):
if getattr(self, "_is_currently_logging", False):
self.wrapped_assertion_calls(keep_base, copy_state)
return super().items(keep_base=keep_base, copy_state=copy_state)
class DummyModule(LightningModule):
def __init__(self):
super().__init__()
if compare_version("torchmetrics", operator.ge, "0.10.0"):
from torchmetrics.classification import MulticlassAccuracy, MulticlassPrecision
metrics = [
MulticlassAccuracy(num_classes=10, average="micro"),
MulticlassPrecision(num_classes=10, average="micro"),
]
else:
from torchmetrics import Accuracy, Precision
metrics = [Accuracy(num_classes=10, average="micro"), Precision(num_classes=10, average="micro")]
self.metrics = CustomMetricsCollection(
metrics,
compute_groups=compute_groups,
)
self.layer = torch.nn.Linear(32, 10)
def training_step(self, batch):
self.metrics(torch.rand(10, 10).softmax(-1), torch.randint(0, 10, (10,)))
self.metrics._is_currently_logging = True
self.log_dict(self.metrics, on_step=True, on_epoch=True)
self.metrics._is_currently_logging = False
return self.layer(batch).sum()
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64))
def configure_optimizers(self):
return torch.optim.SGD(self.parameters(), lr=0.1)
def on_train_epoch_end(self) -> None:
self.metrics.wrapped_assertion_calls.call_count == 2
self.metrics.wrapped_assertion_calls.reset_mock()
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=2,
limit_val_batches=0,
max_epochs=1,
enable_progress_bar=False,
enable_checkpointing=False,
)
trainer.fit(DummyModule())
def test_result_collection_on_tensor_with_mean_reduction():
result_collection = _ResultCollection(True)
product = [(True, True), (False, True), (True, False), (False, False)]
values = torch.arange(1, 10)
batches = values * values
for i, v in enumerate(values):
for prog_bar in [False, True]:
for logger in [False, True]:
for on_step, on_epoch in product:
name = "loss"
if on_step:
name += "_on_step"
if on_epoch:
name += "_on_epoch"
if prog_bar:
name += "_prog_bar"
if logger:
name += "_logger"
log_kwargs = {
"fx": "training_step",
"name": name,
"value": v,
"on_step": on_step,
"on_epoch": on_epoch,
"batch_size": batches[i],
"prog_bar": prog_bar,
"logger": logger,
}
if not on_step and not on_epoch:
with pytest.raises(MisconfigurationException, match="on_step=False, on_epoch=False"):
result_collection.log(**log_kwargs)
else:
result_collection.log(**log_kwargs)
total_value = sum(values * batches)
total_batches = sum(batches)
assert result_collection["training_step.loss_on_step_on_epoch"].value == total_value
assert result_collection["training_step.loss_on_step_on_epoch"].cumulated_batch_size == total_batches
batch_metrics = result_collection.metrics(True)
max_ = max(values)
assert batch_metrics["pbar"] == {
"loss_on_step_on_epoch_prog_bar_step": max_,
"loss_on_step_on_epoch_prog_bar_logger_step": max_,
"loss_on_step_prog_bar": max_,
"loss_on_step_prog_bar_logger": max_,
}
assert batch_metrics["log"] == {
"loss_on_step_on_epoch_logger_step": max_,
"loss_on_step_logger": max_,
"loss_on_step_on_epoch_prog_bar_logger_step": max_,
"loss_on_step_prog_bar_logger": max_,
}
assert batch_metrics["callback"] == {
"loss_on_step": max_,
"loss_on_step_logger": max_,
"loss_on_step_on_epoch": max_,
"loss_on_step_on_epoch_logger": max_,
"loss_on_step_on_epoch_logger_step": max_,
"loss_on_step_on_epoch_prog_bar": max_,
"loss_on_step_on_epoch_prog_bar_logger": max_,
"loss_on_step_on_epoch_prog_bar_logger_step": max_,
"loss_on_step_on_epoch_prog_bar_step": max_,
"loss_on_step_on_epoch_step": max_,
"loss_on_step_prog_bar": max_,
"loss_on_step_prog_bar_logger": max_,
}
epoch_metrics = result_collection.metrics(False)
mean = total_value / total_batches
assert epoch_metrics["pbar"] == {
"loss_on_epoch_prog_bar": mean,
"loss_on_epoch_prog_bar_logger": mean,
"loss_on_step_on_epoch_prog_bar_epoch": mean,
"loss_on_step_on_epoch_prog_bar_logger_epoch": mean,
}
assert epoch_metrics["log"] == {
"loss_on_epoch_logger": mean,
"loss_on_epoch_prog_bar_logger": mean,
"loss_on_step_on_epoch_logger_epoch": mean,
"loss_on_step_on_epoch_prog_bar_logger_epoch": mean,
}
assert epoch_metrics["callback"] == {
"loss_on_epoch": mean,
"loss_on_epoch_logger": mean,
"loss_on_epoch_prog_bar": mean,
"loss_on_epoch_prog_bar_logger": mean,
"loss_on_step_on_epoch": mean,
"loss_on_step_on_epoch_epoch": mean,
"loss_on_step_on_epoch_logger": mean,
"loss_on_step_on_epoch_logger_epoch": mean,
"loss_on_step_on_epoch_prog_bar": mean,
"loss_on_step_on_epoch_prog_bar_epoch": mean,
"loss_on_step_on_epoch_prog_bar_logger": mean,
"loss_on_step_on_epoch_prog_bar_logger_epoch": mean,
}
@pytest.mark.parametrize("logger", [False, True])
def test_logged_metrics_has_logged_epoch_value(tmp_path, logger):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
self.log("epoch", -batch_idx, logger=True)
return super().training_step(batch, batch_idx)
model = TestModel()
trainer_kwargs = {
"default_root_dir": tmp_path,
"limit_train_batches": 2,
"limit_val_batches": 0,
"max_epochs": 1,
"logger": False,
}
if logger:
trainer_kwargs["logger"] = CSVLogger(tmp_path)
trainer = Trainer(**trainer_kwargs)
if not logger:
with pytest.warns(match=r"log\('epoch', ..., logger=True\)` but have no logger"):
trainer.fit(model)
else:
trainer.fit(model)
# should not get overridden if logged manually
assert trainer.logged_metrics == {"epoch": -1}
def test_result_collection_batch_size_extraction():
fx_name = "training_step"
log_val = torch.tensor(7.0)
results = _ResultCollection(training=True)
results.batch = torch.randn(1, 4)
train_mse = MeanSquaredError()
train_mse(torch.randn(4, 5), torch.randn(4, 5))
results.log(fx_name, "mse", train_mse, on_step=False, on_epoch=True)
results.log(fx_name, "log_val", log_val, on_step=False, on_epoch=True)
assert results.batch_size == 1
assert isinstance(results["training_step.mse"].value, MeanSquaredError)
assert results["training_step.log_val"].value == log_val
results = _ResultCollection(training=True)
results.batch = torch.randn(1, 4)
results.log(fx_name, "train_log", log_val, on_step=False, on_epoch=True)
assert results.batch_size == 1
assert results["training_step.train_log"].value == log_val
assert results["training_step.train_log"].cumulated_batch_size == 1
def test_result_collection_no_batch_size_extraction():
results = _ResultCollection(training=True)
results.batch = torch.randn(1, 4)
fx_name = "training_step"
batch_size = 10
log_val = torch.tensor(7.0)
train_mae = MeanAbsoluteError()
train_mae(torch.randn(4, 5), torch.randn(4, 5))
results.log(fx_name, "step_log_val", log_val, on_step=True, on_epoch=False)
results.log(fx_name, "epoch_log_val", log_val, on_step=False, on_epoch=True, batch_size=batch_size)
results.log(fx_name, "epoch_sum_log_val", log_val, on_step=True, on_epoch=True, reduce_fx="sum")
results.log(fx_name, "train_mae", train_mae, on_step=True, on_epoch=False)
assert results.batch_size is None
assert isinstance(results["training_step.train_mae"].value, MeanAbsoluteError)
assert results["training_step.step_log_val"].value == log_val
assert results["training_step.step_log_val"].cumulated_batch_size == 0
assert results["training_step.epoch_log_val"].value == log_val * batch_size
assert results["training_step.epoch_log_val"].cumulated_batch_size == batch_size
assert results["training_step.epoch_sum_log_val"].value == log_val
@RunIf(min_cuda_gpus=1)
def test_result_collection_changes_device():
"""Test that the keys in the ResultCollection are moved to the device together with the collection."""
results = _ResultCollection(training=True)
fx, name = "training_step", "step_log_val"
log_val = torch.tensor(7.0, device="cuda:0")
# same device as the original tensor
results.log(fx, name, log_val, on_step=True, on_epoch=False, reduce_fx="mean")
assert results[f"{fx}.{name}"].cumulated_batch_size.device == log_val.device
# moved to cpu
results.cpu()
assert results[f"{fx}.{name}"].cumulated_batch_size.device == torch.device("cpu")
# same device as the new tensor
results.log(fx, name, log_val, on_step=True, on_epoch=False, reduce_fx="mean")
assert results[f"{fx}.{name}"].cumulated_batch_size.device == log_val.device
| HookedModel |
python | django-compressor__django-compressor | compressor/tests/test_offline.py | {
"start": 14326,
"end": 14487
} | class ____(SuperMixin, OfflineTestCaseMixin, TestCase):
templates_dir = "test_block_super"
expected_hash = "817b5defb197"
| OfflineCompressBlockSuperTestCase |
python | viewflow__viewflow | tests/fsm/test_fsm__permissions.py | {
"start": 212,
"end": 604
} | class ____(object):
stage = State(ReviewState, default=ReviewState.NEW)
@stage.transition(
source=ReviewState.NEW,
target=ReviewState.REMOVED,
permission=this.can_remove_review
)
def remove(self):
pass
def can_remove_review(self, user):
return State.CONDITION(user.is_staff, unmet="Only staff users can delete reviews")
| _Publication |
python | prabhupant__python-ds | data_structures/binary_trees/print_spiral_tree_two_stacks.py | {
"start": 0,
"end": 693
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def print_spiral(root):
s1 = []
s2 = []
s1.append(root)
while not len(s1) == 0 or not len(s2) == 0:
while not len(s1) == 0:
temp = s1.pop()
print(temp.data, end=' ')
if temp.right:
s2.append(temp.right)
if temp.left:
s2.append(temp.left)
while not len(s2) == 0:
temp = s2.pop()
print(temp.data, end=' ')
if temp.left:
s1.append(temp.left)
if temp.right:
s1.append(temp.right)
| Node |
python | Lightning-AI__lightning | examples/pytorch/servable_module/production.py | {
"start": 1438,
"end": 2120
} | class ____(LightningDataModule):
transform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
def train_dataloader(self, *args, **kwargs):
trainset = torchvision.datasets.CIFAR10(root=DATASETS_PATH, train=True, download=True, transform=self.transform)
return torch.utils.data.DataLoader(trainset, batch_size=2, shuffle=True, num_workers=0)
def val_dataloader(self, *args, **kwargs):
valset = torchvision.datasets.CIFAR10(root=DATASETS_PATH, train=False, download=True, transform=self.transform)
return torch.utils.data.DataLoader(valset, batch_size=2, shuffle=True, num_workers=0)
@dataclass(unsafe_hash=True)
| CIFAR10DataModule |
python | apache__avro | lang/py/avro/errors.py | {
"start": 3996,
"end": 4113
} | class ____(RuntimeError, AvroException):
"""An exception raised when incorrect arguments were passed."""
| UsageError |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_reflection.py | {
"start": 38726,
"end": 43164
} | class ____(fixtures.TestBase):
def test_default_schema_name_not_interpreted_as_tokenized(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2014_VERSION
mock_connection = mock.Mock(scalar=lambda sql: "Jonah.The.Whale")
schema_name = dialect._get_default_schema_name(mock_connection)
eq_(schema_name, "Jonah.The.Whale")
eq_(
base._owner_plus_db(dialect, schema_name),
(None, "Jonah.The.Whale"),
)
def test_owner_database_pairs_dont_use_for_same_db(self):
dialect = mssql.dialect()
identifier = "my_db.some_schema"
schema, owner = base._owner_plus_db(dialect, identifier)
mock_connection = mock.Mock(
dialect=dialect,
exec_driver_sql=mock.Mock(
return_value=mock.Mock(scalar=mock.Mock(return_value="my_db"))
),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
eq_(
mock_connection.mock_calls,
[mock.call.exec_driver_sql("select db_name()")],
)
eq_(
mock_connection.exec_driver_sql.return_value.mock_calls,
[mock.call.scalar()],
),
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
def test_owner_database_pairs_switch_for_different_db(self):
dialect = mssql.dialect()
identifier = "my_other_db.some_schema"
schema, owner = base._owner_plus_db(dialect, identifier)
mock_connection = mock.Mock(
dialect=dialect,
exec_driver_sql=mock.Mock(
return_value=mock.Mock(scalar=mock.Mock(return_value="my_db"))
),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
eq_(
mock_connection.mock_calls,
[
mock.call.exec_driver_sql("select db_name()"),
mock.call.exec_driver_sql("use my_other_db"),
mock.call.exec_driver_sql("use my_db"),
],
eq_(
mock_connection.exec_driver_sql.return_value.mock_calls,
[mock.call.scalar()],
),
)
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
@testing.combinations(
("foo", None, "foo", "use foo"),
("foo.bar", "foo", "bar", "use foo"),
("Foo.Bar", "Foo", "Bar", "use [Foo]"),
("[Foo.Bar]", None, "Foo.Bar", "use [Foo.Bar]"),
("[Foo.Bar].[bat]", "Foo.Bar", "bat", "use [Foo.Bar]"),
(
"[foo].]do something; select [foo",
"foo",
"do something; select foo",
"use foo",
),
(
"something; select [foo].bar",
"something; select foo",
"bar",
"use [something; select foo]",
),
(
"[abc].[def].[efg].[hij]",
"[abc].[def].[efg]",
"hij",
"use [abc].[def].[efg]",
),
("abc.def.efg.hij", "abc.def.efg", "hij", "use [abc.def.efg]"),
)
def test_owner_database_pairs(
self, identifier, expected_schema, expected_owner, use_stmt
):
dialect = mssql.dialect()
schema, owner = base._owner_plus_db(dialect, identifier)
eq_(owner, expected_owner)
eq_(schema, expected_schema)
mock_connection = mock.Mock(
dialect=dialect,
exec_driver_sql=mock.Mock(
return_value=mock.Mock(
scalar=mock.Mock(return_value="Some Database")
)
),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
if schema is None:
eq_(mock_connection.mock_calls, [])
else:
eq_(
mock_connection.mock_calls,
[
mock.call.exec_driver_sql("select db_name()"),
mock.call.exec_driver_sql(use_stmt),
mock.call.exec_driver_sql("use [Some Database]"),
],
)
eq_(
mock_connection.exec_driver_sql.return_value.mock_calls,
[mock.call.scalar()],
)
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
| OwnerPlusDBTest |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 191752,
"end": 191974
} | class ____:
def test_tail(self):
assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),
5)
| TestExponpow |
python | astropy__astropy | astropy/io/ascii/basic.py | {
"start": 5268,
"end": 5415
} | class ____(BasicHeader):
"""
Reader for header of tables with tab separated header.
"""
splitter_class = TabHeaderSplitter
| TabHeader |
python | redis__redis-py | redis/commands/search/reducers.py | {
"start": 696,
"end": 908
} | class ____(FieldOnlyReducer):
"""
Calculates the smallest value in the given field within the group
"""
NAME = "MIN"
def __init__(self, field: str) -> None:
super().__init__(field)
| min |
python | crytic__slither | slither/slithir/operations/new_contract.py | {
"start": 512,
"end": 3981
} | class ____(Call, OperationWithLValue): # pylint: disable=too-many-instance-attributes
def __init__(
self,
contract_name: UserDefinedType,
lvalue: Union[TemporaryVariableSSA, TemporaryVariable],
names: Optional[List[str]] = None,
) -> None:
"""
#### Parameters
names -
For calls of the form f({argName1 : arg1, ...}), the names of parameters listed in call order.
Otherwise, None.
"""
assert isinstance(
contract_name.type, Contract
), f"contract_name is {contract_name} of type {type(contract_name)}"
assert is_valid_lvalue(lvalue)
super().__init__(names=names)
self._contract_name = contract_name
# todo create analyze to add the contract instance
self._lvalue = lvalue
self._callid = None # only used if gas/value != 0
self._call_value = None
self._call_salt = None
@property
def call_value(self):
return self._call_value
@call_value.setter
def call_value(self, v):
self._call_value = v
@property
def call_id(self):
return self._callid
@call_id.setter
def call_id(self, c):
self._callid = c
@property
def call_salt(self):
return self._call_salt
@call_salt.setter
def call_salt(self, s):
self._call_salt = s
@property
def contract_name(self) -> UserDefinedType:
return self._contract_name
@property
def read(self) -> List[Any]:
all_read = [self.call_salt, self.call_value] + self._unroll(self.arguments)
# remove None
return [x for x in all_read if x]
@property
def contract_created(self) -> Contract:
return self.contract_name.type
###################################################################################
###################################################################################
# region Analyses
###################################################################################
###################################################################################
def can_reenter(self, callstack: Optional[List[Union[Function, Variable]]] = None) -> bool:
"""
Must be called after slithIR analysis pass
For Solidity > 0.5, filter access to public variables and constant/pure/view
For call to this. check if the destination can re-enter
:param callstack: check for recursion
:return: bool
"""
callstack = [] if callstack is None else callstack
constructor = self.contract_created.constructor
if constructor is None:
return False
if constructor in callstack:
return False
callstack = callstack + [constructor]
return constructor.can_reenter(callstack)
def can_send_eth(self) -> bool:
"""
Must be called after slithIR analysis pass
:return: bool
"""
return self._call_value is not None
# endregion
def __str__(self) -> str:
options = ""
if self.call_value:
options = f"value:{self.call_value} "
if self.call_salt:
options += f"salt:{self.call_salt} "
args = [str(a) for a in self.arguments]
lvalue = self.lvalue
return f"{lvalue}({lvalue.type}) = new {self.contract_name}({','.join(args)}) {options}"
| NewContract |
python | ansible__ansible | test/lib/ansible_test/_internal/core_ci.py | {
"start": 2558,
"end": 3331
} | class ____(Resource):
"""Details needed to request cloud credentials from Ansible Core CI."""
platform: str
def as_tuple(self) -> tuple[str, str, str, str]:
"""Return the resource as a tuple of platform, version, architecture and provider."""
return self.platform, '', '', self.platform
def get_label(self) -> str:
"""Return a user-friendly label for this resource."""
return self.platform
@property
def persist(self) -> bool:
"""True if the resource is persistent, otherwise false."""
return False
def get_config(self, core_ci: AnsibleCoreCI) -> dict[str, object]:
"""Return the configuration for this resource."""
return dict(
type="cloud",
)
| CloudResource |
python | realpython__materials | python-class/mro.py | {
"start": 61,
"end": 125
} | class ____(A):
def method(self):
print("B.method()")
| B |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/buffer.py | {
"start": 1759,
"end": 2077
} | class ____(enum.Enum):
# Reward signals
REWARDS = "rewards"
VALUE_ESTIMATES = "value_estimates"
RETURNS = "returns"
ADVANTAGE = "advantage"
BASELINES = "baselines"
AgentBufferKey = Union[
BufferKey, Tuple[ObservationKeyPrefix, int], Tuple[RewardSignalKeyPrefix, str]
]
| RewardSignalKeyPrefix |
python | PrefectHQ__prefect | src/prefect/_result_records.py | {
"start": 605,
"end": 2044
} | class ____(BaseModel):
"""
Metadata for a result record.
"""
storage_key: Optional[str] = Field(
default=None
) # optional for backwards compatibility
expiration: Optional[DateTime] = Field(default=None)
serializer: Serializer = Field(default_factory=PickleSerializer)
prefect_version: str = Field(default=prefect.__version__)
storage_block_id: Optional[uuid.UUID] = Field(default=None)
def dump_bytes(self) -> bytes:
"""
Serialize the metadata to bytes.
Returns:
bytes: the serialized metadata
"""
return self.model_dump_json(serialize_as_any=True).encode()
@classmethod
def load_bytes(cls, data: bytes) -> "ResultRecordMetadata":
"""
Deserialize metadata from bytes.
Args:
data: the serialized metadata
Returns:
ResultRecordMetadata: the deserialized metadata
"""
return cls.model_validate_json(data)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, ResultRecordMetadata):
return False
return (
self.storage_key == other.storage_key
and self.expiration == other.expiration
and self.serializer == other.serializer
and self.prefect_version == other.prefect_version
and self.storage_block_id == other.storage_block_id
)
| ResultRecordMetadata |
python | pypa__build | src/build/_exceptions.py | {
"start": 175,
"end": 876
} | class ____(Exception):
"""
Exception raised when a backend operation fails.
"""
def __init__(
self,
exception: Exception,
description: str | None = None,
exc_info: tuple[type[BaseException], BaseException, types.TracebackType] | tuple[None, None, None] = (
None,
None,
None,
),
) -> None:
super().__init__()
self.exception = exception
self.exc_info = exc_info
self._description = description
def __str__(self) -> str:
if self._description:
return self._description
return f'Backend operation failed: {self.exception!r}'
| BuildBackendException |
python | pypa__warehouse | warehouse/oidc/services.py | {
"start": 3549,
"end": 14031
} | class ____:
def __init__(
self,
session: Session,
publisher: str,
issuer_url: str,
audience: str,
cache_url: str,
metrics: IMetricsService,
):
self.db = session
self.publisher = publisher
self.issuer_url = issuer_url
self.audience = audience
self.cache_url = cache_url
self.metrics = metrics
def _store_keyset(self, issuer_url: str, keys: dict) -> None:
"""
Store the given keyset for the given **issuer**, setting the timeout key
in the process.
"""
_publisher_jwk_key = f"/warehouse/oidc/jwks/{issuer_url}"
_publisher_timeout_key = f"{_publisher_jwk_key}/timeout"
with redis.StrictRedis.from_url(self.cache_url) as r:
r.set(_publisher_jwk_key, json.dumps(keys))
r.setex(_publisher_timeout_key, 60, "placeholder")
def _get_keyset(self, issuer_url: str) -> tuple[dict[str, dict], bool]:
"""
Return the cached keyset for the given issuer, or an empty
keyset if no keys are currently cached.
"""
_publisher_jwk_key = f"/warehouse/oidc/jwks/{issuer_url}"
_publisher_timeout_key = f"{_publisher_jwk_key}/timeout"
with redis.StrictRedis.from_url(self.cache_url) as r:
keys = r.get(_publisher_jwk_key)
timeout = bool(r.exists(_publisher_timeout_key))
if keys is not None:
return json.loads(keys), timeout
else:
return {}, timeout
def _refresh_keyset(self, issuer_url: str) -> dict[str, dict]:
"""
Attempt to refresh the keyset from the OIDC issuer, assuming no
timeout is in effect.
Returns the refreshed keyset, or the cached keyset if a timeout is
in effect.
Returns the cached keyset on any publisher access or format errors.
"""
# Fast path: we're in a cooldown from a previous refresh.
keys, timeout = self._get_keyset(issuer_url=issuer_url)
if timeout:
self.metrics.increment(
"warehouse.oidc.refresh_keyset.timeout",
tags=[f"publisher:{self.publisher}", f"issuer_url:{issuer_url}"],
)
return keys
oidc_url = f"{issuer_url}/.well-known/openid-configuration"
resp = requests.get(oidc_url, timeout=5)
# For whatever reason, an OIDC publisher's configuration URL might be
# offline. We don't want to completely explode here, since other
# publishers might still be online (and need updating), so we spit
# out an error and return None instead of raising.
if not resp.ok:
sentry_sdk.capture_message(
f"OIDC publisher {self.publisher} failed to return configuration: "
f"{oidc_url}"
)
return keys
oidc_conf = resp.json()
jwks_url = oidc_conf.get("jwks_uri")
# A valid OIDC configuration MUST have a `jwks_uri`, but we
# defend against its absence anyways.
if jwks_url is None:
sentry_sdk.capture_message(
f"OIDC publisher {self.publisher} is returning malformed "
"configuration (no jwks_uri)"
)
return keys
resp = requests.get(jwks_url, timeout=5)
# Same reasoning as above.
if not resp.ok:
sentry_sdk.capture_message(
f"OIDC publisher {self.publisher} failed to return JWKS JSON: "
f"{jwks_url}"
)
return keys
jwks_conf = resp.json()
new_keys = jwks_conf.get("keys")
# Another sanity test: an OIDC publisher should never return an empty
# keyset, but there's nothing stopping them from doing so. We don't
# want to cache an empty keyset just in case it's a short-lived error,
# so we check here, error, and return the current cache instead.
if not new_keys:
sentry_sdk.capture_message(
f"OIDC publisher {self.publisher} returned JWKS JSON but no keys"
)
return keys
keys = {key["kid"]: key for key in new_keys}
self._store_keyset(issuer_url, keys)
return keys
def _get_key(self, key_id: str, issuer_url: str) -> jwt.PyJWK:
"""
Return a JWK for the given key ID, or None if the key can't be found
in this publisher's keyset.
"""
keyset, _ = self._get_keyset(issuer_url)
if key_id not in keyset:
keyset = self._refresh_keyset(issuer_url)
if key_id not in keyset:
self.metrics.increment(
"warehouse.oidc.get_key.error",
tags=[
f"publisher:{self.publisher}",
f"key_id:{key_id}",
f"issuer_url:{self.issuer_url}",
],
)
raise jwt.PyJWTError(
f"Key ID {key_id!r} not found for issuer {issuer_url!r}"
)
return jwt.PyJWK(keyset[key_id])
def _get_key_for_token(self, token, issuer_url: str) -> jwt.PyJWK:
"""
Return a JWK suitable for verifying the given JWT.
The JWT is not verified at this point, and this step happens
prior to any verification.
"""
unverified_header = jwt.get_unverified_header(token)
return self._get_key(unverified_header["kid"], issuer_url)
def jwt_identifier_exists(self, jti: str) -> bool:
"""
Check if a JWT Token Identifier has already been used.
"""
with redis.StrictRedis.from_url(self.cache_url) as r:
return bool(r.exists(f"/warehouse/oidc/{self.issuer_url}/{jti}"))
def store_jwt_identifier(self, jti: str, expiration: int) -> None:
"""
Store the JTI with its expiration date if the key does not exist.
"""
with redis.StrictRedis.from_url(self.cache_url) as r:
# Defensive: to prevent races, we expire the JTI slightly after
# the token expiration date. Thus, the lock will not be
# released before the token invalidation.
r.set(
f"/warehouse/oidc/{self.issuer_url}/{jti}",
exat=expiration + 5,
value="", # empty value to lower memory usage
nx=True,
)
def verify_jwt_signature(
self, unverified_token: str, issuer_url: str
) -> SignedClaims | None:
"""
Verify the signature of the given JWT, returning the signed claims.
"""
try:
key = self._get_key_for_token(unverified_token, issuer_url)
except jwt.PyJWTError:
# The user might feed us an entirely nonsense JWT, e.g. one
# with missing components.
self.metrics.increment(
"warehouse.oidc.verify_jwt_signature.malformed_jwt",
tags=[f"publisher:{self.publisher}", f"issuer_url:{issuer_url}"],
)
return None
try:
# NOTE: Many of the keyword arguments here are defaults, but we
# set them explicitly to assert the intended verification behavior.
signed_payload = jwt.decode(
unverified_token,
key=key,
algorithms=["RS256"],
options=dict(
verify_signature=True,
# "require" only checks for the presence of these claims, not
# their validity. Each has a corresponding "verify_" kwarg
# that enforces their actual validity.
require=["iss", "iat", "exp", "aud"],
verify_iss=True,
verify_iat=True,
verify_exp=True,
verify_aud=True,
# We don't require the nbf claim, but verify it if present
verify_nbf=True,
# We don't accept JWTs with multiple audiences; we
# want to be the ONLY audience listed.
strict_aud=True,
),
issuer=issuer_url,
audience=self.audience,
leeway=30,
)
return SignedClaims(signed_payload)
except Exception as e:
self.metrics.increment(
"warehouse.oidc.verify_jwt_signature.invalid_signature",
tags=[f"publisher:{self.publisher}", f"issuer_url:{issuer_url}"],
)
if not isinstance(e, jwt.PyJWTError):
with sentry_sdk.new_scope() as scope:
scope.fingerprint = [e]
# We expect pyjwt to only raise subclasses of PyJWTError, but
# we can't enforce this. Other exceptions indicate an abstraction
# leak, so we log them for upstream reporting.
sentry_sdk.capture_message(f"JWT backend raised generic error: {e}")
return None
def find_publisher(
self, signed_claims: SignedClaims, *, pending: bool = False
) -> OIDCPublisher | PendingOIDCPublisher:
"""Returns a publisher for the given claims, or raises an error."""
metrics_tags = [
f"publisher:{self.publisher}",
f"issuer_url:{signed_claims['iss']}",
]
self.metrics.increment(
"warehouse.oidc.find_publisher.attempt",
tags=metrics_tags,
)
try:
publisher = find_publisher_by_issuer(
self.db, self.issuer_url, signed_claims, pending=pending
)
publisher.verify_claims(signed_claims, self)
self.metrics.increment(
"warehouse.oidc.find_publisher.ok",
tags=metrics_tags,
)
return publisher
except InvalidPublisherError as e:
self.metrics.increment(
"warehouse.oidc.find_publisher.publisher_not_found",
tags=metrics_tags,
)
raise e
def reify_pending_publisher(
self, pending_publisher: PendingOIDCPublisher, project: Project
) -> OIDCPublisher:
new_publisher = pending_publisher.reify(self.db)
project.oidc_publishers.append(new_publisher)
return new_publisher
| OIDCPublisherService |
python | davidhalter__jedi | test/completion/pep0484_generic_passthroughs.py | {
"start": 3582,
"end": 4164
} | class ____(List):
def get_first(self):
return self[0]
#? str()
CustomList[str]()[0]
#? str()
CustomList[str]().get_first()
#? str()
typed_fully_generic_passthrough(CustomList[str]())[0]
#?
typed_list_generic_passthrough(CustomList[str])[0]
def typed_bound_type_implicit_any_generic_passthrough(x: TType) -> TType:
#? Type()
x
return x
def typed_bound_type_any_generic_passthrough(x: TTypeAny) -> TTypeAny:
# Should be Type(), though we don't get the handling of the nested argument
# to `Type[...]` quite right here.
x
return x
| CustomList |
python | getsentry__sentry | src/sentry/sentry_apps/api/parsers/sentry_app_installation.py | {
"start": 166,
"end": 601
} | class ____(Serializer):
status = serializers.CharField()
def validate_status(self, new_status):
# can only set status to installed
if new_status != SentryAppInstallationStatus.INSTALLED_STR:
raise ValidationError(
f"Invalid value '{new_status}' for status. Valid values: '{SentryAppInstallationStatus.INSTALLED_STR}'"
)
return new_status
| SentryAppInstallationParser |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/xent_op_d9m_test.py | {
"start": 2830,
"end": 8291
} | class ____(xent_op_test_base.XentOpTestBase):
"""Test that SoftmaxCrossEntropyWithLogits operates reproducibly.
Inheriting from xent_op_test_base.XentTestBase ensures that regular op
functionality is correct when the deterministic code-path is selected.
Note that because nn_ops.softmax_cross_entropy_with_logits calls
nn_ops.cross_entropy_with_logits_v2, the focus of testing is on the
former in order to test both.
"""
def _randomFloats(self, shape, dtype, normalized_rows=False):
a = (2 * np.random.random_sample(shape) - 1).astype(dtype)
if normalized_rows:
def normalize(row):
return row / row.sum()
a = np.apply_along_axis(normalize, 1, a)
return constant_op.constant(a)
def _generateInputs(self, dtype, seed=123, forward_not_backward=False):
batch_size = 1024
if forward_not_backward and dtype == np.float16:
# Generate more noise to expose the internal float32 implementation.
# This is associated with significantly slower test cases (esp. on CPU).
classes_count = 20000
else:
classes_count = 3000
shape = (batch_size, classes_count)
np.random.seed(seed)
labels = self._randomFloats(shape, dtype, normalized_rows=True)
logits = self._randomFloats(shape, dtype)
return labels, logits
@test_util.run_in_graph_and_eager_modes
def testForward(self):
with self.cached_session():
for dtype in [np.float16, np.float32, np.float64, \
dtypes.bfloat16.as_numpy_dtype]:
for trial in range(5):
seed = 123 + trial
labels, logits = self._generateInputs(
dtype, seed=seed, forward_not_backward=True)
result_a = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
result_b = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
self.assertAllEqual(result_a, result_b)
@test_util.run_in_graph_and_eager_modes
def testBackward(self):
with self.cached_session():
for dtype in [np.float16, np.float32, np.float64, \
dtypes.bfloat16.as_numpy_dtype]:
labels, logits = self._generateInputs(dtype, seed=456)
output_shape = labels.shape[0]
def gradients(seed):
np.random.seed(seed)
upstream_gradients = self._randomFloats(output_shape, dtype)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(labels)
tape.watch(logits)
op_output = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
gradient_injector_output = op_output * upstream_gradients
return tape.gradient(gradient_injector_output, [labels, logits])
for trial in range(5):
seed = 456 + trial
labels_grad_a, logits_grad_a = gradients(seed=seed)
labels_grad_b, logits_grad_b = gradients(seed=seed)
self.assertAllEqual(labels_grad_a, labels_grad_b)
self.assertAllEqual(logits_grad_a, logits_grad_b)
# Modifications to the parent class (xent_op_test_base.XentOpTestBase) follow
def testSingleClass(self):
"""Modify testing of gradient for single-class case.
The deterministic implementation does not produce the gradients expected by
the original test (for the nondeterministic functionality) when the labels
vector is not a valid probability distribution.
labels: [[-1.], [0.], [1.], [1.]]
logits: [[1.], [-1.], [0.], [1.]]
nondeterministic deterministic
dloss/dlogits: [[2.0], [1.0], [0.0], [0.0]] [[0.0], [0.0], [0.0], [0.0]]
Note that only the second two label vectors are valid probability
distributions (as required by the API) and that the gradient matches for
those cases.
TODO(duncanriach): Further investigate the source of the difference in
the gradients for this case.
"""
self._testSingleClass(expected_gradient=[[0.0], [0.0], [0.0], [0.0]])
def testLabelsBroadcast(self):
"""Modify testing of gradient for labels-broadcast case.
The deterministic implementation does not produce the gradients expected by
the original test (for the nondeterministic functionality) when the labels
vector (after broadcasting) is not a valid probability distribution.
labels: [[0.], [2.], [0.25]]
logits: [[1., 1., 1., 1.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]]
dloss/dlogits (nondeterministic):
[[ 0.25 , 0.25 , 0.25 , 0.25 ],
[-1.968, -1.913, -1.763, -1.355],
[-0.218, -0.163, -0.013, 0.394]]
dloss/dlogits (determinsitic):
[[ 0. , 0. , 0. , 0. ],
[-1.743, -1.303, -0.105, 3.150],
[-0.218, -0.163, -0.013, 0.394]]
Note that neither of the first two broadcast label vectors is a valid
probability distribution (as required by the API) and that these are the
cases that yield different gradients for nondeterministic vs determinsitic
implementations.
TODO(duncanriach): Further investigate the source of the difference in
the gradient for this case.
"""
self._testLabelsBroadcast(uniform_labels_gradient=[[
0., 0., 0., 0.
], [-1.743, -1.303, -0.105, 3.150], [-0.218, -0.163, -0.013, 0.394]])
if __name__ == "__main__":
config.enable_op_determinism()
test.main()
| XentOpDeterministicTest |
python | numba__numba | numba/cuda/tests/cudapy/test_freevar.py | {
"start": 99,
"end": 745
} | class ____(CUDATestCase):
def test_freevar(self):
"""Make sure we can compile the following kernel with freevar reference
in arguments to shared.array
"""
from numba import float32
size = 1024
nbtype = float32
@cuda.jit("(float32[::1], intp)")
def foo(A, i):
"Dummy function"
sdata = cuda.shared.array(size, # size is freevar
dtype=nbtype) # nbtype is freevar
A[i] = sdata[i]
A = np.arange(2, dtype="float32")
foo[1, 1](A, 0)
if __name__ == '__main__':
unittest.main()
| TestFreeVar |
python | keras-team__keras | keras/src/backend/common/thread_safe_test.py | {
"start": 127,
"end": 908
} | class ____(testing.TestCase):
def test_is_thread_safe(self):
if backend.IS_THREAD_SAFE:
executor = concurrent.futures.ThreadPoolExecutor()
def sum(x, axis):
return ops.sum(x, axis=axis)
futures = []
for i in range(10000):
futures.clear()
x = ops.convert_to_tensor(np.random.rand(100, 100))
futures.append(executor.submit(sum, x, 1))
x = ops.convert_to_tensor(np.random.rand(100))
futures.append(executor.submit(sum, x, 0))
concurrent.futures.wait(
futures, return_when=concurrent.futures.ALL_COMPLETED
)
[future.result() for future in futures]
| TestThreadSafe |
python | walkccc__LeetCode | solutions/3273. Minimum Amount of Damage Dealt to Bob/3273.py | {
"start": 60,
"end": 110
} | class ____:
damage: int
timeTakenDown: int
| Enemy |
python | ZoranPandovski__al-go-rithms | data_structures/Tree/Binary-tree/left-view.py | {
"start": 2505,
"end": 2623
} | class ____:
def __init__(self,val):
self.data = val
self.left = None
self.right = None
''' | Node |
python | pypa__build | tests/test_projectbuilder.py | {
"start": 1962,
"end": 2292
} | class ____(MockDistribution):
def read_text(self, filename):
if filename == 'METADATA':
return textwrap.dedent(
"""
Metadata-Version: 2.2
Name: requireless_dep
Version: 1.0.0
"""
).strip()
| RequirelessMockDistribution |
python | django__django | tests/template_tests/filter_tests/test_linebreaksbr.py | {
"start": 170,
"end": 1037
} | class ____(SimpleTestCase):
"""
The contents in "linebreaksbr" are escaped according to the current
autoescape setting.
"""
@setup({"linebreaksbr01": "{{ a|linebreaksbr }} {{ b|linebreaksbr }}"})
def test_linebreaksbr01(self):
output = self.engine.render_to_string(
"linebreaksbr01", {"a": "x&\ny", "b": mark_safe("x&\ny")}
)
self.assertEqual(output, "x&<br>y x&<br>y")
@setup(
{
"linebreaksbr02": (
"{% autoescape off %}{{ a|linebreaksbr }} {{ b|linebreaksbr }}"
"{% endautoescape %}"
)
}
)
def test_linebreaksbr02(self):
output = self.engine.render_to_string(
"linebreaksbr02", {"a": "x&\ny", "b": mark_safe("x&\ny")}
)
self.assertEqual(output, "x&<br>y x&<br>y")
| LinebreaksbrTests |
python | tensorflow__tensorflow | tensorflow/python/framework/composite_tensor_test.py | {
"start": 2967,
"end": 3143
} | class ____(CT):
_type_spec_class = CTSpec2
# CompositeTensors with a common supertype are considered to be the same
# structure by tf.nest (e.g. for assert_same_structure).
| CT2 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor26.py | {
"start": 256,
"end": 525
} | class ____(Generic[T, U]):
def __init__(self, t: T, u: U):
pass
def test1(self, ts: list[T], us: list[U]) -> None:
# This should generate an error.
x1: Test1[U, T] = Test1(us, ts)
x2: Test1[list[U], list[T]] = Test1(us, ts)
| Test1 |
python | numba__numba | numba/tests/doc_examples/test_parallel_chunksize.py | {
"start": 348,
"end": 4176
} | class ____(TestCase):
_numba_parallel_test_ = False
def setUp(self):
set_parallel_chunksize(0)
def tearDown(self):
set_parallel_chunksize(0)
def test_unbalanced_example(self):
with captured_stdout():
# magictoken.ex_unbalanced.begin
from numba import (njit,
prange,
)
import numpy as np
@njit(parallel=True)
def func1():
n = 100
vals = np.empty(n)
# The work in each iteration of the following prange
# loop is proportional to its index.
for i in prange(n):
cur = i + 1
for j in range(i):
if cur % 2 == 0:
cur //= 2
else:
cur = cur * 3 + 1
vals[i] = cur
return vals
result = func1()
# magictoken.ex_unbalanced.end
self.assertPreciseEqual(result, func1.py_func())
def test_chunksize_manual(self):
with captured_stdout():
# magictoken.ex_chunksize_manual.begin
from numba import (njit,
prange,
set_parallel_chunksize,
get_parallel_chunksize,
)
@njit(parallel=True)
def func1(n):
acc = 0
print(get_parallel_chunksize()) # Will print 4.
for i in prange(n):
print(get_parallel_chunksize()) # Will print 0.
acc += i
print(get_parallel_chunksize()) # Will print 4.
return acc
@njit(parallel=True)
def func2(n):
acc = 0
# This version gets the previous chunksize explicitly.
old_chunksize = get_parallel_chunksize()
set_parallel_chunksize(8)
for i in prange(n):
acc += i
set_parallel_chunksize(old_chunksize)
return acc
# This version saves the previous chunksize as returned
# by set_parallel_chunksize.
old_chunksize = set_parallel_chunksize(4)
result1 = func1(12)
result2 = func2(12)
result3 = func1(12)
set_parallel_chunksize(old_chunksize)
# magictoken.ex_chunksize_manual.end
self.assertPreciseEqual(result1, func1.py_func(12))
self.assertPreciseEqual(result2, func2.py_func(12))
self.assertPreciseEqual(result3, func1.py_func(12))
def test_chunksize_with(self):
with captured_stdout():
# magictoken.ex_chunksize_with.begin
from numba import njit, prange, parallel_chunksize
@njit(parallel=True)
def func1(n):
acc = 0
for i in prange(n):
acc += i
return acc
@njit(parallel=True)
def func2(n):
acc = 0
with parallel_chunksize(8):
for i in prange(n):
acc += i
return acc
with parallel_chunksize(4):
result1 = func1(12)
result2 = func2(12)
result3 = func1(12)
# magictoken.ex_chunksize_with.end
self.assertPreciseEqual(result1, func1.py_func(12))
self.assertPreciseEqual(result2, func2.py_func(12))
self.assertPreciseEqual(result3, func1.py_func(12))
if __name__ == '__main__':
unittest.main()
| ChunksizeExamplesTest |
python | huggingface__transformers | src/transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py | {
"start": 28501,
"end": 28656
} | class ____(Wav2Vec2ForPreTraining):
def __init__(self, config: Wav2Vec2ConformerConfig):
super().__init__(config)
| Wav2Vec2ConformerForPreTraining |
python | aio-libs__aiohttp | aiohttp/tracing.py | {
"start": 7718,
"end": 7925
} | class ____:
"""Parameters sent by the `on_request_end` signal"""
method: str
url: URL
headers: "CIMultiDict[str]"
response: ClientResponse
@frozen_dataclass_decorator
| TraceRequestEndParams |
python | spyder-ide__spyder | spyder/plugins/completion/api.py | {
"start": 20970,
"end": 21176
} | class ____:
"""LSP text document saving action causes."""
MANUAL = 1
AFTER_DELAY = 2
FOCUS_OUT = 3
# ----------------------- INTERNAL CONSTANTS ------------------------
| TextDocumentSaveReason |
python | kennethreitz__tablib | src/tablib/packages/dbfpy/dbfnew.py | {
"start": 861,
"end": 2595
} | class ____:
"""Field definition.
This is a simple structure, which contains ``name``, ``type``,
``len``, ``dec`` and ``cls`` fields.
Objects also implement get/setitem magic functions, so fields
could be accessed via sequence interface, where 'name' has
index 0, 'type' index 1, 'len' index 2, 'dec' index 3 and
'cls' could be located at index 4.
"""
__slots__ = "name", "type", "len", "dec", "cls"
# WARNING: be attentive - dictionaries are mutable!
FLD_TYPES = {
# type: (cls, len)
"C": (DbfCharacterFieldDef, None),
"N": (DbfNumericFieldDef, None),
"L": (DbfLogicalFieldDef, 1),
# FIXME: support memos
# "M": (DbfMemoFieldDef),
"D": (DbfDateFieldDef, 8),
# FIXME: I'm not sure length should be 14 characters!
# but temporary I use it, cuz date is 8 characters
# and time 6 (hhmmss)
"T": (DbfDateTimeFieldDef, 14),
}
def __init__(self, name, type, len=None, dec=0):
_cls, _len = self.FLD_TYPES[type]
if _len is None:
if len is None:
raise ValueError("Field length must be defined")
_len = len
self.name = name
self.type = type
self.len = _len
self.dec = dec
self.cls = _cls
def getDbfField(self):
"Return `DbfFieldDef` instance from the current definition."
return self.cls(self.name, self.len, self.dec)
def appendToHeader(self, dbfh):
"""Create a `DbfFieldDef` instance and append it to the dbf header.
Arguments:
dbfh: `DbfHeader` instance.
"""
_dbff = self.getDbfField()
dbfh.addField(_dbff)
| _FieldDefinition |
python | pandas-dev__pandas | asv_bench/benchmarks/reshape.py | {
"start": 231,
"end": 684
} | class ____:
params = ["float64", "Float64"]
param_names = ["dtype"]
def setup(self, dtype):
self.df = DataFrame(
np.random.randn(100_000, 3), columns=["A", "B", "C"], dtype=dtype
)
self.df["id1"] = pd.Series(np.random.randint(0, 10, 10000))
self.df["id2"] = pd.Series(np.random.randint(100, 1000, 10000))
def time_melt_dataframe(self, dtype):
melt(self.df, id_vars=["id1", "id2"])
| Melt |
python | django__django | tests/utils_tests/test_lazyobject.py | {
"start": 9537,
"end": 12312
} | class ____(LazyObjectTestCase):
# By inheriting from LazyObjectTestCase and redefining the lazy_wrap()
# method which all testcases use, we get to make sure all behaviors
# tested in the parent testcase also apply to SimpleLazyObject.
def lazy_wrap(self, wrapped_object):
return SimpleLazyObject(lambda: wrapped_object)
def test_repr(self):
# First, for an unevaluated SimpleLazyObject
obj = self.lazy_wrap(42)
# __repr__ contains __repr__ of setup function and does not evaluate
# the SimpleLazyObject
self.assertRegex(repr(obj), "^<SimpleLazyObject:")
self.assertIs(obj._wrapped, empty) # make sure evaluation hasn't been triggered
self.assertEqual(obj, 42) # evaluate the lazy object
self.assertIsInstance(obj._wrapped, int)
self.assertEqual(repr(obj), "<SimpleLazyObject: 42>")
def test_add(self):
obj1 = self.lazy_wrap(1)
self.assertEqual(obj1 + 1, 2)
obj2 = self.lazy_wrap(2)
self.assertEqual(obj2 + obj1, 3)
self.assertEqual(obj1 + obj2, 3)
def test_radd(self):
obj1 = self.lazy_wrap(1)
self.assertEqual(1 + obj1, 2)
def test_trace(self):
# See ticket #19456
old_trace_func = sys.gettrace()
try:
def trace_func(frame, event, arg):
frame.f_locals["self"].__class__
if old_trace_func is not None:
old_trace_func(frame, event, arg)
sys.settrace(trace_func)
self.lazy_wrap(None)
finally:
sys.settrace(old_trace_func)
def test_none(self):
i = [0]
def f():
i[0] += 1
return None
x = SimpleLazyObject(f)
self.assertEqual(str(x), "None")
self.assertEqual(i, [1])
self.assertEqual(str(x), "None")
self.assertEqual(i, [1])
def test_dict(self):
# See ticket #18447
lazydict = SimpleLazyObject(lambda: {"one": 1})
self.assertEqual(lazydict["one"], 1)
lazydict["one"] = -1
self.assertEqual(lazydict["one"], -1)
self.assertIn("one", lazydict)
self.assertNotIn("two", lazydict)
self.assertEqual(len(lazydict), 1)
del lazydict["one"]
with self.assertRaises(KeyError):
lazydict["one"]
def test_list_set(self):
lazy_list = SimpleLazyObject(lambda: [1, 2, 3, 4, 5])
lazy_set = SimpleLazyObject(lambda: {1, 2, 3, 4})
self.assertIn(1, lazy_list)
self.assertIn(1, lazy_set)
self.assertNotIn(6, lazy_list)
self.assertNotIn(6, lazy_set)
self.assertEqual(len(lazy_list), 5)
self.assertEqual(len(lazy_set), 4)
| SimpleLazyObjectTestCase |
python | huggingface__transformers | tests/models/blip_2/test_modeling_blip_2.py | {
"start": 52347,
"end": 54762
} | class ____:
def __init__(self, parent, vision_kwargs=None, qformer_kwargs=None, is_training=True):
if vision_kwargs is None:
vision_kwargs = {}
if qformer_kwargs is None:
qformer_kwargs = {"use_qformer_text_input": True}
self.parent = parent
self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs)
self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs)
self.is_training = is_training
self.batch_size = self.vision_model_tester.batch_size # need bs for batching_equivalence test
def get_config(self):
return Blip2Config(
vision_config=self.vision_model_tester.get_config(),
qformer_config=self.qformer_model_tester.get_config(),
)
def prepare_config_and_inputs(self):
_, input_ids, attention_mask = self.qformer_model_tester.prepare_config_and_inputs()
_, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, input_ids, attention_mask, pixel_values
def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):
model = Blip2ForImageTextRetrieval(config).to(torch_device).eval()
with torch.no_grad():
result = model(pixel_values, input_ids, attention_mask, use_image_text_matching_head=True)
self.parent.assertEqual(
result.logits_per_image.shape,
(self.vision_model_tester.batch_size, 2),
)
with torch.no_grad():
result = model(pixel_values, input_ids, attention_mask)
self.parent.assertEqual(
result.logits_per_image.shape,
(self.vision_model_tester.batch_size, self.qformer_model_tester.batch_size),
)
self.parent.assertEqual(
result.logits_per_text.shape, (self.qformer_model_tester.batch_size, self.vision_model_tester.batch_size)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"pixel_values": pixel_values,
}
return config, inputs_dict
@require_torch
| Blip2TextRetrievalModelTester |
python | google__jax | tests/pallas/pallas_test.py | {
"start": 32904,
"end": 42702
} | class ____(PallasBaseTest):
def test_pallas_call_kernel_args_mismatch(self):
a = np.arange(256, dtype=np.int32)
f = self.pallas_call(lambda x_ref: None, # Missing o_ref
out_shape=a)
with self.assertRaisesRegex(
TypeError,
"takes 1 positional argument but 2 were given"):
f(a)
@parameterized.named_parameters(
("array", 0),
("empty_tuple", ())
)
def test_pallas_call_error_kernel_returns_something(self, returns):
a = np.arange(256, dtype=np.int32)
# The kernel should not return anything
def my_kernel(x_ref, o1_ref, o2_ref):
return returns
f = self.pallas_call(my_kernel,
out_shape=(a, a))
with self.assertRaisesRegex(
ValueError,
"The kernel function .* my_kernel at .*pallas_test.py:.* should return None"):
f(a)
def test_pallas_call_kernel_with_no_signature_returns_something(self):
a = np.arange(256, dtype=np.int32)
f = self.pallas_call(lambda *args: 0, # Returns 0
out_shape=a)
with self.assertRaisesRegex(
ValueError,
"The kernel function .* at .*pallas_test.py:.* should return None"):
f(a)
def test_pallas_call_in_specs_not_a_sequence(self):
a = np.arange(256, dtype=np.int32)
with self.assertRaisesRegex(
ValueError,
"`in_specs` must be a tuple or a list"):
_ = self.pallas_call(lambda x_ref, o1_ref: None,
out_shape=a,
in_specs=pl.BlockSpec((4,), lambda: 0))
def test_pallas_call_in_specs_mismatch_inputs(self):
a = np.arange(256, dtype=np.int32)
f = self.pallas_call(lambda x_ref, o1_ref: None,
out_shape=a,
in_specs=[pl.BlockSpec((4,), lambda: 0),
pl.BlockSpec((4,), lambda: 0)])
with self.assertRaisesRegex(
ValueError,
re.compile("Pytree for `in_specs` and `inputs` do not match. "
"There are 1 mismatches, including:"
".* at \\[1\\], `in_specs` is a pytree leaf but "
"`inputs` is a.*", re.DOTALL)):
f(a, dict(a=a))
def test_pallas_call_index_map_wrong_number_of_arguments(self):
a = np.arange(256, dtype=np.int32)
f = self.pallas_call(lambda x_ref, o1_ref: None,
out_shape=a,
in_specs=[pl.BlockSpec((4,), lambda i, j: 0)])
with self.assertRaisesRegex(
TypeError,
"missing 2 required positional arguments: 'i' and 'j'"):
f(a)
def test_pallas_call_index_map_wrong_number_of_results(self):
a = np.arange(256, dtype=np.int32)
def my_index_map():
return 0, 0
f = self.pallas_call(lambda x_ref, o_ref: None,
out_shape=a,
in_specs=[pl.BlockSpec((4,), my_index_map)])
with self.assertRaisesRegex(
ValueError,
"Index map function my_index_map at .*pallas_test.py.* "
"for args\\[0\\] must return 1 values to match .*"
"Currently returning 2 values."):
f(a)
def test_pallas_call_index_map_pytree_input_wrong_number_of_results(self):
a = np.arange(256, dtype=np.int32)
def my_index_map():
return 0, 0
f = self.pallas_call(lambda x_ref, o_ref: None,
out_shape=a,
in_specs=[dict(one=pl.BlockSpec((4,), my_index_map),
two=pl.BlockSpec((8,), my_index_map))])
with self.assertRaisesRegex(
ValueError,
"Index map function my_index_map at .*pallas_test.py.* "
"for args\\[0\\]\\['one'\\] must return 1 values to match .*"
"Currently returning 2 values."):
f(dict(one=a, two=a))
def test_pallas_call_index_map_wrong_return_type(self):
a = np.arange(256, dtype=np.int32)
def my_index_map(i):
return 5.
f = self.pallas_call(lambda x_ref, o_ref: None,
out_shape=a,
grid=(1,),
in_specs=[pl.BlockSpec((4,), my_index_map)])
with self.assertRaisesRegex(
ValueError,
"Index map function my_index_map at .*pallas_test.py.* "
"for args\\[0\\] must return integer scalars. Output\\[0\\] has "
"type .*float"):
f(a)
def test_pallas_call_index_map_wrong_return_shape(self):
a = np.arange(256, dtype=np.int32)
def my_index_map(i):
return jnp.arange(4, dtype=np.int32)
f = self.pallas_call(lambda x_ref, o_ref: None,
out_shape=a,
grid=(1,),
in_specs=[pl.BlockSpec((4,), my_index_map)])
with self.assertRaisesRegex(
ValueError,
"Index map function my_index_map at .*pallas_test.py.* "
"for args\\[0\\] must return integer scalars. Output\\[0\\] has "
"type .*int32\\[4\\]"):
f(a)
def test_pallas_call_index_map_captures_consts(self):
if config.use_simplified_jaxpr_constants.value:
self.skipTest("TODO: decide if we want to keep these errors")
a = np.arange(256, dtype=np.int32)
index_map_result = np.array([0], dtype=np.int32)
f = self.pallas_call(lambda x_ref, o1_ref: None,
out_shape=a,
grid=(1,),
in_specs=[pl.BlockSpec((4,),
lambda i: jnp.array(index_map_result)[i])])
with self.assertRaisesRegex(
ValueError,
"Index map function .* for args\\[0\\] must not capture constants:"):
f(a)
def test_pallas_call_out_specs_mismatch_shape(self):
a = np.arange(256, dtype=np.int32)
f = self.pallas_call(lambda x_ref, o1_ref: None,
out_shape=[a, a],
out_specs=[pl.BlockSpec((6,), lambda i: i)])
with self.assertRaisesRegex(
ValueError,
re.compile("Pytree for `out_specs` and `out_shape` do not match. There are 1 mismatches, including:"
".* `out_specs` is a tuple of length 1 but `out_shape` is a tuple of length 2.*", re.DOTALL)):
f(a)
def test_pallas_call_block_shape_ndim_mismatch(self):
a = np.arange(256, dtype=np.int32)
f = self.pallas_call(lambda x_ref, o1_ref: None,
out_shape=[a],
in_specs=[pl.BlockSpec((1, 1), lambda: (0, 0))])
with self.assertRaisesRegex(
ValueError,
"Block shape for args\\[0\\] .* must have the same number of dimensions as the "
"array shape"):
f(a)
f = self.pallas_call(lambda x_ref, o1_ref: None,
out_shape=[a],
out_specs=[pl.BlockSpec((1, 1), lambda: 0)])
with self.assertRaisesRegex(
ValueError,
"Block shape for outputs\\[0\\] .* must have the same number of dimensions as the "
"array shape"):
f(a)
def test_pallas_call_input_output_aliases_errors(self):
x = np.arange(8 * 128, dtype=np.int32).reshape((8, 128))
with self.assertRaisesRegex(
ValueError,
"input_output_aliases contains the mapping '2:0' with input index 2 "
"outside the range .*"):
self.pallas_call(lambda x_ref, y_ref, o1_ref: None,
out_shape=[x],
input_output_aliases={2: 0})(x, x)
with self.assertRaisesRegex(
ValueError,
"input_output_aliases contains the mapping '1:1' with output index 1 "
"outside the range .*"):
self.pallas_call(lambda x_ref, y_ref, o1_ref: None,
out_shape=[x],
input_output_aliases={1: 1})(x, x)
y = np.concatenate([x, x], axis=0)
with self.assertRaisesRegex(
ValueError,
"input_output_aliases contains the mapping '1:0' referring to "
"input\\[1\\] with abstract value .*int32\\[16,128\\].* "
"output\\[0\\] with a different abstract value .*int32\\[8,128\\]"):
self.pallas_call(lambda x_ref, y_ref, o1_ref: None,
out_shape=[x],
input_output_aliases={1: 0})(x, y)
with self.assertRaisesRegex(
ValueError,
"input_output_aliases contains the mapping '1:0' referring to "
"input\\[1\\] with abstract value .*int32\\[8,128\\].* "
"output\\[0\\] with a different abstract value .*float32\\[8,128\\]"):
self.pallas_call(lambda x_ref, y_ref, o1_ref: None,
out_shape=[jax.ShapeDtypeStruct(x.shape, jnp.float32)],
input_output_aliases={1: 0})(x, x)
def test_pallas_error_for_ref_to_jax(self):
m, n, k = 8, 16, 32
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((m, n), jnp.float32),
)
def dot_general_kernel(x_ref, y_ref, o_ref):
o_ref[...] = jax.lax.dot_general(x_ref, y_ref, (((2), (1)), ((1,), (2,))))
key1, key2 = random.split(random.key(0))
x = random.normal(key1, (m, k), dtype=jnp.float32)
y = random.normal(key2, (k, n), dtype=jnp.float32)
with self.assertRaisesRegex(
ValueError,
r"Attempting to pass a Ref"
r" Ref{float32\[8,32\]}"
r" to a primitive: dot_general -- did you forget to unpack \(\[...\]\)"
r" the ref?",
):
dot_general_kernel(x, y)
def test_pallas_error_for_writing_ref_to_ref(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
)
def kernel(x_ref, o_ref):
o_ref[...] = x_ref
x = jnp.ones((8, 128), dtype=jnp.float32)
with self.assertRaisesRegex(
ValueError, "Cannot store a Ref into another Ref",
):
kernel(x)
| ApiErrorTest |
python | django__django | tests/forms_tests/field_tests/test_multiplechoicefield.py | {
"start": 137,
"end": 3755
} | class ____(SimpleTestCase):
def test_multiplechoicefield_1(self):
f = MultipleChoiceField(choices=[("1", "One"), ("2", "Two")])
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean("")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual(["1"], f.clean([1]))
self.assertEqual(["1"], f.clean(["1"]))
self.assertEqual(["1", "2"], f.clean(["1", "2"]))
self.assertEqual(["1", "2"], f.clean([1, "2"]))
self.assertEqual(["1", "2"], f.clean((1, "2")))
with self.assertRaisesMessage(ValidationError, "'Enter a list of values.'"):
f.clean("hello")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean([])
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(())
msg = "'Select a valid choice. 3 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(["3"])
def test_multiplechoicefield_2(self):
f = MultipleChoiceField(choices=[("1", "One"), ("2", "Two")], required=False)
self.assertEqual([], f.clean(""))
self.assertEqual([], f.clean(None))
self.assertEqual(["1"], f.clean([1]))
self.assertEqual(["1"], f.clean(["1"]))
self.assertEqual(["1", "2"], f.clean(["1", "2"]))
self.assertEqual(["1", "2"], f.clean([1, "2"]))
self.assertEqual(["1", "2"], f.clean((1, "2")))
with self.assertRaisesMessage(ValidationError, "'Enter a list of values.'"):
f.clean("hello")
self.assertEqual([], f.clean([]))
self.assertEqual([], f.clean(()))
msg = "'Select a valid choice. 3 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(["3"])
def test_multiplechoicefield_3(self):
f = MultipleChoiceField(
choices=[
("Numbers", (("1", "One"), ("2", "Two"))),
("Letters", (("3", "A"), ("4", "B"))),
("5", "Other"),
]
)
self.assertEqual(["1"], f.clean([1]))
self.assertEqual(["1"], f.clean(["1"]))
self.assertEqual(["1", "5"], f.clean([1, 5]))
self.assertEqual(["1", "5"], f.clean([1, "5"]))
self.assertEqual(["1", "5"], f.clean(["1", 5]))
self.assertEqual(["1", "5"], f.clean(["1", "5"]))
msg = "'Select a valid choice. 6 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(["6"])
msg = "'Select a valid choice. 6 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(["1", "6"])
def test_multiplechoicefield_changed(self):
f = MultipleChoiceField(choices=[("1", "One"), ("2", "Two"), ("3", "Three")])
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed([], None))
self.assertTrue(f.has_changed(None, ["1"]))
self.assertFalse(f.has_changed([1, 2], ["1", "2"]))
self.assertFalse(f.has_changed([2, 1], ["1", "2"]))
self.assertTrue(f.has_changed([1, 2], ["1"]))
self.assertTrue(f.has_changed([1, 2], ["1", "3"]))
def test_disabled_has_changed(self):
f = MultipleChoiceField(choices=[("1", "One"), ("2", "Two")], disabled=True)
self.assertIs(f.has_changed("x", "y"), False)
| MultipleChoiceFieldTest |
python | redis__redis-py | redis/commands/search/reducers.py | {
"start": 87,
"end": 310
} | class ____(Reducer):
"""See https://redis.io/docs/interact/search-and-query/search/aggregations/"""
def __init__(self, field: str) -> None:
super().__init__(field)
self._field = field
| FieldOnlyReducer |
python | django-guardian__django-guardian | example_project_custom_group/articles/models.py | {
"start": 1107,
"end": 1404
} | class ____(UserObjectPermissionAbstract):
class Meta(UserObjectPermissionAbstract.Meta):
abstract = False
indexes = [
*UserObjectPermissionAbstract.Meta.indexes,
models.Index(fields=["content_type", "object_pk", "user"]),
]
| BigUserObjectPermission |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_tool_text_editor_20250124_param.py | {
"start": 361,
"end": 1077
} | class ____(TypedDict, total=False):
name: Required[Literal["str_replace_editor"]]
"""Name of the tool.
This is how the tool will be called by the model and in `tool_use` blocks.
"""
type: Required[Literal["text_editor_20250124"]]
allowed_callers: List[Literal["direct", "code_execution_20250825"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
defer_loading: bool
"""If true, tool will not be included in initial system prompt.
Only loaded when returned via tool_reference from tool search.
"""
input_examples: Iterable[Dict[str, object]]
strict: bool
| BetaToolTextEditor20250124Param |
python | matplotlib__matplotlib | lib/matplotlib/sphinxext/figmpl_directive.py | {
"start": 864,
"end": 9308
} | class ____(Figure):
"""
Implements a directive to allow an optional hidpi image.
Meant to be used with the *plot_srcset* configuration option in conf.py,
and gets set in the TEMPLATE of plot_directive.py
e.g.::
.. figure-mpl:: plot_directive/some_plots-1.png
:alt: bar
:srcset: plot_directive/some_plots-1.png,
plot_directive/some_plots-1.2x.png 2.00x
:class: plot-directive
The resulting html (at ``some_plots.html``) is::
<img src="sphx_glr_bar_001_hidpi.png"
srcset="_images/some_plot-1.png,
_images/some_plots-1.2x.png 2.00x",
alt="bar"
class="plot_directive" />
Note that the handling of subdirectories is different than that used by the sphinx
figure directive::
.. figure-mpl:: plot_directive/nestedpage/index-1.png
:alt: bar
:srcset: plot_directive/nestedpage/index-1.png
plot_directive/nestedpage/index-1.2x.png 2.00x
:class: plot_directive
The resulting html (at ``nestedpage/index.html``)::
<img src="../_images/nestedpage-index-1.png"
srcset="../_images/nestedpage-index-1.png,
../_images/_images/nestedpage-index-1.2x.png 2.00x",
alt="bar"
class="sphx-glr-single-img" />
where the subdirectory is included in the image name for uniqueness.
"""
has_content = False
required_arguments = 1
optional_arguments = 2
final_argument_whitespace = False
option_spec = {
'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': Image.align,
'class': directives.class_option,
'caption': directives.unchanged,
'srcset': directives.unchanged,
}
def run(self):
image_node = figmplnode()
imagenm = self.arguments[0]
image_node['alt'] = self.options.get('alt', '')
image_node['align'] = self.options.get('align', None)
image_node['class'] = self.options.get('class', None)
image_node['width'] = self.options.get('width', None)
image_node['height'] = self.options.get('height', None)
image_node['scale'] = self.options.get('scale', None)
image_node['caption'] = self.options.get('caption', None)
# we would like uri to be the highest dpi version so that
# latex etc will use that. But for now, lets just make
# imagenm... maybe pdf one day?
image_node['uri'] = imagenm
image_node['srcset'] = self.options.get('srcset', None)
return [image_node]
def _parse_srcsetNodes(st):
"""
parse srcset...
"""
entries = st.split(',')
srcset = {}
for entry in entries:
spl = entry.strip().split(' ')
if len(spl) == 1:
srcset[0] = spl[0]
elif len(spl) == 2:
mult = spl[1][:-1]
srcset[float(mult)] = spl[0]
else:
raise ExtensionError(f'srcset argument "{entry}" is invalid.')
return srcset
def _copy_images_figmpl(self, node):
# these will be the temporary place the plot-directive put the images eg:
# ../../../build/html/plot_directive/users/explain/artists/index-1.png
if node['srcset']:
srcset = _parse_srcsetNodes(node['srcset'])
else:
srcset = None
# the rst file's location: eg /Users/username/matplotlib/doc/users/explain/artists
docsource = PurePath(self.document['source']).parent
# get the relpath relative to root:
srctop = self.builder.srcdir
rel = relpath(docsource, srctop).replace('.', '').replace(os.sep, '-')
if len(rel):
rel += '-'
# eg: users/explain/artists
imagedir = PurePath(self.builder.outdir, self.builder.imagedir)
# eg: /Users/username/matplotlib/doc/build/html/_images/users/explain/artists
Path(imagedir).mkdir(parents=True, exist_ok=True)
# copy all the sources to the imagedir:
if srcset:
for src in srcset.values():
# the entries in srcset are relative to docsource's directory
abspath = PurePath(docsource, src)
name = rel + abspath.name
shutil.copyfile(abspath, imagedir / name)
else:
abspath = PurePath(docsource, node['uri'])
name = rel + abspath.name
shutil.copyfile(abspath, imagedir / name)
return imagedir, srcset, rel
def visit_figmpl_html(self, node):
imagedir, srcset, rel = _copy_images_figmpl(self, node)
# /doc/examples/subd/plot_1.rst
docsource = PurePath(self.document['source'])
# /doc/
# make sure to add the trailing slash:
srctop = PurePath(self.builder.srcdir, '')
# examples/subd/plot_1.rst
relsource = relpath(docsource, srctop)
# /doc/build/html
desttop = PurePath(self.builder.outdir, '')
# /doc/build/html/examples/subd
dest = desttop / relsource
# ../../_images/ for dirhtml and ../_images/ for html
imagerel = PurePath(relpath(imagedir, dest.parent)).as_posix()
if self.builder.name == "dirhtml":
imagerel = f'..{imagerel}'
# make uri also be relative...
nm = PurePath(node['uri'][1:]).name
uri = f'{imagerel}/{rel}{nm}'
img_attrs = {'src': uri, 'alt': node['alt']}
# make srcset str. Need to change all the prefixes!
maxsrc = uri
if srcset:
maxmult = -1
srcsetst = ''
for mult, src in srcset.items():
nm = PurePath(src[1:]).name
# ../../_images/plot_1_2_0x.png
path = f'{imagerel}/{rel}{nm}'
srcsetst += path
if mult == 0:
srcsetst += ', '
else:
srcsetst += f' {mult:1.2f}x, '
if mult > maxmult:
maxmult = mult
maxsrc = path
# trim trailing comma and space...
img_attrs['srcset'] = srcsetst[:-2]
if node['class'] is not None:
img_attrs['class'] = ' '.join(node['class'])
for style in ['width', 'height', 'scale']:
if node[style]:
if 'style' not in img_attrs:
img_attrs['style'] = f'{style}: {node[style]};'
else:
img_attrs['style'] += f'{style}: {node[style]};'
# <figure class="align-default" id="id1">
# <a class="reference internal image-reference" href="_images/index-1.2x.png">
# <img alt="_images/index-1.2x.png"
# src="_images/index-1.2x.png" style="width: 53%;" />
# </a>
# <figcaption>
# <p><span class="caption-text">Figure caption is here....</span>
# <a class="headerlink" href="#id1" title="Permalink to this image">#</a></p>
# </figcaption>
# </figure>
self.body.append(
self.starttag(
node, 'figure',
CLASS=f'align-{node["align"]}' if node['align'] else 'align-center'))
self.body.append(
self.starttag(node, 'a', CLASS='reference internal image-reference',
href=maxsrc) +
self.emptytag(node, 'img', **img_attrs) +
'</a>\n')
if node['caption']:
self.body.append(self.starttag(node, 'figcaption'))
self.body.append(self.starttag(node, 'p'))
self.body.append(self.starttag(node, 'span', CLASS='caption-text'))
self.body.append(node['caption'])
self.body.append('</span></p></figcaption>\n')
self.body.append('</figure>\n')
def visit_figmpl_latex(self, node):
if node['srcset'] is not None:
imagedir, srcset = _copy_images_figmpl(self, node)
maxmult = -1
# choose the highest res version for latex:
maxmult = max(srcset, default=-1)
node['uri'] = PurePath(srcset[maxmult]).name
self.visit_figure(node)
def depart_figmpl_html(self, node):
pass
def depart_figmpl_latex(self, node):
self.depart_figure(node)
def figurempl_addnode(app):
app.add_node(figmplnode,
html=(visit_figmpl_html, depart_figmpl_html),
latex=(visit_figmpl_latex, depart_figmpl_latex))
def setup(app):
app.add_directive("figure-mpl", FigureMpl)
figurempl_addnode(app)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True,
'version': matplotlib.__version__}
return metadata
| FigureMpl |
python | fastai__fastai | fastai/tabular/core.py | {
"start": 10134,
"end": 10820
} | class ____(Tabular):
"A `Tabular` object with transforms"
def transform(self, cols, f, all_col=True):
if not all_col: cols = [c for c in cols if c in self.items.columns]
if len(cols) > 0: self[cols] = self[cols].transform(f)
# %% ../../nbs/40_tabular.core.ipynb 52
def _add_prop(cls, nm):
@property
def f(o): return o[list(getattr(o,nm+'_names'))]
@f.setter
def fset(o, v): o[getattr(o,nm+'_names')] = v
setattr(cls, nm+'s', f)
setattr(cls, nm+'s', fset)
_add_prop(Tabular, 'cat')
_add_prop(Tabular, 'cont')
_add_prop(Tabular, 'y')
_add_prop(Tabular, 'x')
_add_prop(Tabular, 'all_col')
# %% ../../nbs/40_tabular.core.ipynb 56
| TabularPandas |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_ordered_dict.py | {
"start": 30035,
"end": 30497
} | class ____:
count = 0 # number of calls to __eq__
trigger = 1 # count value when to trigger side effect
def __eq__(self, other):
if self.__class__.count == self.__class__.trigger:
self.side_effect()
self.__class__.count += 1
return True
def __hash__(self):
# all instances represent the same key
return -1
def side_effect(self):
raise NotImplementedError
| _TriggerSideEffectOnEqual |
python | walkccc__LeetCode | solutions/1471. The k Strongest Values in an Array/1471.py | {
"start": 0,
"end": 357
} | class ____:
def getStrongest(self, arr: list[int], k: int) -> list[int]:
arr.sort()
ans = []
median = arr[(len(arr) - 1) // 2]
l = 0
r = len(arr) - 1
for _ in range(k):
if median - arr[l] > arr[r] - median:
ans.append(arr[l])
l -= 1
else:
ans.append(arr[r])
r += 1
return ans
| Solution |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 118129,
"end": 119836
} | class ____(ASTBase):
def __init__(self, type: ASTType, init: ASTType) -> None:
assert type
self.type = type
self.init = init
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTTemplateParamConstrainedTypeWithInit):
return NotImplemented
return self.type == other.type and self.init == other.init
def __hash__(self) -> int:
return hash((self.type, self.init))
@property
def name(self) -> ASTNestedName:
return self.type.name
@property
def isPack(self) -> bool:
return self.type.isPack
def get_id(
self, version: int, objectType: str | None = None, symbol: Symbol | None = None
) -> str:
# this is not part of the normal name mangling in C++
assert version >= 2
if symbol:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=False)
else:
return self.type.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.type)
if self.init:
res += ' = '
res += transform(self.init)
return res
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_punctuation('=', '=')
signode += addnodes.desc_sig_space()
self.init.describe_signature(signode, mode, env, symbol)
| ASTTemplateParamConstrainedTypeWithInit |
python | keon__algorithms | algorithms/tree/bst/height.py | {
"start": 974,
"end": 1441
} | class ____(unittest.TestCase):
def setUp(self):
self.tree = bst()
self.tree.insert(9)
self.tree.insert(6)
self.tree.insert(12)
self.tree.insert(3)
self.tree.insert(8)
self.tree.insert(10)
self.tree.insert(15)
self.tree.insert(7)
self.tree.insert(18)
def test_height(self):
self.assertEqual(4, height(self.tree.root))
if __name__ == '__main__':
unittest.main()
| TestSuite |
python | django__django | tests/handlers/tests.py | {
"start": 10792,
"end": 11731
} | class ____(SimpleTestCase):
def test_get_script_name(self):
# Regression test for #23173
# Test first without PATH_INFO
script_name = get_script_name({"SCRIPT_URL": "/foobar/"})
self.assertEqual(script_name, "/foobar/")
script_name = get_script_name({"SCRIPT_URL": "/foobar/", "PATH_INFO": "/"})
self.assertEqual(script_name, "/foobar")
def test_get_script_name_double_slashes(self):
"""
WSGI squashes multiple successive slashes in PATH_INFO, get_script_name
should take that into account when forming SCRIPT_NAME (#17133).
"""
script_name = get_script_name(
{
"SCRIPT_URL": "/mst/milestones//accounts/login//help",
"PATH_INFO": "/milestones/accounts/login/help",
}
)
self.assertEqual(script_name, "/mst")
@override_settings(ROOT_URLCONF="handlers.urls")
| ScriptNameTests |
python | pypa__virtualenv | src/virtualenv/util/lock.py | {
"start": 348,
"end": 1525
} | class ____(FileLock):
def __init__(self, lock_file) -> None:
parent = os.path.dirname(lock_file)
if not os.path.isdir(parent):
with suppress(OSError):
os.makedirs(parent)
super().__init__(lock_file)
self.count = 0
self.thread_safe = RLock()
def acquire(self, timeout=None, poll_interval=0.05):
if not self.thread_safe.acquire(timeout=-1 if timeout is None else timeout):
raise Timeout(self.lock_file)
if self.count == 0:
try:
super().acquire(timeout, poll_interval)
except BaseException:
self.thread_safe.release()
raise
self.count += 1
def release(self, force=False): # noqa: FBT002
with self.thread_safe:
if self.count > 0:
if self.count == 1:
super().release(force=force)
self.count -= 1
if self.count == 0:
# if we have no more users of this lock, release the thread lock
self.thread_safe.release()
_lock_store = {}
_store_lock = Lock()
| _CountedFileLock |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 46042,
"end": 46116
} | class ____(Exception):
pass
## Error Checking ##
| NVMLLibraryMismatchError |
python | django__django | tests/gis_tests/gdal_tests/test_driver.py | {
"start": 717,
"end": 1932
} | class ____(unittest.TestCase):
def test01_valid_driver(self):
"Testing valid GDAL/OGR Data Source Drivers."
for d in valid_drivers:
dr = Driver(d)
self.assertEqual(d, str(dr))
def test02_invalid_driver(self):
"Testing invalid GDAL/OGR Data Source Drivers."
for i in invalid_drivers:
with self.assertRaises(GDALException):
Driver(i)
def test03_aliases(self):
"Testing driver aliases."
for alias, full_name in aliases.items():
dr = Driver(alias)
self.assertEqual(full_name, str(dr))
@mock.patch("django.contrib.gis.gdal.driver.capi.get_driver_count")
@mock.patch("django.contrib.gis.gdal.driver.capi.register_all")
def test_registered(self, reg, count):
"""
Prototypes are registered only if the driver count is zero.
"""
def check(count_val):
reg.reset_mock()
count.return_value = count_val
Driver.ensure_registered()
if count_val:
self.assertFalse(reg.called)
else:
reg.assert_called_once_with()
check(0)
check(120)
| DriverTest |
python | pytorch__pytorch | torch/fx/experimental/partitioner_utils.py | {
"start": 2426,
"end": 2710
} | class ____(NamedTuple):
# Sum of all nodes' memory latency on the critical path
mem_latency_sec: float
# Sum of all nodes' compute latency on the critical path
computer_latency_sec: float
# Latency of the critical path
overall_latency_sec: float
| PartitionLatency |
python | spyder-ide__spyder | spyder/config/user.py | {
"start": 35776,
"end": 35850
} | class ____(UserConfig):
"""Plugin configuration handler."""
| PluginConfig |
python | urllib3__urllib3 | test/test_util.py | {
"start": 37763,
"end": 43233
} | class ____:
"""Test utils that use an SSL backend."""
@pytest.mark.parametrize(
"candidate, requirements",
[
(None, ssl.CERT_REQUIRED),
(ssl.CERT_NONE, ssl.CERT_NONE),
(ssl.CERT_REQUIRED, ssl.CERT_REQUIRED),
("REQUIRED", ssl.CERT_REQUIRED),
("CERT_REQUIRED", ssl.CERT_REQUIRED),
],
)
def test_resolve_cert_reqs(
self, candidate: int | str | None, requirements: int
) -> None:
assert resolve_cert_reqs(candidate) == requirements
@pytest.mark.parametrize(
"candidate, version",
[
(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1),
("PROTOCOL_TLSv1", ssl.PROTOCOL_TLSv1),
("TLSv1", ssl.PROTOCOL_TLSv1),
(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23),
],
)
def test_resolve_ssl_version(self, candidate: int | str, version: int) -> None:
assert resolve_ssl_version(candidate) == version
def test_ssl_wrap_socket_loads_the_cert_chain(self) -> None:
socket = Mock()
mock_context = Mock()
ssl_wrap_socket(
ssl_context=mock_context, sock=socket, certfile="/path/to/certfile"
)
mock_context.load_cert_chain.assert_called_once_with("/path/to/certfile", None)
@patch("urllib3.util.ssl_.create_urllib3_context")
def test_ssl_wrap_socket_creates_new_context(
self, create_urllib3_context: mock.MagicMock
) -> None:
socket = Mock()
ssl_wrap_socket(socket, cert_reqs=ssl.CERT_REQUIRED)
create_urllib3_context.assert_called_once_with(None, 2, ciphers=None)
def test_ssl_wrap_socket_loads_verify_locations(self) -> None:
socket = Mock()
mock_context = Mock()
ssl_wrap_socket(ssl_context=mock_context, ca_certs="/path/to/pem", sock=socket)
mock_context.load_verify_locations.assert_called_once_with(
"/path/to/pem", None, None
)
def test_ssl_wrap_socket_loads_certificate_directories(self) -> None:
socket = Mock()
mock_context = Mock()
ssl_wrap_socket(
ssl_context=mock_context, ca_cert_dir="/path/to/pems", sock=socket
)
mock_context.load_verify_locations.assert_called_once_with(
None, "/path/to/pems", None
)
def test_ssl_wrap_socket_loads_certificate_data(self) -> None:
socket = Mock()
mock_context = Mock()
ssl_wrap_socket(
ssl_context=mock_context, ca_cert_data="TOTALLY PEM DATA", sock=socket
)
mock_context.load_verify_locations.assert_called_once_with(
None, None, "TOTALLY PEM DATA"
)
def _wrap_socket_and_mock_warn(
self, sock: socket.socket, server_hostname: str | None
) -> tuple[Mock, MagicMock]:
mock_context = Mock()
with patch("warnings.warn") as warn:
ssl_wrap_socket(
ssl_context=mock_context,
sock=sock,
server_hostname=server_hostname,
)
return mock_context, warn
def test_ssl_wrap_socket_sni_ip_address_no_warn(self) -> None:
"""Test that a warning is not made if server_hostname is an IP address."""
sock = Mock()
context, warn = self._wrap_socket_and_mock_warn(sock, "8.8.8.8")
context.wrap_socket.assert_called_once_with(sock, server_hostname="8.8.8.8")
warn.assert_not_called()
def test_ssl_wrap_socket_sni_none_no_warn(self) -> None:
"""Test that a warning is not made if server_hostname is not given."""
sock = Mock()
context, warn = self._wrap_socket_and_mock_warn(sock, None)
context.wrap_socket.assert_called_once_with(sock, server_hostname=None)
warn.assert_not_called()
@pytest.mark.parametrize(
"openssl_version, openssl_version_number, implementation_name, version_info, pypy_version_info, reliable",
[
# OpenSSL and Python OK -> reliable
("OpenSSL 1.1.1", 0x101010CF, "cpython", (3, 9, 3), None, True),
# Python OK -> reliable
("OpenSSL 1.1.1", 0x10101000, "cpython", (3, 9, 3), None, True),
# PyPy: depends on the version
("OpenSSL 1.1.1", 0x10101000, "pypy", (3, 9, 9), (7, 3, 7), False),
("OpenSSL 1.1.1", 0x101010CF, "pypy", (3, 9, 19), (7, 3, 16), True),
# OpenSSL OK -> reliable
("OpenSSL 1.1.1", 0x101010CF, "cpython", (3, 9, 2), None, True),
# not OpenSSSL -> unreliable
("LibreSSL 2.8.3", 0x101010CF, "cpython", (3, 10, 0), None, False),
# old OpenSSL and old Python, unreliable
("OpenSSL 1.1.0", 0x10101000, "cpython", (3, 9, 2), None, False),
],
)
def test_is_has_never_check_common_name_reliable(
self,
openssl_version: str,
openssl_version_number: int,
implementation_name: str,
version_info: _TYPE_VERSION_INFO,
pypy_version_info: _TYPE_VERSION_INFO | None,
reliable: bool,
) -> None:
assert (
_is_has_never_check_common_name_reliable(
openssl_version,
openssl_version_number,
implementation_name,
version_info,
pypy_version_info,
)
== reliable
)
idna_blocker = ImportBlocker("idna")
module_stash = ModuleStash("urllib3")
| TestUtilSSL |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol41.py | {
"start": 472,
"end": 556
} | class ____(Protocol):
def __buffer__(self, __flags: int) -> memoryview: ...
| Buffer |
python | mwaskom__seaborn | seaborn/_base.py | {
"start": 56600,
"end": 66543
} | class ____(UserString):
"""
Prevent comparisons elsewhere in the library from using the wrong name.
Errors are simple assertions because users should not be able to trigger
them. If that changes, they should be more verbose.
"""
# TODO we can replace this with typing.Literal on Python 3.8+
allowed = "numeric", "datetime", "categorical"
def __init__(self, data):
assert data in self.allowed, data
super().__init__(data)
def __eq__(self, other):
assert other in self.allowed, other
return self.data == other
def variable_type(vector, boolean_type="numeric"):
"""
Determine whether a vector contains numeric, categorical, or datetime data.
This function differs from the pandas typing API in two ways:
- Python sequences or object-typed PyData objects are considered numeric if
all of their entries are numeric.
- String or mixed-type data are considered categorical even if not
explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.
Parameters
----------
vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
Input data to test.
boolean_type : 'numeric' or 'categorical'
Type to use for vectors containing only 0s and 1s (and NAs).
Returns
-------
var_type : 'numeric', 'categorical', or 'datetime'
Name identifying the type of data in the vector.
"""
vector = pd.Series(vector)
# If a categorical dtype is set, infer categorical
if isinstance(vector.dtype, pd.CategoricalDtype):
return VariableType("categorical")
# Special-case all-na data, which is always "numeric"
if pd.isna(vector).all():
return VariableType("numeric")
# At this point, drop nans to simplify further type inference
vector = vector.dropna()
# Special-case binary/boolean data, allow caller to determine
# This triggers a numpy warning when vector has strings/objects
# https://github.com/numpy/numpy/issues/6784
# Because we reduce with .all(), we are agnostic about whether the
# comparison returns a scalar or vector, so we will ignore the warning.
# It triggers a separate DeprecationWarning when the vector has datetimes:
# https://github.com/numpy/numpy/issues/13548
# This is considered a bug by numpy and will likely go away.
with warnings.catch_warnings():
warnings.simplefilter(
action='ignore', category=(FutureWarning, DeprecationWarning)
)
try:
if np.isin(vector, [0, 1]).all():
return VariableType(boolean_type)
except TypeError:
# .isin comparison is not guaranteed to be possible under NumPy
# casting rules, depending on the (unknown) dtype of 'vector'
pass
# Defer to positive pandas tests
if pd.api.types.is_numeric_dtype(vector):
return VariableType("numeric")
if pd.api.types.is_datetime64_dtype(vector):
return VariableType("datetime")
# --- If we get to here, we need to check the entries
# Check for a collection where everything is a number
def all_numeric(x):
for x_i in x:
if not isinstance(x_i, Number):
return False
return True
if all_numeric(vector):
return VariableType("numeric")
# Check for a collection where everything is a datetime
def all_datetime(x):
for x_i in x:
if not isinstance(x_i, (datetime, np.datetime64)):
return False
return True
if all_datetime(vector):
return VariableType("datetime")
# Otherwise, our final fallback is to consider things categorical
return VariableType("categorical")
def infer_orient(x=None, y=None, orient=None, require_numeric=True):
"""Determine how the plot should be oriented based on the data.
For historical reasons, the convention is to call a plot "horizontally"
or "vertically" oriented based on the axis representing its dependent
variable. Practically, this is used when determining the axis for
numerical aggregation.
Parameters
----------
x, y : Vector data or None
Positional data vectors for the plot.
orient : string or None
Specified orientation. If not None, can be "x" or "y", or otherwise
must start with "v" or "h".
require_numeric : bool
If set, raise when the implied dependent variable is not numeric.
Returns
-------
orient : "x" or "y"
Raises
------
ValueError: When `orient` is an unknown string.
TypeError: When dependent variable is not numeric, with `require_numeric`
"""
x_type = None if x is None else variable_type(x)
y_type = None if y is None else variable_type(y)
nonnumeric_dv_error = "{} orientation requires numeric `{}` variable."
single_var_warning = "{} orientation ignored with only `{}` specified."
if x is None:
if str(orient).startswith("h"):
warnings.warn(single_var_warning.format("Horizontal", "y"))
if require_numeric and y_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
return "x"
elif y is None:
if str(orient).startswith("v"):
warnings.warn(single_var_warning.format("Vertical", "x"))
if require_numeric and x_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
return "y"
elif str(orient).startswith("v") or orient == "x":
if require_numeric and y_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
return "x"
elif str(orient).startswith("h") or orient == "y":
if require_numeric and x_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
return "y"
elif orient is not None:
err = (
"`orient` must start with 'v' or 'h' or be None, "
f"but `{repr(orient)}` was passed."
)
raise ValueError(err)
elif x_type != "categorical" and y_type == "categorical":
return "y"
elif x_type != "numeric" and y_type == "numeric":
return "x"
elif x_type == "numeric" and y_type != "numeric":
return "y"
elif require_numeric and "numeric" not in (x_type, y_type):
err = "Neither the `x` nor `y` variable appears to be numeric."
raise TypeError(err)
else:
return "x"
def unique_dashes(n):
"""Build an arbitrarily long list of unique dash styles for lines.
Parameters
----------
n : int
Number of unique dash specs to generate.
Returns
-------
dashes : list of strings or tuples
Valid arguments for the ``dashes`` parameter on
:class:`matplotlib.lines.Line2D`. The first spec is a solid
line (``""``), the remainder are sequences of long and short
dashes.
"""
# Start with dash specs that are well distinguishable
dashes = [
"",
(4, 1.5),
(1, 1),
(3, 1.25, 1.5, 1.25),
(5, 1, 1, 1),
]
# Now programmatically build as many as we need
p = 3
while len(dashes) < n:
# Take combinations of long and short dashes
a = itertools.combinations_with_replacement([3, 1.25], p)
b = itertools.combinations_with_replacement([4, 1], p)
# Interleave the combinations, reversing one of the streams
segment_list = itertools.chain(*zip(
list(a)[1:-1][::-1],
list(b)[1:-1]
))
# Now insert the gaps
for segments in segment_list:
gap = min(segments)
spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))
dashes.append(spec)
p += 1
return dashes[:n]
def unique_markers(n):
"""Build an arbitrarily long list of unique marker styles for points.
Parameters
----------
n : int
Number of unique marker specs to generate.
Returns
-------
markers : list of string or tuples
Values for defining :class:`matplotlib.markers.MarkerStyle` objects.
All markers will be filled.
"""
# Start with marker specs that are well distinguishable
markers = [
"o",
"X",
(4, 0, 45),
"P",
(4, 0, 0),
(4, 1, 0),
"^",
(4, 1, 45),
"v",
]
# Now generate more from regular polygons of increasing order
s = 5
while len(markers) < n:
a = 360 / (s + 1) / 2
markers.extend([
(s + 1, 1, a),
(s + 1, 0, a),
(s, 1, 0),
(s, 0, 0),
])
s += 1
# Convert to MarkerStyle object, using only exactly what we need
# markers = [mpl.markers.MarkerStyle(m) for m in markers[:n]]
return markers[:n]
def categorical_order(vector, order=None):
"""Return a list of unique data values.
Determine an ordered list of levels in ``values``.
Parameters
----------
vector : list, array, Categorical, or Series
Vector of "categorical" values
order : list-like, optional
Desired order of category levels to override the order determined
from the ``values`` object.
Returns
-------
order : list
Ordered list of category levels not including null values.
"""
if order is None:
if hasattr(vector, "categories"):
order = vector.categories
else:
try:
order = vector.cat.categories
except (TypeError, AttributeError):
order = pd.Series(vector).unique()
if variable_type(vector) == "numeric":
order = np.sort(order)
order = filter(pd.notnull, order)
return list(order)
| VariableType |
python | Netflix__metaflow | test/unit/inheritance/test_inheritance.py | {
"start": 4012,
"end": 6302
} | class ____:
"""Test FlowMutator in base class using config from derived class"""
def test_flow_completes(self, mutator_with_derived_config_run):
"""Test that flow completes successfully"""
assert mutator_with_derived_config_run.successful
assert mutator_with_derived_config_run.finished
def test_all_parameters_accessible(self, mutator_with_derived_config_run):
"""Test that all parameters from hierarchy are accessible"""
start_task = mutator_with_derived_config_run["start"].task
assert start_task["result_base_param"].data == "base_value"
assert start_task["result_middle_param"].data == 200
assert start_task["result_final_param"].data == 999
def test_all_configs_accessible(self, mutator_with_derived_config_run):
"""Test that all configs from hierarchy are accessible"""
start_task = mutator_with_derived_config_run["start"].task
middle_config = start_task["result_middle_config"].data
assert middle_config["env"] == "staging"
runtime_config = start_task["result_runtime_config"].data
assert runtime_config["features"] == ["logging", "metrics"]
assert runtime_config["worker_count"] == 16
def test_base_mutator_uses_derived_config(self, mutator_with_derived_config_run):
"""Test that base class mutator injects parameters from derived config"""
start_task = mutator_with_derived_config_run["start"].task
# These parameters should be injected by base mutator using derived runtime_config
assert start_task["result_feature_logging"].data is True
assert start_task["result_feature_metrics"].data is True
assert start_task["result_worker_count"].data == 16
def test_computation_with_forward_injected_params(
self, mutator_with_derived_config_run
):
"""Test computation using parameters injected from derived config"""
start_task = mutator_with_derived_config_run["start"].task
# result_computation = worker_count * enabled_features + final_param
# enabled_features = feature_logging (True=1) + feature_metrics (True=1) = 2
# = 16 * 2 + 999 = 1031
assert start_task["result_computation"].data == 1031
| TestMutatorWithDerivedConfig |
python | getsentry__sentry | tests/acceptance/test_create_organization.py | {
"start": 151,
"end": 1134
} | class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.login_as(self.user)
def test_simple(self) -> None:
settings.PRIVACY_URL = "https://sentry.io/privacy/"
settings.TERMS_URL = "https://sentry.io/terms/"
self.browser.get("/organizations/new/")
assert self.browser.wait_until('input[name="name"]')
assert self.browser.element_exists('input[name="name"]')
assert self.browser.element_exists('input[name="agreeTerms"]')
self.browser.element('input[name="name"]').send_keys("new org")
self.browser.element('input[name="agreeTerms"]').click()
self.browser.click('button[type="submit"]')
# After creating an org should end up on create project
self.browser.wait_until_test_id("platform-javascript-react")
assert self.browser.element_exists_by_test_id("create-project")
| CreateOrganizationTest |
python | python__mypy | mypy/report.py | {
"start": 15074,
"end": 16577
} | class ____(AbstractReporter):
"""Exact line coverage reporter.
This reporter writes a JSON dictionary with one field 'lines' to
the file 'coverage.json' in the specified report directory. The
value of that field is a dictionary which associates to each
source file's absolute pathname the list of line numbers that
belong to typed functions in that file.
"""
def __init__(self, reports: Reports, output_dir: str) -> None:
super().__init__(reports, output_dir)
self.lines_covered: dict[str, list[int]] = {}
def on_file(
self,
tree: MypyFile,
modules: dict[str, MypyFile],
type_map: dict[Expression, Type],
options: Options,
) -> None:
if os.path.isdir(tree.path): # can happen with namespace packages
return
with open(tree.path) as f:
tree_source = f.readlines()
coverage_visitor = LineCoverageVisitor(tree_source)
tree.accept(coverage_visitor)
covered_lines = []
for line_number, (_, typed) in enumerate(coverage_visitor.lines_covered):
if typed:
covered_lines.append(line_number + 1)
self.lines_covered[os.path.abspath(tree.path)] = covered_lines
def on_finish(self) -> None:
with open(os.path.join(self.output_dir, "coverage.json"), "w") as f:
json.dump({"lines": self.lines_covered}, f)
register_reporter("linecoverage", LineCoverageReporter)
| LineCoverageReporter |
python | faif__python-patterns | patterns/structural/mvc.py | {
"start": 664,
"end": 1425
} | class ____(Model):
"""The Model is the data layer of the application."""
class Price(float):
"""A polymorphic way to pass a float with a particular
__str__ functionality."""
def __str__(self) -> str:
return f"{self:.2f}"
products = {
"milk": {"price": Price(1.50), "quantity": 10},
"eggs": {"price": Price(0.20), "quantity": 100},
"cheese": {"price": Price(2.00), "quantity": 10},
}
item_type = "product"
def __iter__(self) -> Any:
yield from self.products
def get(self, product: str) -> dict:
try:
return self.products[product]
except KeyError as e:
raise KeyError(str(e) + " not in the model's item list.")
| ProductModel |
python | pytorch__pytorch | torch/_inductor/autotune_process.py | {
"start": 29808,
"end": 32123
} | class ____(GPUDeviceBenchmarkMixin, BenchmarkRequest):
"""Benchmark request for CuteDSL (CUTLASS Python DSL) kernels."""
def __init__(
self,
kernel_name: str,
input_tensor_meta: Union[TensorMeta, list[TensorMeta]],
output_tensor_meta: Union[TensorMeta, list[TensorMeta]],
extra_args: tuple[Any, ...],
source_code: PartialRender,
) -> None:
super().__init__(kernel_name, input_tensor_meta, output_tensor_meta, extra_args)
finalized_code = source_code.finalize_all()
self.module_cache_key, self.module_path = PyCodeCache.write(finalized_code)
def make_run_fn(
self, *input_tensors: torch.Tensor, out: torch.Tensor
) -> Callable[[], None]:
"""
Create a function to run the CuteDSL kernel with the given input and output tensors.
Similar to TritonBenchmarkRequest.make_run_fn but for CuteDSL kernels.
"""
mod = PyCodeCache.load_by_key_path(self.module_cache_key, self.module_path)
# Logic replicated async_compile
from .codegen.cutedsl.cutedsl_kernel import MAIN_SUFFIX
main_func_name = f"{self.kernel_name}_{MAIN_SUFFIX}"
if not hasattr(mod, main_func_name):
available = [name for name in dir(mod) if callable(getattr(mod, name))]
raise RuntimeError(
f"Could not find CuteDSL main kernel function '{main_func_name}'. Available callables: {available}"
)
kernel_func = getattr(mod, main_func_name)
def run_kernel():
device_interface = get_interface_for_device("cuda")
stream = device_interface.get_raw_stream(out.device.index)
return kernel_func(*input_tensors, out, stream=stream)
return run_kernel
def cleanup_run_fn(self) -> None:
"""Clean up any resources used by the kernel."""
@functools.cache
def get_tuning_process_pool() -> TuningProcessPool:
pool = TuningProcessPool()
atexit.register(pool.shutdown)
return pool
def benchmark_in_sub_process(
choices: list[TritonTemplateCaller],
) -> dict[TritonTemplateCaller, float]:
"""
Do benchmarking in a subprocess and return the perf number (latency).
"""
return get_tuning_process_pool().benchmark(choices)
| CuteDSLBenchmarkRequest |
python | doocs__leetcode | solution/0200-0299/0249.Group Shifted Strings/Solution.py | {
"start": 0,
"end": 418
} | class ____:
def groupStrings(self, strings: List[str]) -> List[List[str]]:
g = defaultdict(list)
for s in strings:
diff = ord(s[0]) - ord("a")
t = []
for c in s:
c = ord(c) - diff
if c < ord("a"):
c += 26
t.append(chr(c))
g["".join(t)].append(s)
return list(g.values())
| Solution |
python | django__django | django/contrib/auth/models.py | {
"start": 18126,
"end": 21304
} | class ____:
id = None
pk = None
username = ""
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager(Group)
_user_permissions = EmptyManager(Permission)
def __str__(self):
return "AnonymousUser"
def __eq__(self, other):
return isinstance(other, self.__class__)
def __hash__(self):
return 1 # instances always return the same hash value
def __int__(self):
raise TypeError(
"Cannot cast AnonymousUser to int. Are you trying to use it in place of "
"User?"
)
def save(self):
raise NotImplementedError(
"Django doesn't provide a DB representation for AnonymousUser."
)
def delete(self):
raise NotImplementedError(
"Django doesn't provide a DB representation for AnonymousUser."
)
def set_password(self, raw_password):
raise NotImplementedError(
"Django doesn't provide a DB representation for AnonymousUser."
)
def check_password(self, raw_password):
raise NotImplementedError(
"Django doesn't provide a DB representation for AnonymousUser."
)
@property
def groups(self):
return self._groups
@property
def user_permissions(self):
return self._user_permissions
def get_user_permissions(self, obj=None):
return _user_get_permissions(self, obj, "user")
async def aget_user_permissions(self, obj=None):
return await _auser_get_permissions(self, obj, "user")
def get_group_permissions(self, obj=None):
return set()
async def aget_group_permissions(self, obj=None):
return self.get_group_permissions(obj)
def get_all_permissions(self, obj=None):
return _user_get_permissions(self, obj, "all")
async def aget_all_permissions(self, obj=None):
return await _auser_get_permissions(self, obj, "all")
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
async def ahas_perm(self, perm, obj=None):
return await _auser_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
if not isinstance(perm_list, Iterable) or isinstance(perm_list, str):
raise ValueError("perm_list must be an iterable of permissions.")
return all(self.has_perm(perm, obj) for perm in perm_list)
async def ahas_perms(self, perm_list, obj=None):
if not isinstance(perm_list, Iterable) or isinstance(perm_list, str):
raise ValueError("perm_list must be an iterable of permissions.")
for perm in perm_list:
if not await self.ahas_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
async def ahas_module_perms(self, module):
return await _auser_has_module_perms(self, module)
@property
def is_anonymous(self):
return True
@property
def is_authenticated(self):
return False
def get_username(self):
return self.username
| AnonymousUser |
python | encode__django-rest-framework | tests/test_serializer.py | {
"start": 7640,
"end": 8669
} | class ____:
def test_non_field_error_validate_method(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
def validate(self, attrs):
raise serializers.ValidationError('Non field error')
serializer = ExampleSerializer(data={'char': 'abc', 'integer': 123})
assert not serializer.is_valid()
assert serializer.errors == {'non_field_errors': ['Non field error']}
def test_field_error_validate_method(self):
class ExampleSerializer(serializers.Serializer):
char = serializers.CharField()
integer = serializers.IntegerField()
def validate(self, attrs):
raise serializers.ValidationError({'char': 'Field error'})
serializer = ExampleSerializer(data={'char': 'abc', 'integer': 123})
assert not serializer.is_valid()
assert serializer.errors == {'char': ['Field error']}
| TestValidateMethod |
python | kamyu104__LeetCode-Solutions | Python/bulls-and-cows.py | {
"start": 651,
"end": 958
} | class ____(object):
def getHint(self, secret, guess):
"""
:type secret: str
:type guess: str
:rtype: str
"""
A = sum(imap(operator.eq, secret, guess))
B = sum((Counter(secret) & Counter(guess)).values()) - A
return "%dA%dB" % (A, B)
| Solution2 |
python | huggingface__transformers | src/transformers/convert_slow_tokenizer.py | {
"start": 36216,
"end": 36266
} | class ____(SpmConverter):
pass
| ReformerConverter |
python | great-expectations__great_expectations | great_expectations/render/components.py | {
"start": 6507,
"end": 7222
} | class ____(RenderedContent):
def __init__(self, content_block_type, styling=None) -> None:
self.content_block_type = content_block_type
if styling is None:
styling = {}
self.styling = styling
@override
def to_json_dict(self) -> dict[str, JSONValues]:
"""Returns a JSON-serializable dict representation of this RenderedComponentContent.
Returns:
A JSON-serializable dict representation of this RenderedComponentContent.
"""
d = super().to_json_dict()
d["content_block_type"] = self.content_block_type
if len(self.styling) > 0:
d["styling"] = self.styling
return d
| RenderedComponentContent |
python | ansible__ansible | lib/ansible/plugins/strategy/linear.py | {
"start": 2025,
"end": 17713
} | class ____(StrategyBase):
def _get_next_task_lockstep(self, hosts: list[Host], iterator: PlayIterator) -> list[tuple[Host, Task]]:
"""
Returns a list of (host, task) tuples, where the task may
be a noop task to keep the iterator in lock step across
all hosts.
"""
state_task_per_host = {}
for host in hosts:
state, task = iterator.get_next_task_for_host(host, peek=True)
if task is not None:
state_task_per_host[host] = state, task
if not state_task_per_host:
return []
task_uuids = {t._uuid for s, t in state_task_per_host.values()}
_loop_cnt = 0
while _loop_cnt <= 1:
try:
cur_task = iterator.all_tasks[iterator.cur_task]
except IndexError:
# pick up any tasks left after clear_host_errors
iterator.cur_task = 0
_loop_cnt += 1
else:
iterator.cur_task += 1
if cur_task._uuid in task_uuids:
break
else:
# prevent infinite loop
raise AnsibleAssertionError(
'BUG: There seems to be a mismatch between tasks in PlayIterator and HostStates.'
)
host_tasks = []
for host, (state, task) in state_task_per_host.items():
if cur_task._uuid == task._uuid:
iterator.set_state_for_host(host.name, state)
host_tasks.append((host, task))
if cur_task._get_meta() == 'flush_handlers':
iterator.all_tasks[iterator.cur_task:iterator.cur_task] = [h for b in iterator._play.handlers for h in b.block]
return host_tasks
def run(self, iterator, play_context: PlayContext): # type: ignore[override]
"""
The linear strategy is simple - get the next task and queue
it for all hosts, then wait for the queue to drain before
moving on to the next task
"""
# iterate over each task, while there is one left to run
result = int(self._tqm.RUN_OK)
work_to_do = True
self._set_hosts_cache(iterator._play)
while work_to_do and not self._tqm._terminated:
try:
display.debug("getting the remaining hosts for this loop")
hosts_left = self.get_hosts_left(iterator)
display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False
work_to_do = False
host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
# skip control
skip_rest = False
choose_step = True
# flag set if task is set to any_errors_fatal
any_errors_fatal = False
results: list[_task_result._RawTaskResult] = []
for (host, task) in host_tasks:
if self._tqm._terminated:
break
run_once = False
work_to_do = True
host_name = host.get_name()
display.debug("getting variables")
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
self.add_tqm_variables(task_vars, play=iterator._play)
templar = TemplateEngine(loader=self._loader, variables=task_vars)
display.debug("done getting variables")
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
try:
task_action = templar.template(task.action)
except AnsibleValueOmittedError:
raise AnsibleParserError("Omit is not valid for the `action` keyword.", obj=task.action) from None
try:
action = action_loader.get(task_action, class_only=True, collection_list=task.collections)
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
action = None
if task_action in C._ACTION_META:
# for the linear strategy, we run meta tasks just once and for
# all hosts currently being iterated over rather than one host
results.extend(self._execute_meta(task, play_context, iterator, host))
if task._get_meta() not in ('noop', 'reset_connection', 'end_host', 'role_complete', 'flush_handlers', 'end_role'):
run_once = True
if (task.any_errors_fatal or run_once) and not task.ignore_errors:
any_errors_fatal = True
else:
# handle step if needed, skip meta actions as they are used internally
if self._step and choose_step:
if self._take_step(task):
choose_step = False
else:
skip_rest = True
break
run_once = action and getattr(action, 'BYPASS_HOST_LOOP', False) or templar.template(task.run_once)
if (task.any_errors_fatal or run_once) and not task.ignore_errors:
any_errors_fatal = True
if not callback_sent:
task.post_validate_attribute("name", templar=templar)
if isinstance(task, Handler):
self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
else:
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
callback_sent = True
self._blocked_hosts[host_name] = True
self._queue_task(host, task, task_vars, play_context)
del task_vars
if isinstance(task, Handler):
if run_once:
task.clear_hosts()
else:
task.remove_host(host)
# if we're bypassing the host loop, break out now
if run_once:
break
results.extend(self._process_pending_results(iterator, max_passes=max(1, int(len(self._tqm._workers) * 0.1))))
# go to next host/task group
if skip_rest:
continue
display.debug("done queuing things up, now waiting for results queue to drain")
if self._pending_results > 0:
results.extend(self._wait_on_pending_results(iterator))
self.update_active_connections(results)
included_files = IncludedFile.process_include_results(
results,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
if len(included_files) > 0:
display.debug("we have included files to process")
display.debug("generating all_blocks data")
all_blocks = dict((host, []) for host in hosts_left)
display.debug("done generating all_blocks data")
included_tasks = []
failed_includes_hosts = set()
for included_file in included_files:
display.debug("processing included file: %s" % included_file._filename)
is_handler = False
try:
if included_file._is_role:
new_ir = self._copy_included_file(included_file)
new_blocks, handler_blocks = new_ir.get_block_list(
play=iterator._play,
variable_manager=self._variable_manager,
loader=self._loader,
)
else:
is_handler = isinstance(included_file._task, Handler)
new_blocks = self._load_included_file(
included_file,
iterator=iterator,
is_handler=is_handler,
)
# let PlayIterator know about any new handlers included via include_role or
# import_role within include_role/include_taks
iterator.handlers = [h for b in iterator._play.handlers for h in b.block]
display.debug("iterating over new_blocks loaded from include file")
for new_block in new_blocks:
if is_handler:
for task in new_block.block:
task.notified_hosts = included_file._hosts[:]
final_block = new_block
else:
task_vars = self._variable_manager.get_vars(
play=iterator._play,
task=new_block.get_first_parent_include(),
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all,
)
display.debug("filtering new block on tags")
final_block = new_block.filter_tagged_tasks(task_vars)
display.debug("done filtering new block on tags")
included_tasks.extend(final_block.get_tasks())
for host in hosts_left:
if host in included_file._hosts:
all_blocks[host].append(final_block)
display.debug("done iterating over new_blocks loaded from include file")
except AnsibleParserError:
raise
except AnsibleError as ex:
# FIXME: send the error to the callback; don't directly write to display here
display.error(ex)
for r in included_file._results:
r._return_data['failed'] = True
r._return_data['reason'] = str(ex)
self._tqm._stats.increment('failures', r.host.name)
self._tqm.send_callback('v2_runner_on_failed', r)
failed_includes_hosts.add(r.host)
else:
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
self._tqm.send_callback('v2_playbook_on_include', included_file)
for host in failed_includes_hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
# finally go through all of the hosts and append the
# accumulated blocks to their list of tasks
display.debug("extending task lists for all hosts with included blocks")
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
iterator.all_tasks[iterator.cur_task:iterator.cur_task] = included_tasks
display.debug("done extending task lists")
display.debug("done processing included files")
display.debug("results queue empty")
display.debug("checking for any_errors_fatal")
failed_hosts = []
unreachable_hosts = []
for res in results:
if res.is_failed():
failed_hosts.append(res.host.name)
elif res.is_unreachable():
unreachable_hosts.append(res.host.name)
if any_errors_fatal and (failed_hosts or unreachable_hosts):
for host in hosts_left:
if host.name not in failed_hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
display.debug("done checking for any_errors_fatal")
display.debug("checking for max_fail_percentage")
if iterator._play.max_fail_percentage is not None and len(results) > 0:
percentage = iterator._play.max_fail_percentage / 100.0
if (len(self._tqm._failed_hosts) / iterator.batch_size) > percentage:
for host in hosts_left:
# don't double-mark hosts, or the iterator will potentially
# fail them out of the rescue/always states
if host.name not in failed_hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result |= self._tqm.RUN_FAILED_BREAK_PLAY
display.debug('(%s failed / %s total )> %s max fail' % (len(self._tqm._failed_hosts), iterator.batch_size, percentage))
display.debug("done checking for max_fail_percentage")
display.debug("checking to see if all hosts have failed and the running result is not ok")
if result != self._tqm.RUN_OK and len(self._tqm._failed_hosts) >= len(hosts_left):
display.debug("^ not ok, so returning result now")
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
return result
display.debug("done checking to see if all hosts have failed")
finally:
# removed unnecessary exception handler, don't want to mis-attribute the entire code block by changing indentation
pass
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
| StrategyModule |
python | langchain-ai__langchain | libs/partners/anthropic/tests/integration_tests/test_chat_models.py | {
"start": 31608,
"end": 35772
} | class ____(TypedDict):
"""Person data as a TypedDict."""
name: str
age: int
nicknames: list[str] | None
@pytest.mark.parametrize("schema", [Person, Person.model_json_schema(), PersonDict])
def test_response_format(schema: dict | type) -> None:
model = ChatAnthropic(
model="claude-sonnet-4-5", # type: ignore[call-arg]
betas=["structured-outputs-2025-11-13"],
)
query = "Chester (a.k.a. Chet) is 100 years old."
response = model.invoke(query, response_format=schema)
parsed = json.loads(response.text)
if isinstance(schema, type) and issubclass(schema, BaseModel):
schema.model_validate(parsed)
else:
assert isinstance(parsed, dict)
assert parsed["name"]
assert parsed["age"]
def test_response_format_in_agent() -> None:
class Weather(BaseModel):
temperature: float
units: str
# no tools
agent = create_agent(
"anthropic:claude-sonnet-4-5", response_format=ProviderStrategy(Weather)
)
result = agent.invoke({"messages": [{"role": "user", "content": "75 degrees F."}]})
assert len(result["messages"]) == 2
parsed = json.loads(result["messages"][-1].text)
assert Weather(**parsed) == result["structured_response"]
# with tools
def get_weather(location: str) -> str:
"""Get the weather at a location."""
return "75 degrees Fahrenheit."
agent = create_agent(
"anthropic:claude-sonnet-4-5",
tools=[get_weather],
response_format=ProviderStrategy(Weather),
)
result = agent.invoke(
{"messages": [{"role": "user", "content": "What's the weather in SF?"}]},
)
assert len(result["messages"]) == 4
assert result["messages"][1].tool_calls
parsed = json.loads(result["messages"][-1].text)
assert Weather(**parsed) == result["structured_response"]
@pytest.mark.vcr
def test_strict_tool_use() -> None:
model = ChatAnthropic(
model="claude-sonnet-4-5", # type: ignore[call-arg]
betas=["structured-outputs-2025-11-13"],
)
def get_weather(location: str, unit: Literal["C", "F"]) -> str:
"""Get the weather at a location."""
return "75 degrees Fahrenheit."
model_with_tools = model.bind_tools([get_weather], strict=True)
response = model_with_tools.invoke("What's the weather in Boston, in Celsius?")
assert response.tool_calls
def test_get_num_tokens_from_messages() -> None:
llm = ChatAnthropic(model=MODEL_NAME) # type: ignore[call-arg]
# Test simple case
messages = [
SystemMessage(content="You are a scientist"),
HumanMessage(content="Hello, Claude"),
]
num_tokens = llm.get_num_tokens_from_messages(messages)
assert num_tokens > 0
# Test tool use
@tool(parse_docstring=True)
def get_weather(location: str) -> str:
"""Get the current weather in a given location.
Args:
location: The city and state, e.g. San Francisco, CA
"""
return "Sunny"
messages = [
HumanMessage(content="What's the weather like in San Francisco?"),
]
num_tokens = llm.get_num_tokens_from_messages(messages, tools=[get_weather])
assert num_tokens > 0
messages = [
HumanMessage(content="What's the weather like in San Francisco?"),
AIMessage(
content=[
{"text": "Let's see.", "type": "text"},
{
"id": "toolu_01V6d6W32QGGSmQm4BT98EKk",
"input": {"location": "SF"},
"name": "get_weather",
"type": "tool_use",
},
],
tool_calls=[
{
"name": "get_weather",
"args": {"location": "SF"},
"id": "toolu_01V6d6W32QGGSmQm4BT98EKk",
"type": "tool_call",
},
],
),
ToolMessage(content="Sunny", tool_call_id="toolu_01V6d6W32QGGSmQm4BT98EKk"),
]
num_tokens = llm.get_num_tokens_from_messages(messages, tools=[get_weather])
assert num_tokens > 0
| PersonDict |
python | keras-team__keras | keras/src/ops/image.py | {
"start": 31612,
"end": 35050
} | class ____(Operation):
def __init__(self, order, fill_mode="constant", fill_value=0, *, name=None):
super().__init__(name=name)
self.order = order
self.fill_mode = fill_mode
self.fill_value = fill_value
def call(self, inputs, coordinates):
return backend.image.map_coordinates(
inputs,
coordinates,
order=self.order,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
def compute_output_spec(self, inputs, coordinates):
if coordinates.shape[0] != len(inputs.shape):
raise ValueError(
"First dim of `coordinates` must be the same as the rank of "
"`inputs`. "
f"Received inputs with shape: {inputs.shape} and coordinate "
f"leading dim of {coordinates.shape[0]}"
)
if len(coordinates.shape) < 2:
raise ValueError(
"Invalid coordinates rank: expected at least rank 2."
f" Received input with shape: {coordinates.shape}"
)
return KerasTensor(coordinates.shape[1:], dtype=inputs.dtype)
@keras_export("keras.ops.image.map_coordinates")
def map_coordinates(
inputs, coordinates, order, fill_mode="constant", fill_value=0
):
"""Map the input array to new coordinates by interpolation.
Note that interpolation near boundaries differs from the scipy function,
because we fixed an outstanding bug
[scipy/issues/2640](https://github.com/scipy/scipy/issues/2640).
Args:
inputs: The input array.
coordinates: The coordinates at which inputs is evaluated.
order: The order of the spline interpolation. The order must be `0` or
`1`. `0` indicates the nearest neighbor and `1` indicates the linear
interpolation.
fill_mode: Points outside the boundaries of the inputs are filled
according to the given mode. Available methods are `"constant"`,
`"nearest"`, `"wrap"` and `"mirror"` and `"reflect"`. Defaults to
`"constant"`.
- `"constant"`: `(k k k k | a b c d | k k k k)`
The inputs is extended by filling all values beyond
the edge with the same constant value k specified by
`fill_value`.
- `"nearest"`: `(a a a a | a b c d | d d d d)`
The inputs is extended by the nearest pixel.
- `"wrap"`: `(a b c d | a b c d | a b c d)`
The inputs is extended by wrapping around to the opposite edge.
- `"mirror"`: `(c d c b | a b c d | c b a b)`
The inputs is extended by mirroring about the edge.
- `"reflect"`: `(d c b a | a b c d | d c b a)`
The inputs is extended by reflecting about the edge of the last
pixel.
fill_value: Value used for points outside the boundaries of the inputs
if `fill_mode="constant"`. Defaults to `0`.
Returns:
Output input or batch of inputs.
"""
if any_symbolic_tensors((inputs, coordinates)):
return MapCoordinates(
order,
fill_mode,
fill_value,
).symbolic_call(inputs, coordinates)
return backend.image.map_coordinates(
inputs,
coordinates,
order,
fill_mode,
fill_value,
)
| MapCoordinates |
python | sympy__sympy | sympy/series/sequences.py | {
"start": 12320,
"end": 13279
} | class ____(SeqBase):
"""Sequence expression class.
Various sequences should inherit from this class.
Examples
========
>>> from sympy.series.sequences import SeqExpr
>>> from sympy.abc import x
>>> from sympy import Tuple
>>> s = SeqExpr(Tuple(1, 2, 3), Tuple(x, 0, 10))
>>> s.gen
(1, 2, 3)
>>> s.interval
Interval(0, 10)
>>> s.length
11
See Also
========
sympy.series.sequences.SeqPer
sympy.series.sequences.SeqFormula
"""
@property
def gen(self):
return self.args[0]
@property
def interval(self):
return Interval(self.args[1][1], self.args[1][2])
@property
def start(self):
return self.interval.inf
@property
def stop(self):
return self.interval.sup
@property
def length(self):
return self.stop - self.start + 1
@property
def variables(self):
return (self.args[1][0],)
| SeqExpr |
python | protocolbuffers__protobuf | python/google/protobuf/internal/python_message.py | {
"start": 24938,
"end": 55685
} | class ____(property):
__slots__ = ('DESCRIPTOR',)
def __init__(self, descriptor, getter, setter, doc):
property.__init__(self, getter, setter, doc=doc)
self.DESCRIPTOR = descriptor
def _AddPropertiesForRepeatedField(field, cls):
"""Adds a public property for a "repeated" protocol message field. Clients
can use this property to get the value of the field, which will be either a
RepeatedScalarFieldContainer or RepeatedCompositeFieldContainer (see
below).
Note that when clients add values to these containers, we perform
type-checking in the case of repeated scalar fields, and we also set any
necessary "has" bits as a side-effect.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
# We define a setter just so we can throw an exception with a more
# helpful error message.
def setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % proto_field_name)
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedScalarField(field, cls):
  """Adds a public property for a nonrepeated, scalar protocol message field.

  Clients can use this property to get and directly set the value of the field.
  Note that when the client sets the value of a field by using this property,
  all necessary "has" bits are set as a side-effect, and we also perform
  type-checking.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)
  # Resolved once here so the per-access getter/setter closures stay cheap.
  type_checker = type_checkers.GetTypeChecker(field)
  default_value = field.default_value

  def getter(self):
    # TODO: This may be broken since there may not be
    # default_value. Combine with has_default_value somehow.
    return self._fields.get(field, default_value)

  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  def field_setter(self, new_value):
    # pylint: disable=protected-access
    # Testing the value for truthiness captures all of the implicit presence
    # defaults (0, 0.0, enum 0, and False), except for -0.0.
    try:
      new_value = type_checker.CheckValue(new_value)
    except TypeError as e:
      raise TypeError(
          'Cannot set %s to %.1024r: %s' % (field.full_name, new_value, e))
    if not field.has_presence and decoder.IsDefaultScalarValue(new_value):
      # Implicit-presence fields holding their default are removed from
      # _fields entirely, mirroring the wire format (defaults not serialized).
      self._fields.pop(field, None)
    else:
      self._fields[field] = new_value
    # Check _cached_byte_size_dirty inline to improve performance, since scalar
    # setters are called frequently.
    if not self._cached_byte_size_dirty:
      self._Modified()

  if field.containing_oneof:
    def setter(self, new_value):
      # Oneof members additionally record themselves as the active member.
      field_setter(self, new_value)
      self._UpdateOneofState(field)
  else:
    setter = field_setter

  setter.__module__ = None
  setter.__doc__ = 'Setter for %s.' % proto_field_name

  # Add a property to encapsulate the getter/setter.
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
  """Adds a public property for a nonrepeated, composite protocol message field.

  A composite field is a "group" or "message" field.

  Clients can use this property to get the value of the field, but cannot
  assign to the property directly (except for a few well-known types below).

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  # TODO: Remove duplication with similar method
  # for non-repeated scalars.
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)

  def getter(self):
    field_value = self._fields.get(field)
    if field_value is None:
      # Construct a new object to represent this field.
      field_value = field._default_constructor(self)

      # Atomically check if another thread has preempted us and, if not, swap
      # in the new object we just created. If someone has preempted us, we
      # take that object and discard ours.
      # WARNING: We are relying on setdefault() being atomic. This is true
      #   in CPython but we haven't investigated others. This warning appears
      #   in several other locations in this file.
      field_value = self._fields.setdefault(field, field_value)
    return field_value

  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  # We define a setter just so we can throw an exception with a more
  # helpful error message.
  def setter(self, new_value):
    # Well-known types support direct assignment from natural Python values
    # (datetime, timedelta, dict, list); anything else rejects assignment.
    if field.message_type.full_name == 'google.protobuf.Timestamp':
      getter(self)
      self._fields[field].FromDatetime(new_value)
    elif field.message_type.full_name == 'google.protobuf.Duration':
      getter(self)
      self._fields[field].FromTimedelta(new_value)
    elif field.message_type.full_name == _StructFullTypeName:
      getter(self)
      self._fields[field].Clear()
      self._fields[field].update(new_value)
    elif field.message_type.full_name == _ListValueFullTypeName:
      getter(self)
      self._fields[field].Clear()
      self._fields[field].extend(new_value)
    else:
      raise AttributeError(
          'Assignment not allowed to composite field '
          '"%s" in protocol message object.' % proto_field_name
      )

  # Add a property to encapsulate the getter.
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, _FieldProperty(field, getter, setter, doc=doc))
def _AddPropertiesForExtensions(descriptor, cls):
  """Adds NAME_FIELD_NUMBER class constants for every extension of this type.

  Args:
    descriptor: Descriptor of the message class being constructed.
    cls: The class we're constructing.
  """
  extensions = descriptor.extensions_by_name
  for extension_name, extension_field in extensions.items():
    constant_name = extension_name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, extension_field.number)
  # Removed a dead `pool = descriptor.file.pool` assignment that was never
  # used (leftover from removed extension registration code).
  # TODO: Migrate all users of these attributes to functions like
  #   pool.FindExtensionByNumber(descriptor).
def _AddStaticMethods(cls):
  """Attaches the static helpers RegisterExtension and FromString to cls."""

  def RegisterExtension(_):
    """no-op to keep generated code <=4.23 working with new runtimes."""
    # Originally removed in 5.26 (cl/595989309); retained as a no-op stub
    # for backwards compatibility with older generated code.

  def FromString(s):
    instance = cls()
    instance.MergeFromString(s)
    return instance

  cls.RegisterExtension = staticmethod(RegisterExtension)
  cls.FromString = staticmethod(FromString)
def _IsPresent(item):
  """Given a (FieldDescriptor, value) tuple from _fields, return true if the
  value should be included in the list returned by ListFields()."""
  descriptor, value = item
  if descriptor.is_repeated:
    # Repeated fields count as present iff they are non-empty.
    return bool(value)
  if descriptor.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    # Singular submessages carry their own presence bit.
    return value._is_present_in_parent
  # Any non-message scalar stored in _fields is present by construction.
  return True
def _AddListFieldsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs ListFields() on cls."""

  def ListFields(self):
    """Returns (FieldDescriptor, value) pairs for all present fields,
    ordered by field number."""
    present = [entry for entry in self._fields.items() if _IsPresent(entry)]
    present.sort(key=lambda entry: entry[0].number)
    return present

  cls.ListFields = ListFields
def _AddHasFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  # Precompute the names HasField() accepts: singular fields that track
  # explicit presence, plus oneof names (which report whether any member
  # of the oneof is currently set).
  hassable_fields = {}
  for field in message_descriptor.fields:
    if field.is_repeated:
      continue
    # For proto3, only submessages and fields inside a oneof have presence.
    if not field.has_presence:
      continue
    hassable_fields[field.name] = field

  # Has methods are supported for oneof descriptors.
  for oneof in message_descriptor.oneofs:
    hassable_fields[oneof.name] = oneof

  def HasField(self, field_name):
    try:
      field = hassable_fields[field_name]
    except KeyError as exc:
      # NOTE(review): this message wording is garbled upstream; kept verbatim
      # since callers/tests may match on it.
      raise ValueError('Protocol message %s has no non-repeated field "%s" '
                       'nor has presence is not available for this field.' % (
                           message_descriptor.full_name, field_name)) from exc

    if isinstance(field, descriptor_mod.OneofDescriptor):
      try:
        # A oneof "has" a value iff its currently active member is set.
        return HasField(self, self._oneofs[field].name)
      except KeyError:
        return False
    else:
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        value = self._fields.get(field)
        return value is not None and value._is_present_in_parent
      else:
        # Explicit-presence scalars are present iff stored in _fields at all.
        return field in self._fields

  cls.HasField = HasField
def _AddClearFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def ClearField(self, field_name):
    try:
      field = message_descriptor.fields_by_name[field_name]
    except KeyError:
      try:
        # The name may refer to a oneof: clearing a oneof clears whichever
        # member is currently active (a no-op when none is set).
        field = message_descriptor.oneofs_by_name[field_name]
        if field in self._oneofs:
          field = self._oneofs[field]
        else:
          return
      except KeyError:
        raise ValueError('Protocol message %s has no "%s" field.' %
                         (message_descriptor.name, field_name))

    if field in self._fields:
      # To match the C++ implementation, we need to invalidate iterators
      # for map fields when ClearField() happens.
      if hasattr(self._fields[field], 'InvalidateIterators'):
        self._fields[field].InvalidateIterators()

      # Note: If the field is a sub-message, its listener will still point
      # at us. That's fine, because the worst that can happen is that it
      # will call _Modified() and invalidate our byte size. Big deal.
      del self._fields[field]

      if self._oneofs.get(field.containing_oneof, None) is field:
        del self._oneofs[field.containing_oneof]

    # Always call _Modified() -- even if nothing was changed, this is
    # a mutating method, and thus calling it should cause the field to become
    # present in the parent message.
    self._Modified()

  cls.ClearField = ClearField
def _AddClearExtensionMethod(cls):
  """Helper for _AddMessageMethods(): installs ClearExtension() on cls."""

  def ClearExtension(self, field_descriptor):
    """Removes the given extension's value from this message, if present."""
    extension_dict._VerifyExtensionHandle(self, field_descriptor)

    # Similar to ClearField(), above.
    self._fields.pop(field_descriptor, None)
    self._Modified()

  cls.ClearExtension = ClearExtension
def _AddHasExtensionMethod(cls):
  """Helper for _AddMessageMethods()."""

  def HasExtension(self, field_descriptor):
    """Returns True if the given singular extension is set on this message."""
    extension_dict._VerifyExtensionHandle(self, field_descriptor)
    if field_descriptor.is_repeated:
      # Repeated extensions have no notion of presence; reject like C++.
      raise KeyError('"%s" is repeated.' % field_descriptor.full_name)

    if field_descriptor.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      value = self._fields.get(field_descriptor)
      return value is not None and value._is_present_in_parent
    else:
      return field_descriptor in self._fields

  cls.HasExtension = HasExtension
def _InternalUnpackAny(msg):
  """Unpacks Any message and returns the unpacked message.

  This internal method is different from public Any Unpack method which takes
  the target message as argument. _InternalUnpackAny method does not have
  target message type and need to find the message type in descriptor pool.

  Args:
    msg: An Any message to be unpacked.

  Returns:
    The unpacked message, or None if the type URL is empty or cannot be
    resolved in the default descriptor pool.
  """
  # TODO: Don't use the factory of generated messages.
  # To make Any work with custom factories, use the message factory of the
  # parent message.
  # pylint: disable=g-import-not-at-top
  from google.protobuf import symbol_database
  factory = symbol_database.Default()

  type_url = msg.type_url

  if not type_url:
    return None

  # TODO: For now we just strip the hostname. Better logic will be
  # required.
  type_name = type_url.split('/')[-1]
  descriptor = factory.pool.FindMessageTypeByName(type_name)

  if descriptor is None:
    return None

  # Unable to import message_factory at top because of circular import.
  # pylint: disable=g-import-not-at-top
  from google.protobuf import message_factory
  message_class = message_factory.GetMessageClass(descriptor)
  message = message_class()

  message.ParseFromString(msg.value)
  return message
def _AddEqualsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def __eq__(self, other):
    # ListValue and Struct additionally compare equal to their natural
    # Python counterparts (list / dict).
    if self.DESCRIPTOR.full_name == _ListValueFullTypeName and isinstance(
        other, list
    ):
      return self._internal_compare(other)
    if self.DESCRIPTOR.full_name == _StructFullTypeName and isinstance(
        other, dict
    ):
      return self._internal_compare(other)

    if (not isinstance(other, message_mod.Message) or
        other.DESCRIPTOR != self.DESCRIPTOR):
      return NotImplemented

    if self is other:
      return True

    if self.DESCRIPTOR.full_name == _AnyFullTypeName:
      # Any messages compare by their unpacked payloads when both unpack.
      any_a = _InternalUnpackAny(self)
      any_b = _InternalUnpackAny(other)
      if any_a and any_b:
        return any_a == any_b

    if not self.ListFields() == other.ListFields():
      return False

    # TODO: Fix UnknownFieldSet to consider MessageSet extensions,
    # then use it for the comparison.
    # Unknown fields are compared order-insensitively (sorted copies).
    unknown_fields = list(self._unknown_fields)
    unknown_fields.sort()
    other_unknown_fields = list(other._unknown_fields)
    other_unknown_fields.sort()
    return unknown_fields == other_unknown_fields

  cls.__eq__ = __eq__
def _AddStrMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): str(msg) is the text-format dump."""

  def __str__(self):
    # Delegate to the canonical text-format renderer.
    return text_format.MessageToString(self)

  cls.__str__ = __str__
def _AddReprMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): repr(msg) matches str(msg)."""

  def __repr__(self):
    # Same rendering as __str__: the text-format dump of the message.
    return text_format.MessageToString(self)

  cls.__repr__ = __repr__
def _AddUnicodeMethod(unused_message_descriptor, cls):
  """Helper for _AddMessageMethods(): Python 2-era __unicode__ support."""

  def __unicode__(self):
    # Render as UTF-8 text format, then decode back to a unicode string.
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')

  cls.__unicode__ = __unicode__
def _AddContainsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs the `in` operator on cls.

  Struct and ListValue get container-like semantics; every other message
  type delegates to HasField().
  """
  full_name = message_descriptor.full_name
  if full_name == 'google.protobuf.Struct':
    def __contains__(self, key):
      # Struct behaves like a dict keyed by its `fields` map.
      return key in self.fields
  elif full_name == 'google.protobuf.ListValue':
    def __contains__(self, value):
      # ListValue behaves like a list of its items.
      return value in self.items()
  else:
    def __contains__(self, field):
      # Generic messages: `name in msg` means "the field is set".
      return self.HasField(field)

  cls.__contains__ = __contains__
def _BytesForNonRepeatedElement(value, field_number, field_type):
  """Returns the number of bytes needed to serialize a non-repeated element.

  The returned byte count includes space for tag information and any
  other additional space associated with serializing value.

  Args:
    value: Value we're serializing.
    field_number: Field number of this value. (Since the field number
      is stored as part of a varint-encoded tag, this has an impact
      on the total bytes required to serialize the value).
    field_type: The type of the field. One of the TYPE_* constants
      within FieldDescriptor.

  Raises:
    message_mod.EncodeError: if field_type is not a known TYPE_* constant.
  """
  try:
    # Keep only the table lookup inside the try: a KeyError raised from
    # within the sizer itself must not be misreported as an unknown type.
    fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
  except KeyError:
    raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
  return fn(field_number, value)
def _AddByteSizeMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def ByteSize(self):
    """Returns the serialized size of this message in bytes, reusing a
    cached value when nothing changed since the last computation."""
    if not self._cached_byte_size_dirty:
      return self._cached_byte_size

    size = 0
    descriptor = self.DESCRIPTOR
    if descriptor._is_map_entry:
      # Fields of map entry should always be serialized.
      key_field = descriptor.fields_by_name['key']
      _MaybeAddEncoder(cls, key_field)
      size = key_field._sizer(self.key)
      value_field = descriptor.fields_by_name['value']
      _MaybeAddEncoder(cls, value_field)
      size += value_field._sizer(self.value)
    else:
      for field_descriptor, field_value in self.ListFields():
        _MaybeAddEncoder(cls, field_descriptor)
        size += field_descriptor._sizer(field_value)
      # Unknown fields contribute their raw tag + payload bytes.
      for tag_bytes, value_bytes in self._unknown_fields:
        size += len(tag_bytes) + len(value_bytes)

    self._cached_byte_size = size
    self._cached_byte_size_dirty = False
    # Children are now accounted for; _Modified() re-dirties on any change.
    self._listener_for_children.dirty = False
    return size

  cls.ByteSize = ByteSize
def _AddSerializeToStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs SerializeToString() on cls."""

  def SerializeToString(self, **kwargs):
    """Serializes the message, raising EncodeError if any required field
    is unset."""
    if self.IsInitialized():
      return self.SerializePartialToString(**kwargs)
    # Missing required fields: report which ones.
    raise message_mod.EncodeError(
        'Message %s is missing required fields: %s' % (
            self.DESCRIPTOR.full_name,
            ','.join(self.FindInitializationErrors())))

  cls.SerializeToString = SerializeToString
def _AddSerializePartialToStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def SerializePartialToString(self, **kwargs):
    """Serializes the message without checking required-field presence."""
    out = BytesIO()
    self._InternalSerialize(out.write, **kwargs)
    return out.getvalue()
  cls.SerializePartialToString = SerializePartialToString

  def InternalSerialize(self, write_bytes, deterministic=None):
    """Writes this message's wire encoding via write_bytes.

    deterministic=None defers to the implementation-wide default for
    deterministic (e.g. sorted-map) serialization.
    """
    if deterministic is None:
      deterministic = (
          api_implementation.IsPythonDefaultSerializationDeterministic())
    else:
      deterministic = bool(deterministic)

    descriptor = self.DESCRIPTOR
    if descriptor._is_map_entry:
      # Fields of map entry should always be serialized.
      key_field = descriptor.fields_by_name['key']
      _MaybeAddEncoder(cls, key_field)
      key_field._encoder(write_bytes, self.key, deterministic)
      value_field = descriptor.fields_by_name['value']
      _MaybeAddEncoder(cls, value_field)
      value_field._encoder(write_bytes, self.value, deterministic)
    else:
      for field_descriptor, field_value in self.ListFields():
        _MaybeAddEncoder(cls, field_descriptor)
        field_descriptor._encoder(write_bytes, field_value, deterministic)
      # Unknown fields are re-emitted verbatim so round-tripping preserves
      # data written by newer schema versions.
      for tag_bytes, value_bytes in self._unknown_fields:
        write_bytes(tag_bytes)
        write_bytes(value_bytes)
  cls._InternalSerialize = InternalSerialize
def _AddMergeFromStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""

  def MergeFromString(self, serialized):
    serialized = memoryview(serialized)
    length = len(serialized)
    try:
      if self._InternalParse(serialized, 0, length) != length:
        # The only reason _InternalParse would return early is if it
        # encountered an end-group tag.
        raise message_mod.DecodeError('Unexpected end-group tag.')
    except (IndexError, TypeError):
      # Now ord(buf[p:p+1]) == ord('') gets TypeError.
      raise message_mod.DecodeError('Truncated message.')
    except struct.error as e:
      raise message_mod.DecodeError(e)
    return length   # Return this for legacy reasons.
  cls.MergeFromString = MergeFromString

  # Captured once per class: tag bytes -> (descriptor, is_packed), and the
  # special-cased MessageSet tag bytes -> (decoder, descriptor).
  fields_by_tag = cls._fields_by_tag
  message_set_decoders_by_tag = cls._message_set_decoders_by_tag

  def InternalParse(self, buffer, pos, end, current_depth=0):
    """Create a message from serialized bytes.

    Args:
      self: Message, instance of the proto message object.
      buffer: memoryview of the serialized data.
      pos: int, position to start in the serialized data.
      end: int, end position of the serialized data.

    Returns:
      Message object.
    """
    # Guard against internal misuse, since this function is called internally
    # quite extensively, and its easy to accidentally pass bytes.
    assert isinstance(buffer, memoryview)
    self._Modified()
    field_dict = self._fields
    while pos != end:
      (tag_bytes, new_pos) = decoder.ReadTag(buffer, pos)
      field_decoder, field_des = message_set_decoders_by_tag.get(
          tag_bytes, (None, None)
      )
      if field_decoder:
        pos = field_decoder(buffer, new_pos, end, self, field_dict)
        continue
      field_des, is_packed = fields_by_tag.get(tag_bytes, (None, None))
      if field_des is None:
        # Unknown field: keep the raw bytes so serialization round-trips.
        if not self._unknown_fields:  # pylint: disable=protected-access
          self._unknown_fields = []  # pylint: disable=protected-access
        field_number, wire_type = decoder.DecodeTag(tag_bytes)
        if field_number == 0:
          raise message_mod.DecodeError('Field number 0 is illegal.')
        (data, new_pos) = decoder._DecodeUnknownField(
            buffer, new_pos, end, field_number, wire_type
        )  # pylint: disable=protected-access
        if new_pos == -1:
          # Hit an end-group tag: stop here and let the caller consume it.
          return pos
        self._unknown_fields.append(
            (tag_bytes, buffer[pos + len(tag_bytes) : new_pos].tobytes())
        )
        pos = new_pos
      else:
        _MaybeAddDecoder(cls, field_des)
        field_decoder = field_des._decoders[is_packed]
        pos = field_decoder(
            buffer, new_pos, end, self, field_dict, current_depth
        )
        if field_des.containing_oneof:
          self._UpdateOneofState(field_des)
    return pos
  cls._InternalParse = InternalParse
def _AddIsInitializedMethod(message_descriptor, cls):
  """Adds the IsInitialized and FindInitializationError methods to the
  protocol message class."""

  # proto2 `required` fields of this message, checked on the fast path below.
  required_fields = [field for field in message_descriptor.fields
                     if field.is_required]

  def IsInitialized(self, errors=None):
    """Checks if all required fields of a message are set.

    Args:
      errors:  A list which, if provided, will be populated with the field
               paths of all missing required fields.

    Returns:
      True iff the specified message has all required fields set.
    """

    # Performance is critical so we avoid HasField() and ListFields().

    for field in required_fields:
      if (field not in self._fields or
          (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
           not self._fields[field]._is_present_in_parent)):
        if errors is not None:
          errors.extend(self.FindInitializationErrors())
        return False

    for field, value in list(self._fields.items()):  # dict can change size!
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.is_repeated:
          if (field.message_type._is_map_entry):
            # Map values are checked via their own entry messages; scalar
            # maps cannot be uninitialized.
            continue
          for element in value:
            if not element.IsInitialized():
              if errors is not None:
                errors.extend(self.FindInitializationErrors())
              return False
        elif value._is_present_in_parent and not value.IsInitialized():
          if errors is not None:
            errors.extend(self.FindInitializationErrors())
          return False

    return True

  cls.IsInitialized = IsInitialized

  def FindInitializationErrors(self):
    """Finds required fields which are not initialized.

    Returns:
      A list of strings.  Each string is a path to an uninitialized field from
      the top-level message, e.g. "foo.bar[5].baz".
    """

    errors = []  # simplify things

    for field in required_fields:
      if not self.HasField(field.name):
        errors.append(field.name)

    for field, value in self.ListFields():
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.is_extension:
          # Extensions are reported with their full name in parentheses.
          name = '(%s)' % field.full_name
        else:
          name = field.name

        if _IsMapField(field):
          if _IsMessageMapField(field):
            for key in value:
              element = value[key]
              prefix = '%s[%s].' % (name, key)
              sub_errors = element.FindInitializationErrors()
              errors += [prefix + error for error in sub_errors]
          else:
            # ScalarMaps can't have any initialization errors.
            pass
        elif field.is_repeated:
          for i in range(len(value)):
            element = value[i]
            prefix = '%s[%d].' % (name, i)
            sub_errors = element.FindInitializationErrors()
            errors += [prefix + error for error in sub_errors]
        else:
          prefix = name + '.'
          sub_errors = value.FindInitializationErrors()
          errors += [prefix + error for error in sub_errors]

    return errors

  cls.FindInitializationErrors = FindInitializationErrors
def _FullyQualifiedClassName(klass):
  """Returns a dotted "module.qualname" string for *klass*.

  Builtins (and classes with no module) are returned without a prefix.
  """
  name = getattr(klass, '__qualname__', klass.__name__)
  module = klass.__module__
  if module not in (None, 'builtins', '__builtin__'):
    return module + '.' + name
  return name
def _AddMergeFromMethod(cls):
  """Helper for _AddMessageMethods(): installs MergeFrom() on cls."""
  CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE

  def MergeFrom(self, msg):
    if not isinstance(msg, cls):
      raise TypeError(
          'Parameter to MergeFrom() must be instance of same class: '
          'expected %s got %s.' % (_FullyQualifiedClassName(cls),
                                   _FullyQualifiedClassName(msg.__class__)))

    assert msg is not self
    self._Modified()

    fields = self._fields

    for field, value in msg._fields.items():
      if field.is_repeated:
        # Repeated fields: append msg's elements to ours.
        field_value = fields.get(field)
        if field_value is None:
          # Construct a new object to represent this field.
          field_value = field._default_constructor(self)
          fields[field] = field_value
        field_value.MergeFrom(value)
      elif field.cpp_type == CPPTYPE_MESSAGE:
        if value._is_present_in_parent:
          # Submessages merge recursively.
          field_value = fields.get(field)
          if field_value is None:
            # Construct a new object to represent this field.
            field_value = field._default_constructor(self)
            fields[field] = field_value
          field_value.MergeFrom(value)
      else:
        # Set scalars overwrite ours and update oneof state.
        self._fields[field] = value
        if field.containing_oneof:
          self._UpdateOneofState(field)

    if msg._unknown_fields:
      if not self._unknown_fields:
        self._unknown_fields = []
      self._unknown_fields.extend(msg._unknown_fields)

  cls.MergeFrom = MergeFrom
def _AddWhichOneofMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs WhichOneof() on cls."""

  def WhichOneof(self, oneof_name):
    """Returns the name of the currently set field inside a oneof, or None."""
    if oneof_name not in message_descriptor.oneofs_by_name:
      raise ValueError(
          'Protocol message has no oneof "%s" field.' % oneof_name)
    oneof_descriptor = message_descriptor.oneofs_by_name[oneof_name]
    # The recorded member only counts if it is actually present.
    active = self._oneofs.get(oneof_descriptor, None)
    if active is None or not self.HasField(active.name):
      return None
    return active.name

  cls.WhichOneof = WhichOneof
def _Clear(self):
  """Resets the message: drops all fields, oneof state, and unknown fields."""
  # Clear fields.
  self._fields = {}
  self._oneofs = {}
  self._unknown_fields = ()
  # Invalidate cached byte sizes and notify our listener of the change.
  self._Modified()
def _UnknownFields(self):
  """Removed accessor; kept only to direct callers to the replacement API."""
  # Fixed typo in the error message: "feaure" -> "feature".
  raise NotImplementedError('Please use the add-on feature '
                            'unknown_fields.UnknownFieldSet(message) in '
                            'unknown_fields.py instead.')
def _DiscardUnknownFields(self):
  """Drops this message's unknown fields and, recursively, those of every
  submessage (including message-valued map entries)."""
  self._unknown_fields = []
  for field, value in self.ListFields():
    if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      if _IsMapField(field):
        if _IsMessageMapField(field):
          for key in value:
            value[key].DiscardUnknownFields()
        # Scalar maps hold no submessages, so nothing to discard.
      elif field.is_repeated:
        for sub_message in value:
          sub_message.DiscardUnknownFields()
      else:
        value.DiscardUnknownFields()
def _SetListener(self, listener):
  """Installs *listener*; None installs a no-op NullMessageListener."""
  self._listener = (
      message_listener_mod.NullMessageListener()
      if listener is None else listener)
def _AddMessageMethods(message_descriptor, cls):
  """Adds implementations of all Message methods to cls."""
  _AddListFieldsMethod(message_descriptor, cls)
  _AddHasFieldMethod(message_descriptor, cls)
  _AddClearFieldMethod(message_descriptor, cls)
  # Extension accessors only exist on extendable message types.
  if message_descriptor.is_extendable:
    _AddClearExtensionMethod(cls)
    _AddHasExtensionMethod(cls)
  _AddEqualsMethod(message_descriptor, cls)
  _AddStrMethod(message_descriptor, cls)
  _AddReprMethod(message_descriptor, cls)
  _AddUnicodeMethod(message_descriptor, cls)
  _AddContainsMethod(message_descriptor, cls)
  _AddByteSizeMethod(message_descriptor, cls)
  _AddSerializeToStringMethod(message_descriptor, cls)
  _AddSerializePartialToStringMethod(message_descriptor, cls)
  _AddMergeFromStringMethod(message_descriptor, cls)
  _AddIsInitializedMethod(message_descriptor, cls)
  _AddMergeFromMethod(cls)
  _AddWhichOneofMethod(message_descriptor, cls)
  # Adds methods which do not depend on cls.
  cls.Clear = _Clear
  cls.DiscardUnknownFields = _DiscardUnknownFields
  cls._SetListener = _SetListener
def _AddPrivateHelperMethods(message_descriptor, cls):
  """Adds implementation of private helper methods to cls."""

  def Modified(self):
    """Sets the _cached_byte_size_dirty bit to true,
    and propagates this to our listener iff this was a state change.
    """
    # Note:  Some callers check _cached_byte_size_dirty before calling
    #   _Modified() as an extra optimization.  So, if this method is ever
    #   changed such that it does stuff even when _cached_byte_size_dirty is
    #   already true, the callers need to be updated.
    if not self._cached_byte_size_dirty:
      self._cached_byte_size_dirty = True
      self._listener_for_children.dirty = True
      # Becoming dirty also marks this message present in its parent.
      self._is_present_in_parent = True
      self._listener.Modified()

  def _UpdateOneofState(self, field):
    """Sets field as the active field in its containing oneof.

    Will also delete currently active field in the oneof, if it is different
    from the argument. Does not mark the message as modified.
    """
    other_field = self._oneofs.setdefault(field.containing_oneof, field)
    if other_field is not field:
      # Switching oneof members: drop the previously active field's value.
      del self._fields[other_field]
      self._oneofs[field.containing_oneof] = field

  cls._Modified = Modified
  # SetInParent is the public alias of _Modified.
  cls.SetInParent = Modified
  cls._UpdateOneofState = _UpdateOneofState
| _FieldProperty |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/utils/utils.py | {
"start": 30468,
"end": 32780
} | class ____(InfoJsonEncodable):
"""Defines encoding BaseOperator/AbstractOperator object to JSON."""
renames = {
"_BaseOperator__from_mapped": "mapped",
"_downstream_task_ids": "downstream_task_ids",
"_upstream_task_ids": "upstream_task_ids",
"_is_setup": "is_setup",
"_is_teardown": "is_teardown",
}
includes = [
"deferrable",
"depends_on_past",
"downstream_task_ids",
"execution_timeout",
"executor_config",
"ignore_first_depends_on_past",
"max_active_tis_per_dag",
"max_active_tis_per_dagrun",
"max_retry_delay",
"multiple_outputs",
"owner",
"priority_weight",
"queue",
"retries",
"retry_exponential_backoff",
"run_as_user",
"sla",
"task_id",
"trigger_rule",
"upstream_task_ids",
"wait_for_downstream",
"wait_for_past_depends_before_skipping",
# Operator-specific useful attributes
"trigger_dag_id", # TriggerDagRunOperator
"trigger_run_id", # TriggerDagRunOperator
"external_dag_id", # ExternalTaskSensor and ExternalTaskMarker (if run, as it's EmptyOperator)
"external_task_id", # ExternalTaskSensor and ExternalTaskMarker (if run, as it's EmptyOperator)
"external_task_ids", # ExternalTaskSensor
"external_task_group_id", # ExternalTaskSensor
"external_dates_filter", # ExternalTaskSensor
"logical_date", # AF 3 ExternalTaskMarker (if run, as it's EmptyOperator)
"execution_date", # AF 2 ExternalTaskMarker (if run, as it's EmptyOperator)
]
casts = {
"operator_class": lambda task: task.task_type,
"operator_class_path": lambda task: get_fully_qualified_class_name(task),
"task_group": lambda task: (
TaskGroupInfo(task.task_group)
if hasattr(task, "task_group") and getattr(task.task_group, "_group_id", None)
else None
),
"inlets": lambda task: [AssetInfo(i) for i in task.inlets if isinstance(i, Asset)],
"outlets": lambda task: [AssetInfo(o) for o in task.outlets if isinstance(o, Asset)],
"operator_provider_version": lambda task: get_operator_provider_version(task),
}
| TaskInfo |
python | django__django | tests/responses/test_cookie.py | {
"start": 317,
"end": 5274
} | class ____(SimpleTestCase):
def test_near_expiration(self):
"""Cookie will expire when a near expiration time is provided."""
response = HttpResponse()
# There's a timing weakness in this test; The expected result for
# max-age requires that there be a very slight difference between the
# evaluated expiration time and the time evaluated in set_cookie(). If
# this difference doesn't exist, the cookie time will be 1 second
# larger. The sleep guarantees that there will be a time difference.
expires = datetime.now(tz=UTC).replace(tzinfo=None) + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie("datetime", expires=expires)
datetime_cookie = response.cookies["datetime"]
self.assertEqual(datetime_cookie["max-age"], 10)
def test_aware_expiration(self):
"""set_cookie() accepts an aware datetime as expiration time."""
response = HttpResponse()
expires = datetime.now(tz=UTC) + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie("datetime", expires=expires)
datetime_cookie = response.cookies["datetime"]
self.assertEqual(datetime_cookie["max-age"], 10)
def test_create_cookie_after_deleting_cookie(self):
"""Setting a cookie after deletion clears the expiry date."""
response = HttpResponse()
response.set_cookie("c", "old-value")
self.assertEqual(response.cookies["c"]["expires"], "")
response.delete_cookie("c")
self.assertEqual(
response.cookies["c"]["expires"], "Thu, 01 Jan 1970 00:00:00 GMT"
)
response.set_cookie("c", "new-value")
self.assertEqual(response.cookies["c"]["expires"], "")
def test_far_expiration(self):
"""Cookie will expire when a distant expiration time is provided."""
response = HttpResponse()
future_datetime = datetime(date.today().year + 2, 1, 1, 4, 5, 6, tzinfo=UTC)
response.set_cookie("datetime", expires=future_datetime)
datetime_cookie = response.cookies["datetime"]
self.assertIn(
datetime_cookie["expires"],
# assertIn accounts for slight time dependency (#23450)
(
format_datetime_rfc5322(future_datetime, usegmt=True),
format_datetime_rfc5322(future_datetime.replace(second=7), usegmt=True),
),
)
def test_max_age_expiration(self):
"""Cookie will expire if max_age is provided."""
response = HttpResponse()
set_cookie_time = time.time()
with freeze_time(set_cookie_time):
response.set_cookie("max_age", max_age=10)
max_age_cookie = response.cookies["max_age"]
self.assertEqual(max_age_cookie["max-age"], 10)
self.assertEqual(max_age_cookie["expires"], http_date(set_cookie_time + 10))
def test_max_age_int(self):
response = HttpResponse()
response.set_cookie("max_age", max_age=10.6)
self.assertEqual(response.cookies["max_age"]["max-age"], 10)
def test_max_age_timedelta(self):
response = HttpResponse()
response.set_cookie("max_age", max_age=timedelta(hours=1))
self.assertEqual(response.cookies["max_age"]["max-age"], 3600)
def test_max_age_with_expires(self):
response = HttpResponse()
msg = "'expires' and 'max_age' can't be used together."
with self.assertRaisesMessage(ValueError, msg):
response.set_cookie(
"max_age", expires=datetime(2000, 1, 1), max_age=timedelta(hours=1)
)
def test_httponly_cookie(self):
response = HttpResponse()
response.set_cookie("example", httponly=True)
example_cookie = response.cookies["example"]
self.assertIn(
"; %s" % cookies.Morsel._reserved["httponly"], str(example_cookie)
)
self.assertIs(example_cookie["httponly"], True)
def test_unicode_cookie(self):
"""HttpResponse.set_cookie() works with Unicode data."""
response = HttpResponse()
cookie_value = "清風"
response.set_cookie("test", cookie_value)
self.assertEqual(response.cookies["test"].value, cookie_value)
def test_samesite(self):
response = HttpResponse()
response.set_cookie("example", samesite="None")
self.assertEqual(response.cookies["example"]["samesite"], "None")
response.set_cookie("example", samesite="Lax")
self.assertEqual(response.cookies["example"]["samesite"], "Lax")
response.set_cookie("example", samesite="strict")
self.assertEqual(response.cookies["example"]["samesite"], "strict")
def test_invalid_samesite(self):
msg = 'samesite must be "lax", "none", or "strict".'
with self.assertRaisesMessage(ValueError, msg):
HttpResponse().set_cookie("example", samesite="invalid")
| SetCookieTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 546827,
"end": 547241
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("CreatedCommitContribution", graphql_name="node")
"""The item at the end of the edge."""
| CreatedCommitContributionEdge |
python | pydantic__pydantic | pydantic/functional_validators.py | {
"start": 9664,
"end": 18831
} | class ____:
"""!!! abstract "Usage Documentation"
[field *wrap* validators](../concepts/validators.md#field-wrap-validator)
A metadata class that indicates that a validation should be applied **around** the inner validation logic.
Attributes:
func: The validator function.
json_schema_input_type: The input type used to generate the appropriate
JSON Schema (in validation mode). The actual input type is `Any`.
```python
from datetime import datetime
from typing import Annotated
from pydantic import BaseModel, ValidationError, WrapValidator
def validate_timestamp(v, handler):
if v == 'now':
# we don't want to bother with further validation, just return the new value
return datetime.now()
try:
return handler(v)
except ValidationError:
# validation failed, in this case we want to return a default value
return datetime(2000, 1, 1)
MyTimestamp = Annotated[datetime, WrapValidator(validate_timestamp)]
class Model(BaseModel):
a: MyTimestamp
print(Model(a='now').a)
#> 2032-01-02 03:04:05.000006
print(Model(a='invalid').a)
#> 2000-01-01 00:00:00
```
"""
func: core_schema.NoInfoWrapValidatorFunction | core_schema.WithInfoWrapValidatorFunction
json_schema_input_type: Any = PydanticUndefined
def __get_pydantic_core_schema__(self, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
schema = handler(source_type)
input_schema = (
None
if self.json_schema_input_type is PydanticUndefined
else handler.generate_schema(self.json_schema_input_type)
)
info_arg = _inspect_validator(self.func, mode='wrap', type='field')
if info_arg:
func = cast(core_schema.WithInfoWrapValidatorFunction, self.func)
return core_schema.with_info_wrap_validator_function(
func,
schema=schema,
json_schema_input_schema=input_schema,
)
else:
func = cast(core_schema.NoInfoWrapValidatorFunction, self.func)
return core_schema.no_info_wrap_validator_function(
func,
schema=schema,
json_schema_input_schema=input_schema,
)
@classmethod
def _from_decorator(cls, decorator: _decorators.Decorator[_decorators.FieldValidatorDecoratorInfo]) -> Self:
return cls(
func=decorator.func,
json_schema_input_type=decorator.info.json_schema_input_type,
)
if TYPE_CHECKING:
class _OnlyValueValidatorClsMethod(Protocol):
def __call__(self, cls: Any, value: Any, /) -> Any: ...
class _V2ValidatorClsMethod(Protocol):
def __call__(self, cls: Any, value: Any, info: core_schema.ValidationInfo[Any], /) -> Any: ...
class _OnlyValueWrapValidatorClsMethod(Protocol):
def __call__(self, cls: Any, value: Any, handler: core_schema.ValidatorFunctionWrapHandler, /) -> Any: ...
class _V2WrapValidatorClsMethod(Protocol):
def __call__(
self,
cls: Any,
value: Any,
handler: core_schema.ValidatorFunctionWrapHandler,
info: core_schema.ValidationInfo[Any],
/,
) -> Any: ...
_V2Validator = Union[
_V2ValidatorClsMethod,
core_schema.WithInfoValidatorFunction,
_OnlyValueValidatorClsMethod,
core_schema.NoInfoValidatorFunction,
]
_V2WrapValidator = Union[
_V2WrapValidatorClsMethod,
core_schema.WithInfoWrapValidatorFunction,
_OnlyValueWrapValidatorClsMethod,
core_schema.NoInfoWrapValidatorFunction,
]
_PartialClsOrStaticMethod: TypeAlias = Union[classmethod[Any, Any, Any], staticmethod[Any, Any], partialmethod[Any]]
_V2BeforeAfterOrPlainValidatorType = TypeVar(
'_V2BeforeAfterOrPlainValidatorType',
bound=Union[_V2Validator, _PartialClsOrStaticMethod],
)
_V2WrapValidatorType = TypeVar('_V2WrapValidatorType', bound=Union[_V2WrapValidator, _PartialClsOrStaticMethod])
FieldValidatorModes: TypeAlias = Literal['before', 'after', 'wrap', 'plain']
@overload
def field_validator(
field: str,
/,
*fields: str,
mode: Literal['wrap'],
check_fields: bool | None = ...,
json_schema_input_type: Any = ...,
) -> Callable[[_V2WrapValidatorType], _V2WrapValidatorType]: ...
@overload
def field_validator(
field: str,
/,
*fields: str,
mode: Literal['before', 'plain'],
check_fields: bool | None = ...,
json_schema_input_type: Any = ...,
) -> Callable[[_V2BeforeAfterOrPlainValidatorType], _V2BeforeAfterOrPlainValidatorType]: ...
@overload
def field_validator(
field: str,
/,
*fields: str,
mode: Literal['after'] = ...,
check_fields: bool | None = ...,
) -> Callable[[_V2BeforeAfterOrPlainValidatorType], _V2BeforeAfterOrPlainValidatorType]: ...
def field_validator( # noqa: D417
field: str,
/,
*fields: str,
mode: FieldValidatorModes = 'after',
check_fields: bool | None = None,
json_schema_input_type: Any = PydanticUndefined,
) -> Callable[[Any], Any]:
"""!!! abstract "Usage Documentation"
[field validators](../concepts/validators.md#field-validators)
Decorate methods on the class indicating that they should be used to validate fields.
Example usage:
```python
from typing import Any
from pydantic import (
BaseModel,
ValidationError,
field_validator,
)
class Model(BaseModel):
a: str
@field_validator('a')
@classmethod
def ensure_foobar(cls, v: Any):
if 'foobar' not in v:
raise ValueError('"foobar" not found in a')
return v
print(repr(Model(a='this is foobar good')))
#> Model(a='this is foobar good')
try:
Model(a='snap')
except ValidationError as exc_info:
print(exc_info)
'''
1 validation error for Model
a
Value error, "foobar" not found in a [type=value_error, input_value='snap', input_type=str]
'''
```
For more in depth examples, see [Field Validators](../concepts/validators.md#field-validators).
Args:
*fields: The field names the validator should apply to.
mode: Specifies whether to validate the fields before or after validation.
check_fields: Whether to check that the fields actually exist on the model.
json_schema_input_type: The input type of the function. This is only used to generate
the appropriate JSON Schema (in validation mode) and can only specified
when `mode` is either `'before'`, `'plain'` or `'wrap'`.
Raises:
PydanticUserError:
- If the decorator is used without any arguments (at least one field name must be provided).
- If the provided field names are not strings.
- If `json_schema_input_type` is provided with an unsupported `mode`.
- If the decorator is applied to an instance method.
"""
if callable(field) or isinstance(field, classmethod):
raise PydanticUserError(
'The `@field_validator` decorator cannot be used without arguments, at least one field must be provided. '
"For example: `@field_validator('<field_name>', ...)`.",
code='decorator-missing-arguments',
)
if mode not in ('before', 'plain', 'wrap') and json_schema_input_type is not PydanticUndefined:
raise PydanticUserError(
f"`json_schema_input_type` can't be used when mode is set to {mode!r}",
code='validator-input-type',
)
if json_schema_input_type is PydanticUndefined and mode == 'plain':
json_schema_input_type = Any
fields = field, *fields
if not all(isinstance(field, str) for field in fields):
raise PydanticUserError(
'The provided field names to the `@field_validator` decorator should be strings. '
"For example: `@field_validator('<field_name_1>', '<field_name_2>', ...).`",
code='decorator-invalid-fields',
)
def dec(
f: Callable[..., Any] | staticmethod[Any, Any] | classmethod[Any, Any, Any],
) -> _decorators.PydanticDescriptorProxy[Any]:
if _decorators.is_instance_method_from_sig(f):
raise PydanticUserError(
'The `@field_validator` decorator cannot be applied to instance methods',
code='validator-instance-method',
)
# auto apply the @classmethod decorator
f = _decorators.ensure_classmethod_based_on_signature(f)
dec_info = _decorators.FieldValidatorDecoratorInfo(
fields=fields, mode=mode, check_fields=check_fields, json_schema_input_type=json_schema_input_type
)
return _decorators.PydanticDescriptorProxy(f, dec_info)
return dec
_ModelType = TypeVar('_ModelType')
_ModelTypeCo = TypeVar('_ModelTypeCo', covariant=True)
| WrapValidator |
python | walkccc__LeetCode | solutions/1044. Longest Duplicate Substring/1044.py | {
"start": 0,
"end": 1288
} | class ____:
def longestDupSubstring(self, s: str) -> str:
BASE = 26
HASH = 1_000_000_007
bestStart = -1
l = 1
r = len(s)
def val(c: str) -> int:
return ord(c) - ord('a')
# k := the length of the substring to be hashed
def getStart(k: int) -> int | None:
maxPow = pow(BASE, k - 1, HASH)
hashToStart = collections.defaultdict(list)
h = 0
# Compute the hash value of s[:k].
for i in range(k):
h = (h * BASE + val(s[i])) % HASH
hashToStart[h].append(0)
# Compute the rolling hash by Rabin Karp.
for i in range(k, len(s)):
startIndex = i - k + 1
h = (h - maxPow * val(s[i - k])) % HASH
h = (h * BASE + val(s[i])) % HASH
if h in hashToStart:
currSub = s[startIndex:startIndex + k]
for start in hashToStart[h]:
if s[start:start + k] == currSub:
return startIndex
hashToStart[h].append(startIndex)
while l < r:
m = (l + r) // 2
start: int | None = getStart(m)
if start:
bestStart = start
l = m + 1
else:
r = m
if bestStart == -1:
return ''
if getStart(l):
return s[bestStart:bestStart + l]
return s[bestStart:bestStart + l - 1]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/minimum-steps-to-convert-string-with-operations.py | {
"start": 68,
"end": 1501
} | class ____(object):
def minOperations(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
n = len(word1)
dp = [[0]*n for _ in xrange(n)]
for i in xrange(2*n-1):
cnt = collections.defaultdict(int)
curr = 1 # 1 for reversing
left, right = i//2, (i+1)//2
while 0 <= left and right < n:
for j in xrange(2):
x, y = word1[left], word2[right]
if x != y and (left != right or j == 0):
if cnt[y, x]:
cnt[y, x] -= 1
else:
cnt[x, y] += 1
curr += 1
right, left = left, right
dp[left][right] = curr
left -= 1
right += 1
dp2 = [float("inf")]*(n+1)
dp2[0] = 0
for i in xrange(n):
cnt = collections.defaultdict(int)
curr = 0
for j in reversed(xrange(i+1)):
x, y = word1[j], word2[j]
if x != y:
if cnt[y, x]:
cnt[y, x] -= 1
else:
cnt[x, y] += 1
curr += 1
dp2[i+1] = min(dp2[i+1], dp2[j]+min(curr, dp[j][i]))
return dp2[n]
| Solution |
python | Lightning-AI__lightning | src/lightning/fabric/strategies/model_parallel.py | {
"start": 2579,
"end": 14057
} | class ____(ParallelStrategy):
"""Enables user-defined parallelism applied to a model.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
Currently supports up to 2D parallelism. Specifically, it supports the combination of
Fully Sharded Data-Parallel 2 (FSDP2) with Tensor Parallelism (DTensor). These PyTorch APIs are currently still
experimental in PyTorch. Requires PyTorch 2.4 or newer.
Arguments:
parallelize_fn: A function that applies parallelisms to a module. The strategy will provide the
model and device mesh as input.
data_parallel_size: The number of devices within a data-parallel group. Defaults to ``"auto"``, which
sets this size to the number of nodes in the cluster.
tensor_parallel_size: The number of devices within a tensor-parallel group. Defaults to ``"auto"``, which
sets this size to the number of GPUs in a single node.
save_distributed_checkpoint: If ``True``, each rank saves its shard of weights and optimizer states to a file.
The checkpoint is a folder with as many files as the world size.
If ``False``, the full weights and optimizer states get assembled on rank 0 and saved to a single file.
"""
def __init__(
self,
parallelize_fn: Callable[[TModel, "DeviceMesh"], TModel],
data_parallel_size: Union[Literal["auto"], int] = "auto",
tensor_parallel_size: Union[Literal["auto"], int] = "auto",
save_distributed_checkpoint: bool = True,
process_group_backend: Optional[str] = None,
timeout: Optional[timedelta] = default_pg_timeout,
) -> None:
super().__init__()
if not _TORCH_GREATER_EQUAL_2_4:
raise ImportError(f"{type(self).__name__} requires PyTorch 2.4 or higher.")
self._parallelize_fn = parallelize_fn
self._data_parallel_size = data_parallel_size
self._tensor_parallel_size = tensor_parallel_size
self._num_nodes = 1
self._save_distributed_checkpoint = save_distributed_checkpoint
self._process_group_backend: Optional[str] = process_group_backend
self._timeout: Optional[timedelta] = timeout
self._backward_sync_control = _ParallelBackwardSyncControl()
self._device_mesh: Optional[DeviceMesh] = None
@property
def device_mesh(self) -> "DeviceMesh":
if self._device_mesh is None:
raise RuntimeError("Accessing the device mesh before processes have initialized is not allowed.")
return self._device_mesh
@property
@override
def checkpoint_io(self) -> CheckpointIO:
raise NotImplementedError(f"The `{type(self).__name__}` does not use the `CheckpointIO` plugin interface.")
@checkpoint_io.setter
@override
def checkpoint_io(self, io: CheckpointIO) -> None:
raise NotImplementedError(f"The `{type(self).__name__}` does not support setting a `CheckpointIO` plugin.")
@property
@override
def root_device(self) -> torch.device:
assert self.parallel_devices is not None
return self.parallel_devices[self.local_rank]
@property
def num_nodes(self) -> int:
return self._num_nodes
@num_nodes.setter
def num_nodes(self, num_nodes: int) -> None:
self._num_nodes = num_nodes
@property
def num_processes(self) -> int:
return len(self.parallel_devices) if self.parallel_devices is not None else 0
@property
@override
def distributed_sampler_kwargs(self) -> dict[str, Any]:
assert self.device_mesh is not None
data_parallel_mesh = self.device_mesh["data_parallel"]
return {"num_replicas": data_parallel_mesh.size(), "rank": data_parallel_mesh.get_local_rank()}
@property
def process_group_backend(self) -> Optional[str]:
return self._process_group_backend
@override
def _configure_launcher(self) -> None:
assert self.cluster_environment is not None
if not self.cluster_environment.creates_processes_externally:
self._launcher = _SubprocessScriptLauncher(self.cluster_environment, self.num_processes, self.num_nodes)
@override
def setup_environment(self) -> None:
super().setup_environment()
self._setup_distributed()
if self._data_parallel_size == "auto":
self._data_parallel_size = self.num_nodes
if self._tensor_parallel_size == "auto":
self._tensor_parallel_size = self.num_processes
self._device_mesh = _setup_device_mesh(
self._data_parallel_size, self._tensor_parallel_size, self.world_size, self.root_device
)
@override
def setup_module(self, module: Module) -> Module:
from torch.distributed.fsdp import FullyShardedDataParallel
if any(isinstance(mod, FullyShardedDataParallel) for mod in module.modules()):
raise TypeError(
"Found modules that are wrapped with `torch.distributed.fsdp.FullyShardedDataParallel`."
f" The `{self.__class__.__name__}` only supports the new FSDP2 APIs in PyTorch >= 2.4."
)
module = self._parallelize_fn(module, self.device_mesh) # type: ignore[arg-type]
if not isinstance(module, Module):
raise TypeError(
f"The `parallelize_fn` must return a `nn.Module` instance, but got: {type(module).__name__}"
)
_materialize_distributed_module(module, self.root_device)
return module
@override
def module_to_device(self, module: Module) -> None:
pass
@override
def module_init_context(self, empty_init: Optional[bool] = None) -> AbstractContextManager:
precision_init_ctx = self.precision.module_init_context()
stack = ExitStack()
if empty_init:
# Materializaton happens in `setup_module`
# TODO: Introduce `Fabric.materialize(module)` to give user control over materialization
stack.enter_context(torch.device("meta"))
stack.enter_context(precision_init_ctx)
return stack
@override
def all_reduce(
self, tensor: Tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = "mean"
) -> Tensor:
if isinstance(tensor, Tensor):
return _sync_ddp_if_available(tensor, group, reduce_op=reduce_op)
return tensor
@override
def barrier(self, *args: Any, **kwargs: Any) -> None:
if not _distributed_is_initialized():
return
if torch.distributed.get_backend() == "nccl":
torch.distributed.barrier(device_ids=[self.root_device.index])
else:
torch.distributed.barrier()
@override
def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
if not _distributed_is_initialized():
return obj
obj = [obj]
torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD)
return obj[0]
@override
def save_checkpoint(
self,
path: _PATH,
state: dict[str, Union[Module, Optimizer, Any]],
storage_options: Optional[Any] = None,
filter: Optional[dict[str, Callable[[str, Any], bool]]] = None,
) -> None:
"""Save model, optimizer, and other state to a checkpoint on disk.
If distributed checkpointing is enabled (default), the checkpoint gets saved as a directory containing one file
per process, with model- and optimizer shards stored per file. Additionally, it creates a metadata file
`meta.pt` with the rest of the user's state (only saved from rank 0).
If distributed checkpointing is disabled (``save_distributed_checkpoint=False``), the checkpoint will be
written to a single file containing the weights, optimizer state and other metadata.
"""
if storage_options is not None:
raise TypeError(
f"`{type(self).__name__}.save_checkpoint(..., storage_options=...)` is not supported because"
f" `{type(self).__name__}` does not use the `CheckpointIO`."
)
if filter is not None and self._save_distributed_checkpoint:
# https://github.com/pytorch/pytorch/issues/105379
raise NotImplementedError(
f"{type(self).__name__} doesn't support loading distributed filtered checkpoints,"
" so saving them is disabled."
)
# broadcast the path from rank 0 to ensure all the states are saved in a common path
path = Path(self.broadcast(path))
_save_checkpoint(
path=path,
state=state,
full_state_dict=(not self._save_distributed_checkpoint),
rank=self.global_rank,
filter=filter,
)
@override
def load_checkpoint(
self,
path: _PATH,
state: Optional[Union[Module, Optimizer, dict[str, Union[Module, Optimizer, Any]]]] = None,
strict: bool = True,
weights_only: Optional[bool] = None,
) -> dict[str, Any]:
"""Load the contents from a checkpoint and restore the state of the given objects."""
if not state:
raise ValueError(
f"Got {type(self).__name__}.load_checkpoint(..., state={state!r}) but a state with at least "
" a model instance to reload is required. Pass it in like so:"
f" {type(self).__name__}.load_checkpoint(..., state={{'model': model, ...}})"
)
# broadcast the path from rank 0 to ensure all the states are loaded from a common path
path = Path(self.broadcast(path))
if isinstance(state, Module):
_load_raw_module_state_from_path(path, module=state, world_size=self.world_size, strict=strict)
return {}
if isinstance(state, Optimizer):
raise NotImplementedError(
f"Loading a single optimizer object from a checkpoint is not supported yet with {type(self).__name__}."
)
return _load_checkpoint(path=path, state=state, strict=strict, weights_only=weights_only)
def _setup_distributed(self) -> None:
reset_seed()
self._set_world_ranks()
self._process_group_backend = self._get_process_group_backend()
assert self.cluster_environment is not None
kwargs: dict[str, Any] = {"timeout": self._timeout}
if _TORCH_GREATER_EQUAL_2_3:
kwargs["device_id"] = self.root_device if self.root_device.type != "cpu" else None
_init_dist_connection(self.cluster_environment, self._process_group_backend, **kwargs)
def _get_process_group_backend(self) -> str:
return self._process_group_backend or _get_default_process_group_backend_for_device(self.root_device)
def _set_world_ranks(self) -> None:
if self.cluster_environment is not None:
self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)
self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)
# `LightningEnvironment.set_global_rank` will do this too, but we cannot rely on that implementation detail
# additionally, for some implementations, the setter is a no-op, so it's safer to access the getter
rank_zero_only.rank = utils_rank_zero_only.rank = self.global_rank
| ModelParallelStrategy |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/auth_manager/avp/test_facade.py | {
"start": 2053,
"end": 13079
} | class ____:
def test_avp_client(self, facade):
assert hasattr(facade, "avp_client")
def test_avp_policy_store_id(self, facade):
assert hasattr(facade, "avp_policy_store_id")
@pytest.mark.parametrize(
("entity_id", "context", "user", "expected_entities", "expected_context", "avp_response", "expected"),
[
# User with groups with no permissions
(
None,
None,
test_user,
[
{
"identifier": {"entityType": "Airflow::User", "entityId": "test_user"},
"parents": [
{"entityType": "Airflow::Group", "entityId": "group1"},
{"entityType": "Airflow::Group", "entityId": "group2"},
],
},
{
"identifier": {"entityType": "Airflow::Group", "entityId": "group1"},
},
{
"identifier": {"entityType": "Airflow::Group", "entityId": "group2"},
},
],
None,
{"decision": "DENY"},
False,
),
# User with groups with permissions
(
"dummy_id",
None,
test_user,
[
{
"identifier": {"entityType": "Airflow::User", "entityId": "test_user"},
"parents": [
{"entityType": "Airflow::Group", "entityId": "group1"},
{"entityType": "Airflow::Group", "entityId": "group2"},
],
},
{
"identifier": {"entityType": "Airflow::Group", "entityId": "group1"},
},
{
"identifier": {"entityType": "Airflow::Group", "entityId": "group2"},
},
],
None,
{"decision": "ALLOW"},
True,
),
# User without group without permission
(
None,
None,
test_user_no_group,
[
{
"identifier": {"entityType": "Airflow::User", "entityId": "test_user_no_group"},
"parents": [],
},
],
None,
{"decision": "DENY"},
False,
),
# With context
(
"dummy_id",
{"context_param": {"string": "value"}},
test_user,
[
{
"identifier": {"entityType": "Airflow::User", "entityId": "test_user"},
"parents": [
{"entityType": "Airflow::Group", "entityId": "group1"},
{"entityType": "Airflow::Group", "entityId": "group2"},
],
},
{
"identifier": {"entityType": "Airflow::Group", "entityId": "group1"},
},
{
"identifier": {"entityType": "Airflow::Group", "entityId": "group2"},
},
],
{"contextMap": {"context_param": {"string": "value"}}},
{"decision": "ALLOW"},
True,
),
],
)
def test_is_authorized_successful(
self, facade, entity_id, context, user, expected_entities, expected_context, avp_response, expected
):
mock_is_authorized = Mock(return_value=avp_response)
facade.avp_client.is_authorized = mock_is_authorized
method: ResourceMethod = "GET"
entity_type = AvpEntities.VARIABLE
result = facade.is_authorized(
method=method,
entity_type=entity_type,
entity_id=entity_id,
user=user,
context=context,
)
params = prune_dict(
{
"policyStoreId": AVP_POLICY_STORE_ID,
"principal": {"entityType": "Airflow::User", "entityId": user.get_id()},
"action": {
"actionType": "Airflow::Action",
"actionId": get_action_id(entity_type, method, entity_id),
},
"resource": {"entityType": get_entity_type(entity_type), "entityId": entity_id or "*"},
"entities": {"entityList": expected_entities},
"context": expected_context,
}
)
mock_is_authorized.assert_called_once_with(**params)
assert result == expected
def test_is_authorized_unsuccessful(self, facade):
avp_response = {"errors": ["Error"]}
mock_is_authorized = Mock(return_value=avp_response)
facade.avp_client.is_authorized = mock_is_authorized
with pytest.raises(AirflowException, match="Error occurred while making an authorization decision."):
facade.is_authorized(method="GET", entity_type=AvpEntities.VARIABLE, user=test_user)
@pytest.mark.parametrize(
("user", "avp_response", "expected"),
[
(
test_user,
{"results": [{"decision": "ALLOW"}, {"decision": "DENY"}]},
False,
),
(
test_user,
{"results": [{"decision": "ALLOW"}, {"decision": "ALLOW"}]},
True,
),
(
None,
{"results": [{"decision": "ALLOW"}, {"decision": "ALLOW"}]},
False,
),
],
)
def test_batch_is_authorized_successful(self, facade, user, avp_response, expected):
mock_batch_is_authorized = Mock(return_value=avp_response)
facade.avp_client.batch_is_authorized = mock_batch_is_authorized
result = facade.batch_is_authorized(
requests=[
{"method": "GET", "entity_type": AvpEntities.VARIABLE, "entity_id": "var1"},
{"method": "GET", "entity_type": AvpEntities.VARIABLE, "entity_id": "var1"},
],
user=user,
)
assert result == expected
def test_batch_is_authorized_unsuccessful(self, facade):
avp_response = {"results": [{}, {"errors": []}, {"errors": [{"errorDescription": "Error"}]}]}
mock_batch_is_authorized = Mock(return_value=avp_response)
facade.avp_client.batch_is_authorized = mock_batch_is_authorized
with pytest.raises(
AirflowException, match="Error occurred while making a batch authorization decision."
):
facade.batch_is_authorized(
requests=[
{"method": "GET", "entity_type": AvpEntities.VARIABLE, "entity_id": "var1"},
{"method": "GET", "entity_type": AvpEntities.VARIABLE, "entity_id": "var1"},
],
user=test_user,
)
def test_get_batch_is_authorized_single_result_successful(self, facade):
single_result = {
"request": {
"principal": {"entityType": "Airflow::User", "entityId": "test_user"},
"action": {"actionType": "Airflow::Action", "actionId": "Connection.LIST"},
"resource": {"entityType": "Airflow::Connection", "entityId": "*"},
},
"decision": "ALLOW",
}
result = facade.get_batch_is_authorized_single_result(
batch_is_authorized_results=[
{
"request": {
"principal": {"entityType": "Airflow::User", "entityId": "test_user"},
"action": {"actionType": "Airflow::Action", "actionId": "Variable.LIST"},
"resource": {"entityType": "Airflow::Variable", "entityId": "*"},
},
"decision": "ALLOW",
},
single_result,
],
request={
"method": "GET",
"entity_type": AvpEntities.CONNECTION,
},
user=test_user,
)
assert result == single_result
def test_get_batch_is_authorized_single_result_unsuccessful(self, facade):
with pytest.raises(AirflowException, match="Could not find the authorization result."):
facade.get_batch_is_authorized_single_result(
batch_is_authorized_results=[
{
"request": {
"principal": {"entityType": "Airflow::User", "entityId": "test_user"},
"action": {"actionType": "Airflow::Action", "actionId": "Variable.GET"},
"resource": {"entityType": "Airflow::Variable", "entityId": "*"},
},
"decision": "ALLOW",
},
{
"request": {
"principal": {"entityType": "Airflow::User", "entityId": "test_user"},
"action": {"actionType": "Airflow::Action", "actionId": "Variable.POST"},
"resource": {"entityType": "Airflow::Variable", "entityId": "*"},
},
"decision": "ALLOW",
},
],
request={
"method": "GET",
"entity_type": AvpEntities.CONNECTION,
},
user=test_user,
)
def test_is_policy_store_schema_up_to_date_when_schema_up_to_date(self, facade):
from airflow.providers.amazon.aws.auth_manager import avp
schema_path = Path(avp.__file__).parent / "schema.json"
with open(schema_path) as schema_file:
avp_response = {"schema": schema_file.read()}
mock_get_schema = Mock(return_value=avp_response)
facade.avp_client.get_schema = mock_get_schema
assert facade.is_policy_store_schema_up_to_date()
def test_is_policy_store_schema_up_to_date_when_schema_is_modified(self, facade):
from airflow.providers.amazon.aws.auth_manager import avp
schema_path = Path(avp.__file__).parent / "schema.json"
with open(schema_path) as schema_file:
schema = json.loads(schema_file.read())
schema["new_field"] = "new_value"
avp_response = {"schema": json.dumps(schema)}
mock_get_schema = Mock(return_value=avp_response)
facade.avp_client.get_schema = mock_get_schema
assert not facade.is_policy_store_schema_up_to_date()
| TestAwsAuthManagerAmazonVerifiedPermissionsFacade |
python | getsentry__sentry | tests/sentry/explore/endpoints/test_explore_saved_query_starred.py | {
"start": 175,
"end": 2223
} | class ____(APITestCase, SnubaTestCase):
feature_name = "organizations:visibility-explore-view"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user)
self.project_ids = [
self.create_project(organization=self.org).id,
self.create_project(organization=self.org).id,
]
query = {"query": [{"fields": ["span.op"], "mode": "samples"}]}
model = ExploreSavedQuery.objects.create(
organization=self.org, created_by_id=self.user.id, name="Test query", query=query
)
model.set_projects(self.project_ids)
self.query_id = model.id
self.url = reverse(
"sentry-api-0-explore-saved-query-starred", args=[self.org.slug, self.query_id]
)
def test_post(self) -> None:
with self.feature(self.feature_name):
assert not ExploreSavedQuery.objects.filter(
id__in=ExploreSavedQueryStarred.objects.filter(
organization=self.org, user_id=self.user.id
).values_list("explore_saved_query_id", flat=True)
).exists()
response = self.client.post(self.url, data={"starred": "1"})
assert response.status_code == 200, response.content
assert ExploreSavedQuery.objects.filter(
id__in=ExploreSavedQueryStarred.objects.filter(
organization=self.org, user_id=self.user.id
).values_list("explore_saved_query_id", flat=True)
).exists()
response = self.client.post(self.url, data={"starred": "0"})
assert response.status_code == 200, response.content
assert not ExploreSavedQuery.objects.filter(
id__in=ExploreSavedQueryStarred.objects.filter(
organization=self.org, user_id=self.user.id
).values_list("explore_saved_query_id", flat=True)
).exists()
| ExploreSavedQueryStarredTest |
python | numba__numba | numba/tests/npyufunc/test_vectorize_decor.py | {
"start": 461,
"end": 2220
} | class ____(object):
target = None
wrapper = None
funcs = {
'func1': sinc,
'func2': scaled_sinc,
'func3': vector_add,
}
@classmethod
def _run_and_compare(cls, func, sig, A, *args, **kwargs):
if cls.wrapper is not None:
func = cls.wrapper(func)
numba_func = vectorize(sig, target=cls.target)(func)
numpy_func = np.vectorize(func)
result = numba_func(A, *args)
gold = numpy_func(A, *args)
np.testing.assert_allclose(result, gold, **kwargs)
def test_1(self):
sig = ['float64(float64)', 'float32(float32)']
func = self.funcs['func1']
A = np.arange(100, dtype=np.float64)
self._run_and_compare(func, sig, A)
def test_2(self):
sig = [float64(float64), float32(float32)]
func = self.funcs['func1']
A = np.arange(100, dtype=np.float64)
self._run_and_compare(func, sig, A)
def test_3(self):
sig = ['float64(float64, uint32)']
func = self.funcs['func2']
A = np.arange(100, dtype=np.float64)
scale = np.uint32(3)
self._run_and_compare(func, sig, A, scale, atol=1e-8)
def test_4(self):
sig = [
int32(int32, int32),
uint32(uint32, uint32),
float32(float32, float32),
float64(float64, float64),
]
func = self.funcs['func3']
A = np.arange(100, dtype=np.float64)
self._run_and_compare(func, sig, A, A)
A = A.astype(np.float32)
self._run_and_compare(func, sig, A, A)
A = A.astype(np.int32)
self._run_and_compare(func, sig, A, A)
A = A.astype(np.uint32)
self._run_and_compare(func, sig, A, A)
| BaseVectorizeDecor |
python | keras-team__keras | benchmarks/model_benchmark/benchmark_utils.py | {
"start": 28,
"end": 790
} | class ____(keras.callbacks.Callback):
def __init__(self, start_batch=1, stop_batch=None):
self.start_batch = start_batch
self.stop_batch = stop_batch
# Store the throughput of each epoch.
self.state = {"throughput": []}
def on_train_batch_begin(self, batch, logs=None):
if batch == self.start_batch:
self.state["epoch_begin_time"] = time.time()
def on_train_batch_end(self, batch, logs=None):
if batch == self.stop_batch:
epoch_end_time = time.time()
throughput = (self.stop_batch - self.start_batch + 1) / (
epoch_end_time - self.state["epoch_begin_time"]
)
self.state["throughput"].append(throughput)
| BenchmarkMetricsCallback |
python | scrapy__scrapy | scrapy/robotstxt.py | {
"start": 3686,
"end": 4342
} | class ____(RobotParser):
def __init__(self, robotstxt_body: bytes, spider: Spider | None):
self.spider: Spider | None = spider
body_decoded = decode_robotstxt(robotstxt_body, spider)
self.rp = Protego.parse(body_decoded)
@classmethod
def from_crawler(cls, crawler: Crawler, robotstxt_body: bytes) -> Self:
spider = None if not crawler else crawler.spider
return cls(robotstxt_body, spider)
def allowed(self, url: str | bytes, user_agent: str | bytes) -> bool:
user_agent = to_unicode(user_agent)
url = to_unicode(url)
return self.rp.can_fetch(url, user_agent)
| ProtegoRobotParser |
python | gevent__gevent | src/gevent/tests/test__example_wsgiserver_ssl.py | {
"start": 200,
"end": 649
} | class ____(test__example_wsgiserver.Test_wsgiserver):
example = 'wsgiserver_ssl.py'
URL = 'https://%s:8443' % (params.DEFAULT_LOCAL_HOST_ADDR,)
PORT = 8443
_use_ssl = True
if hasattr(ssl, '_create_unverified_context'):
# Disable verification for our self-signed cert
# on Python >= 2.7.9 and 3.4
ssl_ctx = ssl._create_unverified_context()
if __name__ == '__main__':
greentest.main()
| Test_wsgiserver_ssl |
python | tensorflow__tensorflow | tensorflow/python/data/ops/dataset_ops.py | {
"start": 195794,
"end": 198485
} | class ____(resource_lib.CapturableResource):
"""Allows export of functions capturing a Dataset in SavedModels.
When saving a SavedModel, `tf.saved_model.save` traverses the object
graph. Since Datasets reference _VariantTracker objects, that traversal will
find a _VariantTracker for each Dataset and so know how to save and restore
functions which reference the Dataset's variant Tensor.
"""
def __init__(self, variant_tensor, resource_creator):
"""Record that `variant_tensor` is associated with `resource_creator`.
Args:
variant_tensor: The variant-dtype Tensor associated with the Dataset. This
Tensor will be a captured input to functions which use the Dataset, and
is used by saving code to identify the corresponding _VariantTracker.
resource_creator: A zero-argument function which creates a new
variant-dtype Tensor. This function will be included in SavedModels and
run to re-create the Dataset's variant Tensor on restore.
"""
super(_VariantTracker, self).__init__(device="CPU")
self._resource_handle = variant_tensor
if not isinstance(resource_creator, def_function.Function):
# Internal validation -- _VariantTracker assumes that resource creator is
# already a tf.function.
raise TypeError("Resource creator should already be a tf.function.")
self._create_resource = resource_creator
def _trackable_children(self,
save_type=tracking_base.SaveType.CHECKPOINT,
**kwargs):
if save_type != tracking_base.SaveType.SAVEDMODEL:
return {}
children = super(_VariantTracker,
self)._trackable_children(save_type, **kwargs)
# Overwrite the _create_resource function, since `self._create_resource`
# is already a tf.function.
children["_create_resource"] = self._create_resource
return children
# TODO(b/254291122): Remove.
# Loaded lazily due to a circular dependency (dataset_ops ->
# batch_op -> dataset_ops).
batch_op = lazy_loader.LazyLoader(
"batch_op", globals(),
"tensorflow.python.data.ops.batch_op")
BatchDataset = batch_op._BatchDataset # pylint: disable=protected-access
PrefetchDataset = prefetch_op._PrefetchDataset # pylint: disable=protected-access
ShuffleDataset = shuffle_op._ShuffleDataset # pylint: disable=protected-access
# TODO(b/254291122): Remove.
# Loaded lazily due to a circular dependency (dataset_ops ->
# repeat_op -> dataset_ops).
repeat_op = lazy_loader.LazyLoader(
"repeat_op", globals(),
"tensorflow.python.data.ops.repeat_op")
RepeatDataset = repeat_op._RepeatDataset # pylint: disable=protected-access
| _VariantTracker |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.