language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
huggingface__transformers
|
src/transformers/models/convnextv2/modeling_convnextv2.py
|
{
"start": 9327,
"end": 10678
}
|
class ____(nn.Module):
    """ConvNextV2 encoder: a pipeline of downsampling stages with a linear
    stochastic-depth (drop-path) schedule spread across all blocks."""

    def __init__(self, config):
        super().__init__()
        # One global linear ramp of drop-path rates over every block, then
        # partitioned into per-stage chunks according to config.depths.
        schedule = torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")
        per_stage_rates = [chunk.tolist() for chunk in schedule.split(config.depths)]
        self.stages = nn.ModuleList()
        in_chs = config.hidden_sizes[0]
        for stage_idx in range(config.num_stages):
            width = config.hidden_sizes[stage_idx]
            # The first stage keeps resolution; every later stage downsamples by 2.
            self.stages.append(
                ConvNextV2Stage(
                    config,
                    in_channels=in_chs,
                    out_channels=width,
                    stride=1 if stage_idx == 0 else 2,
                    depth=config.depths[stage_idx],
                    drop_path_rates=per_stage_rates[stage_idx],
                )
            )
            in_chs = width

    def forward(
        self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool] = False
    ) -> BaseModelOutputWithNoAttention:
        """Run the input through every stage, optionally collecting the input
        plus each stage's output as intermediate hidden states."""
        collected = [hidden_states] if output_hidden_states else None
        for stage in self.stages:
            hidden_states = stage(hidden_states)
            if collected is not None:
                collected.append(hidden_states)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=collected)
@auto_docstring
|
ConvNextV2Encoder
|
python
|
django__django
|
tests/postgres_tests/test_indexes.py
|
{
"start": 8611,
"end": 32967
}
|
class ____(PostgreSQLTestCase):
    """Integration tests for the PostgreSQL-specific index classes.

    Each test creates an index through a live schema editor, verifies the
    resulting index type/options/columns via introspection (and, where
    relevant, the generated SQL), then drops the index again.
    """

    # Maps an index name to the operator classes it uses, via the
    # pg_opclass/pg_index/pg_class system catalogs.
    get_opclass_query = """
        SELECT opcname, c.relname FROM pg_opclass AS oc
        JOIN pg_index as i on oc.oid = ANY(i.indclass)
        JOIN pg_class as c on c.oid = i.indexrelid
        WHERE c.relname = %s
    """

    def get_constraints(self, table):
        """
        Get the indexes on the table using a new cursor.
        """
        with connection.cursor() as cursor:
            return connection.introspection.get_constraints(cursor, table)

    def test_gin_index(self):
        # Ensure the table is there and doesn't have an index.
        self.assertNotIn(
            "field", self.get_constraints(IntegerArrayModel._meta.db_table)
        )
        # Add the index
        index_name = "integer_array_model_field_gin"
        index = GinIndex(fields=["field"], name=index_name)
        with connection.schema_editor() as editor:
            editor.add_index(IntegerArrayModel, index)
        constraints = self.get_constraints(IntegerArrayModel._meta.db_table)
        # Check gin index was added
        self.assertEqual(constraints[index_name]["type"], GinIndex.suffix)
        # Drop the index
        with connection.schema_editor() as editor:
            editor.remove_index(IntegerArrayModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(IntegerArrayModel._meta.db_table)
        )

    def test_gin_fastupdate(self):
        # fastupdate=False must surface as the "fastupdate=off" storage option.
        index_name = "integer_array_gin_fastupdate"
        index = GinIndex(fields=["field"], name=index_name, fastupdate=False)
        with connection.schema_editor() as editor:
            editor.add_index(IntegerArrayModel, index)
        constraints = self.get_constraints(IntegerArrayModel._meta.db_table)
        self.assertEqual(constraints[index_name]["type"], "gin")
        self.assertEqual(constraints[index_name]["options"], ["fastupdate=off"])
        with connection.schema_editor() as editor:
            editor.remove_index(IntegerArrayModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(IntegerArrayModel._meta.db_table)
        )

    def test_partial_gin_index(self):
        # The condition uses the Length lookup, registered only for this test.
        with register_lookup(CharField, Length):
            index_name = "char_field_gin_partial_idx"
            index = GinIndex(
                fields=["field"], name=index_name, condition=Q(field__length=40)
            )
            with connection.schema_editor() as editor:
                editor.add_index(CharFieldModel, index)
            constraints = self.get_constraints(CharFieldModel._meta.db_table)
            self.assertEqual(constraints[index_name]["type"], "gin")
            with connection.schema_editor() as editor:
                editor.remove_index(CharFieldModel, index)
            self.assertNotIn(
                index_name, self.get_constraints(CharFieldModel._meta.db_table)
            )

    def test_partial_gin_index_with_tablespace(self):
        # Same as above, additionally asserting the TABLESPACE clause in SQL.
        with register_lookup(CharField, Length):
            index_name = "char_field_gin_partial_idx"
            index = GinIndex(
                fields=["field"],
                name=index_name,
                condition=Q(field__length=40),
                db_tablespace="pg_default",
            )
            with connection.schema_editor() as editor:
                editor.add_index(CharFieldModel, index)
                self.assertIn(
                    'TABLESPACE "pg_default" ',
                    str(index.create_sql(CharFieldModel, editor)),
                )
            constraints = self.get_constraints(CharFieldModel._meta.db_table)
            self.assertEqual(constraints[index_name]["type"], "gin")
            with connection.schema_editor() as editor:
                editor.remove_index(CharFieldModel, index)
            self.assertNotIn(
                index_name, self.get_constraints(CharFieldModel._meta.db_table)
            )

    def test_gin_parameters(self):
        # Both GIN storage parameters plus a tablespace, checked in the SQL
        # and in the introspected options.
        index_name = "integer_array_gin_params"
        index = GinIndex(
            fields=["field"],
            name=index_name,
            fastupdate=True,
            gin_pending_list_limit=64,
            db_tablespace="pg_default",
        )
        with connection.schema_editor() as editor:
            editor.add_index(IntegerArrayModel, index)
            self.assertIn(
                ") WITH (gin_pending_list_limit = 64, fastupdate = on) TABLESPACE",
                str(index.create_sql(IntegerArrayModel, editor)),
            )
        constraints = self.get_constraints(IntegerArrayModel._meta.db_table)
        self.assertEqual(constraints[index_name]["type"], "gin")
        self.assertEqual(
            constraints[index_name]["options"],
            ["gin_pending_list_limit=64", "fastupdate=on"],
        )
        with connection.schema_editor() as editor:
            editor.remove_index(IntegerArrayModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(IntegerArrayModel._meta.db_table)
        )

    def test_trigram_op_class_gin_index(self):
        # OpClass expression index; the opclass is verified via pg_opclass.
        index_name = "trigram_op_class_gin"
        index = GinIndex(OpClass(F("scene"), name="gin_trgm_ops"), name=index_name)
        with connection.schema_editor() as editor:
            editor.add_index(Scene, index)
            with editor.connection.cursor() as cursor:
                cursor.execute(self.get_opclass_query, [index_name])
                self.assertCountEqual(cursor.fetchall(), [("gin_trgm_ops", index_name)])
        constraints = self.get_constraints(Scene._meta.db_table)
        self.assertIn(index_name, constraints)
        self.assertIn(constraints[index_name]["type"], GinIndex.suffix)
        with connection.schema_editor() as editor:
            editor.remove_index(Scene, index)
        self.assertNotIn(index_name, self.get_constraints(Scene._meta.db_table))

    def test_cast_search_vector_gin_index(self):
        # Cast() expression index; the SQL must reference the column and
        # contain the ::tsvector cast.
        index_name = "cast_search_vector_gin"
        index = GinIndex(Cast("field", SearchVectorField()), name=index_name)
        with connection.schema_editor() as editor:
            editor.add_index(TextFieldModel, index)
            sql = index.create_sql(TextFieldModel, editor)
        table = TextFieldModel._meta.db_table
        constraints = self.get_constraints(table)
        self.assertIn(index_name, constraints)
        self.assertIn(constraints[index_name]["type"], GinIndex.suffix)
        self.assertIs(sql.references_column(table, "field"), True)
        self.assertIn("::tsvector", str(sql))
        with connection.schema_editor() as editor:
            editor.remove_index(TextFieldModel, index)
        self.assertNotIn(index_name, self.get_constraints(table))

    def test_bloom_index(self):
        index_name = "char_field_model_field_bloom"
        index = BloomIndex(fields=["field"], name=index_name)
        with connection.schema_editor() as editor:
            editor.add_index(CharFieldModel, index)
        constraints = self.get_constraints(CharFieldModel._meta.db_table)
        self.assertEqual(constraints[index_name]["type"], BloomIndex.suffix)
        with connection.schema_editor() as editor:
            editor.remove_index(CharFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(CharFieldModel._meta.db_table)
        )

    def test_bloom_parameters(self):
        # columns=[3] becomes the per-column "col1=3" storage option.
        index_name = "char_field_model_field_bloom_params"
        index = BloomIndex(fields=["field"], name=index_name, length=512, columns=[3])
        with connection.schema_editor() as editor:
            editor.add_index(CharFieldModel, index)
        constraints = self.get_constraints(CharFieldModel._meta.db_table)
        self.assertEqual(constraints[index_name]["type"], BloomIndex.suffix)
        self.assertEqual(constraints[index_name]["options"], ["length=512", "col1=3"])
        with connection.schema_editor() as editor:
            editor.remove_index(CharFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(CharFieldModel._meta.db_table)
        )

    def test_brin_index(self):
        index_name = "char_field_model_field_brin"
        index = BrinIndex(fields=["field"], name=index_name, pages_per_range=4)
        with connection.schema_editor() as editor:
            editor.add_index(CharFieldModel, index)
        constraints = self.get_constraints(CharFieldModel._meta.db_table)
        self.assertEqual(constraints[index_name]["type"], BrinIndex.suffix)
        self.assertEqual(constraints[index_name]["options"], ["pages_per_range=4"])
        with connection.schema_editor() as editor:
            editor.remove_index(CharFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(CharFieldModel._meta.db_table)
        )

    def test_brin_parameters(self):
        index_name = "char_field_brin_params"
        index = BrinIndex(fields=["field"], name=index_name, autosummarize=True)
        with connection.schema_editor() as editor:
            editor.add_index(CharFieldModel, index)
        constraints = self.get_constraints(CharFieldModel._meta.db_table)
        self.assertEqual(constraints[index_name]["type"], BrinIndex.suffix)
        self.assertEqual(constraints[index_name]["options"], ["autosummarize=on"])
        with connection.schema_editor() as editor:
            editor.remove_index(CharFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(CharFieldModel._meta.db_table)
        )

    def test_btree_index(self):
        # Ensure the table is there and doesn't have an index.
        self.assertNotIn("field", self.get_constraints(CharFieldModel._meta.db_table))
        # Add the index.
        index_name = "char_field_model_field_btree"
        index = BTreeIndex(fields=["field"], name=index_name)
        with connection.schema_editor() as editor:
            editor.add_index(CharFieldModel, index)
        constraints = self.get_constraints(CharFieldModel._meta.db_table)
        # The index was added.
        self.assertEqual(constraints[index_name]["type"], BTreeIndex.suffix)
        # Drop the index.
        with connection.schema_editor() as editor:
            editor.remove_index(CharFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(CharFieldModel._meta.db_table)
        )

    def test_btree_parameters(self):
        index_name = "integer_array_btree_parameters"
        index = BTreeIndex(
            fields=["field"], name=index_name, fillfactor=80, deduplicate_items=False
        )
        with connection.schema_editor() as editor:
            editor.add_index(CharFieldModel, index)
        constraints = self.get_constraints(CharFieldModel._meta.db_table)
        self.assertEqual(constraints[index_name]["type"], BTreeIndex.suffix)
        self.assertEqual(
            constraints[index_name]["options"],
            ["fillfactor=80", "deduplicate_items=off"],
        )
        with connection.schema_editor() as editor:
            editor.remove_index(CharFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(CharFieldModel._meta.db_table)
        )

    def test_gist_index(self):
        # Ensure the table is there and doesn't have an index.
        self.assertNotIn("field", self.get_constraints(CharFieldModel._meta.db_table))
        # Add the index.
        index_name = "char_field_model_field_gist"
        index = GistIndex(fields=["field"], name=index_name)
        with connection.schema_editor() as editor:
            editor.add_index(CharFieldModel, index)
        constraints = self.get_constraints(CharFieldModel._meta.db_table)
        # The index was added.
        self.assertEqual(constraints[index_name]["type"], GistIndex.suffix)
        # Drop the index.
        with connection.schema_editor() as editor:
            editor.remove_index(CharFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(CharFieldModel._meta.db_table)
        )

    def test_gist_parameters(self):
        index_name = "integer_array_gist_buffering"
        index = GistIndex(
            fields=["field"], name=index_name, buffering=True, fillfactor=80
        )
        with connection.schema_editor() as editor:
            editor.add_index(CharFieldModel, index)
        constraints = self.get_constraints(CharFieldModel._meta.db_table)
        self.assertEqual(constraints[index_name]["type"], GistIndex.suffix)
        self.assertEqual(
            constraints[index_name]["options"], ["buffering=on", "fillfactor=80"]
        )
        with connection.schema_editor() as editor:
            editor.remove_index(CharFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(CharFieldModel._meta.db_table)
        )

    def test_gist_include(self):
        # INCLUDE columns show up in the introspected column list.
        index_name = "scene_gist_include_setting"
        index = GistIndex(name=index_name, fields=["scene"], include=["setting"])
        with connection.schema_editor() as editor:
            editor.add_index(Scene, index)
        constraints = self.get_constraints(Scene._meta.db_table)
        self.assertIn(index_name, constraints)
        self.assertEqual(constraints[index_name]["type"], GistIndex.suffix)
        self.assertEqual(constraints[index_name]["columns"], ["scene", "setting"])
        with connection.schema_editor() as editor:
            editor.remove_index(Scene, index)
        self.assertNotIn(index_name, self.get_constraints(Scene._meta.db_table))

    def test_tsvector_op_class_gist_index(self):
        index_name = "tsvector_op_class_gist"
        index = GistIndex(
            OpClass(
                SearchVector("scene", "setting", config="english"),
                name="tsvector_ops",
            ),
            name=index_name,
        )
        with connection.schema_editor() as editor:
            editor.add_index(Scene, index)
            sql = index.create_sql(Scene, editor)
        table = Scene._meta.db_table
        constraints = self.get_constraints(table)
        self.assertIn(index_name, constraints)
        self.assertIn(constraints[index_name]["type"], GistIndex.suffix)
        self.assertIs(sql.references_column(table, "scene"), True)
        self.assertIs(sql.references_column(table, "setting"), True)
        with connection.schema_editor() as editor:
            editor.remove_index(Scene, index)
        self.assertNotIn(index_name, self.get_constraints(table))

    def test_search_vector(self):
        """SearchVector generates IMMUTABLE SQL in order to be indexable."""
        index_name = "test_search_vector"
        index = Index(SearchVector("id", "scene", config="english"), name=index_name)
        # Indexed function must be IMMUTABLE.
        with connection.schema_editor() as editor:
            editor.add_index(Scene, index)
        constraints = self.get_constraints(Scene._meta.db_table)
        self.assertIn(index_name, constraints)
        self.assertIs(constraints[index_name]["index"], True)
        with connection.schema_editor() as editor:
            editor.remove_index(Scene, index)
        self.assertNotIn(index_name, self.get_constraints(Scene._meta.db_table))

    def test_hash_index(self):
        # Ensure the table is there and doesn't have an index.
        self.assertNotIn("field", self.get_constraints(CharFieldModel._meta.db_table))
        # Add the index.
        index_name = "char_field_model_field_hash"
        index = HashIndex(fields=["field"], name=index_name)
        with connection.schema_editor() as editor:
            editor.add_index(CharFieldModel, index)
        constraints = self.get_constraints(CharFieldModel._meta.db_table)
        # The index was added.
        self.assertEqual(constraints[index_name]["type"], HashIndex.suffix)
        # Drop the index.
        with connection.schema_editor() as editor:
            editor.remove_index(CharFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(CharFieldModel._meta.db_table)
        )

    def test_hash_parameters(self):
        index_name = "integer_array_hash_fillfactor"
        index = HashIndex(fields=["field"], name=index_name, fillfactor=80)
        with connection.schema_editor() as editor:
            editor.add_index(CharFieldModel, index)
        constraints = self.get_constraints(CharFieldModel._meta.db_table)
        self.assertEqual(constraints[index_name]["type"], HashIndex.suffix)
        self.assertEqual(constraints[index_name]["options"], ["fillfactor=80"])
        with connection.schema_editor() as editor:
            editor.remove_index(CharFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(CharFieldModel._meta.db_table)
        )

    def test_spgist_index(self):
        # Ensure the table is there and doesn't have an index.
        self.assertNotIn("field", self.get_constraints(TextFieldModel._meta.db_table))
        # Add the index.
        index_name = "text_field_model_field_spgist"
        index = SpGistIndex(fields=["field"], name=index_name)
        with connection.schema_editor() as editor:
            editor.add_index(TextFieldModel, index)
        constraints = self.get_constraints(TextFieldModel._meta.db_table)
        # The index was added.
        self.assertEqual(constraints[index_name]["type"], SpGistIndex.suffix)
        # Drop the index.
        with connection.schema_editor() as editor:
            editor.remove_index(TextFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(TextFieldModel._meta.db_table)
        )

    def test_spgist_parameters(self):
        index_name = "text_field_model_spgist_fillfactor"
        index = SpGistIndex(fields=["field"], name=index_name, fillfactor=80)
        with connection.schema_editor() as editor:
            editor.add_index(TextFieldModel, index)
        constraints = self.get_constraints(TextFieldModel._meta.db_table)
        self.assertEqual(constraints[index_name]["type"], SpGistIndex.suffix)
        self.assertEqual(constraints[index_name]["options"], ["fillfactor=80"])
        with connection.schema_editor() as editor:
            editor.remove_index(TextFieldModel, index)
        self.assertNotIn(
            index_name, self.get_constraints(TextFieldModel._meta.db_table)
        )

    def test_spgist_include(self):
        index_name = "scene_spgist_include_setting"
        index = SpGistIndex(name=index_name, fields=["scene"], include=["setting"])
        with connection.schema_editor() as editor:
            editor.add_index(Scene, index)
        constraints = self.get_constraints(Scene._meta.db_table)
        self.assertIn(index_name, constraints)
        self.assertEqual(constraints[index_name]["type"], SpGistIndex.suffix)
        self.assertEqual(constraints[index_name]["columns"], ["scene", "setting"])
        with connection.schema_editor() as editor:
            editor.remove_index(Scene, index)
        self.assertNotIn(index_name, self.get_constraints(Scene._meta.db_table))

    def test_custom_suffix(self):
        # A subclass can override the suffix and the USING clause.
        class CustomSuffixIndex(PostgresIndex):
            suffix = "sfx"

            def create_sql(self, model, schema_editor, using="gin", **kwargs):
                return super().create_sql(model, schema_editor, using=using, **kwargs)

        index = CustomSuffixIndex(fields=["field"], name="custom_suffix_idx")
        self.assertEqual(index.suffix, "sfx")
        with connection.schema_editor() as editor:
            self.assertIn(
                " USING gin ",
                str(index.create_sql(CharFieldModel, editor)),
            )

    def test_custom_sql(self):
        # A subclass can fully replace the create/delete SQL templates.
        class CustomSQLIndex(PostgresIndex):
            sql_create_index = "SELECT 1"
            sql_delete_index = "SELECT 2"

            def create_sql(self, model, schema_editor, using="", **kwargs):
                kwargs.setdefault("sql", self.sql_create_index)
                return super().create_sql(model, schema_editor, using, **kwargs)

            def remove_sql(self, model, schema_editor, **kwargs):
                kwargs.setdefault("sql", self.sql_delete_index)
                return super().remove_sql(model, schema_editor, **kwargs)

        index = CustomSQLIndex(fields=["field"], name="custom_sql_idx")
        operations = [
            (index.create_sql, CustomSQLIndex.sql_create_index),
            (index.remove_sql, CustomSQLIndex.sql_delete_index),
        ]
        for operation, expected in operations:
            with self.subTest(operation=operation.__name__):
                with connection.schema_editor() as editor:
                    self.assertEqual(expected, str(operation(CharFieldModel, editor)))

    def test_op_class(self):
        index_name = "test_op_class"
        index = Index(
            OpClass(Lower("field"), name="text_pattern_ops"),
            name=index_name,
        )
        with connection.schema_editor() as editor:
            editor.add_index(TextFieldModel, index)
            with editor.connection.cursor() as cursor:
                cursor.execute(self.get_opclass_query, [index_name])
                self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", index_name)])

    def test_op_class_descending_collation(self):
        collation = connection.features.test_collations.get("non_default")
        if not collation:
            self.skipTest("This backend does not support case-insensitive collations.")
        index_name = "test_op_class_descending_collation"
        index = Index(
            Collate(
                OpClass(Lower("field"), name="text_pattern_ops").desc(nulls_last=True),
                collation=collation,
            ),
            name=index_name,
        )
        with connection.schema_editor() as editor:
            editor.add_index(TextFieldModel, index)
            self.assertIn(
                "COLLATE %s" % editor.quote_name(collation),
                str(index.create_sql(TextFieldModel, editor)),
            )
            with editor.connection.cursor() as cursor:
                cursor.execute(self.get_opclass_query, [index_name])
                self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", index_name)])
        table = TextFieldModel._meta.db_table
        constraints = self.get_constraints(table)
        self.assertIn(index_name, constraints)
        self.assertEqual(constraints[index_name]["orders"], ["DESC"])
        with connection.schema_editor() as editor:
            editor.remove_index(TextFieldModel, index)
        self.assertNotIn(index_name, self.get_constraints(table))

    def test_op_class_descending_partial(self):
        index_name = "test_op_class_descending_partial"
        index = Index(
            OpClass(Lower("field"), name="text_pattern_ops").desc(),
            name=index_name,
            condition=Q(field__contains="China"),
        )
        with connection.schema_editor() as editor:
            editor.add_index(TextFieldModel, index)
            with editor.connection.cursor() as cursor:
                cursor.execute(self.get_opclass_query, [index_name])
                self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", index_name)])
        constraints = self.get_constraints(TextFieldModel._meta.db_table)
        self.assertIn(index_name, constraints)
        self.assertEqual(constraints[index_name]["orders"], ["DESC"])

    def test_op_class_descending_partial_tablespace(self):
        index_name = "test_op_class_descending_partial_tablespace"
        index = Index(
            OpClass(Lower("field").desc(), name="text_pattern_ops"),
            name=index_name,
            condition=Q(field__contains="China"),
            db_tablespace="pg_default",
        )
        with connection.schema_editor() as editor:
            editor.add_index(TextFieldModel, index)
            self.assertIn(
                'TABLESPACE "pg_default" ',
                str(index.create_sql(TextFieldModel, editor)),
            )
            with editor.connection.cursor() as cursor:
                cursor.execute(self.get_opclass_query, [index_name])
                self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", index_name)])
        constraints = self.get_constraints(TextFieldModel._meta.db_table)
        self.assertIn(index_name, constraints)
        self.assertEqual(constraints[index_name]["orders"], ["DESC"])
|
SchemaTests
|
python
|
huggingface__transformers
|
src/transformers/models/deberta/modeling_deberta.py
|
{
"start": 21455,
"end": 24835
}
|
class ____(nn.Module):
    """Modified BertEncoder with relative position bias support"""

    def __init__(self, config):
        super().__init__()
        self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])
        self.relative_attention = getattr(config, "relative_attention", False)
        if self.relative_attention:
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                # Unset/invalid: fall back to the absolute position budget.
                self.max_relative_positions = config.max_position_embeddings
            # Embedding table for the 2 * max_relative_positions possible offsets.
            self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)
        self.gradient_checkpointing = False

    def get_rel_embedding(self):
        """Return the relative-position embedding weights, or None when
        relative attention is disabled."""
        rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        """Expand a 2D (or 3D) attention mask to the 4D form the layers expect.

        For a 2D mask, the outer product of the mask with itself produces a
        pairwise (query x key) visibility mask; a 3D mask only gains a head
        dimension. A 4D mask is passed through unchanged.
        """
        if attention_mask.dim() <= 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
        elif attention_mask.dim() == 3:
            attention_mask = attention_mask.unsqueeze(1)
        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        """Build relative positions lazily when relative attention is on and
        the caller did not supply them; prefer query_states as the query side."""
        if self.relative_attention and relative_pos is None:
            if query_states is not None:
                relative_pos = build_relative_position(query_states, hidden_states)
            else:
                relative_pos = build_relative_position(hidden_states, hidden_states)
        return relative_pos

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_hidden_states: bool = True,
        output_attentions: bool = False,
        query_states=None,
        relative_pos=None,
        return_dict: bool = True,
    ):
        """Run all layers, threading either query_states (when provided) or the
        running hidden states as the next layer's key/value input."""
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
        # Include the embedding output as the first entry, mirroring BERT.
        all_hidden_states: Optional[tuple[torch.Tensor]] = (hidden_states,) if output_hidden_states else None
        all_attentions = () if output_attentions else None
        next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        for i, layer_module in enumerate(self.layer):
            hidden_states, att_m = layer_module(
                next_kv,
                attention_mask,
                query_states=query_states,
                relative_pos=relative_pos,
                rel_embeddings=rel_embeddings,
                output_attentions=output_attentions,
            )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            # With explicit query_states the layer output becomes the new query;
            # otherwise it becomes the next layer's key/value input.
            if query_states is not None:
                query_states = hidden_states
            else:
                next_kv = hidden_states
            if output_attentions:
                all_attentions = all_attentions + (att_m,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
@auto_docstring
|
DebertaEncoder
|
python
|
huggingface__transformers
|
src/transformers/models/olmo2/modeling_olmo2.py
|
{
"start": 19052,
"end": 22150
}
|
class ____(Olmo2PreTrainedModel, GenerationMixin):
    """Olmo2 decoder with a causal language-modeling head."""

    # lm_head shares weights with the input embeddings when tying is enabled.
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Olmo2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, Olmo2ForCausalLM

        >>> model = Olmo2ForCausalLM.from_pretrained("meta-olmo2/Olmo2-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-olmo2/Olmo2-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        # int logits_to_keep=k means "last k positions"; a tensor selects explicit indices.
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
__all__ = ["Olmo2ForCausalLM", "Olmo2Model", "Olmo2PreTrainedModel"]
|
Olmo2ForCausalLM
|
python
|
lepture__authlib
|
authlib/jose/rfc7518/jwe_encs.py
|
{
"start": 716,
"end": 3215
}
|
class ____(JWEEncAlgorithm):
    """AES_CBC_HMAC_SHA2 content-encryption (authenticated encryption) for JWE."""

    # The IV used is a 128-bit value generated randomly or
    # pseudo-randomly for use in the cipher.
    IV_SIZE = 128

    def __init__(self, key_size, hash_type):
        self.name = f"A{key_size}CBC-HS{hash_type}"
        tpl = "AES_{}_CBC_HMAC_SHA_{} authenticated encryption algorithm"
        self.description = tpl.format(key_size, hash_type)
        # bit length
        self.key_size = key_size
        # byte length
        self.key_len = key_size // 8
        # The CEK is split in half: first half for HMAC, second half for AES.
        self.CEK_SIZE = key_size * 2
        self.hash_alg = getattr(hashlib, f"sha{hash_type}")

    def _hmac(self, ciphertext, aad, iv, key):
        """Compute the truncated authentication tag over AAD || IV || ciphertext || AL,
        where AL is the AAD bit length as a 64-bit big-endian integer."""
        al = encode_int(len(aad) * 8, 64)
        msg = aad + iv + ciphertext + al
        d = hmac.new(key, msg, self.hash_alg).digest()
        # Tag is the initial key_len bytes of the HMAC output.
        return d[: self.key_len]

    def encrypt(self, msg, aad, iv, key):
        """Content Encryption with AES_CBC_HMAC_SHA2.

        :param msg: text to be encrypt in bytes
        :param aad: additional authenticated data in bytes
        :param iv: initialization vector in bytes
        :param key: combined HMAC + AES key (the CEK) in bytes
        :return: (ciphertext, tag)
        """
        self.check_iv(iv)
        # First half of the CEK authenticates, second half encrypts.
        hkey = key[: self.key_len]
        ekey = key[self.key_len :]
        pad = PKCS7(AES.block_size).padder()
        padded_data = pad.update(msg) + pad.finalize()
        cipher = Cipher(AES(ekey), CBC(iv), backend=default_backend())
        enc = cipher.encryptor()
        ciphertext = enc.update(padded_data) + enc.finalize()
        tag = self._hmac(ciphertext, aad, iv, hkey)
        return ciphertext, tag

    def decrypt(self, ciphertext, aad, iv, tag, key):
        """Content Decryption with AES_CBC_HMAC_SHA2.

        :param ciphertext: ciphertext in bytes
        :param aad: additional authenticated data in bytes
        :param iv: initialization vector in bytes
        :param tag: authentication tag in bytes
        :param key: combined HMAC + AES key (the CEK) in bytes
        :return: message
        """
        self.check_iv(iv)
        hkey = key[: self.key_len]
        dkey = key[self.key_len :]
        # Verify the tag (constant-time compare) before decrypting.
        _tag = self._hmac(ciphertext, aad, iv, hkey)
        if not hmac.compare_digest(_tag, tag):
            raise InvalidTag()
        cipher = Cipher(AES(dkey), CBC(iv), backend=default_backend())
        d = cipher.decryptor()
        data = d.update(ciphertext) + d.finalize()
        unpad = PKCS7(AES.block_size).unpadder()
        return unpad.update(data) + unpad.finalize()
|
CBCHS2EncAlgorithm
|
python
|
ray-project__ray
|
rllib/policy/tests/test_policy.py
|
{
"start": 356,
"end": 2351
}
|
class ____(unittest.TestCase):
    """Tests for Policy state (de)serialization round-trips."""

    @classmethod
    def setUpClass(cls) -> None:
        # One shared Ray cluster for the whole test class.
        ray.init()

    @classmethod
    def tearDownClass(cls) -> None:
        ray.shutdown()

    def test_policy_get_and_set_state(self):
        # Old API stack: RLModule/Learner and connector-v2 env runners disabled.
        config = (
            PPOConfig()
            .environment("CartPole-v1")
            .api_stack(
                enable_env_runner_and_connector_v2=False,
                enable_rl_module_and_learner=False,
            )
        )
        algo = config.build()
        policy = algo.get_policy()
        state1 = policy.get_state()
        algo.train()
        state2 = policy.get_state()
        # Training must have advanced the timestep (states differ).
        check(state1["global_timestep"], state2["global_timestep"], false=True)
        # Reset policy to its original state and compare.
        policy.set_state(state1)
        state3 = policy.get_state()
        # Make sure everything is the same.
        check(state1["_exploration_state"], state3["_exploration_state"])
        check(state1["global_timestep"], state3["global_timestep"])
        check(state1["weights"], state3["weights"])
        # Create a new Policy only from state (which could be part of an algorithm's
        # checkpoint). This would allow users to restore a policy w/o having access
        # to the original code (e.g. the config, policy class used, etc..).
        if isinstance(policy, (EagerTFPolicyV2, DynamicTFPolicyV2, TorchPolicyV2)):
            policy_restored_from_scratch = Policy.from_state(state3)
            state4 = policy_restored_from_scratch.get_state()
            check(state3["_exploration_state"], state4["_exploration_state"])
            check(state3["global_timestep"], state4["global_timestep"])
            # For tf static graph, the new model has different layer names
            # (as it gets written into the same graph as the old one).
            check(state3["weights"], state4["weights"])
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
TestPolicy
|
python
|
conda__conda
|
conda/deprecations.py
|
{
"start": 807,
"end": 16473
}
|
class ____:
    """Factory for deprecation helpers (decorators and module-level hooks).

    Each helper compares the handler's own version against a deprecation's
    ``deprecate_in``/``remove_in`` versions to decide whether the deprecation
    is pending (``PendingDeprecationWarning``), active (the supplied warning
    category), or past due — in which case a ``DeprecatedError`` is raised so
    the developer removes the stale code.
    """

    # Raw version string, a cheap tuple[int, ...] form of it (when the string
    # is a plain dotted-int version), and a lazily created
    # packaging.version.Version fallback for non-trivial versions.
    _version: str | None
    _version_tuple: tuple[int, ...] | None
    _version_object: Version | None

    def __init__(self: Self, version: str) -> None:
        """Factory to create a deprecation handle for the specified version.

        :param version: The version to compare against when checking deprecation statuses.
        """
        self._version = version
        # Try to parse the version string as a simple tuple[int, ...] to avoid
        # packaging.version import and costlier version comparisons.
        self._version_tuple = self._get_version_tuple(version)
        self._version_object = None

    @staticmethod
    def _get_version_tuple(version: str) -> tuple[int, ...] | None:
        """Return version as non-empty tuple of ints if possible, else None.

        :param version: Version string to parse.
        """
        try:
            return tuple(int(part) for part in version.strip().split(".")) or None
        except (AttributeError, ValueError):
            # AttributeError: version is not a string; ValueError: a part is
            # not a plain integer (e.g. "1.2.3rc1").
            return None

    def _version_less_than(self: Self, version: str) -> bool:
        """Test whether own version is less than the given version.

        :param version: Version string to compare against.
        """
        if self._version_tuple and (version_tuple := self._get_version_tuple(version)):
            return self._version_tuple < version_tuple

        # If self._version or version could not be represented by a simple
        # tuple[int, ...], do a more elaborate version parsing and comparison.
        # Avoid this import otherwise to reduce import time for conda activate.
        from packaging.version import parse

        if self._version_object is None:
            try:
                self._version_object = parse(self._version)  # type: ignore[arg-type]
            except TypeError:
                # TypeError: self._version could not be parsed
                self._version_object = parse("0.0.0.dev0+placeholder")
        return self._version_object < parse(version)

    def __call__(
        self: Self,
        deprecate_in: str,
        remove_in: str,
        *,
        addendum: str | None = None,
        stack: int = 0,
        deprecation_type: type[Warning] = DeprecationWarning,
    ) -> Callable[[Callable[P, T]], Callable[P, T]]:
        """Deprecation decorator for functions, methods, & classes.

        :param deprecate_in: Version in which code will be marked as deprecated.
        :param remove_in: Version in which code is expected to be removed.
        :param addendum: Optional additional messaging. Useful to indicate what to do instead.
        :param stack: Optional stacklevel increment.
        """

        def deprecated_decorator(obj: Callable[P, T]) -> Callable[P, T]:
            # detect function name and generate message
            category, message = self._generate_message(
                deprecate_in=deprecate_in,
                remove_in=remove_in,
                prefix=f"{obj.__module__}.{obj.__qualname__}",
                addendum=addendum,
                deprecation_type=deprecation_type,
            )

            # alert developer that it's time to remove something
            if not category:
                raise DeprecatedError(message)

            # if obj is a class, wrap the __init__
            isclass = False
            func: Callable[P, T]
            if isinstance(obj, type):
                try:
                    func = obj.__init__  # type: ignore[misc]
                except AttributeError:
                    # AttributeError: obj has no __init__
                    func = obj
                else:
                    isclass = True
            else:
                func = obj

            # alert user that it's time to remove something
            @wraps(func)  # type: ignore[reportArgumentType]
            def inner(*args: P.args, **kwargs: P.kwargs) -> T:
                warnings.warn(message, category, stacklevel=2 + stack)

                return func(*args, **kwargs)

            if isclass:
                # Patch the wrapped __init__ back onto the class so the class
                # object itself (not a wrapper function) is returned.
                obj.__init__ = inner  # type: ignore[misc]
                return obj
            else:
                return inner

        return deprecated_decorator

    def argument(
        self: Self,
        deprecate_in: str,
        remove_in: str,
        argument: str,
        *,
        rename: str | None = None,
        addendum: str | None = None,
        stack: int = 0,
        deprecation_type: type[Warning] = DeprecationWarning,
    ) -> Callable[[Callable[P, T]], Callable[P, T]]:
        """Deprecation decorator for keyword arguments.

        :param deprecate_in: Version in which code will be marked as deprecated.
        :param remove_in: Version in which code is expected to be removed.
        :param argument: The argument to deprecate.
        :param rename: Optional new argument name.
        :param addendum: Optional additional messaging. Useful to indicate what to do instead.
        :param stack: Optional stacklevel increment.
        """

        def deprecated_decorator(func: Callable[P, T]) -> Callable[P, T]:
            # detect function name and generate message
            category, message = self._generate_message(
                deprecate_in=deprecate_in,
                remove_in=remove_in,
                prefix=f"{func.__module__}.{func.__qualname__}({argument})",
                # provide a default addendum if renaming and no addendum is provided
                addendum=(
                    f"Use '{rename}' instead." if rename and not addendum else addendum
                ),
                deprecation_type=deprecation_type,
            )

            # alert developer that it's time to remove something
            if not category:
                raise DeprecatedError(message)

            # alert user that it's time to remove something
            @wraps(func)
            def inner(*args: P.args, **kwargs: P.kwargs) -> T:
                # only warn about argument deprecations if the argument is used
                if argument in kwargs:
                    warnings.warn(message, category, stacklevel=2 + stack)

                    # rename argument deprecations as needed
                    value = kwargs.pop(argument, None)
                    if rename:
                        kwargs.setdefault(rename, value)

                return func(*args, **kwargs)

            return inner

        return deprecated_decorator

    def action(
        self: Self,
        deprecate_in: str,
        remove_in: str,
        action: ActionType,
        *,
        addendum: str | None = None,
        stack: int = 0,
        deprecation_type: type[Warning] = FutureWarning,
    ) -> ActionType:
        """Wraps any argparse.Action to issue a deprecation warning."""

        class DeprecationMixin(Action):
            category: type[Warning]
            help: str  # override argparse.Action's help type annotation

            def __init__(inner_self: Self, *args: Any, **kwargs: Any) -> None:
                super().__init__(*args, **kwargs)

                category, message = self._generate_message(
                    deprecate_in=deprecate_in,
                    remove_in=remove_in,
                    prefix=(
                        # option_string are ordered shortest to longest,
                        # use the longest as it's the most descriptive
                        f"`{inner_self.option_strings[-1]}`"
                        if inner_self.option_strings
                        # if not a flag/switch, use the destination itself
                        else f"`{inner_self.dest}`"
                    ),
                    addendum=addendum,
                    deprecation_type=deprecation_type,
                )

                # alert developer that it's time to remove something
                if not category:
                    raise DeprecatedError(message)

                inner_self.category = category
                inner_self.deprecation = message
                if inner_self.help is not SUPPRESS:
                    inner_self.help = message

            def __call__(
                inner_self: Self,
                parser: ArgumentParser,
                namespace: Namespace,
                values: Any,
                option_string: str | None = None,
            ) -> None:
                # alert user that it's time to remove something
                from conda.common.constants import NULL

                if values is not NULL:
                    warnings.warn(
                        inner_self.deprecation,
                        inner_self.category,
                        stacklevel=7 + stack,
                    )

                super().__call__(parser, namespace, values, option_string)

        # Build a new Action subclass with the mixin first so its __init__ and
        # __call__ take precedence over the wrapped action's.
        return type(action.__name__, (DeprecationMixin, action), {})  # type: ignore[return-value]

    def module(
        self: Self,
        deprecate_in: str,
        remove_in: str,
        *,
        addendum: str | None = None,
        stack: int = 0,
    ) -> None:
        """Deprecation function for modules.

        :param deprecate_in: Version in which code will be marked as deprecated.
        :param remove_in: Version in which code is expected to be removed.
        :param addendum: Optional additional messaging. Useful to indicate what to do instead.
        :param stack: Optional stacklevel increment.
        """
        self.topic(
            deprecate_in=deprecate_in,
            remove_in=remove_in,
            topic=self._get_module(stack)[1],
            addendum=addendum,
            stack=2 + stack,
        )

    def constant(
        self: Self,
        deprecate_in: str,
        remove_in: str,
        constant: str,
        value: Any,
        *,
        addendum: str | None = None,
        stack: int = 0,
        deprecation_type: type[Warning] = DeprecationWarning,
    ) -> None:
        """Deprecation function for module constant/global.

        :param deprecate_in: Version in which code will be marked as deprecated.
        :param remove_in: Version in which code is expected to be removed.
        :param constant: Name of the deprecated module-level constant.
        :param value: Value the deprecated constant resolves to on access.
        :param addendum: Optional additional messaging. Useful to indicate what to do instead.
        :param stack: Optional stacklevel increment.
        """
        # detect calling module
        module, fullname = self._get_module(stack)
        # detect function name and generate message
        category, message = self._generate_message(
            deprecate_in=deprecate_in,
            remove_in=remove_in,
            prefix=f"{fullname}.{constant}",
            addendum=addendum,
            deprecation_type=deprecation_type,
        )

        # alert developer that it's time to remove something
        if not category:
            raise DeprecatedError(message)

        # patch module level __getattr__ to alert user that it's time to remove something
        # (module __getattr__ is the PEP 562 hook; chain any pre-existing hook)
        super_getattr = getattr(module, "__getattr__", None)

        def __getattr__(name: str) -> Any:
            if name == constant:
                warnings.warn(message, category, stacklevel=3 + stack)
                return value

            if super_getattr:
                return super_getattr(name)

            raise AttributeError(f"module '{fullname}' has no attribute '{name}'")

        module.__getattr__ = __getattr__  # type: ignore[method-assign]

    def topic(
        self: Self,
        deprecate_in: str,
        remove_in: str,
        *,
        topic: str,
        addendum: str | None = None,
        stack: int = 0,
        deprecation_type: type[Warning] = DeprecationWarning,
    ) -> None:
        """Deprecation function for a topic.

        :param deprecate_in: Version in which code will be marked as deprecated.
        :param remove_in: Version in which code is expected to be removed.
        :param topic: The topic being deprecated.
        :param addendum: Optional additional messaging. Useful to indicate what to do instead.
        :param stack: Optional stacklevel increment.
        """
        # detect function name and generate message
        category, message = self._generate_message(
            deprecate_in=deprecate_in,
            remove_in=remove_in,
            prefix=topic,
            addendum=addendum,
            deprecation_type=deprecation_type,
        )

        # alert developer that it's time to remove something
        if not category:
            raise DeprecatedError(message)

        # alert user that it's time to remove something
        warnings.warn(message, category, stacklevel=2 + stack)

    def _get_module(self: Self, stack: int) -> tuple[ModuleType, str]:
        """Detect the module from which we are being called.

        :param stack: The stacklevel increment.
        :return: The module and module name.
        """
        try:
            frame = sys._getframe(2 + stack)
        except IndexError:
            # IndexError: 2 + stack is out of range
            pass
        else:
            # Shortcut finding the module by manually inspecting loaded modules.
            try:
                filename = frame.f_code.co_filename
            except AttributeError:
                # AttributeError: frame.f_code.co_filename is undefined
                pass
            else:
                # use a copy of sys.modules to avoid RuntimeError during iteration
                # see https://github.com/conda/conda/issues/13754
                for loaded in tuple(sys.modules.values()):
                    if not isinstance(loaded, ModuleType):
                        continue
                    if not hasattr(loaded, "__file__"):
                        continue
                    if loaded.__file__ == filename:
                        return (loaded, loaded.__name__)

            # If above failed, do an expensive import and costly getmodule call.
            import inspect

            module = inspect.getmodule(frame)
            if module is not None:
                return (module, module.__name__)

        raise DeprecatedError("unable to determine the calling module")

    def _generate_message(
        self: Self,
        deprecate_in: str,
        remove_in: str,
        prefix: str,
        addendum: str | None,
        *,
        deprecation_type: type[Warning],
    ) -> tuple[type[Warning] | None, str]:
        """Generate the standardized deprecation message and determine whether the
        deprecation is pending, active, or past.

        :param deprecate_in: Version in which code will be marked as deprecated.
        :param remove_in: Version in which code is expected to be removed.
        :param prefix: The message prefix, usually the function name.
        :param addendum: Additional messaging. Useful to indicate what to do instead.
        :param deprecation_type: The warning type to use for active deprecations.
        :return: The warning category (if applicable) and the message.
        """
        category: type[Warning] | None
        if self._version_less_than(deprecate_in):
            # not yet deprecated: warn quietly (PendingDeprecationWarning is
            # hidden by default)
            category = PendingDeprecationWarning
            warning = f"is pending deprecation and will be removed in {remove_in}."
        elif self._version_less_than(remove_in):
            category = deprecation_type
            warning = f"is deprecated and will be removed in {remove_in}."
        else:
            # past the removal version: category None signals "raise now"
            category = None
            warning = f"was slated for removal in {remove_in}."

        return (
            category,
            " ".join(filter(None, [prefix, warning, addendum])),  # message
        )
# Module-level singleton bound to the running package version; import this to
# mark deprecations throughout the codebase.
deprecated = DeprecationHandler(__version__)
|
DeprecationHandler
|
python
|
run-llama__llama_index
|
llama-index-core/tests/memory/test_chat_summary_memory_buffer.py
|
{
"start": 2014,
"end": 15430
}
|
class ____(MockLLM):
    """Mock summarizer LLM that replays canned responses, one per chat() call."""

    # Pydantic private attributes (excluded from the model schema).
    _i: int = PrivateAttr()  # call counter; selects which canned response to return
    _responses: List[ChatMessage] = PrivateAttr()  # canned replies, served in order
    _role_counts: dict = PrivateAttr()  # MessageRole -> summarized-message tally

    def __init__(self, responses: List[ChatMessage], max_tokens: int = 512) -> None:
        """Create the stub.

        :param responses: Canned assistant replies, returned in order.
        :param max_tokens: Cap (in whitespace-separated tokens) on each reply.
        """
        super().__init__(max_tokens=max_tokens)
        self._i = 0  # call counter, determines which response to return
        self._responses = responses  # list of responses to return
        self._role_counts: dict = dict.fromkeys(MessageRole, 0)

    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        """Return the next canned response, truncated to ``max_tokens`` tokens.

        Also tallies, per role, how many messages were handed over for
        summarization, by counting "<role>: " markers in the message text.
        """
        # Count how many messages are going to be summarized for each role
        # (assumes the prompt renders each message as "<role>: ..." — the
        # marker count stands in for the message count).
        for role in MessageRole:
            for message in messages:
                self._role_counts[role] += message.content.count(role + ": ")
        del messages

        # For this mockLLM, we assume tokens are separated by spaces
        max_tokens = self.max_tokens
        if self.max_tokens > len(self._responses[self._i].content):
            max_tokens = len(self._responses[self._i].content)
        response_tokens = " ".join(
            self._responses[self._i].content.split(" ")[0:max_tokens]
        )

        response = ChatResponse(
            message=ChatMessage(role=MessageRole.ASSISTANT, content=response_tokens),
        )
        self._i += 1
        return response

    def set_max_tokens(self, max_tokens):
        # Adjust the truncation cap between calls (used by the tests).
        self.max_tokens = max_tokens

    def get_role_count(self, role: MessageRole):
        # Number of summarized messages observed for *role* across all calls.
        return self._role_counts[role]
# Canned summaries returned by the mock summarizer, in call order.
FIRST_SUMMARY_RESPONSE = "First, the user asked what an LLM was, and the assistant explained the basic ideas."
SECOND_SUMMARY_RESPONSE = (
    "The conversation started about LLMs. It then continued about LlamaIndex."
)
@pytest.fixture()
def summarizer_llm():
    """Mock LLM that yields the two canned summaries on successive calls."""
    return MockSummarizerLLM(
        responses=[
            ChatMessage(
                content=FIRST_SUMMARY_RESPONSE,
                role=MessageRole.ASSISTANT,
            ),
            ChatMessage(
                content=SECOND_SUMMARY_RESPONSE,
                role=MessageRole.ASSISTANT,
            ),
        ]
    )
@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_put_get(summarizer_llm) -> None:
    """A single short message is returned verbatim (no summarization)."""
    # Given one message with fewer tokens than token_limit
    memory = ChatSummaryMemoryBuffer.from_defaults(
        chat_history=[USER_CHAT_MESSAGE], llm=summarizer_llm
    )

    # When I get the chat history from the memory
    history = memory.get()

    # Then the history should contain the full message
    assert len(history) == 1
    assert history[0].content == USER_CHAT_MESSAGE.content


@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_put_get_summarize_long_message(summarizer_llm) -> None:
    """A message over the token limit is replaced by its summary."""
    # Given one message with more tokens than token_limit
    memory = ChatSummaryMemoryBuffer.from_defaults(
        chat_history=[LONG_USER_CHAT_MESSAGE],
        token_limit=2,
        llm=summarizer_llm,
    )

    # When I get the chat history from the memory
    history = memory.get()

    # Then the history should contain the summarized message
    assert len(history) == 1
    assert history[0].content == FIRST_SUMMARY_RESPONSE


@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_put_get_summarize_message_with_tool_call(summarizer_llm) -> None:
    """Tool-calling messages (content=None) summarize without errors."""
    # Given one message with more tokens than token_limit and tool calls
    # This case test 2 things:
    # 1. It can summarize the ASSISTANT_TOOL_CALLING_MESSAGE with content=None (Issue #14014).
    # 2. In `_handle_assistant_and_tool_messages`, when chat_history_full_text only
    #    contains tool calls or assistant messages, it could add them all into
    #    `chat_history_to_be_summarized`, without triggering the IndexError.
    memory = ChatSummaryMemoryBuffer.from_defaults(
        chat_history=[
            LONG_USER_CHAT_MESSAGE,
            ASSISTANT_TOOL_CALLING_MESSAGE,
            TOOL_CHAT_MESSAGE,
            ASSISTANT_CHAT_MESSAGE,
        ],
        token_limit=LONG_USER_CHAT_MESSAGE_TOKENS,
        llm=summarizer_llm,
    )

    # When I get the chat history from the memory
    history = memory.get()

    # Then the history should contain the summarized message
    assert len(history) == 1
    assert history[0].content == FIRST_SUMMARY_RESPONSE
@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_put_get_summarize_part_of_conversation(summarizer_llm) -> None:
    """Older messages collapse into a SYSTEM summary; recent ones stay whole."""
    # Given a chat history where only 2 responses fit in the token_limit
    tokens_most_recent_messages = sum(
        [
            len(tokenizer(str(LONG_RUNNING_CONVERSATION[-i].content)))
            for i in range(1, 3)
        ]
    )
    memory = ChatSummaryMemoryBuffer.from_defaults(
        chat_history=LONG_RUNNING_CONVERSATION.copy(),
        token_limit=tokens_most_recent_messages,
        llm=summarizer_llm,
    )

    # When I get the chat history from the memory
    history = memory.get()

    # Then the history should contain the full message for the latest two and
    # a summary for the older messages
    assert len(history) == 3
    assert history[0].content == FIRST_SUMMARY_RESPONSE
    assert history[0].role == MessageRole.SYSTEM
    assert history[1].content == "Message 4"
    assert history[2].content == "Message 5"

    # When I add new messages to the history
    memory.put(ChatMessage(role=MessageRole.USER, content="Message 6"))
    memory.put(ChatMessage(role=MessageRole.ASSISTANT, content="Message 7"))

    # Then the history should re-summarize
    history = memory.get()
    assert len(history) == 3
    assert history[0].content == SECOND_SUMMARY_RESPONSE
    assert history[0].role == MessageRole.SYSTEM
    assert history[1].content == "Message 6"
    assert history[2].content == "Message 7"


@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_get_when_initial_tokens_less_than_limit_returns_history() -> None:
    """An initial token count under the limit leaves the history untouched."""
    # Given some initial tokens much smaller than token_limit and message tokens
    initial_tokens = 5

    # Given a user message
    memory = ChatSummaryMemoryBuffer.from_defaults(
        token_limit=1000, chat_history=[USER_CHAT_MESSAGE]
    )

    # When I get the chat history from the memory
    history = memory.get(initial_tokens)

    # Then the history should contain the message
    assert len(history) == 1
    assert history[0] == USER_CHAT_MESSAGE


@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_get_when_initial_tokens_exceed_limit_raises_value_error() -> None:
    """An initial token count over the limit raises ValueError."""
    # Given some initial tokens exceeding token_limit
    initial_tokens = 50
    memory = ChatSummaryMemoryBuffer.from_defaults(
        chat_history=[USER_CHAT_MESSAGE],
        token_limit=initial_tokens - 1,
        count_initial_tokens=True,
    )

    # When I get the chat history from the memory
    with pytest.raises(ValueError) as error:
        memory.get(initial_token_count=initial_tokens)

    # Then a value error should be raised
    assert str(error.value) == "Initial token count exceeds token limit"
@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_set() -> None:
    """set() replaces the stored history wholesale."""
    memory = ChatSummaryMemoryBuffer.from_defaults(chat_history=[USER_CHAT_MESSAGE])

    memory.put(USER_CHAT_MESSAGE)
    assert len(memory.get()) == 2

    memory.set([USER_CHAT_MESSAGE])
    assert len(memory.get()) == 1


@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_max_tokens_without_summarizer() -> None:
    """Without an LLM, overflow messages are simply dropped, newest kept."""
    memory = ChatSummaryMemoryBuffer.from_defaults(
        chat_history=[USER_CHAT_MESSAGE], token_limit=5
    )

    memory.put(USER_CHAT_MESSAGE)
    assert len(memory.get()) == 2

    # do we limit properly
    memory.put(USER_CHAT_MESSAGE)
    memory.put(USER_CHAT_MESSAGE)
    assert len(memory.get()) == 2
    # In ChatSummaryMemoryBuffer, we overwrite the actual chat history
    assert len(memory.get_all()) == 2

    # does get return in the correct order?
    memory.put(ChatMessage(role=MessageRole.USER, content="test message2"))
    assert memory.get()[-1].content == "test message2"
    assert len(memory.get()) == 2


@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_max_tokens_with_summarizer(summarizer_llm) -> None:
    """With an LLM, overflow is summarized and the summary is re-generated."""
    max_tokens = 1
    summarizer_llm.set_max_tokens(max_tokens)
    memory = ChatSummaryMemoryBuffer.from_defaults(
        llm=summarizer_llm,
        chat_history=[USER_CHAT_MESSAGE],
        token_limit=5,
    )

    # do we limit properly
    memory.put(USER_CHAT_MESSAGE)
    memory.put(USER_CHAT_MESSAGE)
    memory_results = memory.get()
    assert len(memory_results) == 3
    # Oldest message is summarized
    assert memory_results[0].content == " ".join(
        FIRST_SUMMARY_RESPONSE.split(" ")[0:max_tokens]
    )
    assert memory_results[0].role == MessageRole.SYSTEM
    # In ChatSummaryMemoryBuffer, we overwrite the actual chat history
    assert len(memory.get_all()) == 3

    # does get return in the correct order?
    memory.put(ChatMessage(role=MessageRole.USER, content="test message2"))
    memory_results = memory.get()
    assert memory_results[-1].content == "test message2"
    assert len(memory_results) == 3
    # Oldest message is summarized based on the latest information
    assert memory_results[0].content == " ".join(
        SECOND_SUMMARY_RESPONSE.split(" ")[0:max_tokens]
    )
@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_assistant_never_first_message(summarizer_llm) -> None:
    """The kept window never starts with an assistant message."""
    chat_history = [
        USER_CHAT_MESSAGE,
        ASSISTANT_CHAT_MESSAGE,
        USER_CHAT_MESSAGE,
        ASSISTANT_CHAT_MESSAGE,
    ]
    tokens_last_3_messages = sum(
        [len(tokenizer(str(chat_history[-i].content))) for i in range(1, 4)]
    )
    # When exactly 3 messages fit the buffer, with first being assistant
    memory = ChatSummaryMemoryBuffer.from_defaults(
        chat_history=chat_history,
        llm=summarizer_llm,
        token_limit=tokens_last_3_messages,
        count_initial_tokens=False,
    )
    memory_results = memory.get()

    # the assistant message should be summarized instead of full text
    assert len(memory_results) == 3
    assert summarizer_llm.get_role_count(MessageRole.ASSISTANT) == 1
    assert memory_results[1].role == MessageRole.USER
    assert memory_results[2].role == MessageRole.ASSISTANT


@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_assistant_tool_pairs(summarizer_llm) -> None:
    """Assistant/tool message pairs are summarized together, never split."""
    chat_history = [
        USER_CHAT_MESSAGE,
        ASSISTANT_CHAT_MESSAGE,
        TOOL_CHAT_MESSAGE,
        USER_CHAT_MESSAGE,
        ASSISTANT_CHAT_MESSAGE,
    ]
    tokens_last_3_messages = sum(
        [len(tokenizer(str(chat_history[-i].content))) for i in range(1, 4)]
    )
    # When exactly 3 messages fit the buffer, with first being assistant
    memory = ChatSummaryMemoryBuffer.from_defaults(
        chat_history=chat_history,
        llm=summarizer_llm,
        token_limit=tokens_last_3_messages,
        count_initial_tokens=False,
    )
    memory_results = memory.get()

    # the tool message should be summarized along with the assistant message
    assert len(memory_results) == 3
    assert summarizer_llm.get_role_count(MessageRole.TOOL) == 1
    assert memory_results[1].role == MessageRole.USER
    assert memory_results[2].role == MessageRole.ASSISTANT
@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_string_save_load(summarizer_llm) -> None:
    """Round-trip through to_string/from_string preserves config (but not the LLM)."""
    memory = ChatSummaryMemoryBuffer.from_defaults(
        llm=summarizer_llm,
        chat_history=[USER_CHAT_MESSAGE],
        token_limit=5,
        summarize_prompt="Mock summary",
        count_initial_tokens=True,
    )

    json_str = memory.to_string()

    new_memory = ChatSummaryMemoryBuffer.from_string(json_str)

    assert len(new_memory.get()) == 1
    assert new_memory.token_limit == 5
    assert new_memory.summarize_prompt == "Mock summary"
    assert new_memory.count_initial_tokens
    # The user needs to set the llm manually when loading (and it needs to match the tokenizer_fn)
    assert new_memory.llm is None
    new_memory.llm = summarizer_llm


@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_dict_save_load(summarizer_llm) -> None:
    """Round-trip through to_dict/from_dict preserves config (but not the LLM)."""
    memory = ChatSummaryMemoryBuffer.from_defaults(
        llm=summarizer_llm,
        chat_history=[USER_CHAT_MESSAGE],
        token_limit=5,
        summarize_prompt="Mock summary",
        count_initial_tokens=True,
    )

    json_dict = memory.to_dict()

    new_memory = ChatSummaryMemoryBuffer.from_dict(json_dict)

    assert len(new_memory.get()) == 1
    assert new_memory.token_limit == 5
    assert new_memory.summarize_prompt == "Mock summary"
    assert new_memory.count_initial_tokens
    # The user needs to set the llm manually when loading (and it needs to match the tokenizer_fn)
    assert new_memory.llm is None
    new_memory.llm = summarizer_llm


@pytest.mark.skipif(not openai_installed, reason="OpenAI not installed")
def test_pickle() -> None:
    """Unpickleable tiktoken tokenizer should be circumvented when pickling."""
    memory = ChatSummaryMemoryBuffer.from_defaults()
    bytes_ = pickle.dumps(memory)
    assert isinstance(pickle.loads(bytes_), ChatSummaryMemoryBuffer)
|
MockSummarizerLLM
|
python
|
neetcode-gh__leetcode
|
python/0605-can-place-flowers.py
|
{
"start": 1119,
"end": 1478
}
|
class ____:
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
# Solution with O(n) space complexity
f = [0] + flowerbed + [0]
for i in range(1, len(f) - 1): # skip first & last
if f[i - 1] == 0 and f[i] == 0 and f[i + 1] == 0:
f[i] = 1
n -= 1
return n <= 0
|
Solution3
|
python
|
walkccc__LeetCode
|
solutions/599. Minimum Index Sum of Two Lists/599.py
|
{
"start": 0,
"end": 514
}
|
class ____:
def findRestaurant(self, list1: list[str], list2: list[str]) -> list[str]:
ans = []
restaurantToIndex = {restaurant: i for i,
restaurant in enumerate(list1)}
minSum = math.inf
for i, restaurant in enumerate(list2):
if restaurant in restaurantToIndex:
summ = restaurantToIndex[restaurant] + i
if summ < minSum:
ans.clear()
if summ <= minSum:
ans.append(restaurant)
minSum = summ
return ans
|
Solution
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-lindorm/llama_index/vector_stores/lindorm/base.py
|
{
"start": 27744,
"end": 32943
}
|
class ____(BasePydanticVectorStore):
    """
    Lindorm vector store.

    Args:
        client (LindormVectorClient): Vector index client to use.
            for data insertion/querying.

    Examples:
        `pip install llama-index`
        `pip install opensearch-py`
        `pip install llama-index-vector-stores-lindorm`

        ```python
        from llama_index.vector_stores.lindorm import (
            LindormVectorStore,
            LindormVectorClient,
        )

        # lindorm instance info
        # how to obtain an lindorm search instance:
        # https://alibabacloud.com/help/en/lindorm/latest/create-an-instance

        # how to access your lindorm search instance:
        # https://www.alibabacloud.com/help/en/lindorm/latest/view-endpoints

        # run curl commands to connect to and use LindormSearch:
        # https://www.alibabacloud.com/help/en/lindorm/latest/connect-and-use-the-search-engine-with-the-curl-command
        host = "ld-bp******jm*******-proxy-search-pub.lindorm.aliyuncs.com"
        port = 30070
        username = 'your_username'
        password = 'your_password'

        # index to demonstrate the VectorStore impl
        index_name = "lindorm_test_index"

        # extension param of lindorm search, number of cluster units to query; between 1 and method.parameters.nlist.
        nprobe = "a number(string type)"

        # extension param of lindorm search, usually used to improve recall accuracy, but it increases performance overhead;
        # between 1 and 200; default: 10.
        reorder_factor = "a number(string type)"

        # LindormVectorClient encapsulates logic for a single index with vector search enabled
        client = LindormVectorClient(
            host=host,
            port=port,
            username=username,
            password=password,
            index=index_name,
            dimension=1536,  # match with your embedding model
            nprobe=nprobe,
            reorder_factor=reorder_factor,
            # filter_type="pre_filter/post_filter(default)"
        )

        # initialize vector store
        vector_store = LindormVectorStore(client)
        ```

    """

    # This store keeps node text alongside vectors.
    stores_text: bool = True
    # Underlying index client; private pydantic attr, set in __init__.
    _client: LindormVectorClient = PrivateAttr(default=None)

    def __init__(
        self,
        client: LindormVectorClient,
    ) -> None:
        """Initialize params."""
        super().__init__()
        self._client = client

    @property
    def client(self) -> Any:
        """Get client."""
        return self._client

    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """
        Add nodes to index.

        Synchronous wrapper,using asynchronous logic of async_add function in synchronous way.

        Args:
            nodes: List[BaseNode]: list of nodes with embeddings.

        Returns:
            List[str]: List of node_ids

        """
        # NOTE(review): asyncio.get_event_loop() is deprecated when no loop is
        # running (Python 3.10+) and run_until_complete raises if called from
        # inside a running loop — consider asyncio.run / an async-safe helper.
        return asyncio.get_event_loop().run_until_complete(
            self.async_add(nodes, **add_kwargs)
        )

    async def async_add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """
        Async add nodes to index.

        Args:
            nodes: List[BaseNode]: list of nodes with embeddings.

        Returns:
            List[str]: List of node_ids

        """
        await self._client.index_results(nodes)
        return [result.node_id for result in nodes]

    def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Delete nodes using a ref_doc_id.

        Synchronous wrapper,using asynchronous logic of async_add function in synchronous way.

        Args:
            ref_doc_id (str): The doc_id of the document whose nodes should be deleted.

        """
        # NOTE(review): same event-loop caveat as ``add`` above.
        asyncio.get_event_loop().run_until_complete(
            self.adelete(ref_doc_id, **delete_kwargs)
        )

    async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
        """
        Async delete nodes using a ref_doc_id.

        Args:
            ref_doc_id (str): The doc_id of the document whose nodes should be deleted.

        """
        await self._client.delete_by_doc_id(ref_doc_id)

    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        """
        Query index for top k most similar nodes.

        Synchronous wrapper,using asynchronous logic of async_add function in synchronous way.

        Args:
            query (VectorStoreQuery): Store query object.

        """
        # NOTE(review): same event-loop caveat as ``add`` above.
        return asyncio.get_event_loop().run_until_complete(self.aquery(query, **kwargs))

    async def aquery(
        self, query: VectorStoreQuery, **kwargs: Any
    ) -> VectorStoreQueryResult:
        """
        Async query index for top k most similar nodes.

        Args:
            query (VectorStoreQuery): Store query object.

        """
        query_embedding = cast(List[float], query.query_embedding)
        return await self._client.aquery(
            query.mode,
            query.query_str,
            query_embedding,
            query.similarity_top_k,
            filters=query.filters,
        )
|
LindormVectorStore
|
python
|
neetcode-gh__leetcode
|
python/1700-number-of-students-unable-to-eat-lunch.py
|
{
"start": 0,
"end": 513
}
|
class ____:
def countStudents(self, students: List[int], sandwiches: List[int]) -> int:
num_of_students_back_in_line = 0
while num_of_students_back_in_line != len(students):
curr_student = students.pop(0)
if curr_student == sandwiches[0]:
sandwiches.pop(0)
num_of_students_back_in_line = 0
else:
students.append(curr_student)
num_of_students_back_in_line += 1
return len(students)
|
Solution
|
python
|
redis__redis-py
|
tests/test_asyncio/test_multidb/test_config.py
|
{
"start": 734,
"end": 5288
}
|
class ____:
    """Unit tests for MultiDbConfig default and overridden wiring."""

    def test_default_config(self):
        """With only databases_config given, all collaborators use defaults."""
        db_configs = [
            DatabaseConfig(
                client_kwargs={"host": "host1", "port": "port1"}, weight=1.0
            ),
            DatabaseConfig(
                client_kwargs={"host": "host2", "port": "port2"}, weight=0.9
            ),
            DatabaseConfig(
                client_kwargs={"host": "host3", "port": "port3"}, weight=0.8
            ),
        ]
        config = MultiDbConfig(databases_config=db_configs)

        assert config.databases_config == db_configs

        databases = config.databases()
        assert len(databases) == 3

        i = 0
        for db, weight in databases:
            assert isinstance(db, Database)
            assert weight == db_configs[i].weight
            assert db.circuit.grace_period == DEFAULT_GRACE_PERIOD
            # each client gets its own retry object, not the shared one
            assert db.client.get_retry() is not config.command_retry
            i += 1

        assert len(config.default_failure_detectors()) == 1
        assert isinstance(
            config.default_failure_detectors()[0], FailureDetectorAsyncWrapper
        )
        assert len(config.default_health_checks()) == 1
        assert isinstance(config.default_health_checks()[0], PingHealthCheck)
        assert config.health_check_interval == DEFAULT_HEALTH_CHECK_INTERVAL
        assert isinstance(
            config.default_failover_strategy(), WeightBasedFailoverStrategy
        )
        assert config.auto_fallback_interval == DEFAULT_AUTO_FALLBACK_INTERVAL
        assert isinstance(config.command_retry, Retry)

    def test_overridden_config(self):
        """Explicitly supplied collaborators are used verbatim, not defaults."""
        grace_period = 2
        mock_connection_pools = [
            Mock(spec=ConnectionPool),
            Mock(spec=ConnectionPool),
            Mock(spec=ConnectionPool),
        ]
        mock_connection_pools[0].connection_kwargs = {}
        mock_connection_pools[1].connection_kwargs = {}
        mock_connection_pools[2].connection_kwargs = {}
        mock_cb1 = Mock(spec=CircuitBreaker)
        mock_cb1.grace_period = grace_period
        mock_cb2 = Mock(spec=CircuitBreaker)
        mock_cb2.grace_period = grace_period
        mock_cb3 = Mock(spec=CircuitBreaker)
        mock_cb3.grace_period = grace_period
        mock_failure_detectors = [
            Mock(spec=AsyncFailureDetector),
            Mock(spec=AsyncFailureDetector),
        ]
        mock_health_checks = [Mock(spec=HealthCheck), Mock(spec=HealthCheck)]
        health_check_interval = 10
        mock_failover_strategy = Mock(spec=AsyncFailoverStrategy)
        auto_fallback_interval = 10
        db_configs = [
            DatabaseConfig(
                client_kwargs={"connection_pool": mock_connection_pools[0]},
                weight=1.0,
                circuit=mock_cb1,
            ),
            DatabaseConfig(
                client_kwargs={"connection_pool": mock_connection_pools[1]},
                weight=0.9,
                circuit=mock_cb2,
            ),
            DatabaseConfig(
                client_kwargs={"connection_pool": mock_connection_pools[2]},
                weight=0.8,
                circuit=mock_cb3,
            ),
        ]

        config = MultiDbConfig(
            databases_config=db_configs,
            failure_detectors=mock_failure_detectors,
            health_checks=mock_health_checks,
            health_check_interval=health_check_interval,
            failover_strategy=mock_failover_strategy,
            auto_fallback_interval=auto_fallback_interval,
        )

        assert config.databases_config == db_configs

        databases = config.databases()
        assert len(databases) == 3

        i = 0
        for db, weight in databases:
            assert isinstance(db, Database)
            assert weight == db_configs[i].weight
            assert db.client.connection_pool == mock_connection_pools[i]
            assert db.circuit.grace_period == grace_period
            i += 1

        assert len(config.failure_detectors) == 2
        assert config.failure_detectors[0] == mock_failure_detectors[0]
        assert config.failure_detectors[1] == mock_failure_detectors[1]
        assert len(config.health_checks) == 2
        assert config.health_checks[0] == mock_health_checks[0]
        assert config.health_checks[1] == mock_health_checks[1]
        assert config.health_check_interval == health_check_interval
        assert config.failover_strategy == mock_failover_strategy
        assert config.auto_fallback_interval == auto_fallback_interval
@pytest.mark.onlynoncluster
|
TestMultiDbConfig
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/generator1.py
|
{
"start": 336,
"end": 416
}
|
class ____:
    # Type-checker test sample: reads the module-level ``s`` (declared
    # elsewhere in the file) via an explicit ``global`` statement.
    def shouldContinue(self):
        global s
        return s
|
ClassB
|
python
|
cython__cython
|
Cython/Compiler/Code.py
|
{
"start": 36538,
"end": 47062
}
|
class ____:
# return_label string function return point label
# error_label string error catch point label
# error_without_exception boolean Can go to the error label without an exception (e.g. __next__ can return NULL)
# continue_label string loop continue point label
# break_label string loop break point label
# return_from_error_cleanup_label string
# label_counter integer counter for naming labels
# exc_vars (string * 3) exception variables for reraise, or None
# can_trace boolean line tracing is supported in the current context
# scope Scope the scope object of the current function
# Not used for now, perhaps later
def __init__(self, owner, names_taken=set(), scope=None):
self.names_taken = names_taken
self.owner = owner
self.scope = scope
self.error_label = None
self.label_counter = 0
self.labels_used = set()
self.return_label = self.new_label()
self.new_error_label()
self.continue_label = None
self.break_label = None
self.yield_labels = []
self.exc_vars = None
self.current_except = None
self.can_trace = False
self.gil_owned = True
self.temps_allocated = [] # of (name, type, manage_ref, static)
self.temps_free = {} # (type, manage_ref) -> list of free vars with same type/managed status
self.temps_used_type = {} # name -> (type, manage_ref)
self.zombie_temps = set() # temps that must not be reused after release
self.temp_counter = 0
self.closure_temps = None
# This is used to collect temporaries, useful to find out which temps
# need to be privatized in parallel sections
self.collect_temps_stack = []
# This is used for the error indicator, which needs to be local to the
# function. It used to be global, which relies on the GIL being held.
# However, exceptions may need to be propagated through 'nogil'
# sections, in which case we introduce a race condition.
self.should_declare_error_indicator = False
self.uses_error_indicator = False
self.error_without_exception = False
self.needs_refnanny = False
# safety checks
def validate_exit(self):
# validate that all allocated temps have been freed
if self.temps_allocated:
leftovers = self.temps_in_use()
if leftovers:
msg = "TEMPGUARD: Temps left over at end of '%s': %s" % (self.scope.name, ', '.join([
'%s [%s]' % (name, ctype)
for name, ctype, is_pytemp in sorted(leftovers)]),
)
#print(msg)
raise RuntimeError(msg)
# labels
def new_label(self, name=None):
n: cython.size_t = self.label_counter
self.label_counter = n + 1
label = "%s%d" % (Naming.label_prefix, n)
if name is not None:
label += '_' + name
return label
def new_yield_label(self, expr_type='yield'):
label = self.new_label('resume_from_%s' % expr_type)
num_and_label = (len(self.yield_labels) + 1, label)
self.yield_labels.append(num_and_label)
return num_and_label
def new_error_label(self, prefix=""):
old_err_lbl = self.error_label
self.error_label = self.new_label(prefix + 'error')
return old_err_lbl
def get_loop_labels(self):
return (
self.continue_label,
self.break_label)
def set_loop_labels(self, labels):
(self.continue_label,
self.break_label) = labels
def new_loop_labels(self, prefix=""):
old_labels = self.get_loop_labels()
self.set_loop_labels(
(self.new_label(prefix + "continue"),
self.new_label(prefix + "break")))
return old_labels
def get_all_labels(self):
return (
self.continue_label,
self.break_label,
self.return_label,
self.error_label)
def set_all_labels(self, labels):
(self.continue_label,
self.break_label,
self.return_label,
self.error_label) = labels
def all_new_labels(self):
old_labels = self.get_all_labels()
new_labels = []
for old_label, name in zip(old_labels, ['continue', 'break', 'return', 'error']):
if old_label:
new_labels.append(self.new_label(name))
else:
new_labels.append(old_label)
self.set_all_labels(new_labels)
return old_labels
def use_label(self, lbl):
self.labels_used.add(lbl)
def label_used(self, lbl):
return lbl in self.labels_used
# temp handling
def allocate_temp(self, type, manage_ref, static=False, reusable=True):
"""
Allocates a temporary (which may create a new one or get a previously
allocated and released one of the same type). Type is simply registered
and handed back, but will usually be a PyrexType.
If type.needs_refcounting, manage_ref comes into play. If manage_ref is set to
True, the temp will be decref-ed on return statements and in exception
handling clauses. Otherwise the caller has to deal with any reference
counting of the variable.
If not type.needs_refcounting, then manage_ref will be ignored, but it
still has to be passed. It is recommended to pass False by convention
if it is known that type will never be a reference counted type.
static=True marks the temporary declaration with "static".
This is only used when allocating backing store for a module-level
C array literals.
if reusable=False, the temp will not be reused after release.
A C string referring to the variable is returned.
"""
if type.is_cv_qualified and not type.is_reference:
type = type.cv_base_type
elif type.is_reference and not type.is_fake_reference:
type = type.ref_base_type
elif type.is_cfunction:
from . import PyrexTypes
type = PyrexTypes.c_ptr_type(type) # A function itself isn't an l-value
elif type.is_cpp_class and not type.is_fake_reference and self.scope.directives['cpp_locals']:
self.scope.use_utility_code(UtilityCode.load_cached("OptionalLocals", "CppSupport.cpp"))
if not type.needs_refcounting:
# Make manage_ref canonical, so that manage_ref will always mean
# a decref is needed.
manage_ref = False
freelist = self.temps_free.get((type, manage_ref))
if reusable and freelist is not None and freelist[0]:
result = freelist[0].pop()
freelist[1].remove(result)
else:
while True:
self.temp_counter += 1
result = "%s%d" % (Naming.codewriter_temp_prefix, self.temp_counter)
if result not in self.names_taken: break
self.temps_allocated.append((result, type, manage_ref, static))
if not reusable:
self.zombie_temps.add(result)
self.temps_used_type[result] = (type, manage_ref)
if DebugFlags.debug_temp_code_comments:
self.owner.putln("/* %s allocated (%s)%s */" % (result, type, "" if reusable else " - zombie"))
if self.collect_temps_stack:
self.collect_temps_stack[-1].add((result, type))
return result
def release_temp(self, name):
"""
Releases a temporary so that it can be reused by other code needing
a temp of the same type.
"""
type, manage_ref = self.temps_used_type[name]
freelist = self.temps_free.get((type, manage_ref))
if freelist is None:
freelist = ([], set()) # keep order in list and make lookups in set fast
self.temps_free[(type, manage_ref)] = freelist
if name in freelist[1]:
raise RuntimeError("Temp %s freed twice!" % name)
if name not in self.zombie_temps:
freelist[0].append(name)
freelist[1].add(name)
if DebugFlags.debug_temp_code_comments:
self.owner.putln("/* %s released %s*/" % (
name, " - zombie" if name in self.zombie_temps else ""))
def temps_in_use(self):
"""Return a list of (cname,type,manage_ref) tuples of temp names and their type
that are currently in use.
"""
used = []
for name, type, manage_ref, static in self.temps_allocated:
freelist = self.temps_free.get((type, manage_ref))
if freelist is None or name not in freelist[1]:
used.append((name, type, manage_ref and type.needs_refcounting))
return used
def temps_holding_reference(self):
"""Return a list of (cname,type) tuples of temp names and their type
that are currently in use. This includes only temps
with a reference counted type which owns its reference.
"""
return [(name, type)
for name, type, manage_ref in self.temps_in_use()
if manage_ref and type.needs_refcounting]
def all_managed_temps(self):
"""Return a list of (cname, type) tuples of refcount-managed Python objects.
"""
return [(cname, type)
for cname, type, manage_ref, static in self.temps_allocated
if manage_ref]
def all_free_managed_temps(self):
"""Return a list of (cname, type) tuples of refcount-managed Python
objects that are not currently in use. This is used by
try-except and try-finally blocks to clean up temps in the
error case.
"""
return sorted([ # Enforce deterministic order.
(cname, type)
for (type, manage_ref), freelist in self.temps_free.items() if manage_ref
for cname in freelist[0]
])
def start_collecting_temps(self):
"""
Useful to find out which temps were used in a code block
"""
self.collect_temps_stack.append(set())
def stop_collecting_temps(self):
return self.collect_temps_stack.pop()
def init_closure_temps(self, scope):
self.closure_temps = ClosureTempAllocator(scope)
|
FunctionState
|
python
|
vyperlang__vyper
|
vyper/ast/nodes.py
|
{
"start": 24803,
"end": 25210
}
|
class ____(Constant):
# inherited class for all numeric constant node types
__slots__ = ()
def validate(self):
if self.value < SizeLimits.MIN_INT256:
raise OverflowException("Value is below lower bound for all numeric types", self)
if self.value > SizeLimits.MAX_UINT256:
raise OverflowException("Value exceeds upper bound for all numeric types", self)
|
Num
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/dataflow.py
|
{
"start": 9276,
"end": 23468
}
|
class ____(DataflowJobTerminalStateHelper):
"""
Interface for communication with Google Cloud Dataflow API.
Does not use Apache Beam API.
:param dataflow: Discovery resource
:param project_number: The Google Cloud Project ID.
:param location: Job location.
:param poll_sleep: The status refresh rate for pending operations.
:param name: The Job ID prefix used when the multiple_jobs option is passed is set to True.
:param job_id: ID of a single job.
:param num_retries: Maximum number of retries in case of connection problems.
:param multiple_jobs: If set to true this task will be searched by name prefix (``name`` parameter),
not by specific job ID, then actions will be performed on all matching jobs.
:param drain_pipeline: Optional, set to True if we want to stop streaming job by draining it
instead of canceling.
:param cancel_timeout: wait time in seconds for successful job canceling
:param wait_until_finished: If True, wait for the end of pipeline execution before exiting. If False,
it only submits job and check once is job not in terminal state.
The default behavior depends on the type of pipeline:
* for the streaming pipeline, wait for jobs to be in JOB_STATE_RUNNING,
* for the batch pipeline, wait for the jobs to be in JOB_STATE_DONE.
"""
def __init__(
self,
dataflow: Any,
project_number: str,
location: str,
poll_sleep: int = 10,
name: str | None = None,
job_id: str | None = None,
num_retries: int = 0,
multiple_jobs: bool = False,
drain_pipeline: bool = False,
cancel_timeout: int | None = 5 * 60,
wait_until_finished: bool | None = None,
expected_terminal_state: str | None = None,
) -> None:
super().__init__()
self._dataflow = dataflow
self._project_number = project_number
self._job_name = name
self._job_location = location
self._multiple_jobs = multiple_jobs
self._job_id = job_id
self._num_retries = num_retries
self._poll_sleep = poll_sleep
self._cancel_timeout = cancel_timeout
self._jobs: list[dict] | None = None
self.drain_pipeline = drain_pipeline
self._wait_until_finished = wait_until_finished
self._expected_terminal_state = expected_terminal_state
def is_job_running(self) -> bool:
"""
Check if job is still running in dataflow.
:return: True if job is running.
"""
self._refresh_jobs()
if not self._jobs:
return False
return any(job["currentState"] not in DataflowJobStatus.TERMINAL_STATES for job in self._jobs)
def _get_current_jobs(self) -> list[dict]:
"""
Get list of jobs that start with job name or id.
:return: list of jobs including id's
"""
if not self._multiple_jobs and self._job_id:
return [self.fetch_job_by_id(self._job_id)]
if self._jobs:
return [self.fetch_job_by_id(job["id"]) for job in self._jobs]
if self._job_name:
jobs = self._fetch_jobs_by_prefix_name(self._job_name.lower())
if len(jobs) == 1:
self._job_id = jobs[0]["id"]
return jobs
raise ValueError("Missing both dataflow job ID and name.")
def fetch_job_by_id(self, job_id: str) -> dict[str, str]:
"""
Fetch the job with the specified Job ID.
:param job_id: ID of the job that needs to be fetched.
:return: Dictionary containing the Job's data
"""
return (
self._dataflow.projects()
.locations()
.jobs()
.get(
projectId=self._project_number,
location=self._job_location,
jobId=job_id,
)
.execute(num_retries=self._num_retries)
)
def fetch_job_metrics_by_id(self, job_id: str) -> dict:
"""
Fetch the job metrics with the specified Job ID.
:param job_id: Job ID to get.
:return: the JobMetrics. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/JobMetrics
"""
result = (
self._dataflow.projects()
.locations()
.jobs()
.getMetrics(projectId=self._project_number, location=self._job_location, jobId=job_id)
.execute(num_retries=self._num_retries)
)
self.log.debug("fetch_job_metrics_by_id %s:\n%s", job_id, result)
return result
def _fetch_list_job_messages_responses(self, job_id: str) -> Generator[dict, None, None]:
"""
Fetch ListJobMessagesResponse with the specified Job ID.
:param job_id: Job ID to get.
:return: yields the ListJobMessagesResponse. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse
"""
request = (
self._dataflow.projects()
.locations()
.jobs()
.messages()
.list(projectId=self._project_number, location=self._job_location, jobId=job_id)
)
while request is not None:
response = request.execute(num_retries=self._num_retries)
yield response
request = (
self._dataflow.projects()
.locations()
.jobs()
.messages()
.list_next(previous_request=request, previous_response=response)
)
def fetch_job_messages_by_id(self, job_id: str) -> list[dict]:
"""
Fetch the job messages with the specified Job ID.
:param job_id: Job ID to get.
:return: the list of JobMessages. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse#JobMessage
"""
messages: list[dict] = []
for response in self._fetch_list_job_messages_responses(job_id=job_id):
messages.extend(response.get("jobMessages", []))
return messages
def fetch_job_autoscaling_events_by_id(self, job_id: str) -> list[dict]:
"""
Fetch the job autoscaling events with the specified Job ID.
:param job_id: Job ID to get.
:return: the list of AutoscalingEvents. See:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/ListJobMessagesResponse#autoscalingevent
"""
autoscaling_events: list[dict] = []
for response in self._fetch_list_job_messages_responses(job_id=job_id):
autoscaling_events.extend(response.get("autoscalingEvents", []))
return autoscaling_events
def _fetch_all_jobs(self) -> list[dict]:
request = (
self._dataflow.projects()
.locations()
.jobs()
.list(projectId=self._project_number, location=self._job_location)
)
all_jobs: list[dict] = []
while request is not None:
response = request.execute(num_retries=self._num_retries)
jobs = response.get("jobs")
if jobs is None:
break
all_jobs.extend(jobs)
request = (
self._dataflow.projects()
.locations()
.jobs()
.list_next(previous_request=request, previous_response=response)
)
return all_jobs
def _fetch_jobs_by_prefix_name(self, prefix_name: str) -> list[dict]:
jobs = self._fetch_all_jobs()
jobs = [job for job in jobs if job["name"].startswith(prefix_name)]
return jobs
def _refresh_jobs(self) -> None:
"""
Get all jobs by name.
:return: jobs
"""
self._jobs = self._get_current_jobs()
if self._jobs:
for job in self._jobs:
self.log.info(
"Google Cloud DataFlow job %s is state: %s",
job["name"],
job["currentState"],
)
else:
self.log.info("Google Cloud DataFlow job not available yet..")
def _check_dataflow_job_state(self, job) -> bool:
"""
Check the state of one job in dataflow for this task if job failed raise exception.
:return: True if job is done.
:raise: Exception
"""
current_state = job["currentState"]
is_streaming = job.get("type") == DataflowJobType.JOB_TYPE_STREAMING
current_expected_state = self._expected_terminal_state
if current_expected_state is None:
if is_streaming:
current_expected_state = DataflowJobStatus.JOB_STATE_RUNNING
else:
current_expected_state = DataflowJobStatus.JOB_STATE_DONE
terminal_states = DataflowJobStatus.TERMINAL_STATES | {DataflowJobStatus.JOB_STATE_RUNNING}
if current_expected_state not in terminal_states:
raise AirflowException(
f"Google Cloud Dataflow job's expected terminal state "
f"'{current_expected_state}' is invalid."
f" The value should be any of the following: {terminal_states}"
)
if is_streaming and current_expected_state == DataflowJobStatus.JOB_STATE_DONE:
raise AirflowException(
"Google Cloud Dataflow job's expected terminal state cannot be "
"JOB_STATE_DONE while it is a streaming job"
)
if not is_streaming and current_expected_state == DataflowJobStatus.JOB_STATE_DRAINED:
raise AirflowException(
"Google Cloud Dataflow job's expected terminal state cannot be "
"JOB_STATE_DRAINED while it is a batch job"
)
if current_state == current_expected_state:
if current_expected_state == DataflowJobStatus.JOB_STATE_RUNNING:
return not self._wait_until_finished
return True
if current_state in DataflowJobStatus.AWAITING_STATES:
return self._wait_until_finished is False
self.log.debug("Current job: %s", job)
raise AirflowException(
f"Google Cloud Dataflow job {job['name']} is in an unexpected terminal state: {current_state}, "
f"expected terminal state: {current_expected_state}"
)
def wait_for_done(self) -> None:
"""Wait for result of submitted job."""
self.log.info("Start waiting for done.")
self._refresh_jobs()
while self._jobs and not all(
self.job_reached_terminal_state(job, self._wait_until_finished, self._expected_terminal_state)
for job in self._jobs
):
self.log.info("Waiting for done. Sleep %s s", self._poll_sleep)
time.sleep(self._poll_sleep)
self._refresh_jobs()
def get_jobs(self, refresh: bool = False) -> list[dict]:
"""
Return Dataflow jobs.
:param refresh: Forces the latest data to be fetched.
:return: list of jobs
"""
if not self._jobs or refresh:
self._refresh_jobs()
if not self._jobs:
raise ValueError("Could not read _jobs")
return self._jobs
def _wait_for_states(self, expected_states: set[str]):
"""Wait for the jobs to reach a certain state."""
if not self._jobs:
raise ValueError("The _jobs should be set")
while True:
self._refresh_jobs()
job_states = {job["currentState"] for job in self._jobs}
if not job_states.difference(expected_states):
return
unexpected_failed_end_states = DataflowJobStatus.FAILED_END_STATES - expected_states
if unexpected_failed_end_states.intersection(job_states):
unexpected_failed_jobs = [
job for job in self._jobs if job["currentState"] in unexpected_failed_end_states
]
raise AirflowException(
"Jobs failed: "
+ ", ".join(
f"ID: {job['id']} name: {job['name']} state: {job['currentState']}"
for job in unexpected_failed_jobs
)
)
time.sleep(self._poll_sleep)
def cancel(self) -> None:
"""Cancel or drains current job."""
self._jobs = [
job for job in self.get_jobs() if job["currentState"] not in DataflowJobStatus.TERMINAL_STATES
]
job_ids = [job["id"] for job in self._jobs]
if job_ids:
self.log.info("Canceling jobs: %s", ", ".join(job_ids))
for job in self._jobs:
requested_state = (
DataflowJobStatus.JOB_STATE_DRAINED
if self.drain_pipeline and job["type"] == DataflowJobType.JOB_TYPE_STREAMING
else DataflowJobStatus.JOB_STATE_CANCELLED
)
request = (
self._dataflow.projects()
.locations()
.jobs()
.update(
projectId=self._project_number,
location=self._job_location,
jobId=job["id"],
body={"requestedState": requested_state},
)
)
request.execute(num_retries=self._num_retries)
if self._cancel_timeout and isinstance(self._cancel_timeout, int):
timeout_error_message = (
f"Canceling jobs failed due to timeout ({self._cancel_timeout}s): {', '.join(job_ids)}"
)
tm = timeout(seconds=self._cancel_timeout, error_message=timeout_error_message)
with tm:
self._wait_for_states(
{DataflowJobStatus.JOB_STATE_CANCELLED, DataflowJobStatus.JOB_STATE_DRAINED}
)
else:
self.log.info("No jobs to cancel")
|
_DataflowJobsController
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/dataclass12.py
|
{
"start": 305,
"end": 375
}
|
class ____(Base):
x: ClassVar[int] = 1
z: int
@dataclass
|
Special
|
python
|
ray-project__ray
|
doc/source/serve/doc_code/model_composition/streaming_example.py
|
{
"start": 231,
"end": 384
}
|
class ____:
def __call__(self, limit: int) -> Generator[int, None, None]:
for i in range(limit):
yield i
@serve.deployment
|
Streamer
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/_response.py
|
{
"start": 10601,
"end": 13963
}
|
class ____(BaseAPIResponse[R]):
@property
def request_id(self) -> str | None:
return self.http_response.headers.get("request-id") # type: ignore[no-any-return]
@overload
def parse(self, *, to: type[_T]) -> _T: ...
@overload
def parse(self) -> R: ...
def parse(self, *, to: type[_T] | None = None) -> R | _T:
"""Returns the rich python representation of this response's data.
For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.
You can customise the type that the response is parsed into through
the `to` argument, e.g.
```py
from anthropic import BaseModel
class MyModel(BaseModel):
foo: str
obj = response.parse(to=MyModel)
print(obj.foo)
```
We support parsing:
- `BaseModel`
- `dict`
- `list`
- `Union`
- `str`
- `int`
- `float`
- `httpx.Response`
"""
cache_key = to if to is not None else self._cast_to
cached = self._parsed_by_type.get(cache_key)
if cached is not None:
return cached # type: ignore[no-any-return]
if not self._is_sse_stream:
self.read()
parsed = self._parse(to=to)
if is_given(self._options.post_parser):
parsed = self._options.post_parser(parsed)
if isinstance(parsed, BaseModel):
add_request_id(parsed, self.request_id)
self._parsed_by_type[cache_key] = parsed
return cast(R, parsed)
def read(self) -> bytes:
"""Read and return the binary response content."""
try:
return self.http_response.read()
except httpx.StreamConsumed as exc:
# The default error raised by httpx isn't very
# helpful in our case so we re-raise it with
# a different error message.
raise StreamAlreadyConsumed() from exc
def text(self) -> str:
"""Read and decode the response content into a string."""
self.read()
return self.http_response.text
def json(self) -> object:
"""Read and decode the JSON response content."""
self.read()
return self.http_response.json()
def close(self) -> None:
"""Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
self.http_response.close()
def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]:
"""
A byte-iterator over the decoded response content.
This automatically handles gzip, deflate and brotli encoded responses.
"""
for chunk in self.http_response.iter_bytes(chunk_size):
yield chunk
def iter_text(self, chunk_size: int | None = None) -> Iterator[str]:
"""A str-iterator over the decoded response content
that handles both gzip, deflate, etc but also detects the content's
string encoding.
"""
for chunk in self.http_response.iter_text(chunk_size):
yield chunk
def iter_lines(self) -> Iterator[str]:
"""Like `iter_text()` but will only yield chunks for each line"""
for chunk in self.http_response.iter_lines():
yield chunk
|
APIResponse
|
python
|
scikit-learn__scikit-learn
|
sklearn/cluster/_feature_agglomeration.py
|
{
"start": 465,
"end": 2439
}
|
class ____(TransformerMixin):
"""
A class for feature agglomeration via the transform interface.
"""
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
An M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
self.pooling_func(X[:, self.labels_ == l], axis=1)
for l in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
def inverse_transform(self, X):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Returns
-------
X_original : ndarray of shape (n_samples, n_features) or (n_features,)
A vector of size `n_samples` with the values of `X` assigned to
each of the cluster of samples.
"""
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return X[..., inverse]
|
AgglomerationTransform
|
python
|
spack__spack
|
lib/spack/spack/vendor/ruamel/yaml/events.py
|
{
"start": 237,
"end": 2423
}
|
class ____:
__slots__ = 'start_mark', 'end_mark', 'comment'
def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):
# type: (Any, Any, Any) -> None
self.start_mark = start_mark
self.end_mark = end_mark
# assert comment is not CommentCheck
if comment is CommentCheck:
comment = None
self.comment = comment
def __repr__(self):
# type: () -> Any
if True:
arguments = []
if hasattr(self, 'value'):
# if you use repr(getattr(self, 'value')) then flake8 complains about
# abuse of getattr with a constant. When you change to self.value
# then mypy throws an error
arguments.append(repr(self.value)) # type: ignore
for key in ['anchor', 'tag', 'implicit', 'flow_style', 'style']:
v = getattr(self, key, None)
if v is not None:
arguments.append(_F('{key!s}={v!r}', key=key, v=v))
if self.comment not in [None, CommentCheck]:
arguments.append('comment={!r}'.format(self.comment))
if SHOW_LINES:
arguments.append(
'({}:{}/{}:{})'.format(
self.start_mark.line,
self.start_mark.column,
self.end_mark.line,
self.end_mark.column,
)
)
arguments = ', '.join(arguments) # type: ignore
else:
attributes = [
key
for key in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style']
if hasattr(self, key)
]
arguments = ', '.join(
[_F('{k!s}={attr!r}', k=key, attr=getattr(self, key)) for key in attributes]
)
if self.comment not in [None, CommentCheck]:
arguments += ', comment={!r}'.format(self.comment)
return _F(
'{self_class_name!s}({arguments!s})',
self_class_name=self.__class__.__name__,
arguments=arguments,
)
|
Event
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/lists.py
|
{
"start": 42160,
"end": 43407
}
|
class ____(BaseListVariable):
def python_type(self) -> type[tuple]: # type: ignore[type-arg]
return tuple
def __repr__(self) -> str:
return f"{self.__class__.__name__}(length={len(self.items)})"
def debug_repr(self) -> str:
return self.debug_repr_helper("(", ")")
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.foreach(self.items)
codegen.append_output(create_build_tuple(len(self.items)))
def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker:
if name == "__class__":
source = AttrSource(self.source, name) if self.source else None
class_type = self.python_type()
if class_type is tuple:
return variables.BuiltinVariable(class_type, source=source)
else:
return variables.UserDefinedClassVariable(class_type, source=source)
return super().var_getattr(tx, name)
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> ConstantVariable:
if self.python_type() is not tuple:
return super().call_obj_hasattr(tx, name)
return variables.ConstantVariable.create(hasattr((), name))
|
TupleVariable
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-census/dagster_census/types.py
|
{
"start": 73,
"end": 698
}
|
class ____(
NamedTuple(
"_CensusOutput",
[
("sync_run", Mapping[str, Any]),
("source", Mapping[str, Any]),
("destination", Mapping[str, Any]),
],
)
):
"""Contains recorded information about the state of a Census sync after a sync completes.
Args:
sync_run (Dict[str, Any]):
The details of the specific sync run.
source (Dict[str, Any]):
Information about the source for the Census sync.
destination (Dict[str, Any]):
Information about the destination for the Census sync.
"""
|
CensusOutput
|
python
|
davidhalter__parso
|
parso/python/tree.py
|
{
"start": 12577,
"end": 12654
}
|
class ____(PythonBaseNode):
type = 'decorator'
__slots__ = ()
|
Decorator
|
python
|
PyCQA__pylint
|
tests/functional/a/arguments_differ.py
|
{
"start": 2217,
"end": 2366
}
|
class ____(Property):
@property
def close(self):
pass
@close.setter
def close(self, attr):
return attr
|
PropertySetter
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 19084,
"end": 19303
}
|
class ____(BaseModel):
max_message_queue_size: int = Field(..., description="")
tick_period_ms: int = Field(..., description="")
bootstrap_timeout_sec: int = Field(..., description="")
|
ConsensusConfigTelemetry
|
python
|
apache__airflow
|
airflow-core/src/airflow/triggers/testing.py
|
{
"start": 1224,
"end": 1707
}
|
class ____(BaseTrigger):
"""
A trigger that always errors immediately.
Should only be used for testing.
"""
def serialize(self) -> tuple[str, dict[str, Any]]:
return ("airflow.triggers.testing.FailureTrigger", {})
async def run(self):
# Python needs at least one "yield" keyword in the body to make
# this an async generator.
if False:
yield None
raise ValueError("Deliberate trigger failure")
|
FailureTrigger
|
python
|
apache__airflow
|
airflow-core/tests/unit/core/test_impersonation_tests.py
|
{
"start": 7202,
"end": 8181
}
|
class ____(BaseImpersonationTest):
@classmethod
def setup_class(cls):
cls.dagbag = cls.get_dagbag(TEST_DAG_FOLDER)
def test_impersonation(self):
"""
Tests that impersonating a unix user works
"""
self.run_dag("test_impersonation", "test_impersonated_user")
def test_no_impersonation(self):
"""
If default_impersonation=None, tests that the job is run
as the current user (which will be a sudoer)
"""
self.run_dag(
"test_no_impersonation",
"test_superuser",
)
def test_default_impersonation(self, monkeypatch):
"""
If default_impersonation=TEST_USER, tests that the job defaults
to running as TEST_USER for a test without 'run_as_user' set.
"""
monkeypatch.setenv("AIRFLOW__CORE__DEFAULT_IMPERSONATION", TEST_USER)
self.run_dag("test_default_impersonation", "test_deelevated_user")
|
TestImpersonation
|
python
|
keras-team__keras
|
keras/src/ops/nn.py
|
{
"start": 19417,
"end": 20295
}
|
class ____(Operation):
def call(self, x):
return backend.nn.hard_tanh(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.hard_tanh", "keras.ops.nn.hard_tanh"])
def hard_tanh(x):
"""Applies the HardTanh function element-wise.
It is defined as:
`f(x) = -1 for x < -1`, `f(x) = x for -1 <= x <= 1`, `f(x) = 1 for x > 1`.
Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`
where values are clamped between -1 and 1.
Example:
>>> x = np.array([-2., -1., 0., 1., 2.])
>>> x_hard_tanh = keras.ops.hard_tanh(x)
>>> print(x_hard_tanh)
array([-1. -1. 0. 1. 1.], shape=(5,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return HardTanh().symbolic_call(x)
return backend.nn.hard_tanh(x)
|
HardTanh
|
python
|
django-crispy-forms__django-crispy-forms
|
tests/forms.py
|
{
"start": 2682,
"end": 2812
}
|
class ____(models.Model):
email = models.CharField(max_length=20)
password = models.CharField(max_length=20)
|
CrispyTestModel
|
python
|
PyCQA__pylint
|
tests/functional/r/regression_02/regression_5408.py
|
{
"start": 642,
"end": 727
}
|
class ____(_Child):
def patch(cls):
MyClass.sub_class.inner_class = cls
|
Child
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_sagemaker_processing.py
|
{
"start": 3580,
"end": 15256
}
|
class ____:
def setup_method(self):
self.processing_config_kwargs = dict(
task_id="test_sagemaker_operator",
wait_for_completion=False,
check_interval=5,
)
self.defer_processing_config_kwargs = dict(
task_id="test_sagemaker_operator", wait_for_completion=True, check_interval=5, deferrable=True
)
@mock.patch.object(SageMakerHook, "describe_processing_job")
@mock.patch.object(SageMakerHook, "count_processing_jobs_by_name", return_value=0)
@mock.patch.object(
SageMakerHook,
"create_processing_job",
return_value={"ProcessingJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}},
)
@mock.patch.object(sagemaker, "serialize", return_value="")
def test_integer_fields_without_stopping_condition(self, _, __, ___, mock_desc):
mock_desc.side_effect = [ClientError({"Error": {"Code": "ValidationException"}}, "op"), None]
sagemaker = SageMakerProcessingOperator(
**self.processing_config_kwargs, config=CREATE_PROCESSING_PARAMS
)
sagemaker.execute(None)
assert sagemaker.integer_fields == EXPECTED_INTEGER_FIELDS
for key1, key2, key3 in EXPECTED_INTEGER_FIELDS:
assert sagemaker.config[key1][key2][key3] == int(sagemaker.config[key1][key2][key3])
@mock.patch.object(SageMakerHook, "describe_processing_job")
@mock.patch.object(SageMakerHook, "count_processing_jobs_by_name", return_value=0)
@mock.patch.object(
SageMakerHook,
"create_processing_job",
return_value={"ProcessingJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}},
)
@mock.patch.object(sagemaker, "serialize", return_value="")
def test_integer_fields_with_stopping_condition(self, _, __, ___, mock_desc):
mock_desc.side_effect = [ClientError({"Error": {"Code": "ValidationException"}}, "op"), None]
sagemaker = SageMakerProcessingOperator(
**self.processing_config_kwargs, config=CREATE_PROCESSING_PARAMS_WITH_STOPPING_CONDITION
)
sagemaker.execute(None)
assert (
sagemaker.integer_fields == EXPECTED_INTEGER_FIELDS + EXPECTED_STOPPING_CONDITION_INTEGER_FIELDS
)
for key1, key2, *key3_raw in EXPECTED_INTEGER_FIELDS:
if key3_raw:
(key3,) = key3_raw
assert sagemaker.config[key1][key2][key3] == int(sagemaker.config[key1][key2][key3])
else:
sagemaker.config[key1][key2] == int(sagemaker.config[key1][key2])
@mock.patch.object(SageMakerHook, "describe_processing_job")
@mock.patch.object(SageMakerHook, "count_processing_jobs_by_name", return_value=0)
@mock.patch.object(
SageMakerHook,
"create_processing_job",
return_value={"ProcessingJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}},
)
@mock.patch.object(sagemaker, "serialize", return_value="")
def test_execute(self, _, mock_processing, __, mock_desc):
mock_desc.side_effect = [ClientError({"Error": {"Code": "ValidationException"}}, "op"), None]
sagemaker = SageMakerProcessingOperator(
**self.processing_config_kwargs, config=CREATE_PROCESSING_PARAMS
)
sagemaker.execute(None)
mock_processing.assert_called_once_with(
CREATE_PROCESSING_PARAMS, wait_for_completion=False, check_interval=5, max_ingestion_time=None
)
@mock.patch.object(SageMakerHook, "describe_processing_job")
@mock.patch.object(SageMakerHook, "count_processing_jobs_by_name", return_value=0)
@mock.patch.object(
SageMakerHook,
"create_processing_job",
return_value={"ProcessingJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}},
)
@mock.patch.object(sagemaker, "serialize", return_value="")
def test_execute_with_stopping_condition(self, _, mock_processing, __, mock_desc):
mock_desc.side_effect = [ClientError({"Error": {"Code": "ValidationException"}}, "op"), None]
sagemaker = SageMakerProcessingOperator(
**self.processing_config_kwargs, config=CREATE_PROCESSING_PARAMS_WITH_STOPPING_CONDITION
)
sagemaker.execute(None)
mock_processing.assert_called_once_with(
CREATE_PROCESSING_PARAMS_WITH_STOPPING_CONDITION,
wait_for_completion=False,
check_interval=5,
max_ingestion_time=None,
)
@mock.patch.object(SageMakerHook, "describe_processing_job")
@mock.patch.object(
SageMakerHook,
"create_processing_job",
return_value={"ProcessingJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 404}},
)
def test_execute_with_failure(self, _, mock_desc):
mock_desc.side_effect = [ClientError({"Error": {"Code": "ValidationException"}}, "op"), None]
sagemaker = SageMakerProcessingOperator(
**self.processing_config_kwargs, config=CREATE_PROCESSING_PARAMS
)
with pytest.raises(AirflowException):
sagemaker.execute(None)
@mock.patch.object(SageMakerHook, "describe_processing_job")
@mock.patch.object(SageMakerHook, "count_processing_jobs_by_name", return_value=1)
@mock.patch.object(
SageMakerHook, "create_processing_job", return_value={"ResponseMetadata": {"HTTPStatusCode": 200}}
)
def test_execute_with_existing_job_timestamp(self, mock_create_processing_job, _, mock_desc):
mock_desc.side_effect = [None, ClientError({"Error": {"Code": "ValidationException"}}, "op"), None]
sagemaker = SageMakerProcessingOperator(
**self.processing_config_kwargs, config=CREATE_PROCESSING_PARAMS
)
sagemaker.action_if_job_exists = "timestamp"
sagemaker.execute(None)
expected_config = CREATE_PROCESSING_PARAMS.copy()
# Expect to see ProcessingJobName suffixed because we return one existing job
expected_config["ProcessingJobName"].startswith("job_name-")
mock_create_processing_job.assert_called_once_with(
expected_config,
wait_for_completion=False,
check_interval=5,
max_ingestion_time=None,
)
@mock.patch.object(SageMakerHook, "describe_processing_job")
@mock.patch.object(SageMakerHook, "count_processing_jobs_by_name", return_value=1)
@mock.patch.object(
SageMakerHook, "create_processing_job", return_value={"ResponseMetadata": {"HTTPStatusCode": 200}}
)
def test_execute_with_existing_job_fail(self, _, __, ___):
sagemaker = SageMakerProcessingOperator(
**self.processing_config_kwargs, config=CREATE_PROCESSING_PARAMS
)
sagemaker.action_if_job_exists = "fail"
with pytest.raises(AirflowException):
sagemaker.execute(None)
@mock.patch.object(SageMakerHook, "describe_processing_job")
def test_action_if_job_exists_validation(self, mock_client):
with pytest.raises(AirflowException):
SageMakerProcessingOperator(
**self.processing_config_kwargs,
config=CREATE_PROCESSING_PARAMS,
action_if_job_exists="not_fail_or_increment",
)
@mock.patch.object(
SageMakerHook, "describe_processing_job", return_value={"ProcessingJobStatus": "InProgress"}
)
@mock.patch.object(
SageMakerHook,
"create_processing_job",
return_value={
"ProcessingJobArn": "test_arn",
"ResponseMetadata": {"HTTPStatusCode": 200},
},
)
@mock.patch.object(SageMakerBaseOperator, "_check_if_job_exists", return_value=False)
def test_operator_defer(self, mock_job_exists, mock_processing, mock_describe):
sagemaker_operator = SageMakerProcessingOperator(
**self.defer_processing_config_kwargs,
config=CREATE_PROCESSING_PARAMS,
)
sagemaker_operator.wait_for_completion = True
with pytest.raises(TaskDeferred) as exc:
sagemaker_operator.execute(context=None)
assert isinstance(exc.value.trigger, SageMakerTrigger), "Trigger is not a SagemakerTrigger"
@mock.patch("airflow.providers.amazon.aws.operators.sagemaker.SageMakerProcessingOperator.defer")
@mock.patch.object(
SageMakerHook, "describe_processing_job", return_value={"ProcessingJobStatus": "Completed"}
)
@mock.patch.object(
SageMakerHook,
"create_processing_job",
return_value={"ProcessingJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}},
)
@mock.patch.object(SageMakerBaseOperator, "_check_if_job_exists", return_value=False)
def test_operator_complete_before_defer(
self, mock_job_exists, mock_processing, mock_describe, mock_defer
):
sagemaker_operator = SageMakerProcessingOperator(
**self.defer_processing_config_kwargs,
config=CREATE_PROCESSING_PARAMS,
)
sagemaker_operator.execute(context=None)
assert not mock_defer.called
@mock.patch("airflow.providers.amazon.aws.operators.sagemaker.SageMakerProcessingOperator.defer")
@mock.patch.object(
SageMakerHook,
"describe_processing_job",
return_value={"ProcessingJobStatus": "Failed", "FailureReason": "It failed"},
)
@mock.patch.object(
SageMakerHook,
"create_processing_job",
return_value={"ProcessingJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}},
)
@mock.patch.object(SageMakerBaseOperator, "_check_if_job_exists", return_value=False)
def test_operator_failed_before_defer(
self,
mock_job_exists,
mock_processing,
mock_describe,
mock_defer,
):
sagemaker_operator = SageMakerProcessingOperator(
**self.defer_processing_config_kwargs,
config=CREATE_PROCESSING_PARAMS,
)
with pytest.raises(AirflowException):
sagemaker_operator.execute(context=None)
assert not mock_defer.called
@mock.patch.object(
SageMakerHook,
"describe_processing_job",
return_value={
"ProcessingInputs": [{"S3Input": {"S3Uri": "s3://input-bucket/input-path"}}],
"ProcessingOutputConfig": {
"Outputs": [{"S3Output": {"S3Uri": "s3://output-bucket/output-path"}}]
},
},
)
@mock.patch.object(SageMakerHook, "count_processing_jobs_by_name", return_value=0)
@mock.patch.object(
SageMakerHook,
"create_processing_job",
return_value={"ProcessingJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}},
)
@mock.patch.object(SageMakerBaseOperator, "_check_if_job_exists", return_value=False)
def test_operator_openlineage_data(self, check_job_exists, mock_processing, _, mock_desc):
sagemaker = SageMakerProcessingOperator(
**self.processing_config_kwargs,
config=CREATE_PROCESSING_PARAMS,
deferrable=True,
)
sagemaker.execute(context=None)
assert sagemaker.get_openlineage_facets_on_complete(None) == OperatorLineage(
inputs=[Dataset(namespace="s3://input-bucket", name="input-path")],
outputs=[Dataset(namespace="s3://output-bucket", name="output-path")],
)
def test_template_fields(self):
operator = SageMakerProcessingOperator(
**self.processing_config_kwargs,
config=CREATE_PROCESSING_PARAMS,
)
validate_template_fields(operator)
|
TestSageMakerProcessingOperator
|
python
|
pypa__pip
|
tests/functional/test_install_user.py
|
{
"start": 1079,
"end": 12214
}
|
class ____:
@pytest.mark.network
def test_reset_env_system_site_packages_usersite(
self, script: PipTestEnvironment
) -> None:
"""
Check user site works as expected.
"""
script.pip("install", "--user", "INITools==0.2")
result = script.run(
"python",
"-c",
"import pkg_resources; print(pkg_resources.get_distribution"
"('initools').project_name)",
)
project_name = result.stdout.strip()
assert "initools" == project_name, project_name
@pytest.mark.xfail
@pytest.mark.network
@need_svn
def test_install_subversion_usersite_editable_with_distribute(
self, script: PipTestEnvironment, tmpdir: Path
) -> None:
"""
Test installing current directory ('.') into usersite after installing
distribute
"""
result = script.pip(
"install",
"--user",
"-e",
"{checkout}#egg=initools".format(
checkout=local_checkout(
"svn+http://svn.colorstudy.com/INITools", tmpdir
)
),
)
result.assert_installed("INITools")
def test_install_from_current_directory_into_usersite(
self, script: PipTestEnvironment, data: TestData
) -> None:
"""
Test installing current directory ('.') into usersite
"""
run_from = data.packages.joinpath("FSPkg")
result = script.pip(
"install",
"--no-build-isolation",
"-vvv",
"--user",
curdir,
cwd=run_from,
)
fspkg_folder = script.user_site / "fspkg"
result.did_create(fspkg_folder)
dist_info_folder = script.user_site / "fspkg-0.1.dev0.dist-info"
result.did_create(dist_info_folder)
def test_install_user_venv_nositepkgs_fails(
self, virtualenv: VirtualEnvironment, script: PipTestEnvironment, data: TestData
) -> None:
"""
user install in virtualenv (with no system packages) fails with message
"""
# We can't use PYTHONNOUSERSITE, as it's not
# honoured by virtualenv's custom site.py.
virtualenv.user_site_packages = False
run_from = data.packages.joinpath("FSPkg")
result = script.pip(
"install",
"--user",
curdir,
cwd=run_from,
expect_error=True,
)
assert (
"Can not perform a '--user' install. User site-packages are not "
"visible in this virtualenv." in result.stderr
)
@pytest.mark.network
def test_install_user_conflict_in_usersite(
self, script: PipTestEnvironment
) -> None:
"""
Test user install with conflict in usersite updates usersite.
"""
script.pip("install", "--user", "INITools==0.3", "--no-binary=:all:")
result2 = script.pip("install", "--user", "INITools==0.1", "--no-binary=:all:")
# usersite has 0.1
dist_info_folder = script.user_site / "initools-0.1.dist-info"
initools_v3_file = (
# file only in 0.3
script.base_path
/ script.user_site
/ "initools"
/ "configparser.py"
)
result2.did_create(dist_info_folder)
assert not isfile(initools_v3_file), initools_v3_file
def test_install_user_conflict_in_globalsite(
self, virtualenv: VirtualEnvironment, script: PipTestEnvironment
) -> None:
"""
Test user install with conflict in global site ignores site and
installs to usersite
"""
create_basic_wheel_for_package(script, "initools", "0.1")
create_basic_wheel_for_package(script, "initools", "0.2")
_patch_dist_in_site_packages(virtualenv)
script.pip(
"install",
"--no-index",
"--find-links",
script.scratch_path,
"initools==0.2",
)
result2 = script.pip(
"install",
"--no-index",
"--find-links",
script.scratch_path,
"--user",
"initools==0.1",
)
# usersite has 0.1
dist_info_folder = script.user_site / "initools-0.1.dist-info"
initools_folder = script.user_site / "initools"
result2.did_create(dist_info_folder)
result2.did_create(initools_folder)
# site still has 0.2 (can't look in result1; have to check)
dist_info_folder = (
script.base_path / script.site_packages / "initools-0.2.dist-info"
)
initools_folder = script.base_path / script.site_packages / "initools"
assert isdir(dist_info_folder)
assert isdir(initools_folder)
def test_upgrade_user_conflict_in_globalsite(
self, virtualenv: VirtualEnvironment, script: PipTestEnvironment
) -> None:
"""
Test user install/upgrade with conflict in global site ignores site and
installs to usersite
"""
create_basic_wheel_for_package(script, "initools", "0.2")
create_basic_wheel_for_package(script, "initools", "0.3.1")
_patch_dist_in_site_packages(virtualenv)
script.pip(
"install",
"--no-index",
"--find-links",
script.scratch_path,
"initools==0.2",
)
result2 = script.pip(
"install",
"--no-index",
"--find-links",
script.scratch_path,
"--user",
"--upgrade",
"initools",
)
# usersite has 0.3.1
dist_info_folder = script.user_site / "initools-0.3.1.dist-info"
initools_folder = script.user_site / "initools"
result2.did_create(dist_info_folder)
result2.did_create(initools_folder)
# site still has 0.2 (can't look in result1; have to check)
dist_info_folder = (
script.base_path / script.site_packages / "initools-0.2.dist-info"
)
initools_folder = script.base_path / script.site_packages / "initools"
assert isdir(dist_info_folder), result2.stdout
assert isdir(initools_folder)
def test_install_user_conflict_in_globalsite_and_usersite(
self, virtualenv: VirtualEnvironment, script: PipTestEnvironment
) -> None:
"""
Test user install with conflict in globalsite and usersite ignores
global site and updates usersite.
"""
initools_v3_file_name = os.path.join("initools", "configparser.py")
create_basic_wheel_for_package(script, "initools", "0.1")
create_basic_wheel_for_package(script, "initools", "0.2")
create_basic_wheel_for_package(
script,
"initools",
"0.3",
extra_files={initools_v3_file_name: "# Hi!"},
)
_patch_dist_in_site_packages(virtualenv)
script.pip(
"install",
"--no-index",
"--find-links",
script.scratch_path,
"initools==0.2",
)
script.pip(
"install",
"--no-index",
"--find-links",
script.scratch_path,
"--user",
"initools==0.3",
)
result3 = script.pip(
"install",
"--no-index",
"--find-links",
script.scratch_path,
"--user",
"initools==0.1",
)
# usersite has 0.1
dist_info_folder = script.user_site / "initools-0.1.dist-info"
result3.did_create(dist_info_folder)
initools_v3_file = script.base_path / script.user_site / initools_v3_file_name
assert not isfile(initools_v3_file), initools_v3_file
# site still has 0.2 (can't just look in result1; have to check)
dist_info_folder = (
script.base_path / script.site_packages / "initools-0.2.dist-info"
)
initools_folder = script.base_path / script.site_packages / "initools"
assert isdir(dist_info_folder)
assert isdir(initools_folder)
def test_install_user_in_global_virtualenv_with_conflict_fails(
self, script: PipTestEnvironment
) -> None:
"""
Test user install in --system-site-packages virtualenv with conflict in
site fails.
"""
create_basic_wheel_for_package(script, "pkg", "0.1")
create_basic_wheel_for_package(script, "pkg", "0.2")
script.pip(
"install",
"--no-cache-dir",
"--no-index",
"--find-links",
script.scratch_path,
"pkg==0.2",
)
result2 = script.pip(
"install",
"--no-cache-dir",
"--no-index",
"--find-links",
script.scratch_path,
"--user",
"pkg==0.1",
expect_error=True,
)
resultp = script.run(
"python",
"-c",
"from pip._internal.metadata import get_default_environment; "
"print(get_default_environment().get_distribution('pkg').location)",
)
dist_location = resultp.stdout.strip()
assert (
f"Will not install to the user site because it will lack sys.path "
f"precedence to pkg in {dist_location}"
) in result2.stderr
def test_install_user_nositepkgs_fails(
self,
script: PipTestEnvironment,
data: TestData,
) -> None:
"""
Test that --user install fails when user site-packages are disabled.
"""
create_basic_wheel_for_package(script, "pkg", "0.1")
# Create a custom Python script that disables user site and runs pip via exec
test_script = script.scratch_path / "test_disable_user_site.py"
test_script.write_text(
textwrap.dedent(
f"""
import site
import sys
# Make sys.base_prefix equal to sys.prefix to simulate not being in a venv
# This ensures virtualenv_no_global() returns False, so we test the
# site.ENABLE_USER_SITE path
sys.base_prefix = sys.prefix
site.ENABLE_USER_SITE = False
# Set up sys.argv to simulate running pip install --user
sys.argv = [
"pip", "install",
"--no-cache-dir",
"--no-index",
"--find-links",
r"{script.scratch_path}",
"pkg",
"--user"
]
# Import and run pip's main
from pip._internal.cli.main import main
sys.exit(main())
"""
)
)
result = script.run("python", str(test_script), expect_error=True)
assert (
"Can not perform a '--user' install. User site-packages are "
"disabled for this Python." in result.stderr
)
|
Tests_UserSite
|
python
|
django__django
|
django/db/models/functions/comparison.py
|
{
"start": 4990,
"end": 5709
}
|
class ____(Func):
"""
Return the maximum expression.
If any expression is null the return value is database-specific:
On PostgreSQL, the maximum not-null expression is returned.
On MySQL, Oracle, and SQLite, if any expression is null, null is returned.
"""
function = "GREATEST"
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError("Greatest must take at least two expressions")
super().__init__(*expressions, **extra)
def as_sqlite(self, compiler, connection, **extra_context):
"""Use the MAX function on SQLite."""
return super().as_sqlite(compiler, connection, function="MAX", **extra_context)
|
Greatest
|
python
|
wandb__wandb
|
wandb/old/summary.py
|
{
"start": 12136,
"end": 13843
}
|
class ____(Summary):
def __init__(self, run, client, summary=None):
super().__init__(run, summary=summary)
self._run = run
self._client = client
self._started = time.time()
def __delitem__(self, key):
if key not in self._json_dict:
raise KeyError(key)
del self._json_dict[key]
def load(self):
pass
def open_h5(self):
if not self._h5 and h5py:
download_h5(
self._run.id,
entity=self._run.entity,
project=self._run.project,
out_dir=self._run.dir,
)
super().open_h5()
def _write(self, commit=False):
mutation = gql(
"""
mutation UpsertBucket( $id: String, $summaryMetrics: JSONString) {
upsertBucket(input: { id: $id, summaryMetrics: $summaryMetrics}) {
bucket { id }
}
}
"""
)
if commit:
if self._h5:
self._h5.close()
self._h5 = None
res = self._client.execute(
mutation,
variable_values={
"id": self._run.storage_id,
"summaryMetrics": util.json_dumps_safer(self._json_dict),
},
)
assert res["upsertBucket"]["bucket"]["id"]
entity, project, run = self._run.path
if (
os.path.exists(self._h5_path)
and os.path.getmtime(self._h5_path) >= self._started
):
upload_h5(self._h5_path, run, entity=entity, project=project)
else:
return False
|
HTTPSummary
|
python
|
scipy__scipy
|
scipy/odr/_models.py
|
{
"start": 4599,
"end": 6045
}
|
class ____(Model):
r"""
Exponential model
This model is defined by :math:`y=\beta_0 + e^{\beta_1 x}`
Examples
--------
We can calculate orthogonal distance regression with an exponential model:
>>> from scipy import odr
>>> import numpy as np
>>> x = np.linspace(0.0, 5.0)
>>> y = -10.0 + np.exp(0.5*x)
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, odr.exponential)
>>> output = odr_obj.run()
>>> print(output.beta)
[-10. 0.5]
"""
def __init__(self):
super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
estimate=_exp_est,
meta={'name': 'Exponential',
'equ': 'y= B_0 + exp(B_1 * x)',
'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'})
exponential = _ExponentialModel()
def _unilin(B, x):
return x*B[0] + B[1]
def _unilin_fjd(B, x):
return np.ones(x.shape, float) * B[0]
def _unilin_fjb(B, x):
_ret = np.concatenate((x, np.ones(x.shape, float)))
return _ret.reshape((2,) + x.shape)
def _unilin_est(data):
return (1., 1.)
def _quadratic(B, x):
return x*(x*B[0] + B[1]) + B[2]
def _quad_fjd(B, x):
return 2*x*B[0] + B[1]
def _quad_fjb(B, x):
_ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
return _ret.reshape((3,) + x.shape)
def _quad_est(data):
return (1.,1.,1.)
|
_ExponentialModel
|
python
|
airbytehq__airbyte
|
airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/config.py
|
{
"start": 11375,
"end": 12116
}
|
class ____(BaseConfig):
config_path: str = config_path
configured_catalog_path: Optional[str] = configured_catalog_path
future_state: Optional[FutureStateConfig] = Field(description="Configuration for the future state.")
timeout_seconds: int = timeout_seconds
deployment_mode: Optional[str] = deployment_mode
skip_comprehensive_incremental_tests: Optional[bool] = Field(
description="Determines whether to skip more granular testing for incremental syncs", default=False
)
client_container_config: Optional[ClientContainerConfig] = Field(
description="Information required to run a client Docker container before each test.",
)
class Config:
smart_union = True
|
IncrementalConfig
|
python
|
cython__cython
|
Demos/benchmarks/bm_richards_cclass.py
|
{
"start": 7866,
"end": 10267
}
|
class ____(object):
def run(self, iterations: cython.long):
i: cython.long
for i in range(iterations):
taskWorkArea.holdCount = 0
taskWorkArea.qpktCount = 0
IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec())
wkq = Packet(None, 0, K_WORK)
wkq = Packet(wkq , 0, K_WORK)
WorkTask(I_WORK, 1000, wkq, TaskState().waitingWithPacket(), WorkerTaskRec())
wkq = Packet(None, I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
HandlerTask(I_HANDLERA, 2000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec())
wkq = Packet(None, I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
HandlerTask(I_HANDLERB, 3000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec())
wkq = None
DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec())
DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec())
schedule()
if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
pass
else:
return False
return True
def entry_point(iterations, timer=time.perf_counter):
r = Richards()
startTime = timer()
result = r.run(iterations)
endTime = timer()
return result, endTime - startTime
def run_benchmark(repeat=True, scale: cython.long = 1):
from util import repeat_to_accuracy
def single_run(scale, timer):
result, t = entry_point(scale, timer)
assert result, "Incorrect result!"
return t
return repeat_to_accuracy(single_run, scale=scale, repeat=repeat)[0]
def main(iterations = 10, entry_point = entry_point):
print("Richards benchmark (Python) starting... [%r]" % entry_point)
result, total_s = entry_point(iterations)
if not result:
print("Incorrect results!")
return -1
print("finished.")
print("Total time for %d iterations: %.2f secs" % (iterations, total_s))
print("Average time per iteration: %.2f ms" % (total_s*1000/iterations))
return 42
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2:
main(iterations = int(sys.argv[1]))
else:
main()
|
Richards
|
python
|
explosion__spaCy
|
spacy/lang/ja/__init__.py
|
{
"start": 7309,
"end": 7550
}
|
class ____(BaseDefaults):
config = load_config_from_str(DEFAULT_CONFIG)
stop_words = STOP_WORDS
syntax_iterators = SYNTAX_ITERATORS
writing_system = {"direction": "ltr", "has_case": False, "has_letters": False}
|
JapaneseDefaults
|
python
|
getsentry__sentry
|
tests/snuba/search/test_backend.py
|
{
"start": 11342,
"end": 98282
}
|
class ____(EventsDatasetTestSetup):
def test_query(self) -> None:
results = self.make_query(search_filter_query="foo")
assert set(results) == {self.group1}
results = self.make_query(search_filter_query="bar")
assert set(results) == {self.group2}
def test_query_multi_project(self) -> None:
self.set_up_multi_project()
results = self.make_query([self.project, self.project2], search_filter_query="foo")
assert set(results) == {self.group1, self.group_p2}
def test_query_with_environment(self) -> None:
results = self.make_query(
environments=[self.environments["production"]], search_filter_query="foo"
)
assert set(results) == {self.group1}
results = self.make_query(
environments=[self.environments["production"]], search_filter_query="bar"
)
assert set(results) == set()
results = self.make_query(
environments=[self.environments["staging"]], search_filter_query="bar"
)
assert set(results) == {self.group2}
def test_query_for_text_in_long_message(self) -> None:
results = self.make_query(
[self.project],
environments=[self.environments["production"]],
search_filter_query="santryrox",
)
assert set(results) == {self.group1}
def test_multi_environments(self) -> None:
self.set_up_multi_project()
results = self.make_query(
[self.project, self.project2],
environments=[self.environments["production"], self.environments["staging"]],
)
assert set(results) == {self.group1, self.group2, self.group_p2}
def test_query_with_environment_multi_project(self) -> None:
self.set_up_multi_project()
results = self.make_query(
[self.project, self.project2],
environments=[self.environments["production"]],
search_filter_query="foo",
)
assert set(results) == {self.group1, self.group_p2}
results = self.make_query(
[self.project, self.project2],
environments=[self.environments["production"]],
search_filter_query="bar",
)
assert set(results) == set()
def test_query_timestamp(self) -> None:
results = self.make_query(
[self.project],
environments=[self.environments["production"]],
search_filter_query=f"timestamp:>{self.event1.datetime.isoformat()} timestamp:<{self.event3.datetime.isoformat()}",
)
assert set(results) == {self.group1}
def test_sort(self) -> None:
results = self.make_query(sort_by="date")
assert list(results) == [self.group1, self.group2]
results = self.make_query(sort_by="new")
assert list(results) == [self.group2, self.group1]
results = self.make_query(sort_by="freq")
assert list(results) == [self.group1, self.group2]
results = self.make_query(sort_by="trends")
assert list(results) == [self.group2, self.group1]
results = self.make_query(sort_by="user")
assert list(results) == [self.group1, self.group2]
def test_trends_sort(self) -> None:
weights: TrendsSortWeights = {
"log_level": 5,
"has_stacktrace": 5,
"relative_volume": 1,
"event_halflife_hours": 4,
"issue_halflife_hours": 24 * 7,
"v2": False,
"norm": False,
}
results = self.make_query(
sort_by="trends",
aggregate_kwargs=weights,
)
assert list(results) == [self.group2, self.group1]
def test_sort_with_environment(self) -> None:
for dt in [
self.group1.first_seen + timedelta(days=1),
self.group1.first_seen + timedelta(days=2),
self.group1.last_seen + timedelta(days=1),
]:
self.store_event(
data={
"fingerprint": ["put-me-in-group2"],
"timestamp": dt.isoformat(),
"stacktrace": {"frames": [{"module": "group2"}]},
"environment": "production",
"message": "group2",
},
project_id=self.project.id,
)
results = self.make_query(environments=[self.environments["production"]], sort_by="date")
assert list(results) == [self.group2, self.group1]
results = self.make_query(environments=[self.environments["production"]], sort_by="new")
assert list(results) == [self.group2, self.group1]
results = self.make_query(environments=[self.environments["production"]], sort_by="freq")
assert list(results) == [self.group2, self.group1]
results = self.make_query(environments=[self.environments["production"]], sort_by="trends")
assert list(results) == [self.group2, self.group1]
results = self.make_query(environments=[self.environments["production"]], sort_by="user")
assert list(results) == [self.group1, self.group2]
def test_status(self) -> None:
results = self.make_query(search_filter_query="is:unresolved")
assert set(results) == {self.group1}
results = self.make_query(search_filter_query="is:resolved")
assert set(results) == {self.group2}
event_3 = self.store_event(
data={
"fingerprint": ["put-me-in-group3"],
"event_id": "c" * 32,
"timestamp": (self.base_datetime - timedelta(days=20)).isoformat(),
},
project_id=self.project.id,
)
group_3 = event_3.group
group_3.status = GroupStatus.MUTED
group_3.substatus = None
group_3.save()
self.run_test_query("status:[unresolved, resolved]", [self.group1, self.group2], [group_3])
self.run_test_query("status:[resolved, muted]", [self.group2, group_3], [self.group1])
def test_substatus(self) -> None:
results = self.make_query(search_filter_query="is:ongoing")
assert set(results) == {self.group1}
def test_category(self) -> None:
results = self.make_query(search_filter_query="issue.category:error")
assert set(results) == {self.group1, self.group2}
event_3 = self.store_event(
data={
"fingerprint": ["put-me-in-group3"],
"event_id": "c" * 32,
"timestamp": (self.base_datetime - timedelta(days=20)).isoformat(),
},
project_id=self.project.id,
)
group_3 = event_3.group
group_3.update(type=PerformanceNPlusOneGroupType.type_id)
results = self.make_query(search_filter_query="issue.category:performance")
assert set(results) == {group_3}
results = self.make_query(search_filter_query="issue.category:[error, performance]")
assert set(results) == {self.group1, self.group2, group_3}
with pytest.raises(InvalidSearchQuery):
self.make_query(search_filter_query="issue.category:hellboy")
def test_not_perf_category(self) -> None:
results = self.make_query(search_filter_query="issue.category:error foo")
assert set(results) == {self.group1}
not_results = self.make_query(search_filter_query="!issue.category:performance foo")
assert set(not_results) == {self.group1}
def test_type(self) -> None:
results = self.make_query(search_filter_query="issue.type:error")
assert set(results) == {self.group1, self.group2}
event_3 = self.store_event(
data={
"fingerprint": ["put-me-in-group3"],
"event_id": "c" * 32,
"timestamp": (self.base_datetime - timedelta(days=20)).isoformat(),
"type": PerformanceNPlusOneGroupType.type_id,
},
project_id=self.project.id,
)
group_3 = event_3.group
group_3.update(type=PerformanceNPlusOneGroupType.type_id)
results = self.make_query(
search_filter_query="issue.type:performance_n_plus_one_db_queries"
)
assert set(results) == {group_3}
event_4 = self.store_event(
data={
"fingerprint": ["put-me-in-group4"],
"event_id": "d" * 32,
"timestamp": (self.base_datetime - timedelta(days=20)).isoformat(),
},
project_id=self.project.id,
)
group_4 = event_4.group
group_4.update(type=PerformanceRenderBlockingAssetSpanGroupType.type_id)
results = self.make_query(
search_filter_query="issue.type:performance_render_blocking_asset_span"
)
assert set(results) == {group_4}
results = self.make_query(
search_filter_query="issue.type:[performance_render_blocking_asset_span, performance_n_plus_one_db_queries, error]"
)
assert set(results) == {self.group1, self.group2, group_3, group_4}
with pytest.raises(InvalidSearchQuery):
self.make_query(search_filter_query="issue.type:performance_i_dont_exist")
def test_status_with_environment(self) -> None:
results = self.make_query(
environments=[self.environments["production"]], search_filter_query="is:unresolved"
)
assert set(results) == {self.group1}
results = self.make_query(
environments=[self.environments["staging"]], search_filter_query="is:resolved"
)
assert set(results) == {self.group2}
results = self.make_query(
environments=[self.environments["production"]], search_filter_query="is:resolved"
)
assert set(results) == set()
    def test_tags(self) -> None:
        """Tag filters: exact value match, `has:` existence, quoted values,
        and AND-combination of multiple tag conditions."""
        results = self.make_query(search_filter_query="environment:staging")
        assert set(results) == {self.group2}
        results = self.make_query(search_filter_query="environment:example.com")
        assert set(results) == set()
        results = self.make_query(search_filter_query="has:environment")
        assert set(results) == {self.group2, self.group1}
        results = self.make_query(search_filter_query="environment:staging server:example.com")
        assert set(results) == {self.group2}
        results = self.make_query(search_filter_query='url:"http://example.com"')
        assert set(results) == {self.group2}
        results = self.make_query(search_filter_query="environment:staging has:server")
        assert set(results) == {self.group2}
        results = self.make_query(search_filter_query="environment:staging server:bar.example.com")
        assert set(results) == set()
    def test_tags_with_environment(self) -> None:
        """Tag filters are scoped per environment: the same tag query matches
        different groups depending on which environment is selected."""
        results = self.make_query(
            environments=[self.environments["production"]], search_filter_query="server:example.com"
        )
        assert set(results) == {self.group1}
        results = self.make_query(
            environments=[self.environments["staging"]], search_filter_query="server:example.com"
        )
        assert set(results) == {self.group2}
        results = self.make_query(
            environments=[self.environments["staging"]], search_filter_query="has:server"
        )
        assert set(results) == {self.group2}
        # The url tag only exists on staging events, so production sees nothing.
        results = self.make_query(
            environments=[self.environments["production"]],
            search_filter_query='url:"http://example.com"',
        )
        assert set(results) == set()
        results = self.make_query(
            environments=[self.environments["staging"]],
            search_filter_query='url:"http://example.com"',
        )
        assert set(results) == {self.group2}
        results = self.make_query(
            environments=[self.environments["staging"]],
            search_filter_query="server:bar.example.com",
        )
        assert set(results) == set()
    def test_bookmarked_by(self) -> None:
        """`bookmarks:<username>` returns only groups the user bookmarked."""
        results = self.make_query(search_filter_query="bookmarks:%s" % self.user.username)
        assert set(results) == {self.group2}
    def test_bookmarked_by_in_syntax(self) -> None:
        """`bookmarks:[a, b]` ORs bookmarks across multiple users."""
        self.run_test_query(f"bookmarks:[{self.user.username}]", [self.group2], [self.group1])
        user_2 = self.create_user()
        # Second user bookmarks group1 so the two-user list matches both groups.
        GroupBookmark.objects.create(
            user_id=user_2.id, group=self.group1, project=self.group2.project
        )
        self.run_test_query(
            f"bookmarks:[{self.user.username}, {user_2.username}]", [self.group2, self.group1], []
        )
    def test_bookmarked_by_with_environment(self) -> None:
        """Bookmark filter still respects the environment scope: the bookmarked
        group only appears in environments where it has events."""
        results = self.make_query(
            environments=[self.environments["staging"]],
            search_filter_query="bookmarks:%s" % self.user.username,
        )
        assert set(results) == {self.group2}
        results = self.make_query(
            environments=[self.environments["production"]],
            search_filter_query="bookmarks:%s" % self.user.username,
        )
        assert set(results) == set()
    def test_search_filter_query_with_custom_trends_tag(self) -> None:
        """A user-defined `trends` tag is searchable like any other custom tag."""
        trends = "high"
        self.store_event(
            data={
                "fingerprint": ["put-me-in-group2"],
                "timestamp": (self.group2.first_seen + timedelta(days=1)).isoformat(),
                "stacktrace": {"frames": [{"module": "group2"}]},
                "message": "group2",
                "tags": {"trends": trends},
            },
            project_id=self.project.id,
        )
        results = self.make_query(search_filter_query="trends:%s" % trends)
        assert set(results) == {self.group2}
    def test_search_filter_query_with_custom_trends_tag_and_trends_sort(self) -> None:
        """Filtering on a custom `trends` tag works together with `sort_by="trends"`;
        result ordering reflects the trends sort, not insertion order."""
        trends = "high"
        # group1 gets two newer tagged events, group2 gets one.
        for i in range(1, 3):
            self.store_event(
                data={
                    "fingerprint": ["put-me-in-group1"],
                    "timestamp": (self.group2.last_seen + timedelta(days=i)).isoformat(),
                    "stacktrace": {"frames": [{"module": "group1"}]},
                    "message": "group1",
                    "tags": {"trends": trends},
                },
                project_id=self.project.id,
            )
        self.store_event(
            data={
                "fingerprint": ["put-me-in-group2"],
                "timestamp": (self.group2.last_seen + timedelta(days=2)).isoformat(),
                "stacktrace": {"frames": [{"module": "group2"}]},
                "message": "group2",
                "tags": {"trends": trends},
            },
            project_id=self.project.id,
        )
        results = self.make_query(search_filter_query="trends:%s" % trends, sort_by="trends")
        assert list(results) == [self.group2, self.group1]
    def test_search_tag_overlapping_with_internal_fields(self) -> None:
        """A tag whose name shadows a promoted internal column (`email`) must be
        resolved as the tag, not the internal events column."""
        # Using a tag of email overlaps with the promoted user.email column in events.
        # We don't want to bypass public schema limits in issue search.
        self.store_event(
            data={
                "fingerprint": ["put-me-in-group2"],
                "timestamp": (self.group2.first_seen + timedelta(days=1)).isoformat(),
                "stacktrace": {"frames": [{"module": "group2"}]},
                "message": "group2",
                "tags": {"email": "tags@example.com"},
            },
            project_id=self.project.id,
        )
        results = self.make_query(search_filter_query="email:tags@example.com")
        assert set(results) == {self.group2}
    def test_project(self) -> None:
        """Querying a project with no events returns no groups."""
        results = self.make_query([self.create_project(name="other")])
        assert set(results) == set()
    def test_pagination(self) -> None:
        """Cursor pagination over date-sorted results: forward/backward paging,
        has_results flags at both ends, and paging past either boundary.
        Exercised both with and without the pre-snuba candidate optimization."""
        for options_set in [
            {"snuba.search.min-pre-snuba-candidates": None},
            {"snuba.search.min-pre-snuba-candidates": 500},
        ]:
            with self.options(options_set):
                # Page 1: newest group, nothing before it, something after.
                results = self.backend.query(
                    [self.project], limit=1, sort_by="date", referrer=Referrer.TESTING_TEST
                )
                assert set(results) == {self.group1}
                assert not results.prev.has_results
                assert results.next.has_results
                # Page 2 (forward): last page.
                results = self.backend.query(
                    [self.project],
                    cursor=results.next,
                    limit=1,
                    sort_by="date",
                    referrer=Referrer.TESTING_TEST,
                )
                assert set(results) == {self.group2}
                assert results.prev.has_results
                assert not results.next.has_results
                # note: previous cursor
                results = self.backend.query(
                    [self.project],
                    cursor=results.prev,
                    limit=1,
                    sort_by="date",
                    referrer=Referrer.TESTING_TEST,
                )
                assert set(results) == {self.group1}
                assert results.prev.has_results
                assert results.next.has_results
                # note: previous cursor, paging too far into 0 results
                results = self.backend.query(
                    [self.project],
                    cursor=results.prev,
                    limit=1,
                    sort_by="date",
                    referrer=Referrer.TESTING_TEST,
                )
                assert set(results) == set()
                assert not results.prev.has_results
                assert results.next.has_results
                # Forward again from the empty page recovers page 1.
                results = self.backend.query(
                    [self.project],
                    cursor=results.next,
                    limit=1,
                    sort_by="date",
                    referrer=Referrer.TESTING_TEST,
                )
                assert set(results) == {self.group1}
                assert results.prev.has_results
                assert results.next.has_results
                results = self.backend.query(
                    [self.project],
                    cursor=results.next,
                    limit=1,
                    sort_by="date",
                    referrer=Referrer.TESTING_TEST,
                )
                assert set(results) == {self.group2}
                assert results.prev.has_results
                assert not results.next.has_results
                # Paging past the end yields an empty page with only prev set.
                results = self.backend.query(
                    [self.project],
                    cursor=results.next,
                    limit=1,
                    sort_by="date",
                    referrer=Referrer.TESTING_TEST,
                )
                assert set(results) == set()
                assert results.prev.has_results
                assert not results.next.has_results
    def test_pagination_with_environment(self) -> None:
        """Pagination with an environment filter: hit counting stays correct
        across pages and the final page past the results is empty."""
        # Give group2 production events so both groups exist in production.
        for dt in [
            self.group1.first_seen + timedelta(days=1),
            self.group1.first_seen + timedelta(days=2),
            self.group1.last_seen + timedelta(days=1),
        ]:
            self.store_event(
                data={
                    "fingerprint": ["put-me-in-group2"],
                    "timestamp": dt.isoformat(),
                    "environment": "production",
                    "message": "group2",
                    "stacktrace": {"frames": [{"module": "group2"}]},
                },
                project_id=self.project.id,
            )
        results = self.backend.query(
            [self.project],
            environments=[self.environments["production"]],
            sort_by="date",
            limit=1,
            count_hits=True,
            referrer=Referrer.TESTING_TEST,
        )
        assert list(results) == [self.group2]
        assert results.hits == 2
        results = self.backend.query(
            [self.project],
            environments=[self.environments["production"]],
            sort_by="date",
            limit=1,
            cursor=results.next,
            count_hits=True,
            referrer=Referrer.TESTING_TEST,
        )
        assert list(results) == [self.group1]
        assert results.hits == 2
        # Past the last page: empty, but the total hit count is unchanged.
        results = self.backend.query(
            [self.project],
            environments=[self.environments["production"]],
            sort_by="date",
            limit=1,
            cursor=results.next,
            count_hits=True,
            referrer=Referrer.TESTING_TEST,
        )
        assert list(results) == []
        assert results.hits == 2
    def test_age_filter(self) -> None:
        """`firstSeen:` supports >=, <=, and combined range filters."""
        results = self.make_query(
            search_filter_query="firstSeen:>=%s" % date_to_query_format(self.group2.first_seen)
        )
        assert set(results) == {self.group2}
        results = self.make_query(
            search_filter_query="firstSeen:<=%s"
            % date_to_query_format(self.group1.first_seen + timedelta(minutes=1))
        )
        assert set(results) == {self.group1}
        results = self.make_query(
            search_filter_query="firstSeen:>=%s firstSeen:<=%s"
            % (
                date_to_query_format(self.group1.first_seen),
                date_to_query_format(self.group1.first_seen + timedelta(minutes=1)),
            )
        )
        assert set(results) == {self.group1}
    def test_age_filter_with_environment(self) -> None:
        """`firstSeen:` comparisons use the per-environment first_seen
        (GroupEnvironment), not the group-level one."""
        # add time instead to make it greater than or less than as needed.
        group1_first_seen = GroupEnvironment.objects.get(
            environment=self.environments["production"], group=self.group1
        ).first_seen
        assert group1_first_seen is not None
        results = self.make_query(
            environments=[self.environments["production"]],
            search_filter_query="firstSeen:>=%s" % date_to_query_format(group1_first_seen),
        )
        assert set(results) == {self.group1}
        results = self.make_query(
            environments=[self.environments["production"]],
            search_filter_query="firstSeen:<=%s" % date_to_query_format(group1_first_seen),
        )
        assert set(results) == {self.group1}
        results = self.make_query(
            environments=[self.environments["production"]],
            search_filter_query="firstSeen:>%s" % date_to_query_format(group1_first_seen),
        )
        assert set(results) == set()
        # A later event in a different environment creates a later per-env
        # first_seen in "development" without changing production's.
        self.store_event(
            data={
                "fingerprint": ["put-me-in-group1"],
                "timestamp": (group1_first_seen + timedelta(days=1)).isoformat(),
                "message": "group1",
                "stacktrace": {"frames": [{"module": "group1"}]},
                "environment": "development",
            },
            project_id=self.project.id,
        )
        results = self.make_query(
            environments=[self.environments["production"]],
            search_filter_query="firstSeen:>%s" % date_to_query_format(group1_first_seen),
        )
        assert set(results) == set()
        results = self.make_query(
            environments=[Environment.objects.get(name="development")],
            search_filter_query="firstSeen:>%s" % date_to_query_format(group1_first_seen),
        )
        assert set(results) == {self.group1}
    def test_times_seen_filter(self) -> None:
        """`times_seen:` supports exact, >=, and <= comparisons."""
        results = self.make_query([self.project], search_filter_query="times_seen:2")
        assert set(results) == {self.group1}
        results = self.make_query([self.project], search_filter_query="times_seen:>=2")
        assert set(results) == {self.group1}
        results = self.make_query([self.project], search_filter_query="times_seen:<=1")
        assert set(results) == {self.group2}
    def test_last_seen_filter(self) -> None:
        """`lastSeen:` supports >= and a combined >=/<= range."""
        results = self.make_query(
            search_filter_query="lastSeen:>=%s" % date_to_query_format(self.group1.last_seen)
        )
        assert set(results) == {self.group1}
        results = self.make_query(
            search_filter_query="lastSeen:>=%s lastSeen:<=%s"
            % (
                date_to_query_format(self.group1.last_seen),
                date_to_query_format(self.group1.last_seen + timedelta(minutes=1)),
            )
        )
        assert set(results) == {self.group1}
    def test_last_seen_filter_with_environment(self) -> None:
        """`lastSeen:` with an environment filter uses the per-environment
        last_seen; events in another environment don't leak into the result."""
        results = self.make_query(
            environments=[self.environments["production"]],
            search_filter_query="lastSeen:>=%s" % date_to_query_format(self.group1.last_seen),
        )
        assert set(results) == {self.group1}
        results = self.make_query(
            environments=[self.environments["production"]],
            search_filter_query="lastSeen:<=%s" % date_to_query_format(self.group1.last_seen),
        )
        assert set(results) == {self.group1}
        results = self.make_query(
            environments=[self.environments["production"]],
            search_filter_query="lastSeen:>%s" % date_to_query_format(self.group1.last_seen),
        )
        assert set(results) == set()
        # Newer event only in "development"; group-level last_seen advances too.
        self.store_event(
            data={
                "fingerprint": ["put-me-in-group1"],
                "timestamp": (self.group1.last_seen + timedelta(days=1)).isoformat(),
                "message": "group1",
                "stacktrace": {"frames": [{"module": "group1"}]},
                "environment": "development",
            },
            project_id=self.project.id,
        )
        self.group1.update(last_seen=self.group1.last_seen + timedelta(days=1))
        results = self.make_query(
            environments=[self.environments["production"]],
            search_filter_query="lastSeen:>%s" % date_to_query_format(self.group1.last_seen),
        )
        assert set(results) == set()
        results = self.make_query(
            environments=[Environment.objects.get(name="development")],
            search_filter_query="lastSeen:>%s" % date_to_query_format(self.group1.last_seen),
        )
        assert set(results) == set()
        results = self.make_query(
            environments=[Environment.objects.get(name="development")],
            search_filter_query="lastSeen:>=%s" % date_to_query_format(self.group1.last_seen),
        )
        assert set(results) == {self.group1}
    def test_date_filter(self) -> None:
        """`timestamp:` filters (>=, <=, range) match event times; a trailing
        `Z` UTC marker on the timestamp string is accepted and equivalent."""
        results = self.make_query(
            date_from=self.event2.datetime,
            search_filter_query="timestamp:>=%s" % date_to_query_format(self.event2.datetime),
        )
        assert set(results) == {self.group1, self.group2}
        results = self.make_query(
            date_to=self.event1.datetime + timedelta(minutes=1),
            search_filter_query="timestamp:<=%s"
            % date_to_query_format(self.event1.datetime + timedelta(minutes=1)),
        )
        assert set(results) == {self.group1}
        results = self.make_query(
            date_from=self.event1.datetime,
            date_to=self.event2.datetime + timedelta(minutes=1),
            search_filter_query="timestamp:>=%s timestamp:<=%s"
            % (
                date_to_query_format(self.event1.datetime),
                date_to_query_format(self.event2.datetime + timedelta(minutes=1)),
            ),
        )
        assert set(results) == {self.group1, self.group2}
        # Test with `Z` utc marker, should be equivalent
        results = self.make_query(
            date_from=self.event1.datetime,
            date_to=self.event2.datetime + timedelta(minutes=1),
            search_filter_query="timestamp:>=%s timestamp:<=%s"
            % (
                date_to_query_format(self.event1.datetime) + "Z",
                date_to_query_format(self.event2.datetime + timedelta(minutes=1)) + "Z",
            ),
        )
        assert set(results) == {self.group1, self.group2}
    def test_date_filter_with_environment(self) -> None:
        """date_from/date_to query arguments combine with environment scoping."""
        results = self.backend.query(
            [self.project],
            environments=[self.environments["production"]],
            date_from=self.event2.datetime,
            referrer=Referrer.TESTING_TEST,
        )
        assert set(results) == {self.group1}
        results = self.backend.query(
            [self.project],
            environments=[self.environments["production"]],
            date_to=self.event1.datetime + timedelta(minutes=1),
            referrer=Referrer.TESTING_TEST,
        )
        assert set(results) == {self.group1}
        results = self.backend.query(
            [self.project],
            environments=[self.environments["staging"]],
            date_from=self.event1.datetime,
            date_to=self.event2.datetime + timedelta(minutes=1),
            referrer=Referrer.TESTING_TEST,
        )
        assert set(results) == {self.group2}
    def test_linked(self) -> None:
        """`is:linked`/`is:unlinked` partition groups by having any external
        issue (integration-based or platform-based)."""
        linked_group1 = self.create_group_with_integration_external_issue()
        linked_group2 = self.create_group_with_platform_external_issue()
        results = self.make_query(search_filter_query="is:unlinked")
        assert set(results) == {self.group1, self.group2}
        results = self.make_query(search_filter_query="is:linked")
        assert set(results) == {linked_group1, linked_group2}
    def test_linked_with_only_integration_external_issue(self) -> None:
        """An integration external issue alone is enough to count as linked."""
        linked_group = self.create_group_with_integration_external_issue()
        results = self.make_query(search_filter_query="is:unlinked")
        assert set(results) == {self.group1, self.group2}
        results = self.make_query(search_filter_query="is:linked")
        assert set(results) == {linked_group}
    def test_linked_with_only_platform_external_issue(self) -> None:
        """A platform external issue alone is enough to count as linked."""
        linked_group = self.create_group_with_platform_external_issue()
        results = self.make_query(search_filter_query="is:unlinked")
        assert set(results) == {self.group1, self.group2}
        results = self.make_query(search_filter_query="is:linked")
        assert set(results) == {linked_group}
    def test_linked_with_environment(self) -> None:
        """linked/unlinked filters respect the environment each group has
        events in."""
        linked_group1 = self.create_group_with_integration_external_issue(environment="production")
        linked_group2 = self.create_group_with_platform_external_issue(environment="staging")
        results = self.make_query(
            environments=[self.environments["production"]], search_filter_query="is:unlinked"
        )
        assert set(results) == {self.group1}
        results = self.make_query(
            environments=[self.environments["staging"]], search_filter_query="is:unlinked"
        )
        assert set(results) == {self.group2}
        results = self.make_query(
            environments=[self.environments["production"]], search_filter_query="is:linked"
        )
        assert set(results) == {linked_group1}
        results = self.make_query(
            environments=[self.environments["staging"]], search_filter_query="is:linked"
        )
        assert set(results) == {linked_group2}
    def test_unassigned(self) -> None:
        """`is:assigned`/`is:unassigned` partition the two fixture groups."""
        results = self.make_query(search_filter_query="is:unassigned")
        assert set(results) == {self.group1}
        results = self.make_query(search_filter_query="is:assigned")
        assert set(results) == {self.group2}
    def test_unassigned_with_environment(self) -> None:
        """assigned/unassigned filters combine with environment scoping."""
        results = self.make_query(
            environments=[self.environments["production"]], search_filter_query="is:unassigned"
        )
        assert set(results) == {self.group1}
        results = self.make_query(
            environments=[self.environments["staging"]], search_filter_query="is:assigned"
        )
        assert set(results) == {self.group2}
        # group2 (the assigned group) has no production events.
        results = self.make_query(
            environments=[self.environments["production"]], search_filter_query="is:assigned"
        )
        assert set(results) == set()
    def test_assigned_to(self) -> None:
        """`assigned:<username>` matches user assignment only: reassigning the
        group to a team removes the user match, and unrelated users (including
        org owners) get no results."""
        results = self.make_query(search_filter_query="assigned:%s" % self.user.username)
        assert set(results) == {self.group2}
        # test team assignee
        ga = GroupAssignee.objects.get(
            user_id=self.user.id, group=self.group2, project=self.group2.project
        )
        ga.update(team=self.team, user_id=None)
        assert GroupAssignee.objects.get(id=ga.id).user_id is None
        results = self.make_query(search_filter_query="assigned:%s" % self.user.username)
        assert set(results) == set()
        # test when there should be no results
        other_user = self.create_user()
        results = self.make_query(search_filter_query="assigned:%s" % other_user.username)
        assert set(results) == set()
        owner = self.create_user()
        self.create_member(
            organization=self.project.organization, user=owner, role="owner", teams=[]
        )
        # test that owners don't see results for all teams
        results = self.make_query(search_filter_query="assigned:%s" % owner.username)
        assert set(results) == set()
    def test_assigned_to_me_my_teams(self) -> None:
        """`assigned:me` matches only direct user assignment while
        `assigned:my_teams` matches team-assigned groups."""
        my_team_group = self.store_event(
            data={
                "fingerprint": ["put-me-in-group-my-teams"],
                "event_id": "f" * 32,
                "timestamp": (self.base_datetime - timedelta(days=20)).isoformat(),
                "message": "baz",
                "environment": "staging",
                "tags": {
                    "server": "example.com",
                    "url": "http://example.com",
                    "sentry:user": "event2@example.com",
                },
                "level": "error",
            },
            project_id=self.project.id,
        ).group
        # assign the issue to my team instead of me
        GroupAssignee.objects.create(
            user_id=None, team_id=self.team.id, group=my_team_group, project=my_team_group.project
        )
        self.run_test_query(
            "assigned:me",
            [self.group2],
            user=self.user,
        )
        assert not GroupAssignee.objects.filter(user_id=self.user.id, group=my_team_group).exists()
        self.run_test_query(
            "assigned:my_teams",
            [my_team_group],
            user=self.user,
        )
    def test_assigned_to_me_my_teams_in_syntax(self) -> None:
        """`assigned:[me]`, `assigned:[my_teams]`, and `assigned:[me, my_teams]`
        list syntax: team assignment is only matched via `my_teams`."""
        my_team_group = self.store_event(
            data={
                "fingerprint": ["put-me-in-group-my-teams"],
                "event_id": "f" * 32,
                "timestamp": (self.base_datetime - timedelta(days=20)).isoformat(),
                "message": "baz",
                "environment": "staging",
                "tags": {
                    "server": "example.com",
                    "url": "http://example.com",
                    "sentry:user": "event2@example.com",
                },
                "level": "error",
            },
            project_id=self.project.id,
        ).group
        # assign the issue to my team instead of me
        GroupAssignee.objects.create(
            user_id=None, team_id=self.team.id, group=my_team_group, project=my_team_group.project
        )
        self.run_test_query(
            "assigned:[me]",
            [self.group2],
            user=self.user,
        )
        assert not GroupAssignee.objects.filter(user_id=self.user.id, group=my_team_group).exists()
        # NOTE(review): this repeats the "assigned:[me]" query above verbatim —
        # presumably redundant; confirm whether a different query was intended.
        self.run_test_query(
            "assigned:[me]",
            [self.group2],
            user=self.user,
        )
        self.run_test_query(
            "assigned:[my_teams]",
            [my_team_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned:[me, my_teams]",
            [self.group2, my_team_group],
            user=self.user,
        )
    def test_assigned_to_in_syntax(self) -> None:
        """`assigned:[...]` list syntax with mixed users, team slugs (`#team`),
        `me`, and `none`; assignment changes flip which list entries match."""
        group_3 = self.store_event(
            data={
                "fingerprint": ["put-me-in-group3"],
                "event_id": "c" * 32,
                "timestamp": (self.base_datetime - timedelta(days=20)).isoformat(),
            },
            project_id=self.project.id,
        ).group
        group_3.status = GroupStatus.MUTED
        group_3.substatus = None
        group_3.save()
        other_user = self.create_user()
        self.run_test_query(
            f"assigned:[{self.user.username}, {other_user.username}]",
            [self.group2],
            [self.group1, group_3],
        )
        GroupAssignee.objects.create(project=self.project, group=group_3, user_id=other_user.id)
        self.run_test_query(
            f"assigned:[{self.user.username}, {other_user.username}]",
            [self.group2, group_3],
            [self.group1],
        )
        self.run_test_query(
            f"assigned:[#{self.team.slug}, {other_user.username}]",
            [group_3],
            [self.group1, self.group2],
        )
        # Move group2's assignment from the user to the team: username no longer
        # matches it but the team slug does.
        ga_2 = GroupAssignee.objects.get(
            user_id=self.user.id, group=self.group2, project=self.group2.project
        )
        ga_2.update(team=self.team, user_id=None)
        self.run_test_query(
            f"assigned:[{self.user.username}, {other_user.username}]",
            [group_3],
            [self.group1, self.group2],
        )
        self.run_test_query(
            f"assigned:[#{self.team.slug}, {other_user.username}]",
            [self.group2, group_3],
            [self.group1],
        )
        self.run_test_query(
            f"assigned:[me, none, {other_user.username}]",
            [self.group1, group_3],
            [self.group2],
        )
    def test_assigned_or_suggested_in_syntax(self) -> None:
        """`assigned_or_suggested:[...]` matches groups either assigned to or
        suggested (GroupOwner) for the listed actors; a direct assignment to
        someone else overrides that group's suggestions for other users."""
        Group.objects.all().delete()
        group = self.store_event(
            data={
                "timestamp": before_now(seconds=180).isoformat(),
                "fingerprint": ["group-1"],
            },
            project_id=self.project.id,
        ).group
        group1 = self.store_event(
            data={
                "timestamp": before_now(seconds=185).isoformat(),
                "fingerprint": ["group-2"],
            },
            project_id=self.project.id,
        ).group
        group2 = self.store_event(
            data={
                "timestamp": before_now(seconds=190).isoformat(),
                "fingerprint": ["group-3"],
            },
            project_id=self.project.id,
        ).group
        assigned_group = self.store_event(
            data={
                "timestamp": before_now(seconds=195).isoformat(),
                "fingerprint": ["group-4"],
            },
            project_id=self.project.id,
        ).group
        assigned_to_other_group = self.store_event(
            data={
                "timestamp": before_now(seconds=195).isoformat(),
                "fingerprint": ["group-5"],
            },
            project_id=self.project.id,
        ).group
        # No owners or assignees yet: "me" matches nothing.
        self.run_test_query(
            "assigned_or_suggested:[me]",
            [],
            [group, group1, group2, assigned_group, assigned_to_other_group],
        )
        # Suggest (GroupOwner) two groups to self.user.
        GroupOwner.objects.create(
            group=assigned_to_other_group,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=None,
            user_id=self.user.id,
        )
        GroupOwner.objects.create(
            group=group,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=None,
            user_id=self.user.id,
        )
        self.run_test_query(
            "assigned_or_suggested:[me]",
            [group, assigned_to_other_group],
            [group1, group2, assigned_group],
        )
        # Because assigned_to_other_event is assigned to self.other_user, it should not show up in assigned_or_suggested search for anyone but self.other_user. (aka. they are now the only owner)
        other_user = self.create_user("other@user.com", is_superuser=False)
        GroupAssignee.objects.create(
            group=assigned_to_other_group,
            project=self.project,
            user_id=other_user.id,
        )
        self.run_test_query(
            "assigned_or_suggested:[me]",
            [group],
            [group1, group2, assigned_group, assigned_to_other_group],
        )
        self.run_test_query(
            f"assigned_or_suggested:[{other_user.email}]",
            [assigned_to_other_group],
            [group, group1, group2, assigned_group],
        )
        GroupAssignee.objects.create(
            group=assigned_group, project=self.project, user_id=self.user.id
        )
        self.run_test_query(
            f"assigned_or_suggested:[{self.user.email}]",
            [assigned_group, group],
        )
        # Team-based suggestion: the team slug now matches `group`.
        GroupOwner.objects.create(
            group=group,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=self.team.id,
            user_id=None,
        )
        self.run_test_query(
            f"assigned_or_suggested:[#{self.team.slug}]",
            [group],
        )
        self.run_test_query(
            "assigned_or_suggested:[me, none]",
            [group, group1, group2, assigned_group],
            [assigned_to_other_group],
        )
        # A suggestion for a third user makes group2 no longer "none".
        not_me = self.create_user(email="notme@sentry.io")
        GroupOwner.objects.create(
            group=group2,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=None,
            user_id=not_me.id,
        )
        self.run_test_query(
            "assigned_or_suggested:[me, none]",
            [group, group1, assigned_group],
            [assigned_to_other_group, group2],
        )
        GroupOwner.objects.filter(group=group, user_id=self.user.id).delete()
        self.run_test_query(
            f"assigned_or_suggested:[me, none, #{self.team.slug}]",
            [group, group1, assigned_group],
            [assigned_to_other_group, group2],
        )
        self.run_test_query(
            f"assigned_or_suggested:[me, none, #{self.team.slug}, {not_me.email}]",
            [group, group1, assigned_group, group2],
            [assigned_to_other_group],
        )
    def test_assigned_or_suggested_my_teams(self) -> None:
        """`assigned_or_suggested:me` vs `assigned_or_suggested:my_teams` (bare,
        non-list syntax): user suggestions/assignments match `me` only, team
        assignments/suggestions match `my_teams` only."""
        Group.objects.all().delete()
        group = self.store_event(
            data={
                "timestamp": before_now(seconds=180).isoformat(),
                "fingerprint": ["group-1"],
            },
            project_id=self.project.id,
        ).group
        group1 = self.store_event(
            data={
                "timestamp": before_now(seconds=185).isoformat(),
                "fingerprint": ["group-2"],
            },
            project_id=self.project.id,
        ).group
        group2 = self.store_event(
            data={
                "timestamp": before_now(seconds=190).isoformat(),
                "fingerprint": ["group-3"],
            },
            project_id=self.project.id,
        ).group
        assigned_group = self.store_event(
            data={
                "timestamp": before_now(seconds=195).isoformat(),
                "fingerprint": ["group-4"],
            },
            project_id=self.project.id,
        ).group
        assigned_to_other_group = self.store_event(
            data={
                "timestamp": before_now(seconds=195).isoformat(),
                "fingerprint": ["group-5"],
            },
            project_id=self.project.id,
        ).group
        my_team_group = self.store_event(
            data={
                "fingerprint": ["put-me-in-group-my-teams"],
                "event_id": "f" * 32,
                "timestamp": (self.base_datetime - timedelta(days=20)).isoformat(),
                "message": "baz",
                "environment": "staging",
                "tags": {
                    "server": "example.com",
                    "url": "http://example.com",
                    "sentry:user": "event2@example.com",
                },
                "level": "error",
            },
            project_id=self.project.id,
        ).group
        # Nothing assigned or suggested yet: both queries are empty.
        self.run_test_query(
            "assigned_or_suggested:me",
            [],
            [group, group1, group2, assigned_group, assigned_to_other_group, my_team_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:my_teams",
            [],
            [group, group1, group2, assigned_group, assigned_to_other_group, my_team_group],
            user=self.user,
        )
        # Suggest two groups to the user; assign one group to the team.
        GroupOwner.objects.create(
            group=assigned_to_other_group,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=None,
            user_id=self.user.id,
        )
        GroupOwner.objects.create(
            group=group,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=None,
            user_id=self.user.id,
        )
        GroupAssignee.objects.create(
            user_id=None, team_id=self.team.id, group=my_team_group, project=my_team_group.project
        )
        self.run_test_query(
            "assigned_or_suggested:me",
            [group, assigned_to_other_group],
            [group1, group2, assigned_group, my_team_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:my_teams",
            [my_team_group],
            [group, group1, group2, assigned_group, assigned_to_other_group],
            user=self.user,
        )
        # Because assigned_to_other_event is assigned to self.other_user, it should not show up in assigned_or_suggested search for anyone but self.other_user. (aka. they are now the only owner)
        other_user = self.create_user("other@user.com", is_superuser=False)
        GroupAssignee.objects.create(
            group=assigned_to_other_group,
            project=self.project,
            user_id=other_user.id,
        )
        self.run_test_query(
            "assigned_or_suggested:me",
            [group],
            [group1, group2, assigned_group, my_team_group, assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:my_teams",
            [my_team_group],
            [group, group1, group2, assigned_group, assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            f"assigned_or_suggested:{other_user.email}",
            [assigned_to_other_group],
            [group, group1, group2, assigned_group, my_team_group],
            user=self.user,
        )
        GroupAssignee.objects.create(
            group=assigned_group, project=self.project, user_id=self.user.id
        )
        self.run_test_query(
            f"assigned_or_suggested:{self.user.email}",
            [assigned_group, group],
            [group1, group2, my_team_group, assigned_to_other_group],
            user=self.user,
        )
        # Team suggestion on `group`: team slug now matches both team-linked groups.
        GroupOwner.objects.create(
            group=group,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=self.team.id,
            user_id=None,
        )
        self.run_test_query(
            f"assigned_or_suggested:#{self.team.slug}",
            [group, my_team_group],
            [group1, group2, assigned_group, assigned_to_other_group],
            user=self.user,
        )
    def test_assigned_or_suggested_my_teams_in_syntax(self) -> None:
        """`assigned_or_suggested:[...]` list syntax combining `me`, `my_teams`,
        `none`, team slugs, and emails; verifies the union semantics of each
        combination as ownership/assignment state evolves."""
        Group.objects.all().delete()
        group = self.store_event(
            data={
                "timestamp": before_now(seconds=180).isoformat(),
                "fingerprint": ["group-1"],
            },
            project_id=self.project.id,
        ).group
        group1 = self.store_event(
            data={
                "timestamp": before_now(seconds=185).isoformat(),
                "fingerprint": ["group-2"],
            },
            project_id=self.project.id,
        ).group
        group2 = self.store_event(
            data={
                "timestamp": before_now(seconds=190).isoformat(),
                "fingerprint": ["group-3"],
            },
            project_id=self.project.id,
        ).group
        assigned_group = self.store_event(
            data={
                "timestamp": before_now(seconds=195).isoformat(),
                "fingerprint": ["group-4"],
            },
            project_id=self.project.id,
        ).group
        assigned_to_other_group = self.store_event(
            data={
                "timestamp": before_now(seconds=195).isoformat(),
                "fingerprint": ["group-5"],
            },
            project_id=self.project.id,
        ).group
        my_team_group = self.store_event(
            data={
                "fingerprint": ["put-me-in-group-my-teams"],
                "event_id": "f" * 32,
                "timestamp": (self.base_datetime - timedelta(days=20)).isoformat(),
                "message": "baz",
                "environment": "staging",
                "tags": {
                    "server": "example.com",
                    "url": "http://example.com",
                    "sentry:user": "event2@example.com",
                },
                "level": "error",
            },
            project_id=self.project.id,
        ).group
        # Baseline: nothing assigned or suggested, every combination is empty.
        self.run_test_query(
            "assigned_or_suggested:[me]",
            [],
            [group, group1, group2, assigned_group, assigned_to_other_group, my_team_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[my_teams]",
            [],
            [group, group1, group2, assigned_group, assigned_to_other_group, my_team_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[me, my_teams]",
            [],
            [group, group1, group2, assigned_group, assigned_to_other_group, my_team_group],
            user=self.user,
        )
        # Suggest two groups to the user and team-assign my_team_group.
        GroupOwner.objects.create(
            group=assigned_to_other_group,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=None,
            user_id=self.user.id,
        )
        GroupOwner.objects.create(
            group=group,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=None,
            user_id=self.user.id,
        )
        GroupAssignee.objects.create(
            user_id=None, team_id=self.team.id, group=my_team_group, project=my_team_group.project
        )
        self.run_test_query(
            "assigned_or_suggested:[me]",
            [group, assigned_to_other_group],
            [group1, group2, assigned_group, my_team_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[my_teams]",
            [my_team_group],
            [group, group1, group2, assigned_group, assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[me, my_teams]",
            [group, assigned_to_other_group, my_team_group],
            [group1, group2, assigned_group],
            user=self.user,
        )
        # Because assigned_to_other_event is assigned to self.other_user, it should not show up in assigned_or_suggested search for anyone but self.other_user. (aka. they are now the only owner)
        other_user = self.create_user("other@user.com", is_superuser=False)
        GroupAssignee.objects.create(
            group=assigned_to_other_group,
            project=self.project,
            user_id=other_user.id,
        )
        self.run_test_query(
            "assigned_or_suggested:[me]",
            [group],
            [group1, group2, assigned_group, my_team_group, assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[my_teams]",
            [my_team_group],
            [group, group1, group2, assigned_group, assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[me, my_teams]",
            [group, my_team_group],
            [group1, group2, assigned_group, assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            f"assigned_or_suggested:[{other_user.email}]",
            [assigned_to_other_group],
            [group, group1, group2, assigned_group, my_team_group],
            user=self.user,
        )
        GroupAssignee.objects.create(
            group=assigned_group, project=self.project, user_id=self.user.id
        )
        self.run_test_query(
            f"assigned_or_suggested:[{self.user.email}]",
            [assigned_group, group],
            [group1, group2, my_team_group, assigned_to_other_group],
            user=self.user,
        )
        # Team suggestion on `group` brings it into the team-slug results.
        GroupOwner.objects.create(
            group=group,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=self.team.id,
            user_id=None,
        )
        self.run_test_query(
            f"assigned_or_suggested:[#{self.team.slug}]",
            [group, my_team_group],
            [group1, group2, assigned_group, assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[me, none]",
            [group, group1, group2, assigned_group],
            [my_team_group, assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[my_teams, none]",
            [group, group1, group2, my_team_group],
            [assigned_to_other_group, assigned_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[me, my_teams, none]",
            [group, group1, group2, my_team_group, assigned_group],
            [assigned_to_other_group],
            user=self.user,
        )
        # Suggesting group2 to a third user removes it from the "none" bucket.
        not_me = self.create_user(email="notme@sentry.io")
        GroupOwner.objects.create(
            group=group2,
            project=self.project,
            organization=self.organization,
            type=0,
            team_id=None,
            user_id=not_me.id,
        )
        self.run_test_query(
            "assigned_or_suggested:[me, none]",
            [group, group1, assigned_group],
            [group2, my_team_group, assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[my_teams, none]",
            [group, group1, my_team_group],
            [group2, assigned_group, assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            "assigned_or_suggested:[me, my_teams, none]",
            [group, group1, my_team_group, assigned_group],
            [group2, assigned_to_other_group],
            user=self.user,
        )
        # Drop the direct user suggestion on `group`; the team suggestion remains.
        GroupOwner.objects.filter(group=group, user_id=self.user.id).delete()
        self.run_test_query(
            f"assigned_or_suggested:[me, none, #{self.team.slug}]",
            [group, group1, assigned_group, my_team_group],
            [assigned_to_other_group, group2],
            user=self.user,
        )
        self.run_test_query(
            f"assigned_or_suggested:[my_teams, none, #{self.team.slug}]",
            [group, group1, my_team_group],
            [assigned_to_other_group, group2, assigned_group],
            user=self.user,
        )
        self.run_test_query(
            f"assigned_or_suggested:[me, my_teams, none, #{self.team.slug}]",
            [group, group1, my_team_group, assigned_group],
            [assigned_to_other_group, group2],
            user=self.user,
        )
        self.run_test_query(
            f"assigned_or_suggested:[me, none, #{self.team.slug}, {not_me.email}]",
            [group, group1, group2, assigned_group, my_team_group],
            [assigned_to_other_group],
            user=self.user,
        )
        self.run_test_query(
            f"assigned_or_suggested:[my_teams, none, #{self.team.slug}, {not_me.email}]",
            [group, group1, group2, my_team_group],
            [assigned_to_other_group, assigned_group],
            user=self.user,
        )
        self.run_test_query(
            f"assigned_or_suggested:[me, my_teams, none, #{self.team.slug}, {not_me.email}]",
            [group, group1, group2, my_team_group, assigned_group],
            [assigned_to_other_group],
            user=self.user,
        )
def test_assigned_to_with_environment(self) -> None:
    """Assignee search honours the environment filter: the assigned group is
    only returned for environments in which it matches."""
    query = "assigned:%s" % self.user.username
    staging_results = self.make_query(
        environments=[self.environments["staging"]], search_filter_query=query
    )
    assert set(staging_results) == {self.group2}
    production_results = self.make_query(
        environments=[self.environments["production"]], search_filter_query=query
    )
    assert set(production_results) == set()
def test_subscribed_by(self) -> None:
    """A `subscribed:` filter returns only groups the user is subscribed to."""
    query = "subscribed:%s" % self.user.username
    matches = self.make_query([self.group1.project], search_filter_query=query)
    assert set(matches) == {self.group1}
def test_subscribed_by_in_syntax(self) -> None:
    """`subscribed:[a, b]` bracket syntax matches groups subscribed to by any
    of the listed users."""
    self.run_test_query(f"subscribed:[{self.user.username}]", [self.group1], [self.group2])
    user_2 = self.create_user()
    # Subscribe a second user to group2 so both groups match the two-user query.
    GroupSubscription.objects.create(
        user_id=user_2.id, group=self.group2, project=self.project, is_active=True
    )
    self.run_test_query(
        f"subscribed:[{self.user.username}, {user_2.username}]", [self.group1, self.group2], []
    )
def test_subscribed_by_with_environment(self) -> None:
    """Subscription search honours the environment filter."""
    query = "subscribed:%s" % self.user.username
    in_production = self.make_query(
        [self.group1.project],
        environments=[self.environments["production"]],
        search_filter_query=query,
    )
    assert set(in_production) == {self.group1}
    in_staging = self.make_query(
        [self.group1.project],
        environments=[self.environments["staging"]],
        search_filter_query=query,
    )
    assert set(in_staging) == set()
@mock.patch("sentry.search.snuba.executors.bulk_raw_query")
def test_snuba_not_called_optimization(self, query_mock: mock.MagicMock) -> None:
    """A Postgres-only filter (`status:`) short-circuits without querying
    Snuba; an event-property filter (`last_seen:`) does hit Snuba."""
    assert self.make_query(search_filter_query="status:unresolved").results == [self.group1]
    assert not query_mock.called
    assert (
        self.make_query(
            search_filter_query="last_seen:>%s" % date_to_query_format(timezone.now()),
            sort_by="date",
        ).results
        == []
    )
    assert query_mock.called
@mock.patch("sentry.search.snuba.executors.bulk_raw_query")
def test_reduce_bulk_results_none_total(self, bulk_raw_query_mock: mock.MagicMock) -> None:
    """Snuba responses whose `totals.total` is None reduce to an empty result
    set instead of raising."""
    bulk_raw_query_mock.return_value = [
        {"data": [], "totals": {"total": None}},
        {"data": [], "totals": {"total": None}},
    ]
    assert (
        self.make_query(
            search_filter_query="last_seen:>%s" % date_to_query_format(timezone.now()),
            sort_by="date",
        ).results
        == []
    )
    assert bulk_raw_query_mock.called
@mock.patch("sentry.search.snuba.executors.bulk_raw_query")
def test_reduce_bulk_results_none_data(self, bulk_raw_query_mock: mock.MagicMock) -> None:
    """Snuba responses whose `data` is None reduce to an empty result set
    instead of raising."""
    bulk_raw_query_mock.return_value = [
        {"data": None, "totals": {"total": 0}},
        {"data": None, "totals": {"total": 0}},
    ]
    assert (
        self.make_query(
            search_filter_query="last_seen:>%s" % date_to_query_format(timezone.now()),
            sort_by="date",
        ).results
        == []
    )
    assert bulk_raw_query_mock.called
def test_pre_and_post_filtering(self) -> None:
    """Search behaves correctly when the pre-Snuba candidate limit is tiny,
    forcing the post-filter path."""
    prev_max_pre = options.get("snuba.search.max-pre-snuba-candidates")
    options.set("snuba.search.max-pre-snuba-candidates", 1)
    try:
        # normal queries work as expected
        results = self.make_query(search_filter_query="foo")
        assert set(results) == {self.group1}
        results = self.make_query(search_filter_query="bar")
        assert set(results) == {self.group2}
        # no candidate matches in Sentry, immediately return empty paginator
        results = self.make_query(search_filter_query="NO MATCHES IN SENTRY")
        assert set(results) == set()
        # too many candidates, skip pre-filter, requires >1 postfilter queries
        results = self.make_query()
        assert set(results) == {self.group1, self.group2}
    finally:
        # Always restore the option so other tests are unaffected.
        options.set("snuba.search.max-pre-snuba-candidates", prev_max_pre)
def test_optimizer_enabled(self) -> None:
    """Search still returns correct results with the pre-Snuba candidate
    optimizer option enabled."""
    prev_optimizer_enabled = options.get("snuba.search.pre-snuba-candidates-optimizer")
    options.set("snuba.search.pre-snuba-candidates-optimizer", True)
    try:
        results = self.make_query(
            search_filter_query="server:example.com",
            environments=[self.environments["production"]],
        )
        assert set(results) == {self.group1}
    finally:
        # Restore the previous option value for test isolation.
        options.set("snuba.search.pre-snuba-candidates-optimizer", prev_optimizer_enabled)
def test_search_out_of_range(self) -> None:
    """Contradictory timestamp bounds (> and < the same instant) yield no
    results rather than erroring."""
    the_date = datetime(2000, 1, 1, 0, 0, 0, tzinfo=UTC)
    results = self.make_query(
        search_filter_query=f"event.timestamp:>{the_date} event.timestamp:<{the_date}",
        date_from=the_date,
        date_to=the_date,
    )
    assert set(results) == set()
def test_regressed_in_release(self) -> None:
    """`regressed_in_release:` matches only groups with a REGRESSED group
    history entry for that specific release."""
    # expect no groups within the results since there are no releases
    results = self.make_query(search_filter_query="regressed_in_release:fake")
    assert set(results) == set()
    # expect no groups even though there is a release; since no group regressed in this release
    release_1 = self.create_release()
    results = self.make_query(search_filter_query="regressed_in_release:%s" % release_1.version)
    assert set(results) == set()
    # Create a new event so that we get a group in this release
    group = self.store_event(
        data={
            "release": release_1.version,
        },
        project_id=self.project.id,
    ).group
    # # Should still be no group since we didn't regress in this release
    results = self.make_query(search_filter_query="regressed_in_release:%s" % release_1.version)
    assert set(results) == set()
    record_group_history(group, GroupHistoryStatus.REGRESSED, release=release_1)
    results = self.make_query(search_filter_query="regressed_in_release:%s" % release_1.version)
    assert set(results) == {group}
    # Make sure this works correctly with multiple releases
    release_2 = self.create_release()
    group_2 = self.store_event(
        data={
            "fingerprint": ["put-me-in-group9001"],
            "event_id": "a" * 32,
            "release": release_2.version,
        },
        project_id=self.project.id,
    ).group
    record_group_history(group_2, GroupHistoryStatus.REGRESSED, release=release_2)
    results = self.make_query(search_filter_query="regressed_in_release:%s" % release_1.version)
    assert set(results) == {group}
    results = self.make_query(search_filter_query="regressed_in_release:%s" % release_2.version)
    assert set(results) == {group_2}
def test_first_release(self) -> None:
    """`first_release:` matches only groups whose first event was in that
    release."""
    # expect no groups within the results since there are no releases
    results = self.make_query(search_filter_query="first_release:%s" % "fake")
    assert set(results) == set()
    # expect no groups even though there is a release; since no group
    # is attached to a release
    release_1 = self.create_release(self.project)
    results = self.make_query(search_filter_query="first_release:%s" % release_1.version)
    assert set(results) == set()
    # Create a new event so that we get a group in this release
    group = self.store_event(
        data={
            "fingerprint": ["put-me-in-group9001"],
            "event_id": "a" * 32,
            "message": "hello",
            "environment": "production",
            "tags": {"server": "example.com"},
            "release": release_1.version,
            "stacktrace": {"frames": [{"module": "group1"}]},
        },
        project_id=self.project.id,
    ).group
    results = self.make_query(search_filter_query="first_release:%s" % release_1.version)
    assert set(results) == {group}
def test_first_release_in_syntax(self) -> None:
    """`first_release:[a, b]` bracket syntax matches groups first seen in any
    of the listed releases."""
    # expect no groups within the results since there are no releases
    self.run_test_query("first_release:[fake, fake2]", [])
    # expect no groups even though there is a release; since no group
    # is attached to a release
    release_1 = self.create_release(self.project)
    release_2 = self.create_release(self.project)
    self.run_test_query(f"first_release:[{release_1.version}, {release_2.version}]", [])
    # Create a new event so that we get a group in release 1
    group = self.store_event(
        data={
            "fingerprint": ["put-me-in-group9001"],
            "event_id": "a" * 32,
            "message": "hello",
            "environment": "production",
            "tags": {"server": "example.com"},
            "release": release_1.version,
            "stacktrace": {"frames": [{"module": "group1"}]},
        },
        project_id=self.project.id,
    ).group
    self.run_test_query(
        f"first_release:[{release_1.version}, {release_2.version}]",
        [group],
        [self.group1, self.group2],
    )
    # Create a new event so that we get a group in release 2
    group_2 = self.store_event(
        data={
            "fingerprint": ["put-me-in-group9002"],
            "event_id": "a" * 32,
            "message": "hello",
            "environment": "production",
            "tags": {"server": "example.com"},
            "release": release_2.version,
            "stacktrace": {"frames": [{"module": "group1"}]},
        },
        project_id=self.project.id,
    ).group
    self.run_test_query(
        f"first_release:[{release_1.version}, {release_2.version}]",
        [group, group_2],
        [self.group1, self.group2],
    )
def test_first_release_environments(self) -> None:
    """With an environment filter, `first_release:` requires the matching
    GroupEnvironment row to carry that first_release."""
    results = self.make_query(
        environments=[self.environments["production"]],
        search_filter_query="first_release:fake",
    )
    assert set(results) == set()
    release = self.create_release(self.project)
    group_env = GroupEnvironment.get_or_create(
        group_id=self.group1.id, environment_id=self.environments["production"].id
    )[0]
    # No match until the GroupEnvironment's first_release is set below.
    results = self.make_query(
        environments=[self.environments["production"]],
        search_filter_query=f"first_release:{release.version}",
    )
    assert set(results) == set()
    group_env.first_release = release
    group_env.save()
    results = self.make_query(
        environments=[self.environments["production"]],
        search_filter_query=f"first_release:{release.version}",
    )
    assert set(results) == {self.group1}
def test_first_release_environments_in_syntax(self) -> None:
    """Bracketed `first_release:[...]` combined with environment filters
    matches per-environment first_release data without duplicating groups."""
    self.run_test_query(
        "first_release:[fake, fake2]",
        [],
        [self.group1, self.group2],
        environments=[self.environments["production"]],
    )
    release = self.create_release(self.project)
    group_1_env = GroupEnvironment.objects.get(
        group_id=self.group1.id, environment_id=self.environments["production"].id
    )
    group_1_env.update(first_release=release)
    self.group1.first_release = release
    self.group1.save()
    self.run_test_query(
        f"first_release:[{release.version}, fake2]",
        [self.group1],
        [self.group2],
        environments=[self.environments["production"]],
    )
    group_2_env = GroupEnvironment.objects.get(
        group_id=self.group2.id, environment_id=self.environments["staging"].id
    )
    group_2_env.update(first_release=release)
    self.group2.first_release = release
    self.group2.save()
    self.run_test_query(
        f"first_release:[{release.version}, fake2]",
        [self.group1, self.group2],
        [],
        environments=[self.environments["production"], self.environments["staging"]],
    )
    # Make sure we don't get duplicate groups
    GroupEnvironment.objects.create(
        group_id=self.group1.id,
        environment_id=self.environments["staging"].id,
        first_release=release,
    )
    self.run_test_query(
        f"first_release:[{release.version}, fake2]",
        [self.group1, self.group2],
        [],
        environments=[self.environments["production"], self.environments["staging"]],
    )
def test_query_enclosed_in_quotes(self) -> None:
    """Quoted free-text terms match the expected groups."""
    for term, expected_group in (('"foo"', self.group1), ('"bar"', self.group2)):
        results = self.make_query(search_filter_query=term)
        assert set(results) == {expected_group}
def test_wildcard(self) -> None:
    """Wildcard (`*`) message/tag matching works, is case-insensitive, treats
    brackets literally, and supports negation."""
    escaped_event = self.store_event(
        data={
            "fingerprint": ["hello-there"],
            "event_id": "f" * 32,
            "message": "somet[hing]",
            "environment": "production",
            "tags": {"server": "example.net"},
            "timestamp": self.base_datetime.isoformat(),
            "stacktrace": {"frames": [{"module": "group1"}]},
        },
        project_id=self.project.id,
    )
    # Note: Adding in `environment:production` so that we make sure we query
    # in both snuba and postgres
    results = self.make_query(search_filter_query="environment:production so*t")
    assert set(results) == {escaped_event.group}
    # Make sure it's case insensitive
    results = self.make_query(search_filter_query="environment:production SO*t")
    assert set(results) == {escaped_event.group}
    results = self.make_query(search_filter_query="environment:production so*zz")
    assert set(results) == set()
    # Square brackets are matched literally, not as character classes.
    results = self.make_query(search_filter_query="environment:production [hing]")
    assert set(results) == {escaped_event.group}
    results = self.make_query(search_filter_query="environment:production s*]")
    assert set(results) == {escaped_event.group}
    results = self.make_query(search_filter_query="environment:production server:example.*")
    assert set(results) == {self.group1, escaped_event.group}
    results = self.make_query(search_filter_query="environment:production !server:*net")
    assert set(results) == {self.group1}
    # TODO: Disabling tests that use [] syntax for the moment. Re-enable
    # these if we decide to add back in, or remove if this comment has been
    # here a while.
    # results = self.make_query(
    #     search_filter_query='environment:production [s][of][mz]',
    # )
    # assert set(results) == set([escaped_event.group])
    # results = self.make_query(
    #     search_filter_query='environment:production [z][of][mz]',
    # )
    # assert set(results) == set()
def test_null_tags(self) -> None:
    """Tag filters, negated tag filters, and `has:`/`!has:` behave correctly
    when some events lack the tag entirely."""
    tag_event = self.store_event(
        data={
            "fingerprint": ["hello-there"],
            "event_id": "f" * 32,
            "message": "something",
            "environment": "production",
            "tags": {"server": "example.net"},
            "timestamp": self.base_datetime.isoformat(),
            "stacktrace": {"frames": [{"module": "group1"}]},
        },
        project_id=self.project.id,
    )
    # Same message/env, but no "server" tag at all.
    no_tag_event = self.store_event(
        data={
            "fingerprint": ["hello-there-2"],
            "event_id": "5" * 32,
            "message": "something",
            "environment": "production",
            "timestamp": self.base_datetime.isoformat(),
            "stacktrace": {"frames": [{"module": "group2"}]},
        },
        project_id=self.project.id,
    )
    # Negated tag filters include groups with no such tag.
    results = self.make_query(search_filter_query="environment:production !server:*net")
    assert set(results) == {self.group1, no_tag_event.group}
    results = self.make_query(search_filter_query="environment:production server:*net")
    assert set(results) == {tag_event.group}
    results = self.make_query(search_filter_query="environment:production !server:example.net")
    assert set(results) == {self.group1, no_tag_event.group}
    results = self.make_query(search_filter_query="environment:production server:example.net")
    assert set(results) == {tag_event.group}
    results = self.make_query(search_filter_query="environment:production has:server")
    assert set(results) == {self.group1, tag_event.group}
    results = self.make_query(search_filter_query="environment:production !has:server")
    assert set(results) == {no_tag_event.group}
def test_null_promoted_tags(self) -> None:
    """Same null-tag semantics as test_null_tags, but for a promoted tag
    (`logger`)."""
    tag_event = self.store_event(
        data={
            "fingerprint": ["hello-there"],
            "event_id": "f" * 32,
            "message": "something",
            "environment": "production",
            "tags": {"logger": "csp"},
            "timestamp": self.base_datetime.isoformat(),
            "stacktrace": {"frames": [{"module": "group1"}]},
        },
        project_id=self.project.id,
    )
    # Same message/env, but no "logger" tag at all.
    no_tag_event = self.store_event(
        data={
            "fingerprint": ["hello-there-2"],
            "event_id": "5" * 32,
            "message": "something",
            "environment": "production",
            "timestamp": self.base_datetime.isoformat(),
            "stacktrace": {"frames": [{"module": "group2"}]},
        },
        project_id=self.project.id,
    )
    results = self.make_query(search_filter_query="environment:production !logger:*sp")
    assert set(results) == {self.group1, no_tag_event.group}
    results = self.make_query(search_filter_query="environment:production logger:*sp")
    assert set(results) == {tag_event.group}
    results = self.make_query(search_filter_query="environment:production !logger:csp")
    assert set(results) == {self.group1, no_tag_event.group}
    results = self.make_query(search_filter_query="environment:production logger:csp")
    assert set(results) == {tag_event.group}
    results = self.make_query(search_filter_query="environment:production has:logger")
    assert set(results) == {tag_event.group}
    results = self.make_query(search_filter_query="environment:production !has:logger")
    assert set(results) == {self.group1, no_tag_event.group}
def test_sort_multi_project(self) -> None:
    """Each sort mode orders groups across multiple projects as expected."""
    self.set_up_multi_project()
    projects = [self.project, self.project2]
    expected_by_sort = {
        "date": [self.group1, self.group_p2, self.group2],
        "new": [self.group2, self.group_p2, self.group1],
        "freq": [self.group1, self.group_p2, self.group2],
        "trends": [self.group_p2, self.group2, self.group1],
        "user": [self.group1, self.group2, self.group_p2],
    }
    for sort_by, expected_order in expected_by_sort.items():
        results = self.make_query(projects, sort_by=sort_by)
        assert list(results) == expected_order
def test_in_syntax_is_invalid(self) -> None:
    """The bracketed `in` syntax is rejected for the `is:` filter."""
    query = "is:[unresolved, resolved]"
    with pytest.raises(InvalidSearchQuery, match='"in" syntax invalid for "is" search'):
        self.make_query(search_filter_query=query)
def test_first_release_any_or_no_environments(self) -> None:
    """`first_release:` with and without environment filters returns the
    group sets laid out in the scenario table below."""
    # test scenarios for tickets:
    # SEN-571
    # ISSUE-432
    # given the following setup:
    #
    # groups table:
    # group    first_release
    # A        1
    # B        1
    # C        2
    #
    # groupenvironments table:
    # group    environment    first_release
    # A        staging        1
    # A        production     2
    #
    # when querying by first release, the appropriate set of groups should be displayed:
    #
    #     first_release: 1
    #         env=[]: A, B
    #         env=[production, staging]: A
    #         env=[staging]: A
    #         env=[production]: nothing
    #
    #     first_release: 2
    #         env=[]: C
    #         env=[production, staging]: A
    #         env=[staging]: nothing
    #         env=[production]: A
    # create an issue/group whose events that occur in 2 distinct environments
    group_a_event_1 = self.store_event(
        data={
            "fingerprint": ["group_a"],
            "event_id": "aaa" + ("1" * 29),
            "environment": "example_staging",
            "release": "release_1",
        },
        project_id=self.project.id,
    )
    group_a_event_2 = self.store_event(
        data={
            "fingerprint": ["group_a"],
            "event_id": "aaa" + ("2" * 29),
            "environment": "example_production",
            "release": "release_2",
        },
        project_id=self.project.id,
    )
    group_a = group_a_event_1.group
    # get the environments for group_a
    prod_env = group_a_event_2.get_environment()
    staging_env = group_a_event_1.get_environment()
    # create an issue/group whose event that occur in no environments
    # but will be tied to release release_1
    group_b_event_1 = self.store_event(
        data={
            "fingerprint": ["group_b"],
            "event_id": "bbb" + ("1" * 29),
            "release": "release_1",
        },
        project_id=self.project.id,
    )
    assert group_b_event_1.get_environment().name == ""  # has no environment
    group_b = group_b_event_1.group
    # create an issue/group whose event that occur in no environments
    # but will be tied to release release_2
    group_c_event_1 = self.store_event(
        data={
            "fingerprint": ["group_c"],
            "event_id": "ccc" + ("1" * 29),
            "release": "release_2",
        },
        project_id=self.project.id,
    )
    assert group_c_event_1.get_environment().name == ""  # has no environment
    group_c = group_c_event_1.group
    # query by release release_1
    results = self.make_query(search_filter_query="first_release:release_1")
    assert set(results) == {group_a, group_b}
    results = self.make_query(
        environments=[staging_env, prod_env],
        search_filter_query="first_release:release_1",
    )
    assert set(results) == {group_a}
    results = self.make_query(
        environments=[staging_env], search_filter_query="first_release:release_1"
    )
    assert set(results) == {group_a}
    results = self.make_query(
        environments=[prod_env], search_filter_query="first_release:release_1"
    )
    assert set(results) == set()
    # query by release release_2
    results = self.make_query(search_filter_query="first_release:release_2")
    assert set(results) == {group_c}
    results = self.make_query(
        environments=[staging_env, prod_env],
        search_filter_query="first_release:release_2",
    )
    assert set(results) == {group_a}
    results = self.make_query(
        environments=[staging_env], search_filter_query="first_release:release_2"
    )
    assert set(results) == set()
    results = self.make_query(
        environments=[prod_env], search_filter_query="first_release:release_2"
    )
    assert set(results) == {group_a}
@pytest.mark.skip(reason="test runs far too slowly, causing timeouts atm.")
def test_all_fields_do_not_error(self) -> None:
    """Smoke test: every searchable field accepts a plausibly-typed value
    without raising schema/type errors. Currently skipped for slowness."""
    # Just a sanity check to make sure that all fields can be successfully
    # searched on without returning type errors and other schema related
    # issues.
    def test_query(query) -> None:
        self.make_query(search_filter_query=query)

    for key in SENTRY_SNUBA_MAP:
        if key in ["project.id", "issue.id", "performance.issue_ids", "status"]:
            continue
        test_query("has:%s" % key)
        test_query("!has:%s" % key)
        # Pick a value with a type appropriate to the key.
        if key == "error.handled":
            val: Any = 1
        elif key in issue_search_config.numeric_keys:
            val = "123"
        elif key in issue_search_config.date_keys:
            val = self.base_datetime.isoformat()
        elif key in issue_search_config.boolean_keys:
            val = "true"
        elif key in {"trace.span", "trace.parent_span"}:
            val = "abcdef1234abcdef"
            # NOTE(review): the negated query is only issued for the trace.*
            # and fallback branches — confirm whether numeric/date/boolean
            # keys were meant to get it too.
            test_query(f"!{key}:{val}")
        else:
            val = "abadcafedeadbeefdeaffeedabadfeed"
            test_query(f"!{key}:{val}")
        test_query(f"{key}:{val}")
def test_message_negation(self) -> None:
    """Repeating the same negated message query yields identical results."""
    self.store_event(
        data={
            "fingerprint": ["put-me-in-group1"],
            "event_id": "2" * 32,
            "message": "something",
            "timestamp": self.base_datetime.isoformat(),
        },
        project_id=self.project.id,
    )
    first_pass = self.make_query(search_filter_query="!message:else")
    second_pass = self.make_query(search_filter_query="!message:else")
    assert list(first_pass) == list(second_pass)
def test_error_main_thread_true(self) -> None:
    """`error.main_thread:1` matches a group whose exception thread is marked
    `main: True`."""
    myProject = self.create_project(
        name="Foo", slug="foo", teams=[self.team], fire_project_created=True
    )
    event = self.store_event(
        data={
            "event_id": "1" * 32,
            "message": "something",
            "timestamp": self.base_datetime.isoformat(),
            "exception": {
                "values": [
                    {
                        "type": "SyntaxError",
                        "value": "hello world",
                        "thread_id": 1,
                    },
                ],
            },
            "threads": {
                "values": [
                    {
                        "id": 1,
                        "main": True,
                    },
                ],
            },
        },
        project_id=myProject.id,
    )
    myGroup = event.groups[0]
    results = self.make_query(
        projects=[myProject],
        search_filter_query="error.main_thread:1",
        sort_by="date",
    )
    assert list(results) == [myGroup]
def test_error_main_thread_false(self) -> None:
    """`error.main_thread:0` matches a group whose exception thread is marked
    `main: False`."""
    myProject = self.create_project(
        name="Foo2", slug="foo2", teams=[self.team], fire_project_created=True
    )
    event = self.store_event(
        data={
            "event_id": "2" * 32,
            "message": "something",
            "timestamp": self.base_datetime.isoformat(),
            "exception": {
                "values": [
                    {
                        "type": "SyntaxError",
                        "value": "hello world",
                        "thread_id": 1,
                    },
                ],
            },
            "threads": {
                "values": [
                    {
                        "id": 1,
                        "main": False,
                    },
                ],
            },
        },
        project_id=myProject.id,
    )
    myGroup = event.groups[0]
    results = self.make_query(
        projects=[myProject],
        search_filter_query="error.main_thread:0",
        sort_by="date",
    )
    assert list(results) == [myGroup]
def test_error_main_thread_no_results(self) -> None:
    """`error.main_thread:1` matches nothing when the event's thread has no
    `main` flag at all."""
    myProject = self.create_project(
        name="Foo3", slug="foo3", teams=[self.team], fire_project_created=True
    )
    self.store_event(
        data={
            "event_id": "3" * 32,
            "message": "something",
            "timestamp": self.base_datetime.isoformat(),
            "exception": {
                "values": [
                    {
                        "type": "SyntaxError",
                        "value": "hello world",
                        "thread_id": 1,
                    },
                ],
            },
            "threads": {
                "values": [
                    {
                        "id": 1,
                    },
                ],
            },
        },
        project_id=myProject.id,
    )
    results = self.make_query(
        projects=[myProject],
        search_filter_query="error.main_thread:1",
        sort_by="date",
    )
    assert len(results) == 0
|
EventsSnubaSearchTestCases
|
python
|
getsentry__sentry
|
src/sentry/seer/fetch_issues/by_function_name.py
|
{
"start": 1077,
"end": 9538
}
|
class ____(TypedDict):
    """One row of the Snuba events query result: a group paired with its
    latest matching event."""

    # Primary key of the issue (Group).
    group_id: int
    # ID of the group's latest event within the queried time window.
    event_id: str
    # Title of that latest event.
    title: str
def _simple_function_name_conditions(function_names: list[str], stack_frame_idx: int) -> Condition:
    """Build a Snuba condition matching the stack frame at ``stack_frame_idx``.

    Selects ``exception_frames.function[stack_frame_idx]`` via ClickHouse's
    ``arrayElement`` and requires it to be one of ``function_names``.
    """
    frame_function = Function(
        "arrayElement",
        (Column("exception_frames.function"), stack_frame_idx),
    )
    return Condition(frame_function, Op.IN, function_names)
def _get_issues_for_file(
    projects: list[Project],
    sentry_filenames: list[str],
    function_names: list[str],
    event_timestamp_start: datetime,
    event_timestamp_end: datetime,
    max_num_issues_per_file: int = MAX_NUM_ISSUES_PER_FILE_DEFAULT,
    run_id: int | None = None,
) -> list[IssueFromSnuba]:
    """
    Fetch issues with their latest event if its stacktrace frames match the function names
    and file names.

    Candidate groups are first narrowed in Postgres (recent, unresolved/resolved,
    ordered by times_seen), then a two-level Snuba query picks each group's
    latest event and filters by stacktrace frame filename/function matches.
    Returns raw Snuba rows (group_id, event_id, title); empty list on error.
    """
    if not projects:
        return []
    # Fetch an initial, candidate set of groups.
    group_ids: list[int] = list(
        Group.objects.filter(
            first_seen__gte=datetime.now(UTC) - timedelta(weeks=26),
            last_seen__gte=event_timestamp_start,
            status__in=[GroupStatus.UNRESOLVED, GroupStatus.RESOLVED],
            project__in=projects,
        )
        .order_by("-times_seen")
        .values_list("id", flat=True)
    )[:OPEN_PR_MAX_RECENT_ISSUES]
    project_ids = [project.id for project in projects]
    # Fetch the latest event for each group, along with some other event data we'll need for
    # filtering by function names and file names.
    # argMax(col, timestamp) per group_id yields the value of `col` from the
    # most recent event of that group.
    subquery = (
        Query(Entity("events"))
        .set_select(
            [
                Column("group_id"),
                Function(
                    "argMax",
                    [Column("event_id"), Column("timestamp")],
                    "event_id",
                ),
                Function(
                    "argMax",
                    [Column("title"), Column("timestamp")],
                    "title",
                ),
                Function(
                    "argMax",
                    [Column("exception_frames.filename"), Column("timestamp")],
                    "exception_frames.filename",
                ),
                Function(
                    "argMax",
                    [Column("exception_frames.function"), Column("timestamp")],
                    "exception_frames.function",
                ),
            ]
        )
        .set_groupby(
            [
                Column("group_id"),
            ]
        )
        .set_where(
            [
                Condition(Column("project_id"), Op.IN, project_ids),
                Condition(Column("group_id"), Op.IN, group_ids),
                Condition(Column("timestamp"), Op.GTE, event_timestamp_start),
                Condition(Column("timestamp"), Op.LT, event_timestamp_end),
            ]
        )
    )
    # Filter out groups whose event's stacktrace doesn't match the function names and file names.
    # A group matches if ANY of the last STACKFRAME_COUNT frames (negative
    # indices address the array from the end) matches both a filename and a
    # function name.
    query = (
        Query(subquery)
        .set_select(
            [
                Column("group_id"),
                Column("event_id"),
                Column("title"),
            ]
        )
        .set_where(
            [
                BooleanCondition(
                    BooleanOp.OR,
                    [
                        BooleanCondition(
                            BooleanOp.AND,
                            [
                                Condition(
                                    Function(
                                        "arrayElement",
                                        (Column("exception_frames.filename"), stackframe_idx),
                                    ),
                                    Op.IN,
                                    sentry_filenames,
                                ),
                                _simple_function_name_conditions(function_names, stackframe_idx),
                            ],
                        )
                        for stackframe_idx in range(-STACKFRAME_COUNT, 0)  # first n frames
                    ],
                ),
            ]
        )
        .set_limit(max_num_issues_per_file)
    )
    request = SnubaRequest(
        dataset=Dataset.Events.value,
        app_id="default",
        tenant_ids={"organization_id": projects[0].organization_id},
        query=query,
    )
    try:
        return raw_snql_query(request, referrer=Referrer.SEER_RPC.value)["data"]
    except Exception:
        # Best-effort: log the failed query and degrade to an empty result
        # rather than propagating Snuba errors to the caller.
        logger.exception(
            "Seer fetch issues given patches Snuba query error",
            extra={"query": request.to_dict()["query"], "run_id": run_id},
        )
        return []
def _left_truncated_paths(filename: str, max_num_paths: int = 2) -> list[str]:
"""
Example::
paths = _left_truncated_paths("src/seer/automation/agent/client.py", 2)
assert paths == [
"seer/automation/agent/client.py",
"automation/agent/client.py",
]
"""
path = Path(filename)
parts = list(path.parts)
num_dirs = len(parts) - 1 # -1 for the filename
num_paths = min(max_num_paths, num_dirs)
result = []
for _ in range(num_paths):
parts.pop(0)
result.append(str(Path(*parts)))
return result
def _get_projects_and_filenames_from_source_file(
    org_id: int, repo_id: int, pr_filename: str, max_num_left_truncated_paths: int = 2
) -> tuple[set[Project], set[str]]:
    """Resolve a PR file path to candidate projects and Sentry-side filenames.

    Uses the repo's code mappings to translate ``pr_filename`` into the
    stacktrace paths Sentry stores, and also includes the raw path plus
    left-truncated variants to tolerate missing/imperfect mappings.
    """
    # Fetch the code mappings in which the source_root is a substring at the start of pr_filename
    # StrIndex is 1-based, so substring_match == 1 means source_root is a prefix.
    code_mappings = (
        RepositoryProjectPathConfig.objects.filter(
            organization_id=org_id,
            repository_id=repo_id,
        )
        .annotate(substring_match=StrIndex(Value(pr_filename), "source_root"))
        .filter(substring_match=1)
    )
    projects_set = {code_mapping.project for code_mapping in code_mappings}
    # Apply each mapping once, at the start of the path only (count=1).
    sentry_filenames = {
        pr_filename.replace(code_mapping.source_root, code_mapping.stack_root, 1)
        for code_mapping in code_mappings
    }
    # The code-mapped filenames alone aren't enough. They don't work for the seer app, for example.
    # We can tolerate potential false positives if downstream uses of this data filter
    # out irrelevant issues.
    sentry_filenames.add(pr_filename)
    sentry_filenames.update(_left_truncated_paths(pr_filename, max_num_left_truncated_paths))
    return projects_set, sentry_filenames
def _fetch_issues_from_repo_projects(
    repo_projects: utils.RepoProjects,
    filename: str,
    function_name: str,
    max_num_issues_per_file: int = MAX_NUM_ISSUES_PER_FILE_DEFAULT,
    run_id: int | None = None,
) -> list[Group]:
    """Return Groups whose stacktraces match ``filename``/``function_name``,
    newest last_seen first, within the last NUM_DAYS_AGO days."""
    event_timestamp_start = datetime.now(UTC) - timedelta(days=NUM_DAYS_AGO)
    event_timestamp_end = datetime.now(UTC)
    file_projects, sentry_filenames = _get_projects_and_filenames_from_source_file(
        repo_projects.organization_id, repo_projects.repo.id, filename
    )
    file_projects_list = list(file_projects)
    if not file_projects:
        # No code mapping matched the file; fall back to every project in the repo.
        logger.warning(
            "No projects found for file. Using all projects.",
            extra={"file": filename, "function_name": function_name, "run_id": run_id},
        )
        file_projects_list = repo_projects.projects
    issues = _get_issues_for_file(
        file_projects_list,
        list(sentry_filenames),
        [function_name],
        event_timestamp_start,
        event_timestamp_end,
        max_num_issues_per_file=max_num_issues_per_file,
        run_id=run_id,
    )
    # Re-fetch as ORM objects; ordering here (by -last_seen) replaces Snuba's row order.
    group_ids = [issue["group_id"] for issue in issues]
    return list(Group.objects.filter(id__in=group_ids).order_by("-last_seen"))
@utils.handle_fetch_issues_exceptions
def fetch_issues(
    organization_id: int,
    provider: str,
    external_id: str,
    filename: str,
    function_name: str,
    max_num_issues_per_file: int = MAX_NUM_ISSUES_PER_FILE_DEFAULT,
    run_id: int | None = None,
) -> utils.SeerResponse | utils.SeerResponseError:
    """
    Fetch issues containing an event w/ a stacktrace frame that matches the `filename` and `function_name`.

    Resolves the repo (by provider/external_id) and its projects, queries
    matching groups, and returns them serialized for Seer. Errors are turned
    into a SeerResponseError by the decorator.
    """
    repo_projects = utils.get_repo_and_projects(
        organization_id, provider, external_id, run_id=run_id
    )
    groups = _fetch_issues_from_repo_projects(
        repo_projects,
        filename,
        function_name,
        max_num_issues_per_file=max_num_issues_per_file,
        run_id=run_id,
    )
    return utils.bulk_serialize_for_seer(groups)
|
IssueFromSnuba
|
python
|
huggingface__transformers
|
src/transformers/models/detr/modeling_detr.py
|
{
"start": 55290,
"end": 62272
}
|
class ____(DetrPreTrainedModel):
def __init__(self, config: DetrConfig):
super().__init__(config)
# DETR encoder-decoder model
self.model = DetrModel(config)
# Object detection heads
self.class_labels_classifier = nn.Linear(
config.d_model, config.num_labels + 1
) # We add one for the "no object" class
self.bbox_predictor = DetrMLPPredictionHead(
input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
pixel_mask: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.FloatTensor] = None,
encoder_outputs: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[list[dict]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.FloatTensor], DetrObjectDetectionOutput]:
r"""
decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
Not used by default. Can be used to mask object queries.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
labels (`list[Dict]` of len `(batch_size,)`, *optional*):
Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
Examples:
```python
>>> from transformers import AutoImageProcessor, DetrForObjectDetection
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
>>> model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
>>> target_sizes = torch.tensor([image.size[::-1]])
>>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[
... 0
... ]
>>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
... box = [round(i, 2) for i in box.tolist()]
... print(
... f"Detected {model.config.id2label[label.item()]} with confidence "
... f"{round(score.item(), 3)} at location {box}"
... )
Detected remote with confidence 0.998 at location [40.16, 70.81, 175.55, 117.98]
Detected remote with confidence 0.996 at location [333.24, 72.55, 368.33, 187.66]
Detected couch with confidence 0.995 at location [-0.02, 1.15, 639.73, 473.76]
Detected cat with confidence 0.999 at location [13.24, 52.05, 314.02, 470.93]
Detected cat with confidence 0.999 at location [345.4, 23.85, 640.37, 368.72]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# First, sent images through DETR base model to obtain encoder + decoder outputs
outputs = self.model(
pixel_values,
pixel_mask=pixel_mask,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
# class logits + predicted bounding boxes
logits = self.class_labels_classifier(sequence_output)
pred_boxes = self.bbox_predictor(sequence_output).sigmoid()
loss, loss_dict, auxiliary_outputs = None, None, None
if labels is not None:
outputs_class, outputs_coord = None, None
if self.config.auxiliary_loss:
intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4]
outputs_class = self.class_labels_classifier(intermediate)
outputs_coord = self.bbox_predictor(intermediate).sigmoid()
loss, loss_dict, auxiliary_outputs = self.loss_function(
logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord
)
if not return_dict:
if auxiliary_outputs is not None:
output = (logits, pred_boxes) + auxiliary_outputs + outputs
else:
output = (logits, pred_boxes) + outputs
return ((loss, loss_dict) + output) if loss is not None else output
return DetrObjectDetectionOutput(
loss=loss,
loss_dict=loss_dict,
logits=logits,
pred_boxes=pred_boxes,
auxiliary_outputs=auxiliary_outputs,
last_hidden_state=outputs.last_hidden_state,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@auto_docstring(
custom_intro="""
DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top, for tasks
such as COCO panoptic.
"""
)
|
DetrForObjectDetection
|
python
|
huggingface__transformers
|
src/transformers/models/aria/modular_aria.py
|
{
"start": 55646,
"end": 60519
}
|
class ____(LlavaModel):
def __init__(self, config: AriaConfig):
super().__init__(config)
self.multi_modal_projector = AriaProjector(config)
def _create_patch_attention_mask(self, pixel_mask):
if pixel_mask is None:
return None
patches_subgrid = pixel_mask.unfold(
dimension=1,
size=self.vision_tower.config.patch_size,
step=self.vision_tower.config.patch_size,
)
patches_subgrid = patches_subgrid.unfold(
dimension=2,
size=self.vision_tower.config.patch_size,
step=self.vision_tower.config.patch_size,
)
return (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
def get_image_features(
self,
pixel_values: torch.FloatTensor,
pixel_mask: Optional[torch.FloatTensor] = None,
vision_feature_layer: int = -1,
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
pixel_mask (`torch.FloatTensor]`, *optional*):
The tensors corresponding to the input image mask.
vision_feature_layer (`Union[int, list[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
patch_attention_mask = self._create_patch_attention_mask(pixel_mask)
image_outputs = self.vision_tower(
pixel_values, patch_attention_mask=patch_attention_mask, output_hidden_states=True
)
image_attn_mask = None
if patch_attention_mask is not None:
flattened_mask = patch_attention_mask.flatten(1)
image_attn_mask = torch.logical_not(flattened_mask)
selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
image_features = self.multi_modal_projector(selected_image_feature, attn_mask=image_attn_mask)
return image_features
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, AriaModelOutputWithPast]:
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
# 2. Merge text and images
if pixel_values is not None and inputs_embeds.shape[1] != 1:
image_features = self.get_image_features(
pixel_values=pixel_values,
pixel_mask=pixel_mask,
vision_feature_layer=self.config.vision_feature_layer,
)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return AriaModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values if use_cache else None,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
@auto_docstring(
custom_intro="""
Aria model for conditional generation tasks.
This model combines a vision tower, a multi-modal projector, and a language model
to perform tasks that involve both image and text inputs.
"""
)
|
AriaModel
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/fs/s3.py
|
{
"start": 1327,
"end": 4818
}
|
class ____(Exception):
"""Raises when unable to sign a S3 request."""
def get_fs(conn_id: str | None, storage_options: dict[str, str] | None = None) -> AbstractFileSystem:
try:
from s3fs import S3FileSystem
except ImportError:
raise ImportError(
"Airflow FS S3 protocol requires the s3fs library, but it is not installed as it requires"
"aiobotocore. Please install the s3 protocol support library by running: "
"pip install apache-airflow-providers-amazon[s3fs]"
)
s3_hook = S3Hook(aws_conn_id=conn_id)
session = s3_hook.get_session(deferrable=True)
endpoint_url = s3_hook.conn_config.get_service_endpoint_url(service_name="s3")
config_kwargs: dict[str, Any] = s3_hook.conn_config.extra_config.get("config_kwargs", {})
config_kwargs.update(storage_options or {})
register_events: dict[str, Callable[[Properties], None]] = {}
s3_service_config = s3_hook.service_config
if signer := s3_service_config.get("signer", None):
log.info("Loading signer %s", signer)
if singer_func := SIGNERS.get(signer):
uri = s3_service_config.get("signer_uri", None)
token = s3_service_config.get("signer_token", None)
if not uri or not token:
raise ValueError(f"Signer {signer} requires uri and token")
properties: Properties = {
"uri": uri,
"token": uri,
}
singer_func_with_properties = partial(singer_func, properties)
register_events["before-sign.s3"] = singer_func_with_properties
# Disable the AWS Signer
config_kwargs["signature_version"] = UNSIGNED
else:
raise ValueError(f"Signer not available: {signer}")
if proxy_uri := s3_service_config.get(S3_PROXY_URI, None):
config_kwargs["proxies"] = {"http": proxy_uri, "https": proxy_uri}
anon = False
if asyncio.run(session.get_credentials()) is None:
log.info("No credentials found, using anonymous access")
anon = True
fs = S3FileSystem(session=session, config_kwargs=config_kwargs, endpoint_url=endpoint_url, anon=anon)
for event_name, event_function in register_events.items():
fs.s3.meta.events.register_last(event_name, event_function, unique_id=1925)
return fs
def s3v4_rest_signer(properties: Properties, request: AWSRequest, **_: Any) -> AWSRequest:
if "token" not in properties:
raise SignError("Signer set, but token is not available")
signer_url = properties["uri"].rstrip("/")
signer_headers = {"Authorization": f"Bearer {properties['token']}"}
signer_body = {
"method": request.method,
"region": request.context["client_region"],
"uri": request.url,
"headers": {key: [val] for key, val in request.headers.items()},
}
response = requests.post(f"{signer_url}/v1/aws/s3/sign", headers=signer_headers, json=signer_body)
try:
response.raise_for_status()
response_json = response.json()
except HTTPError as e:
raise SignError(f"Failed to sign request {response.status_code}: {signer_body}") from e
for key, value in response_json["headers"].items():
request.headers.add_header(key, ", ".join(value))
request.url = response_json["uri"]
return request
SIGNERS: dict[str, Callable[[Properties, AWSRequest], AWSRequest]] = {"S3V4RestSigner": s3v4_rest_signer}
|
SignError
|
python
|
django__django
|
tests/admin_views/tests.py
|
{
"start": 149818,
"end": 161216
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
cls.v1 = Villain.objects.create(name="Adam")
cls.v2 = Villain.objects.create(name="Sue")
cls.sv1 = SuperVillain.objects.create(name="Bob")
cls.pl1 = Plot.objects.create(
name="World Domination", team_leader=cls.v1, contact=cls.v2
)
cls.pl2 = Plot.objects.create(
name="World Peace", team_leader=cls.v2, contact=cls.v2
)
cls.pl3 = Plot.objects.create(
name="Corn Conspiracy", team_leader=cls.v1, contact=cls.v1
)
cls.pd1 = PlotDetails.objects.create(details="almost finished", plot=cls.pl1)
cls.sh1 = SecretHideout.objects.create(
location="underground bunker", villain=cls.v1
)
cls.sh2 = SecretHideout.objects.create(
location="floating castle", villain=cls.sv1
)
cls.ssh1 = SuperSecretHideout.objects.create(
location="super floating castle!", supervillain=cls.sv1
)
cls.cy1 = CyclicOne.objects.create(pk=1, name="I am recursive", two_id=1)
cls.cy2 = CyclicTwo.objects.create(pk=1, name="I am recursive too", one_id=1)
def setUp(self):
self.client.force_login(self.superuser)
def test_nesting(self):
"""
Objects should be nested to display the relationships that
cause them to be scheduled for deletion.
"""
pattern = re.compile(
r'<li>Plot: <a href="%s">World Domination</a>\s*<ul>\s*'
r'<li>Plot details: <a href="%s">almost finished</a>'
% (
reverse("admin:admin_views_plot_change", args=(self.pl1.pk,)),
reverse("admin:admin_views_plotdetails_change", args=(self.pd1.pk,)),
)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertRegex(response.text, pattern)
def test_cyclic(self):
"""
Cyclic relationships should still cause each object to only be
listed once.
"""
one = '<li>Cyclic one: <a href="%s">I am recursive</a>' % (
reverse("admin:admin_views_cyclicone_change", args=(self.cy1.pk,)),
)
two = '<li>Cyclic two: <a href="%s">I am recursive too</a>' % (
reverse("admin:admin_views_cyclictwo_change", args=(self.cy2.pk,)),
)
response = self.client.get(
reverse("admin:admin_views_cyclicone_delete", args=(self.cy1.pk,))
)
self.assertContains(response, one, 1)
self.assertContains(response, two, 1)
def test_perms_needed(self):
self.client.logout()
delete_user = User.objects.get(username="deleteuser")
delete_user.user_permissions.add(
get_perm(Plot, get_permission_codename("delete", Plot._meta))
)
self.client.force_login(self.deleteuser)
response = self.client.get(
reverse("admin:admin_views_plot_delete", args=(self.pl1.pk,))
)
self.assertContains(
response,
"your account doesn't have permission to delete the following types of "
"objects",
)
self.assertContains(response, "<li>plot details</li>")
def test_protected(self):
q = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q, answer="Because.")
a2 = Answer.objects.create(question=q, answer="Yes.")
response = self.client.get(
reverse("admin:admin_views_question_delete", args=(q.pk,))
)
self.assertContains(
response, "would require deleting the following protected related objects"
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Because.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a1.pk,)),
)
self.assertContains(
response,
'<li>Answer: <a href="%s">Yes.</a></li>'
% reverse("admin:admin_views_answer_change", args=(a2.pk,)),
)
def test_post_delete_protected(self):
"""
A POST request to delete protected objects should display the page
which says the deletion is prohibited.
"""
q = Question.objects.create(question="Why?")
Answer.objects.create(question=q, answer="Because.")
response = self.client.post(
reverse("admin:admin_views_question_delete", args=(q.pk,)), {"post": "yes"}
)
self.assertEqual(Question.objects.count(), 1)
self.assertContains(
response, "would require deleting the following protected related objects"
)
def test_restricted(self):
album = Album.objects.create(title="Amaryllis")
song = Song.objects.create(album=album, name="Unity")
response = self.client.get(
reverse("admin:admin_views_album_delete", args=(album.pk,))
)
self.assertContains(
response,
"would require deleting the following protected related objects",
)
self.assertContains(
response,
'<li>Song: <a href="%s">Unity</a></li>'
% reverse("admin:admin_views_song_change", args=(song.pk,)),
)
def test_post_delete_restricted(self):
album = Album.objects.create(title="Amaryllis")
Song.objects.create(album=album, name="Unity")
response = self.client.post(
reverse("admin:admin_views_album_delete", args=(album.pk,)),
{"post": "yes"},
)
self.assertEqual(Album.objects.count(), 1)
self.assertContains(
response,
"would require deleting the following protected related objects",
)
def test_not_registered(self):
should_contain = """<li>Secret hideout: underground bunker"""
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertContains(response, should_contain, 1)
def test_multiple_fkeys_to_same_model(self):
"""
If a deleted object has two relationships from another model,
both of those should be followed in looking for related
objects to delete.
"""
should_contain = '<li>Plot: <a href="%s">World Domination</a>' % reverse(
"admin:admin_views_plot_change", args=(self.pl1.pk,)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v1.pk,))
)
self.assertContains(response, should_contain)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v2.pk,))
)
self.assertContains(response, should_contain)
def test_multiple_fkeys_to_same_instance(self):
"""
If a deleted object has two relationships pointing to it from
another object, the other object should still only be listed
once.
"""
should_contain = '<li>Plot: <a href="%s">World Peace</a></li>' % reverse(
"admin:admin_views_plot_change", args=(self.pl2.pk,)
)
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.v2.pk,))
)
self.assertContains(response, should_contain, 1)
def test_inheritance(self):
"""
In the case of an inherited model, if either the child or
parent-model instance is deleted, both instances are listed
for deletion, as well as any relationships they have.
"""
should_contain = [
'<li>Villain: <a href="%s">Bob</a>'
% reverse("admin:admin_views_villain_change", args=(self.sv1.pk,)),
'<li>Super villain: <a href="%s">Bob</a>'
% reverse("admin:admin_views_supervillain_change", args=(self.sv1.pk,)),
"<li>Secret hideout: floating castle",
"<li>Super secret hideout: super floating castle!",
]
response = self.client.get(
reverse("admin:admin_views_villain_delete", args=(self.sv1.pk,))
)
for should in should_contain:
self.assertContains(response, should, 1)
response = self.client.get(
reverse("admin:admin_views_supervillain_delete", args=(self.sv1.pk,))
)
for should in should_contain:
self.assertContains(response, should, 1)
def test_generic_relations(self):
"""
If a deleted object has GenericForeignKeys pointing to it,
those objects should be listed for deletion.
"""
plot = self.pl3
tag = FunkyTag.objects.create(content_object=plot, name="hott")
should_contain = '<li>Funky tag: <a href="%s">hott' % reverse(
"admin:admin_views_funkytag_change", args=(tag.id,)
)
response = self.client.get(
reverse("admin:admin_views_plot_delete", args=(plot.pk,))
)
self.assertContains(response, should_contain)
def test_generic_relations_with_related_query_name(self):
"""
If a deleted object has GenericForeignKey with
GenericRelation(related_query_name='...') pointing to it, those objects
should be listed for deletion.
"""
bookmark = Bookmark.objects.create(name="djangoproject")
tag = FunkyTag.objects.create(content_object=bookmark, name="django")
tag_url = reverse("admin:admin_views_funkytag_change", args=(tag.id,))
should_contain = '<li>Funky tag: <a href="%s">django' % tag_url
response = self.client.get(
reverse("admin:admin_views_bookmark_delete", args=(bookmark.pk,))
)
self.assertContains(response, should_contain)
def test_delete_view_uses_get_deleted_objects(self):
"""The delete view uses ModelAdmin.get_deleted_objects()."""
book = Book.objects.create(name="Test Book")
response = self.client.get(
reverse("admin2:admin_views_book_delete", args=(book.pk,))
)
# BookAdmin.get_deleted_objects() returns custom text.
self.assertContains(response, "a deletable object")
@override_settings(ROOT_URLCONF="admin_views.urls")
|
AdminViewDeletedObjectsTest
|
python
|
ray-project__ray
|
python/ray/serve/schema.py
|
{
"start": 36788,
"end": 36938
}
|
class ____(str, Enum):
UPSCALING = "UPSCALING"
DOWNSCALING = "DOWNSCALING"
STABLE = "STABLE"
@PublicAPI(stability="alpha")
|
AutoscalingStatus
|
python
|
kamyu104__LeetCode-Solutions
|
Python/pascals-triangle-ii.py
|
{
"start": 1111,
"end": 1370
}
|
class ____(object):
# @return a list of integers
def getRow(self, rowIndex):
result = [1]
for i in range(1, rowIndex + 1):
result = [1] + [result[j - 1] + result[j] for j in xrange(1, i)] + [1]
return result
|
Solution2
|
python
|
huggingface__transformers
|
src/transformers/models/dia/modeling_dia.py
|
{
"start": 18225,
"end": 19542
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: DiaEncoderConfig, layer_idx: int):
super().__init__()
self.pre_sa_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
self.self_attention = DiaSelfAttention(config, layer_idx, is_causal=False)
self.post_sa_norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
self.mlp = DiaMLP(config)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
residual = hidden_states
normed_states = self.pre_sa_norm(hidden_states)
self_attn_output, self_attn_weights = self.self_attention(
normed_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = residual + self_attn_output
residual = hidden_states
normed_states = self.post_sa_norm(hidden_states)
mlp_out = self.mlp(normed_states)
hidden_states = residual + mlp_out
return hidden_states, self_attn_weights
|
DiaEncoderLayer
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/app/test_app01.py
|
{
"start": 333,
"end": 2222
}
|
class ____(unittest.TestCase):
"""
Test assembling a complete App file.
"""
def test_assemble_xml_file(self):
"""Test writing an App file."""
self.maxDiff = None
fh = StringIO()
app = App()
app._set_filehandle(fh)
app._add_part_name("Sheet1")
app._add_heading_pair(("Worksheets", 1))
app._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes">
<Application>Microsoft Excel</Application>
<DocSecurity>0</DocSecurity>
<ScaleCrop>false</ScaleCrop>
<HeadingPairs>
<vt:vector size="2" baseType="variant">
<vt:variant>
<vt:lpstr>Worksheets</vt:lpstr>
</vt:variant>
<vt:variant>
<vt:i4>1</vt:i4>
</vt:variant>
</vt:vector>
</HeadingPairs>
<TitlesOfParts>
<vt:vector size="1" baseType="lpstr">
<vt:lpstr>Sheet1</vt:lpstr>
</vt:vector>
</TitlesOfParts>
<Company>
</Company>
<LinksUpToDate>false</LinksUpToDate>
<SharedDoc>false</SharedDoc>
<HyperlinksChanged>false</HyperlinksChanged>
<AppVersion>12.0000</AppVersion>
</Properties>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
|
TestAssembleApp
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/parametertree/Parameter.py
|
{
"start": 36218,
"end": 36490
}
|
class ____(object):
def __init__(self, enterFn, exitFn):
self.enterFn = enterFn
self.exitFn = exitFn
def __enter__(self):
self.enterFn()
def __exit__(self, exc_type, exc_value, tb):
self.exitFn()
|
SignalBlocker
|
python
|
walkccc__LeetCode
|
solutions/2237. Count Positions on Street With Required Brightness/2237.py
|
{
"start": 0,
"end": 453
}
|
class ____:
def meetRequirement(
self,
n: int,
lights: list[list[int]],
requirement: list[int],
) -> int:
ans = 0
currBrightness = 0
change = [0] * (n + 1)
for position, rg in lights:
change[max(0, position - rg)] += 1
change[min(n, position + rg + 1)] -= 1
for i in range(n):
currBrightness += change[i]
if currBrightness >= requirement[i]:
ans += 1
return ans
|
Solution
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/patreon/views.py
|
{
"start": 326,
"end": 2335
}
|
class ____(OAuth2Adapter):
provider_id = PROVIDER_ID
access_token_url = "https://www.patreon.com/api/oauth2/token" # nosec
authorize_url = "https://www.patreon.com/oauth2/authorize"
profile_url = "{0}/{1}".format(
API_URL,
(
"identity?include=memberships&fields%5Buser%5D=email,first_name,"
"full_name,image_url,last_name,social_connections,"
"thumb_url,url,vanity"
if USE_API_V2
else "current_user"
),
)
def complete_login(self, request, app, token, **kwargs):
resp = (
get_adapter()
.get_requests_session()
.get(
self.profile_url,
headers={"Authorization": "Bearer " + token.token},
)
)
extra_data = resp.json().get("data")
if USE_API_V2:
# Extract tier/pledge level for Patreon API v2:
try:
member_id = extra_data["relationships"]["memberships"]["data"][0]["id"]
member_url = (
"{0}/members/{1}?include="
"currently_entitled_tiers&fields%5Btier%5D=title"
).format(API_URL, member_id)
resp_member = (
get_adapter()
.get_requests_session()
.get(
member_url,
headers={"Authorization": "Bearer " + token.token},
)
)
pledge_title = resp_member.json()["included"][0]["attributes"]["title"]
extra_data["pledge_level"] = pledge_title
except (KeyError, IndexError):
extra_data["pledge_level"] = None
pass
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(PatreonOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(PatreonOAuth2Adapter)
|
PatreonOAuth2Adapter
|
python
|
urllib3__urllib3
|
test/with_dummyserver/test_proxy_poolmanager.py
|
{
"start": 27215,
"end": 28048
}
|
class ____(IPv6HypercornDummyProxyTestCase):
@classmethod
def setup_class(cls) -> None:
super().setup_class()
cls.http_url = f"http://{cls.http_host}:{int(cls.http_port)}"
cls.http_url_alt = f"http://{cls.http_host_alt}:{int(cls.http_port)}"
cls.https_url = f"https://{cls.https_host}:{int(cls.https_port)}"
cls.https_url_alt = f"https://{cls.https_host_alt}:{int(cls.https_port)}"
cls.proxy_url = f"http://[{cls.proxy_host}]:{int(cls.proxy_port)}"
def test_basic_ipv6_proxy(self) -> None:
with proxy_from_url(self.proxy_url, ca_certs=DEFAULT_CA) as http:
r = http.request("GET", f"{self.http_url}/")
assert r.status == 200
r = http.request("GET", f"{self.https_url}/")
assert r.status == 200
|
TestIPv6HTTPProxyManager
|
python
|
keras-team__keras
|
keras/src/dtype_policies/dtype_policy_test.py
|
{
"start": 27855,
"end": 29372
}
|
class ____(test_case.TestCase):
"""Test error handling in GPTQConfig."""
def test_invalid_weight_bits(self):
with self.assertRaisesRegex(ValueError, "Unsupported weight_bits"):
GPTQConfig(
dataset=None,
tokenizer=None,
weight_bits=5,
)
def test_negative_num_samples(self):
with self.assertRaisesRegex(
ValueError, "num_samples must be a positive integer."
):
GPTQConfig(
dataset=None,
tokenizer=None,
num_samples=-10,
)
def test_zero_sequence_length(self):
with self.assertRaisesRegex(
ValueError, "sequence_length must be a positive integer."
):
GPTQConfig(
dataset=None,
tokenizer=None,
sequence_length=0,
)
def test_invalid_hessian_damping(self):
with self.assertRaisesRegex(
ValueError, "hessian_damping must be between 0 and 1."
):
GPTQConfig(
dataset=None,
tokenizer=None,
hessian_damping=1.5,
)
def test_invalid_group_size(self):
with self.assertRaisesRegex(
ValueError, "Invalid group_size. Supported values are -1"
):
GPTQConfig(
dataset=None,
tokenizer=None,
group_size=0,
)
|
GPTQConfigErrorHandlingTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/resource_variable_ops.py
|
{
"start": 14761,
"end": 63183
}
|
class ____(variables.Variable, core.Tensor):
"""A python variable from an existing handle."""
# TODO(wangpeng): Deprecate `constraint` when callers no long pass it in.
def __init__( # pylint: disable=super-init-not-called
self,
trainable=None,
shape=None,
dtype=None,
handle=None,
constraint=None,
synchronization=None,
aggregation=None,
distribute_strategy=None,
name=None,
unique_id=None,
handle_name=None,
graph_element=None,
initial_value=None,
initializer_op=None,
is_initialized_op=None,
cached_value=None,
save_slice_info=None,
caching_device=None,
in_graph_mode=None,
validate_shape=True,
**unused_kwargs):
"""Creates a variable from a handle.
Args:
trainable: If `True`, GradientTapes automatically watch uses of this
Variable.
shape: The variable's shape. This shape can be set to tf.TensorShape(None)
in order to assign values of different shapes to this variable.
Otherwise (i.e. if the shape is fully determined), it will trigger run
time checks to ensure that each assignment is of the same shape.
dtype: The variable's dtype.
handle: The variable's handle
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value (which must have
the same shape). Constraints are not safe to use when doing asynchronous
distributed training.
synchronization: Indicates when a distributed a variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses when to
synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
distribute_strategy: The distribution strategy this variable was created
under.
name: The name for this variable.
unique_id: Internal. Unique ID for this variable's handle.
handle_name: The name for the variable's handle.
graph_element: Optional, required only in session.run-mode. Pre-created
tensor which reads this variable's value.
initial_value: Optional. Variable's initial value.
initializer_op: Operation which assigns the variable's initial value.
is_initialized_op: Pre-created operation to check whether this variable is
initialized.
cached_value: Pre-created operation to read this variable in a specific
device.
save_slice_info: Metadata for variable partitioning.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
in_graph_mode: whether we are executing in TF1 graph mode. If None, will
detect within the function. This is to avoid repeated init_scope()
context entrances which can add up.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
"""
if in_graph_mode is None:
with ops.init_scope():
self._in_graph_mode = not context.executing_eagerly()
else:
self._in_graph_mode = in_graph_mode
synchronization, aggregation, trainable = (
variables.validate_synchronization_aggregation_trainable(
synchronization, aggregation, trainable, name))
self._trainable = trainable
self._synchronization = synchronization
self._aggregation = aggregation
self._save_slice_info = save_slice_info
self._initial_value = initial_value
self._initializer_op = initializer_op
self._is_initialized_op = is_initialized_op
self._graph_element = graph_element
self._caching_device = caching_device
self._cached_value = cached_value
self._distribute_strategy = distribute_strategy
# Store the graph key so optimizers know how to only retrieve variables from
# this graph. Guaranteed to be the same as the eager graph_key.
self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
self._shape = tensor_shape.as_shape(shape)
self._dtype = dtypes.as_dtype(dtype)
self._handle = handle
self._unique_id = unique_id
if handle_name is None:
self._handle_name = "Variable:0"
else:
self._handle_name = handle_name + ":0"
self._constraint = constraint
self._cached_shape_as_list = None
self._validate_shape = validate_shape
self._xla_sharding = None
self._variable_read = False
  def _get_xla_sharding(self):
    """Returns the xla.OpSharding proto set via `_set_xla_sharding`, or None."""
    return self._xla_sharding
def _set_xla_sharding(self, xla_sharding):
"""Annotates this `ResourceVariable` with `xla_sharding`.
`xla_sharding` will be used to create an `XlaShardingOp` whenever a
`ReadVariableOp` is created.
Args:
xla_sharding: The xla.OpSharding proto to annotate this ResourceVariable
with.
"""
if self._variable_read and not context.executing_eagerly():
logging.warning(
"This variable (%s) has already been read (ie. a ReadVariableOp has"
" already been generated) and a new XlaShardingOp using this sharding"
" will not be created unless it is read again. If that's not possible"
", please set the XLA sharding before reading the variable.",
self.name,
)
self._xla_sharding = xla_sharding
def __repr__(self):
if context.executing_eagerly() and not self._in_graph_mode:
# If we cannot read the value for any reason (e.g. variable uninitialized
# during tf.function tracing), still produce a __repr__. Note that for
# async eager, errors due to uninitialized variables will raise in
# ops.value_text when the handle is resolved, so we need to keep that
# under the try...except if we want to suppress them.
try:
with ops.device(self.device):
value_text = ops.value_text(self.read_value(), is_repr=True)
except: # pylint: disable=bare-except
value_text = "numpy=<unavailable>"
return "<tf.Variable '%s' shape=%s dtype=%s, %s>" % (
self.name, self.get_shape(), self.dtype.name, value_text)
else:
return "<tf.Variable '%s' shape=%s dtype=%s>" % (
self.name, self.get_shape(), self.dtype.name)
def __tf_tracing_type__(self, signature_context):
alias_id = signature_context.alias_global_id(self._handle._id) # pylint:disable=protected-access
# TODO(xjun): Create variable placeholders directly from VariableSpec
# without using original values.
signature_context.add_placeholder(alias_id, self)
return VariableSpec(shape=self.shape,
dtype=self.dtype,
trainable=self.trainable,
alias_id=alias_id)
  @contextlib.contextmanager
  def _assign_dependencies(self):
    """Makes assignments depend on the cached value, if any.

    This prevents undefined behavior with reads not ordered wrt writes: any
    op created inside this context is given a control dependency on the
    cached read, so the two cannot be reordered.

    Yields:
      None.
    """
    if self._cached_value is not None:
      with ops.control_dependencies([self._cached_value]):
        yield
    else:
      # No cached value: nothing to order against.
      yield
def __array__(self, dtype=None):
"""Allows direct conversion to a numpy array.
>>> np.array(tf.Variable([1.0]))
array([1.], dtype=float32)
Returns:
The variable value as a numpy array.
"""
# You can't return `self.numpy()` here because for scalars
# that raises:
# ValueError: object __array__ method not producing an array
# Even `self.read_value().__array__()` and `self.read_value()._numpy()` give
# the same error. The `EagerTensor` class must be doing something behind the
# scenes to make `np.array(tf.constant(1))` work.
return numpy_compat.np_asarray(self.numpy(), dtype=dtype)
  def __nonzero__(self):
    # Python 2 truthiness protocol; delegate to __bool__.
    return self.__bool__()

  def __bool__(self):
    # Truthiness of the variable's current value (performs a read).
    return bool(self.read_value())

  def __copy__(self):
    # A shallow copy would share the underlying resource anyway, so return
    # self unchanged.
    return self
  def __deepcopy__(self, memo):
    """Deep-copies by creating a new variable initialized to the same value.

    Only supported with eager execution, because the copy's initial value is
    obtained by reading this variable.

    Args:
      memo: Standard `copy.deepcopy` memo dict; the copy is recorded under
        this variable's unique id so shared references are preserved.

    Returns:
      A new `ResourceVariable` with the same value, trainability, constraint,
      dtype, shared name, distribution strategy, synchronization and
      aggregation.

    Raises:
      NotImplementedError: If called while executing graphs.
    """
    if not context.executing_eagerly():
      raise NotImplementedError(
          "__deepcopy__() is only available when eager execution is enabled.")
    copied_variable = ResourceVariable(
        initial_value=self.read_value(),
        trainable=self._trainable,
        constraint=self._constraint,
        dtype=self._dtype,
        name=self._shared_name,
        distribute_strategy=self._distribute_strategy,
        synchronization=self.synchronization,
        aggregation=self.aggregation)
    memo[self._unique_id] = copied_variable
    return copied_variable
  @property
  def dtype(self):
    """The `DType` of the values stored in this variable."""
    return self._dtype

  @property
  def device(self):
    """The device this variable's handle is on."""
    return self.handle.device

  @property
  def graph(self):
    """The `Graph` of this variable (the graph of its handle)."""
    return self.handle.graph

  @property
  def name(self):
    """The name of the handle for this variable (always suffixed ":0")."""
    return self._handle_name

  @property
  def shape(self):
    """The static `TensorShape` of this variable."""
    return self._shape
  def set_shape(self, shape):
    # Refine the static shape; merge_with raises if `shape` is incompatible
    # with the currently known shape.
    self._shape = self._shape.merge_with(shape)

  def _shape_as_list(self):
    # Static shape as a list of ints (None per unknown dimension), or None
    # when the rank itself is unknown.
    if self.shape.ndims is None:
      return None
    return [dim.value for dim in self.shape.dims]

  def _shape_tuple(self):
    # Tuple form of _shape_as_list (None when the rank is unknown).
    shape = self._shape_as_list()
    if shape is None:
      return None
    return tuple(shape)
  @property
  def create(self):
    """The op responsible for initializing this variable (graph mode only)."""
    if not self._in_graph_mode:
      # Eager variables have no initializer op to return.
      raise RuntimeError("This operation is not supported "
                         "when eager execution is enabled.")
    return self._initializer_op

  @property
  def handle(self):
    """The resource handle tensor by which this variable is accessed."""
    return self._handle
  def value(self):
    """A cached operation which reads the value of this variable."""
    if self._cached_value is not None:
      return self._cached_value
    # Drop any existing colocation constraint so the read op can be placed
    # freely rather than being pinned next to the variable.
    with ops.colocate_with(None, ignore_existing=True):
      return self._read_variable_op()
  def _as_graph_element(self):
    """Conversion function for Graph.as_graph_element()."""
    return self._graph_element

  @property
  def initializer(self):
    """The op responsible for initializing this variable."""
    return self._initializer_op

  @property
  def initial_value(self):
    """Returns the Tensor used as the initial value for the variable.

    Only available in graph mode; the initial value is not retained as a
    tensor under eager execution.
    """
    if context.executing_eagerly():
      raise RuntimeError("This property is not supported "
                         "when eager execution is enabled.")
    return self._initial_value

  @property
  def constraint(self):
    """Returns the constraint function associated with this variable.

    Returns:
      The constraint function that was passed to the variable constructor.
      Can be `None` if no constraint was passed.
    """
    return self._constraint
  @property
  def op(self) -> ops.Operation:
    """The op for this variable (the op producing its handle)."""
    return self.handle.op

  @property
  def trainable(self):
    """Whether this variable was marked trainable at construction."""
    return self._trainable

  @property
  def synchronization(self):
    """The synchronization setting passed at construction."""
    return self._synchronization

  @property
  def aggregation(self):
    """The aggregation setting passed at construction."""
    return self._aggregation
  def eval(self, session=None):
    """Evaluates and returns the value of this variable.

    Args:
      session: The session in which to evaluate (defaults to the default
        session).

    Raises:
      RuntimeError: If called while executing eagerly.
    """
    if context.executing_eagerly():
      raise RuntimeError("This operation is not supported "
                         "when eager execution is enabled.")
    return self._graph_element.eval(session=session)
def numpy(self):
if context.executing_eagerly():
return self.read_value().numpy()
raise NotImplementedError(
"numpy() is only available when eager execution is enabled.")
  @deprecated(None, "Prefer Dataset.range instead.")
  def count_up_to(self, limit):
    """Increments this variable until it reaches `limit`.

    When that Op is run it tries to increment the variable by `1`. If
    incrementing the variable would bring it above `limit` then the Op raises
    the exception `OutOfRangeError`.

    If no error is raised, the Op outputs the value of the variable before
    the increment.

    Args:
      limit: value at which incrementing the variable raises an error.

    Returns:
      A `Tensor` that will hold the variable value before the increment. If no
      other Op modifies this variable, the values produced will all be
      distinct.
    """
    return gen_state_ops.resource_count_up_to(
        self.handle, limit=limit, T=self.dtype)
  def _copy_trackable_to_cpu(self, object_map):
    """For implementing `Trackable`.

    Ensures `object_map` maps this variable to a CPU-resident copy, then
    assigns the current value into that copy.
    """
    if self not in object_map:
      # If not populated, initialize the cpu copy first.
      op_device = pydev.DeviceSpec.from_string(self.device).replace(
          device_type="CPU", device_index=0).to_string()
      with ops.device(op_device):
        # Use `op_device` to prevent cross-device communication for variables
        # like `ShardedVariable`
        new_var = UninitializedVariable(
            trainable=self.trainable,
            shape=self.shape,
            dtype=self.dtype,
            name=self._shared_name)  # pylint: disable=protected-access
      object_map[self] = new_var

    # Then copy value of self to the copy.
    destination_var = object_map[self]
    with ops.device(destination_var.device):
      # Use `op_device` to prevent cross-device communication for variables
      # like `ShardedVariable`
      destination_var.assign(self.read_value())
def _export_to_saved_model_graph(self, object_map=None, tensor_map=None,
options=None, **kwargs):
"""For implementing `Trackable`."""
new_variable = None
if options.experimental_variable_policy._save_variable_devices(): # pylint:disable=protected-access
with ops.device(self.device):
new_variable = copy_to_graph_uninitialized(self)
else:
new_variable = copy_to_graph_uninitialized(self)
object_map[self] = new_variable
tensor_map[self.handle] = new_variable.handle
return [self.handle]
  def _serialize_to_tensors(self):
    """Implements Trackable._serialize_to_tensors."""

    def _read_variable_closure():
      # Closure so the read happens when the Callable is invoked (presumably
      # at save time), not when this method runs.
      v = self
      with ops.device(v.device):
        if context.executing_eagerly() and not v.is_initialized():
          # A SaveSpec tensor value of `None` indicates that the variable is
          # uninitialized.
          return None
        # Read the variable without making a copy to limit memory usage.
        x = v.read_value_no_copy()
        # To allow variables placed on non-CPU devices to be checkpointed,
        # we copy them to CPU on the same machine first.
        with ops.device("/device:CPU:0"):
          return array_ops.identity(x)

    return {
        trackable.VARIABLE_VALUE_KEY:
            tensor_callable.Callable(
                _read_variable_closure, dtype=self.dtype, device=self.device)
    }
  def _restore_from_tensors(self, restored_tensors):
    """Implements Trackable._restore_from_tensors.

    Assigns the checkpointed value back into this variable, re-raising shape
    mismatches with a message that names the variable.
    """
    with ops.device(self.device):
      restored_tensor = array_ops.identity(
          restored_tensors[trackable.VARIABLE_VALUE_KEY])
      try:
        assigned_variable = shape_safe_assign_variable_handle(
            self.handle, self.shape, restored_tensor)
      except ValueError as e:
        raise ValueError(
            f"Received incompatible tensor with shape {restored_tensor.shape} "
            f"when attempting to restore variable with shape {self.shape} "
            f"and name {self.name}.") from e
      return assigned_variable
  def _read_variable_op(self, no_copy=False):
    """Reads the value of the variable.

    If the variable is in copy-on-read mode and `no_copy` is True, the variable
    is converted to copy-on-write mode before it is read.

    Args:
      no_copy: Whether to prevent a copy of the variable.

    Returns:
      The value of the variable.
    """
    variable_accessed(self)
    # Remember that a read was generated so _set_xla_sharding can warn when a
    # sharding is attached after reads already exist.
    self._variable_read = True

    def read_and_set_handle(no_copy):
      if no_copy and forward_compat.forward_compatible(2022, 5, 3):
        gen_resource_variable_ops.disable_copy_on_read(self.handle)
      result = gen_resource_variable_ops.read_variable_op(
          self.handle, self._dtype)
      _maybe_set_handle_data(self._dtype, self.handle, result)
      return result

    if getattr(self, "_caching_device", None) is not None:
      # Place the read on the caching device, ignoring existing colocation.
      with ops.colocate_with(None, ignore_existing=True):
        with ops.device(self._caching_device):
          result = read_and_set_handle(no_copy)
    else:
      result = read_and_set_handle(no_copy)

    if not context.executing_eagerly():
      # Note that if a control flow context is active the input of the read op
      # might not actually be the handle. This line bypasses it.
      record.record_operation(
          "ReadVariableOp", [result], [self.handle],
          backward_function=lambda x: [x],
          forward_function=lambda x: [x])

    # Create an XlaShardingOp if this ResourceVariable is annotated with an XLA
    # sharding i.e. the _xla_sharding field is set. Please see the design at
    # http://shortn/_RGoruJpzrv for more details.
    if (
        context.xla_sharding_for_resource_variables_enabled()
        and not context.executing_eagerly()
        and self._xla_sharding is not None
    ):
      sharding_string = self._xla_sharding.SerializeToString()
      with ops.colocate_with(result):
        result = gen_xla_ops.xla_sharding(result, sharding=sharding_string)
        # pylint: disable=protected-access
        result.op._set_attr(
            "_XlaSharding",
            attr_value_pb2.AttrValue(s=sharding_string),
        )
    return result
def read_value(self):
"""Constructs an op which reads the value of this variable.
Should be used when there are multiple reads, or when it is desirable to
read the value only after some condition is true.
Returns:
The value of the variable.
"""
with ops.name_scope("Read"):
value = self._read_variable_op()
# Return an identity so it can get placed on whatever device the context
# specifies instead of the device where the variable is.
return array_ops.identity(value)
def read_value_no_copy(self):
"""Constructs an op which reads the value of this variable without copy.
The variable is read without making a copy even when it has been sparsely
accessed. Variables in copy-on-read mode will be converted to copy-on-write
mode.
Returns:
The value of the variable.
"""
with ops.name_scope("Read"):
value = self._read_variable_op(no_copy=True)
# Return an identity so it can get placed on whatever device the context
# specifies instead of the device where the variable is.
return array_ops.identity(value)
  def sparse_read(self, indices, name=None):
    """Reads the value of this variable sparsely, using `gather`.

    Args:
      indices: The indices to gather.
      name: Optional name for the operation.

    Returns:
      A tensor of gathered values.
    """
    with ops.name_scope("Gather" if name is None else name) as name:
      variable_accessed(self)
      value = gen_resource_variable_ops.resource_gather(
          self.handle, indices, dtype=self._dtype, name=name)

      if self._dtype == dtypes.variant:
        # For DT_VARIANT types, the handle's shape_and_type[1:] stores the
        # variant's handle data. Extract it.
        handle_data = get_eager_safe_handle_data(self.handle)
        if handle_data.is_set and len(handle_data.shape_and_type) > 1:
          value._handle_data = (  # pylint: disable=protected-access
              cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData(
                  is_set=True, shape_and_type=handle_data.shape_and_type[1:]))
        return array_ops.identity(value)
    return value
def gather_nd(self, indices, name=None):
"""Reads the value of this variable sparsely, using `gather_nd`."""
with ops.name_scope("GatherNd" if name is None else name) as name:
if self.trainable:
variable_accessed(self)
value = gen_resource_variable_ops.resource_gather_nd(
self.handle, indices, dtype=self._dtype, name=name)
return array_ops.identity(value)
  def to_proto(self, export_scope=None):
    """Converts a `ResourceVariable` to a `VariableDef` protocol buffer.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Raises:
      RuntimeError: If run in EAGER mode.

    Returns:
      A `VariableDef` protocol buffer, or `None` if the `Variable` is not
      in the specified name scope.
    """
    if context.executing_eagerly():
      raise RuntimeError("This operation is not supported "
                         "when eager execution is enabled.")
    if export_scope is None or self.handle.name.startswith(export_scope):
      var_def = variable_pb2.VariableDef()
      var_def.variable_name = ops.strip_name_scope(self.handle.name,
                                                   export_scope)
      if self._initial_value is not None:
        # This is inside an if-statement for backwards compatibility, since
        # self._initial_value might be None for variables constructed from old
        # protos.
        var_def.initial_value_name = ops.strip_name_scope(
            self._initial_value.name, export_scope)
      var_def.initializer_name = ops.strip_name_scope(self.initializer.name,
                                                      export_scope)
      if self._cached_value is not None:
        var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name,
                                                     export_scope)
      else:
        # Store the graph_element here
        var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name,
                                                     export_scope)
      var_def.is_resource = True
      var_def.trainable = self.trainable
      var_def.synchronization = self.synchronization.value
      var_def.aggregation = self.aggregation.value
      # Partitioned variables also record how this piece maps into the full
      # variable.
      if self._save_slice_info:
        var_def.save_slice_info_def.MergeFrom(
            self._save_slice_info.to_proto(export_scope=export_scope))
      return var_def
    else:
      return None
  @staticmethod
  def from_proto(variable_def, import_scope=None):
    """Returns a `ResourceVariable` built from `variable_def` (graph mode only)."""
    if context.executing_eagerly():
      raise RuntimeError("This operation is not supported "
                         "when eager execution is enabled.")
    return ResourceVariable(
        variable_def=variable_def, import_scope=import_scope)
__array_priority__ = 100
  def is_initialized(self, name=None):
    """Checks whether a resource variable has been initialized.

    Outputs boolean scalar indicating whether the tensor has been initialized.

    Args:
      name: A name for the operation (optional).

    Returns:
      A `Tensor` of type `bool`.
    """
    return gen_resource_variable_ops.var_is_initialized_op(self.handle, name)
  def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
    """Subtracts a value from this variable.

    Args:
      delta: A `Tensor`. The value to subtract from this variable.
      use_locking: If `True`, use locking during the operation.
      name: The name to use for the operation.
      read_value: A `bool`. Whether to read and return the new value of the
        variable or not.

    Returns:
      If `read_value` is `True`, this method will return the new value of the
      variable after the assignment has completed. Otherwise, when in graph mode
      it will return the `Operation` that does the assignment, and when in eager
      mode it will return `None`.
    """
    # TODO(apassos): this here and below is not atomic. Consider making it
    # atomic if there's a way to do so without a performance cost for those who
    # don't need it.
    # _assign_dependencies orders this write after any cached read.
    with _handle_graph(self.handle), self._assign_dependencies():
      assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(
          self.handle,
          ops.convert_to_tensor(delta, dtype=self.dtype),
          name=name)
      if read_value:
        return self._lazy_read(assign_sub_op)
      return assign_sub_op
  def assign_add(self, delta, use_locking=None, name=None, read_value=True):
    """Adds a value to this variable.

    Args:
      delta: A `Tensor`. The value to add to this variable.
      use_locking: If `True`, use locking during the operation.
      name: The name to use for the operation.
      read_value: A `bool`. Whether to read and return the new value of the
        variable or not.

    Returns:
      If `read_value` is `True`, this method will return the new value of the
      variable after the assignment has completed. Otherwise, when in graph mode
      it will return the `Operation` that does the assignment, and when in eager
      mode it will return `None`.
    """
    # _assign_dependencies orders this write after any cached read.
    with _handle_graph(self.handle), self._assign_dependencies():
      assign_add_op = gen_resource_variable_ops.assign_add_variable_op(
          self.handle,
          ops.convert_to_tensor(delta, dtype=self.dtype),
          name=name)
      if read_value:
        return self._lazy_read(assign_add_op)
      return assign_add_op
  def _lazy_read(self, op):
    """Wraps an assignment op so its resulting value is read lazily.

    Args:
      op: The mutating operation to record as the parent op.

    Returns:
      An `_UnreadVariable` tied to this variable's handle, with `op` as its
      parent op.
    """
    variable_accessed(self)
    return _UnreadVariable(
        handle=self.handle,
        dtype=self.dtype,
        shape=self._shape,
        in_graph_mode=self._in_graph_mode,
        parent_op=op,
        unique_id=self._unique_id)
def assign(self, value, use_locking=None, name=None, read_value=True):
"""Assigns a new value to this variable.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
name: The name to use for the assignment.
read_value: A `bool`. Whether to read and return the new value of the
variable or not.
Returns:
If `read_value` is `True`, this method will return the new value of the
variable after the assignment has completed. Otherwise, when in graph mode
it will return the `Operation` that does the assignment, and when in eager
mode it will return `None`.
"""
# Note: not depending on the cached value here since this can be used to
# initialize the variable.
with _handle_graph(self.handle):
value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)
if not self._shape.is_compatible_with(value_tensor.shape):
if self.name is None:
tensor_name = ""
else:
tensor_name = " " + str(self.name)
raise ValueError(
(f"Cannot assign value to variable '{tensor_name}': Shape mismatch."
f"The variable shape {self._shape}, and the "
f"assigned value shape {value_tensor.shape} are incompatible."))
kwargs = {}
if forward_compat.forward_compatible(2022, 3, 23):
# If the shape is fully defined, we do a runtime check with the shape of
# value.
validate_shape = self._validate_shape and self._shape.is_fully_defined()
kwargs["validate_shape"] = validate_shape
assign_op = gen_resource_variable_ops.assign_variable_op(
self.handle, value_tensor, name=name, **kwargs)
if read_value:
return self._lazy_read(assign_op)
return assign_op
  def __reduce__(self):
    """Supports pickling by re-creating the variable from its current value.

    The implementation mirrors that of `__deepcopy__` (and likewise requires
    eager execution, since it reads the value via `numpy()`).
    """
    return functools.partial(
        ResourceVariable,
        initial_value=self.numpy(),
        trainable=self.trainable,
        name=self._shared_name,
        dtype=self.dtype,
        constraint=self.constraint,
        distribute_strategy=self._distribute_strategy), ()
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
"""Subtracts `tf.IndexedSlices` from this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be subtracted from this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_sub(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_add(self, sparse_delta, use_locking=False, name=None):
"""Adds `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be added to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_add(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_max(self, sparse_delta, use_locking=False, name=None):
"""Updates this variable with the max of `tf.IndexedSlices` and itself.
Args:
sparse_delta: `tf.IndexedSlices` to use as an argument of max with this
variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_max(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_min(self, sparse_delta, use_locking=False, name=None):
"""Updates this variable with the min of `tf.IndexedSlices` and itself.
Args:
sparse_delta: `tf.IndexedSlices` to use as an argument of min with this
variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_min(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
"""Multiply this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to multiply this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_mul(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_div(self, sparse_delta, use_locking=False, name=None):
"""Divide this variable by `tf.IndexedSlices`.
Args:
sparse_delta: `tf.IndexedSlices` to divide this variable by.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_div(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `tf.IndexedSlices` to this variable.
Args:
sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
gen_resource_variable_ops.resource_scatter_update(
self.handle,
sparse_delta.indices,
ops.convert_to_tensor(sparse_delta.values, self.dtype),
name=name))
def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `tf.IndexedSlices` to this variable batch-wise.
Analogous to `batch_gather`. This assumes that this variable and the
sparse_delta IndexedSlices have a series of leading dimensions that are the
same for all of them, and the updates are performed on the last dimension of
indices. In other words, the dimensions should be the following:
`num_prefix_dims = sparse_delta.indices.ndims - 1`
`batch_dim = num_prefix_dims + 1`
`sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[
batch_dim:]`
where
`sparse_delta.updates.shape[:num_prefix_dims]`
`== sparse_delta.indices.shape[:num_prefix_dims]`
`== var.shape[:num_prefix_dims]`
And the operation performed can be expressed as:
`var[i_1, ..., i_n,
sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[
i_1, ..., i_n, j]`
When sparse_delta.indices is a 1D tensor, this operation is equivalent to
`scatter_update`.
To avoid this operation one can looping over the first `ndims` of the
variable and using `scatter_update` on the subtensors that result of slicing
the first dimension. This is a valid option for `ndims = 1`, but less
efficient than this implementation.
Args:
sparse_delta: `tf.IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
raise TypeError(f"Argument `sparse_delta` must be a "
f"`tf.IndexedSlices`. Received arg: {sparse_delta}")
return self._lazy_read(
state_ops.batch_scatter_update(
self,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name))
def scatter_nd_sub(self, indices, updates, name=None):
"""Applies sparse subtraction to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_sub(indices, updates)
with tf.compat.v1.Session() as sess:
print sess.run(op)
```
The resulting update to ref would look like this:
[1, -9, 3, -6, -6, 6, 7, -4]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_sub(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_add(self, indices, updates, name=None):
"""Applies sparse addition to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
add = ref.scatter_nd_add(indices, updates)
with tf.compat.v1.Session() as sess:
print sess.run(add)
```
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_add(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_update(self, indices, updates, name=None):
"""Applies sparse assignment to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_update(indices, updates)
with tf.compat.v1.Session() as sess:
print sess.run(op)
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_update(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_max(self, indices, updates, name=None):
"""Updates this variable with the max of `tf.IndexedSlices` and itself.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_max(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def scatter_nd_min(self, indices, updates, name=None):
"""Updates this variable with the min of `tf.IndexedSlices` and itself.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
The updated variable.
"""
return self._lazy_read(
gen_state_ops.resource_scatter_nd_min(
self.handle,
indices,
ops.convert_to_tensor(updates, self.dtype),
name=name))
def _write_object_proto(self, proto, options):
"""Writes additional information of the variable into the SavedObject proto.
Subclasses of ResourceVariables could choose to override this method to
customize extra information to provide when saving a SavedModel.
Ideally, this should contain the logic in
write_object_proto_for_resource_variable but `DistributedValue` is an
outlier at the momemnt. Once `DistributedValue` becomes a proper
ResourceVariable, we should remove the helper method below.
Args:
proto: `SavedObject` proto to update.
options: A `SaveOption` instance that configures save behavior.
"""
write_object_proto_for_resource_variable(self, proto, options)
def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask,
end_mask, ellipsis_mask, new_axis_mask,
shrink_axis_mask):
with _handle_graph(self.handle), self._assign_dependencies():
return self._lazy_read(
gen_array_ops.resource_strided_slice_assign(
ref=self.handle,
begin=begin,
end=end,
strides=strides,
value=ops.convert_to_tensor(value, dtype=self.dtype),
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask))
def __complex__(self):
return complex(self.value().numpy())
def __int__(self):
return int(self.value().numpy())
def __long__(self):
return long(self.value().numpy())
def __float__(self):
return float(self.value().numpy())
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
del name
if dtype is not None and not dtype.is_compatible_with(self.dtype):
raise ValueError(
f"Incompatible type conversion requested to type {dtype.name} for "
f"`tf.Variable of type {self.dtype.name}. (Variable: {self})")
if as_ref:
return self.read_value().op.inputs[0]
else:
return self.value()
def __iadd__(self, unused_other):
raise RuntimeError("`variable += value` with `tf.Variable`s is not "
"supported. Use `variable.assign_add(value)` to modify "
"the variable, or `out = variable + value` if you "
"need to get a new output Tensor.")
def __isub__(self, unused_other):
raise RuntimeError("`variable -= value` with `tf.Variable`s is not "
"supported. Use `variable.assign_sub(value)` to modify "
"the variable, or `out = variable * value` if you "
"need to get a new output Tensor.")
def __imul__(self, unused_other):
raise RuntimeError("`var *= value` with `tf.Variable`s is not "
"supported. Use `var.assign(var * value)` to modify "
"the variable, or `out = var * value` if you "
"need to get a new output Tensor.")
def __idiv__(self, unused_other):
raise RuntimeError("`var /= value` with `tf.Variable`s is not "
"supported. Use `var.assign(var / value)` to modify "
"the variable, or `out = var / value` if you "
"need to get a new output Tensor.")
def __itruediv__(self, unused_other):
raise RuntimeError("`var /= value` with `tf.Variable`s is not "
"supported. Use `var.assign(var / value)` to modify "
"the variable, or `out = var / value` if you "
"need to get a new output Tensor.")
def __irealdiv__(self, unused_other):
raise RuntimeError("`var /= value` with `tf.Variable`s is not "
"supported. Use `var.assign(var / value)` to modify "
"the variable, or `out = var / value` if you "
"need to get a new output Tensor.")
def __ipow__(self, unused_other):
raise RuntimeError("`var **= value` with `tf.Variable`s is not "
"supported. Use `var.assign(var ** value)` to modify "
"the variable, or `out = var ** value` if you "
"need to get a new output Tensor.")
|
BaseResourceVariable
|
python
|
cython__cython
|
Cython/Compiler/PyrexTypes.py
|
{
"start": 25277,
"end": 44859
}
|
class ____(PyrexType):
is_memoryviewslice = 1
default_value = "{ 0, 0, { 0 }, { 0 }, { 0 } }"
has_attributes = 1
needs_refcounting = 1 # Ideally this would be true and reference counting for
# memoryview and pyobject code could be generated in the same way.
# However, memoryviews are sufficiently specialized that this doesn't
# seem practical. Implement a limited version of it for now
refcounting_needs_gil = False # __PYX_XCLEAR_MEMVIEW acquires GIL internally.
scope = None
# These are special cased in Defnode
from_py_function = None
to_py_function = None
exception_value = None
exception_check = True
subtypes = ['dtype']
def __init__(self, base_dtype, axes):
"""
MemoryViewSliceType(base, axes)
Base is the C base type; axes is a list of (access, packing) strings,
where access is one of 'full', 'direct' or 'ptr' and packing is one of
'contig', 'strided' or 'follow'. There is one (access, packing) tuple
for each dimension.
the access specifiers determine whether the array data contains
pointers that need to be dereferenced along that axis when
retrieving/setting:
'direct' -- No pointers stored in this dimension.
'ptr' -- Pointer stored in this dimension.
'full' -- Check along this dimension, don't assume either.
the packing specifiers specify how the array elements are laid-out
in memory.
'contig' -- The data is contiguous in memory along this dimension.
At most one dimension may be specified as 'contig'.
'strided' -- The data isn't contiguous along this dimension.
'follow' -- Used for C/Fortran contiguous arrays, a 'follow' dimension
has its stride automatically computed from extents of the other
dimensions to ensure C or Fortran memory layout.
C-contiguous memory has 'direct' as the access spec, 'contig' as the
*last* axis' packing spec and 'follow' for all other packing specs.
Fortran-contiguous memory has 'direct' as the access spec, 'contig' as
the *first* axis' packing spec and 'follow' for all other packing
specs.
"""
from . import Buffer, MemoryView
self.dtype = base_dtype
self.axes = axes
self.ndim = len(axes)
self.flags = MemoryView.get_buf_flags(self.axes)
self.is_c_contig, self.is_f_contig = MemoryView.is_cf_contig(self.axes)
assert not (self.is_c_contig and self.is_f_contig)
self.mode = MemoryView.get_mode(axes)
self.writable_needed = False
if not self.dtype.is_fused:
self.dtype_name = Buffer.mangle_dtype_name(self.dtype)
def __hash__(self):
return hash(self.__class__) ^ hash(self.dtype) ^ hash(tuple(self.axes))
def __eq__(self, other):
if isinstance(other, BaseType):
return self.same_as_resolved_type(other)
else:
return False
def __ne__(self, other):
# TODO drop when Python2 is dropped
return not (self == other)
def same_as_resolved_type(self, other_type):
return ((other_type.is_memoryviewslice and
#self.writable_needed == other_type.writable_needed and # FIXME: should be only uni-directional
self.dtype.same_as(other_type.dtype) and
self.axes == other_type.axes) or
other_type is error_type)
def needs_nonecheck(self):
return True
def is_complete(self):
# incomplete since the underlying struct doesn't have a cython.memoryview object.
return 0
def can_be_optional(self):
"""Returns True if type can be used with typing.Optional[]."""
return True
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
# XXX: we put these guards in for now...
assert not dll_linkage
from . import MemoryView
base_code = StringEncoding.EncodedString(
str(self) if pyrex or for_display else Naming.memviewslice_cname)
return self.base_declaration_code(
base_code,
entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
self.scope = scope = Symtab.CClassScope(
'mvs_class_'+self.specialization_suffix(),
None,
visibility='extern',
parent_type=self)
scope.directives = {}
scope.declare_var('_data', c_char_ptr_type, None,
cname='data', is_cdef=1)
return True
def declare_attribute(self, attribute, env, pos):
from . import MemoryView, Options
scope = self.scope
if attribute == 'shape':
scope.declare_var('shape',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='shape',
is_cdef=1)
elif attribute == 'strides':
scope.declare_var('strides',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='strides',
is_cdef=1)
elif attribute == 'suboffsets':
scope.declare_var('suboffsets',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='suboffsets',
is_cdef=1)
elif attribute in ("copy", "copy_fortran"):
ndim = len(self.axes)
follow_dim = [('direct', 'follow')]
contig_dim = [('direct', 'contig')]
to_axes_c = follow_dim * (ndim - 1) + contig_dim
to_axes_f = contig_dim + follow_dim * (ndim -1)
dtype = self.dtype
if dtype.is_cv_qualified:
dtype = dtype.cv_base_type
to_memview_c = MemoryViewSliceType(dtype, to_axes_c)
to_memview_f = MemoryViewSliceType(dtype, to_axes_f)
for to_memview, cython_name in [(to_memview_c, "copy"),
(to_memview_f, "copy_fortran")]:
copy_func_type = CFuncType(
to_memview,
[CFuncTypeArg("memviewslice", self, None)])
copy_cname = MemoryView.copy_c_or_fortran_cname(to_memview)
entry = scope.declare_cfunction(
cython_name,
copy_func_type, pos=pos, defining=1,
cname=copy_cname)
entry.utility_code_definition = MemoryView.get_copy_new_utility(pos, self, to_memview)
MemoryView.use_cython_array_utility_code(env)
elif attribute in ("is_c_contig", "is_f_contig"):
# is_c_contig and is_f_contig functions
for (c_or_f, cython_name) in (('C', 'is_c_contig'), ('F', 'is_f_contig')):
is_contig_name = MemoryView.get_is_contig_func_name(c_or_f, self.ndim)
cfunctype = CFuncType(
return_type=c_bint_type,
args=[CFuncTypeArg("memviewslice", self, None)],
exception_value=-1,
)
entry = scope.declare_cfunction(cython_name,
cfunctype,
pos=pos,
defining=1,
cname=is_contig_name)
entry.utility_code_definition = MemoryView.get_is_contig_utility(c_or_f, self.ndim)
return True
def get_entry(self, node, cname=None, type=None):
from . import MemoryView, Symtab
if cname is None:
assert node.is_simple() or node.is_temp or node.is_elemental
cname = node.result()
if type is None:
type = node.type
entry = Symtab.Entry(cname, cname, type, node.pos)
return MemoryView.MemoryViewSliceBufferEntry(entry)
def conforms_to(self, dst, broadcast=False, copying=False):
"""
Returns True if src conforms to dst, False otherwise.
If conformable, the types are the same, the ndims are equal, and each axis spec is conformable.
Any packing/access spec is conformable to itself.
'direct' and 'ptr' are conformable to 'full'.
'contig' and 'follow' are conformable to 'strided'.
Any other combo is not conformable.
"""
from . import MemoryView
src = self
#if not copying and self.writable_needed and not dst.writable_needed:
# return False
src_dtype, dst_dtype = src.dtype, dst.dtype
# We can add but not remove const/volatile modifiers
# (except if we are copying by value, then anything is fine)
if not copying:
if src_dtype.is_const and not dst_dtype.is_const:
return False
if src_dtype.is_volatile and not dst_dtype.is_volatile:
return False
# const/volatile checks are done, remove those qualifiers
if src_dtype.is_cv_qualified:
src_dtype = src_dtype.cv_base_type
if dst_dtype.is_cv_qualified:
dst_dtype = dst_dtype.cv_base_type
if not src_dtype.same_as(dst_dtype):
return False
if src.ndim != dst.ndim:
if broadcast:
src, dst = MemoryView.broadcast_types(src, dst)
else:
return False
for src_spec, dst_spec in zip(src.axes, dst.axes):
src_access, src_packing = src_spec
dst_access, dst_packing = dst_spec
if src_access != dst_access and dst_access != 'full':
return False
if src_packing != dst_packing and dst_packing != 'strided' and not copying:
return False
return True
def valid_dtype(self, dtype, i=0):
"""
Return whether type dtype can be used as the base type of a
memoryview slice.
We support structs, numeric types and objects
"""
if dtype.is_complex and dtype.real_type.is_int:
return False
if dtype.is_struct and dtype.kind == 'struct':
for member in dtype.scope.var_entries:
if not self.valid_dtype(member.type):
return False
return True
return (
dtype.is_error or
# Pointers are not valid (yet)
# (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or
(dtype.is_array and i < 8 and self.valid_dtype(dtype.base_type, i + 1)) or
dtype.is_numeric or
dtype.is_pyobject or
dtype.is_fused or # accept this as it will be replaced by specializations later
(dtype.is_typedef and self.valid_dtype(dtype.typedef_base_type))
)
def validate_memslice_dtype(self, pos):
if not self.valid_dtype(self.dtype):
error(pos, "Invalid base type for memoryview slice: %s" % self.dtype)
def assert_direct_dims(self, pos):
for access, packing in self.axes:
if access != 'direct':
error(pos, "All dimensions must be direct")
return False
return True
def transpose(self, pos):
if not self.assert_direct_dims(pos):
return error_type
return MemoryViewSliceType(self.dtype, self.axes[::-1])
def specialization_name(self):
return '%s_%s' % (
super().specialization_name(),
self.specialization_suffix())
def specialization_suffix(self):
return "%s_%s" % (self.axes_to_name(), self.dtype_name)
def can_coerce_to_pyobject(self, env):
return True
def can_coerce_from_pyobject(self, env):
return True
def check_for_null_code(self, cname):
return cname + '.memview'
def create_from_py_utility_code(self, env):
from . import MemoryView, Buffer
# We don't have 'code', so use a LazyUtilityCode with a callback.
def lazy_utility_callback(code):
context['dtype_typeinfo'] = Buffer.get_type_information_cname(code, self.dtype)
return TempitaUtilityCode.load(
"ObjectToMemviewSlice", "MemoryView_C.c", context=context)
env.use_utility_code(
MemoryView.get_view_utility_code(env.context.shared_utility_qualified_name))
env.use_utility_code(LazyUtilityCode(lazy_utility_callback))
if self.is_c_contig:
c_or_f_flag = "__Pyx_IS_C_CONTIG"
elif self.is_f_contig:
c_or_f_flag = "__Pyx_IS_F_CONTIG"
else:
c_or_f_flag = "0"
suffix = self.specialization_suffix()
funcname = "__Pyx_PyObject_to_MemoryviewSlice_" + suffix
context = dict(
MemoryView.template_context,
buf_flag = self.flags,
ndim = self.ndim,
axes_specs = ', '.join(self.axes_to_code()),
dtype_typedecl = self.dtype.empty_declaration_code(),
struct_nesting_depth = self.dtype.struct_nesting_depth(),
c_or_f_flag = c_or_f_flag,
funcname = funcname,
)
self.from_py_function = funcname
return True
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None,
special_none_cvalue=None):
# NOTE: auto-detection of readonly buffers is disabled:
# writable = self.writable_needed or not self.dtype.is_const
writable = not self.dtype.is_const
return self._assign_from_py_code(
source_code, result_code, error_pos, code, from_py_function, error_condition,
extra_args=['PyBUF_WRITABLE' if writable else '0'],
special_none_cvalue=special_none_cvalue,
)
def create_to_py_utility_code(self, env):
self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env)
return True
def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
assert self._dtype_to_py_func
assert self._dtype_from_py_func
to_py_func = "(PyObject *(*)(char *)) " + self._dtype_to_py_func
from_py_func = "(int (*)(char *, PyObject *)) " + self._dtype_from_py_func
tup = (result_code, source_code, self.ndim, to_py_func, from_py_func, self.dtype.is_pyobject)
return "%s = __pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup
def dtype_object_conversion_funcs(self, env):
get_function = "__pyx_memview_get_%s" % self.dtype_name
set_function = "__pyx_memview_set_%s" % self.dtype_name
context = dict(
get_function = get_function,
set_function = set_function,
)
if self.dtype.is_pyobject:
utility_name = "MemviewObjectToObject"
else:
self.dtype.create_to_py_utility_code(env)
to_py_function = self.dtype.to_py_function
from_py_function = None
if not self.dtype.is_const:
self.dtype.create_from_py_utility_code(env)
from_py_function = self.dtype.from_py_function
if not (to_py_function or from_py_function):
return "NULL", "NULL"
if not to_py_function:
get_function = "NULL"
if not from_py_function:
set_function = "NULL"
utility_name = "MemviewDtypeToObject"
error_condition = (self.dtype.error_condition('value') or
'PyErr_Occurred()')
context.update(
to_py_function=to_py_function,
from_py_function=from_py_function,
dtype=self.dtype.empty_declaration_code(),
error_condition=error_condition,
dtype_is_const=self.dtype.is_const,
)
utility = TempitaUtilityCode.load_cached(
utility_name, "MemoryView_C.c", context=context)
env.use_utility_code(utility)
return get_function, set_function
def axes_to_code(self):
"""Return a list of code constants for each axis"""
from . import MemoryView
d = MemoryView._spec_to_const
return ["(%s | %s)" % (d[a], d[p]) for a, p in self.axes]
def axes_to_name(self):
"""Return an abbreviated name for our axes"""
from . import MemoryView
d = MemoryView._spec_to_abbrev
return "".join(["%s%s" % (d[a], d[p]) for a, p in self.axes])
def error_condition(self, result_code):
return "!%s.memview" % result_code
def __str__(self):
from . import MemoryView
axes_code_list = []
for idx, (access, packing) in enumerate(self.axes):
flag = MemoryView.get_memoryview_flag(access, packing)
if flag == "strided":
axes_code_list.append(":")
else:
if flag == 'contiguous':
have_follow = [p for a, p in self.axes[idx - 1:idx + 2]
if p == 'follow']
if have_follow or self.ndim == 1:
flag = '1'
axes_code_list.append("::" + flag)
if self.dtype.is_pyobject:
dtype_name = self.dtype.name
else:
dtype_name = self.dtype
return "%s[%s]" % (dtype_name, ", ".join(axes_code_list))
def specialize(self, values):
"""This does not validate the base type!!"""
dtype = self.dtype.specialize(values)
if dtype is not self.dtype:
return MemoryViewSliceType(dtype, self.axes)
return self
def cast_code(self, expr_code):
return expr_code
# When memoryviews are increfed currently seems heavily special-cased.
# Therefore, use our own function for now
def generate_incref(self, code, name, **kwds):
pass
def generate_incref_memoryviewslice(self, code, slice_cname, have_gil):
# TODO ideally would be done separately
code.putln("__PYX_INC_MEMVIEW(&%s, %d);" % (slice_cname, int(have_gil)))
# decref however did look to always apply for memoryview slices
# with "have_gil" set to True by default
def generate_xdecref(self, code, cname, nanny, have_gil):
code.putln("__PYX_XCLEAR_MEMVIEW(&%s, %d);" % (cname, int(have_gil)))
def generate_decref(self, code, cname, nanny, have_gil):
# Fall back to xdecref since we don't care to have a separate decref version for this.
self.generate_xdecref(code, cname, nanny, have_gil)
def generate_xdecref_clear(self, code, cname, clear_before_decref, **kwds):
self.generate_xdecref(code, cname, **kwds)
code.putln("%s.memview = NULL; %s.data = NULL;" % (cname, cname))
def generate_decref_clear(self, code, cname, **kwds):
# memoryviews don't currently distinguish between xdecref and decref
self.generate_xdecref_clear(code, cname, **kwds)
# memoryviews don't participate in giveref/gotref
generate_gotref = generate_xgotref = generate_xgiveref = generate_giveref = lambda *args: None
|
MemoryViewSliceType
|
python
|
tornadoweb__tornado
|
tornado/test/simple_httpclient_test.py
|
{
"start": 31308,
"end": 32252
}
|
class ____(AsyncHTTPTestCase):
def get_app(self):
class ChunkedWithContentLength(RequestHandler):
def get(self):
# Add an invalid Transfer-Encoding to the response
self.set_header("Transfer-Encoding", "chunked")
self.write("Hello world")
return Application([("/chunkwithcl", ChunkedWithContentLength)])
def get_http_client(self):
return SimpleAsyncHTTPClient()
def test_chunked_with_content_length(self):
# Make sure the invalid headers are detected
with ExpectLog(
gen_log,
(
"Malformed HTTP message from None: Message "
"with both Transfer-Encoding and Content-Length"
),
level=logging.INFO,
):
with self.assertRaises(HTTPStreamClosedError):
self.fetch("/chunkwithcl", raise_error=True)
|
ChunkedWithContentLengthTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/distribute-elements-into-two-arrays-ii.py
|
{
"start": 89,
"end": 711
}
|
class ____(object):
def resultArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
sl1, sl2 = SortedList([nums[0]]), SortedList([nums[1]])
a, b = [nums[0]], [nums[1]]
for i in xrange(2, len(nums)):
cnt1 = len(sl1)-sl1.bisect_right(nums[i])
cnt2 = len(sl2)-sl2.bisect_right(nums[i])
if cnt1 > cnt2 or (cnt1 == cnt2 and len(a) <= len(b)):
sl1.add(nums[i])
a.append(nums[i])
else:
sl2.add(nums[i])
b.append(nums[i])
return a+b
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/owlv2/modeling_owlv2.py
|
{
"start": 10107,
"end": 13001
}
|
class ____(ModelOutput):
r"""
logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
Classification logits (including no-object) for all queries.
image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes
image embeddings for each patch.
query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes
image embeddings for each patch.
target_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual target image in the batch
(disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to
retrieve the unnormalized bounding boxes.
query_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual query image in the batch
(disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to
retrieve the unnormalized bounding boxes.
class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
Class embeddings of all image patches. OWLv2 represents images as a set of image patches where the total
number of patches is (image_size / patch_size)**2.
text_model_output (tuple[`BaseModelOutputWithPooling`]):
The output of the [`Owlv2TextModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`Owlv2VisionModel`].
"""
logits: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
query_image_embeds: Optional[torch.FloatTensor] = None
target_pred_boxes: Optional[torch.FloatTensor] = None
query_pred_boxes: Optional[torch.FloatTensor] = None
class_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
# Copied from transformers.models.owlvit.modeling_owlvit.OwlViTVisionEmbeddings with OwlViT->Owlv2
|
Owlv2ImageGuidedObjectDetectionOutput
|
python
|
numba__numba
|
numba/cuda/tests/cudapy/test_math.py
|
{
"start": 3365,
"end": 27615
}
|
class ____(CUDATestCase):
def unary_template_float16(self, func, npfunc, start=0, stop=1):
self.unary_template(func, npfunc, np.float16, np.float16, start, stop)
def unary_template_float32(self, func, npfunc, start=0, stop=1):
self.unary_template(func, npfunc, np.float32, np.float32, start, stop)
def unary_template_float64(self, func, npfunc, start=0, stop=1):
self.unary_template(func, npfunc, np.float64, np.float64, start, stop)
def unary_template_int64(self, func, npfunc, start=0, stop=50):
self.unary_template(func, npfunc, np.int64, np.float64, start, stop)
def unary_template_uint64(self, func, npfunc, start=0, stop=50):
self.unary_template(func, npfunc, np.uint64, np.float64, start, stop)
def unary_template(self, func, npfunc, npdtype, nprestype, start, stop):
nelem = 50
A = np.linspace(start, stop, nelem).astype(npdtype)
B = np.empty_like(A).astype(nprestype)
arytype = numpy_support.from_dtype(npdtype)[::1]
restype = numpy_support.from_dtype(nprestype)[::1]
cfunc = cuda.jit((arytype, restype))(func)
cfunc[1, nelem](A, B)
# When this test was originally written it used
# assertTrue(np.allclose(...), which has different default tolerance
# values to assert_allclose. The tolerance values here are chosen as
# the tightest under which the tests will pass.
if npdtype == np.float64:
rtol = 1e-13
elif npdtype == np.float32:
rtol = 1e-6
else:
rtol = 1e-3
np.testing.assert_allclose(npfunc(A), B, rtol=rtol)
def unary_bool_special_values(self, func, npfunc, npdtype, npmtype):
fi = np.finfo(npdtype)
denorm = fi.tiny / 4
A = np.array([0., denorm, fi.tiny, 0.5, 1., fi.max, np.inf, np.nan],
dtype=npdtype)
B = np.empty_like(A, dtype=np.int32)
cfunc = cuda.jit((npmtype[::1], int32[::1]))(func)
cfunc[1, A.size](A, B)
np.testing.assert_array_equal(B, npfunc(A))
cfunc[1, A.size](-A, B)
np.testing.assert_array_equal(B, npfunc(-A))
def unary_bool_special_values_float32(self, func, npfunc):
self.unary_bool_special_values(func, npfunc, np.float32, float32)
def unary_bool_special_values_float64(self, func, npfunc):
self.unary_bool_special_values(func, npfunc, np.float64, float64)
def unary_bool_template_float32(self, func, npfunc, start=0, stop=1):
self.unary_template(func, npfunc, np.float32, np.float32, start, stop)
def unary_bool_template_float64(self, func, npfunc, start=0, stop=1):
self.unary_template(func, npfunc, np.float64, np.float64, start, stop)
def unary_bool_template_int32(self, func, npfunc, start=0, stop=49):
self.unary_template(func, npfunc, np.int32, np.int32, start, stop)
def unary_bool_template_int64(self, func, npfunc, start=0, stop=49):
self.unary_template(func, npfunc, np.int64, np.int64, start, stop)
def unary_bool_template(self, func, npfunc, npdtype, npmtype, start, stop):
nelem = 50
A = np.linspace(start, stop, nelem).astype(npdtype)
B = np.empty(A.shape, dtype=np.int32)
iarytype = npmtype[::1]
oarytype = int32[::1]
cfunc = cuda.jit((iarytype, oarytype))(func)
cfunc[1, nelem](A, B)
np.testing.assert_allclose(npfunc(A), B)
def binary_template_float32(self, func, npfunc, start=0, stop=1):
self.binary_template(func, npfunc, np.float32, np.float32, start, stop)
def binary_template_float64(self, func, npfunc, start=0, stop=1):
self.binary_template(func, npfunc, np.float64, np.float64, start, stop)
def binary_template_int64(self, func, npfunc, start=0, stop=50):
self.binary_template(func, npfunc, np.int64, np.float64, start, stop)
def binary_template_uint64(self, func, npfunc, start=0, stop=50):
self.binary_template(func, npfunc, np.uint64, np.float64, start, stop)
def binary_template(self, func, npfunc, npdtype, nprestype, start, stop):
nelem = 50
A = np.linspace(start, stop, nelem).astype(npdtype)
B = np.empty_like(A).astype(nprestype)
arytype = numpy_support.from_dtype(npdtype)[::1]
restype = numpy_support.from_dtype(nprestype)[::1]
cfunc = cuda.jit((arytype, arytype, restype))(func)
cfunc[1, nelem](A, A, B)
np.testing.assert_allclose(npfunc(A, A), B)
#---------------------------------------------------------------------------
# test_math_acos
def test_math_acos(self):
self.unary_template_float32(math_acos, np.arccos)
self.unary_template_float64(math_acos, np.arccos)
# For integers we can only test with zero, since <=-1 and >=1 result in
# invalid values.
self.unary_template_int64(math_acos, np.arccos, start=0, stop=0)
self.unary_template_uint64(math_acos, np.arccos, start=0, stop=0)
#---------------------------------------------------------------------------
# test_math_asin
def test_math_asin(self):
self.unary_template_float32(math_asin, np.arcsin)
self.unary_template_float64(math_asin, np.arcsin)
# For integers we can only test with zero, since <=-1 and >=1 result in
# invalid values.
self.unary_template_int64(math_asin, np.arcsin, start=0, stop=0)
self.unary_template_uint64(math_asin, np.arcsin, start=0, stop=0)
#---------------------------------------------------------------------------
# test_math_atan
def test_math_atan(self):
self.unary_template_float32(math_atan, np.arctan)
self.unary_template_float64(math_atan, np.arctan)
self.unary_template_int64(math_atan, np.arctan)
self.unary_template_uint64(math_atan, np.arctan)
#---------------------------------------------------------------------------
# test_math_acosh
def test_math_acosh(self):
self.unary_template_float32(math_acosh, np.arccosh, start=1, stop=2)
self.unary_template_float64(math_acosh, np.arccosh, start=1, stop=2)
self.unary_template_int64(math_acosh, np.arccosh, start=1, stop=2)
self.unary_template_uint64(math_acosh, np.arccosh, start=1, stop=2)
#---------------------------------------------------------------------------
# test_math_asinh
def test_math_asinh(self):
self.unary_template_float32(math_asinh, np.arcsinh)
self.unary_template_float64(math_asinh, np.arcsinh)
self.unary_template_int64(math_asinh, np.arcsinh)
self.unary_template_uint64(math_asinh, np.arcsinh)
#---------------------------------------------------------------------------
# test_math_atanh
def test_math_atanh(self):
self.unary_template_float32(math_atanh, np.arctanh, start=0, stop=.9)
self.unary_template_float64(math_atanh, np.arctanh, start=0, stop=.9)
self.unary_template_int64(math_atanh, np.arctanh, start=0, stop=.9)
self.unary_template_uint64(math_atanh, np.arctanh, start=0, stop=.9)
#---------------------------------------------------------------------------
# test_math_cos
def test_math_cos(self):
self.unary_template_float32(math_cos, np.cos)
self.unary_template_float64(math_cos, np.cos)
self.unary_template_int64(math_cos, np.cos)
self.unary_template_uint64(math_cos, np.cos)
@skip_unless_cc_53
def test_math_fp16(self):
self.unary_template_float16(math_sin, np.sin)
self.unary_template_float16(math_cos, np.cos)
self.unary_template_float16(math_exp, np.exp)
self.unary_template_float16(math_log, np.log, start=1)
self.unary_template_float16(math_log2, np.log2, start=1)
self.unary_template_float16(math_log10, np.log10, start=1)
self.unary_template_float16(math_fabs, np.fabs, start=-1)
self.unary_template_float16(math_sqrt, np.sqrt)
self.unary_template_float16(math_ceil, np.ceil)
self.unary_template_float16(math_floor, np.floor)
@skip_on_cudasim("numpy does not support trunc for float16")
@skip_unless_cc_53
def test_math_fp16_trunc(self):
self.unary_template_float16(math_trunc, np.trunc)
#---------------------------------------------------------------------------
# test_math_sin
def test_math_sin(self):
self.unary_template_float32(math_sin, np.sin)
self.unary_template_float64(math_sin, np.sin)
self.unary_template_int64(math_sin, np.sin)
self.unary_template_uint64(math_sin, np.sin)
#---------------------------------------------------------------------------
# test_math_tan
def test_math_tan(self):
self.unary_template_float32(math_tan, np.tan)
self.unary_template_float64(math_tan, np.tan)
self.unary_template_int64(math_tan, np.tan)
self.unary_template_uint64(math_tan, np.tan)
#---------------------------------------------------------------------------
# test_math_cosh
def test_math_cosh(self):
self.unary_template_float32(math_cosh, np.cosh)
self.unary_template_float64(math_cosh, np.cosh)
self.unary_template_int64(math_cosh, np.cosh)
self.unary_template_uint64(math_cosh, np.cosh)
#---------------------------------------------------------------------------
# test_math_sinh
def test_math_sinh(self):
self.unary_template_float32(math_sinh, np.sinh)
self.unary_template_float64(math_sinh, np.sinh)
self.unary_template_int64(math_sinh, np.sinh)
self.unary_template_uint64(math_sinh, np.sinh)
#---------------------------------------------------------------------------
# test_math_tanh
def test_math_tanh(self):
self.unary_template_float32(math_tanh, np.tanh)
self.unary_template_float64(math_tanh, np.tanh)
self.unary_template_int64(math_tanh, np.tanh)
self.unary_template_uint64(math_tanh, np.tanh)
#---------------------------------------------------------------------------
# test_math_atan2
def test_math_atan2(self):
self.binary_template_float32(math_atan2, np.arctan2)
self.binary_template_float64(math_atan2, np.arctan2)
self.binary_template_int64(math_atan2, np.arctan2)
self.binary_template_uint64(math_atan2, np.arctan2)
#---------------------------------------------------------------------------
# test_math_erf
def test_math_erf(self):
@vectorize
def ufunc(x):
return math.erf(x)
self.unary_template_float32(math_erf, ufunc)
self.unary_template_float64(math_erf, ufunc)
self.unary_template_int64(math_erf, ufunc)
self.unary_template_uint64(math_erf, ufunc)
#---------------------------------------------------------------------------
# test_math_erfc
def test_math_erfc(self):
@vectorize
def ufunc(x):
return math.erfc(x)
self.unary_template_float32(math_erfc, ufunc)
self.unary_template_float64(math_erfc, ufunc)
self.unary_template_int64(math_erfc, ufunc)
self.unary_template_uint64(math_erfc, ufunc)
#---------------------------------------------------------------------------
# test_math_exp
def test_math_exp(self):
self.unary_template_float32(math_exp, np.exp)
self.unary_template_float64(math_exp, np.exp)
self.unary_template_int64(math_exp, np.exp)
self.unary_template_uint64(math_exp, np.exp)
#---------------------------------------------------------------------------
# test_math_expm1
def test_math_expm1(self):
self.unary_template_float32(math_expm1, np.expm1)
self.unary_template_float64(math_expm1, np.expm1)
self.unary_template_int64(math_expm1, np.expm1)
self.unary_template_uint64(math_expm1, np.expm1)
#---------------------------------------------------------------------------
# test_math_fabs
def test_math_fabs(self):
self.unary_template_float32(math_fabs, np.fabs, start=-1)
self.unary_template_float64(math_fabs, np.fabs, start=-1)
self.unary_template_int64(math_fabs, np.fabs, start=-1)
self.unary_template_uint64(math_fabs, np.fabs, start=-1)
#---------------------------------------------------------------------------
# test_math_gamma
def test_math_gamma(self):
@vectorize
def ufunc(x):
return math.gamma(x)
self.unary_template_float32(math_gamma, ufunc, start=0.1)
self.unary_template_float64(math_gamma, ufunc, start=0.1)
self.unary_template_int64(math_gamma, ufunc, start=1)
self.unary_template_uint64(math_gamma, ufunc, start=1)
#---------------------------------------------------------------------------
# test_math_lgamma
def test_math_lgamma(self):
@vectorize
def ufunc(x):
return math.lgamma(x)
self.unary_template_float32(math_lgamma, ufunc, start=0.1)
self.unary_template_float64(math_lgamma, ufunc, start=0.1)
self.unary_template_int64(math_lgamma, ufunc, start=1)
self.unary_template_uint64(math_lgamma, ufunc, start=1)
#---------------------------------------------------------------------------
# test_math_log
def test_math_log(self):
self.unary_template_float32(math_log, np.log, start=1)
self.unary_template_float64(math_log, np.log, start=1)
self.unary_template_int64(math_log, np.log, start=1)
self.unary_template_uint64(math_log, np.log, start=1)
#---------------------------------------------------------------------------
# test_math_log2
def test_math_log2(self):
self.unary_template_float32(math_log2, np.log2, start=1)
self.unary_template_float64(math_log2, np.log2, start=1)
self.unary_template_int64(math_log2, np.log2, start=1)
self.unary_template_uint64(math_log2, np.log2, start=1)
#---------------------------------------------------------------------------
# test_math_log10
def test_math_log10(self):
self.unary_template_float32(math_log10, np.log10, start=1)
self.unary_template_float64(math_log10, np.log10, start=1)
self.unary_template_int64(math_log10, np.log10, start=1)
self.unary_template_uint64(math_log10, np.log10, start=1)
#---------------------------------------------------------------------------
# test_math_log1p
def test_math_log1p(self):
self.unary_template_float32(math_log1p, np.log1p)
self.unary_template_float64(math_log1p, np.log1p)
self.unary_template_int64(math_log1p, np.log1p)
self.unary_template_uint64(math_log1p, np.log1p)
#---------------------------------------------------------------------------
# test_math_remainder
def test_math_remainder(self):
self.binary_template_float32(math_remainder, np.remainder, start=1e-11)
self.binary_template_float64(math_remainder, np.remainder, start=1e-11)
self.binary_template_int64(math_remainder, np.remainder, start=1)
self.binary_template_uint64(math_remainder, np.remainder, start=1)
@skip_on_cudasim('math.remainder(0, 0) raises a ValueError on CUDASim')
def test_math_remainder_0_0(self):
@cuda.jit(void(float64[::1], int64, int64))
def test_0_0(r, x, y):
r[0] = math.remainder(x, y)
r = np.zeros(1, np.float64)
test_0_0[1, 1](r, 0, 0)
self.assertTrue(np.isnan(r[0]))
#---------------------------------------------------------------------------
# test_math_sqrt
def test_math_sqrt(self):
self.unary_template_float32(math_sqrt, np.sqrt)
self.unary_template_float64(math_sqrt, np.sqrt)
self.unary_template_int64(math_sqrt, np.sqrt)
self.unary_template_uint64(math_sqrt, np.sqrt)
#---------------------------------------------------------------------------
# test_math_hypot
def test_math_hypot(self):
self.binary_template_float32(math_hypot, np.hypot)
self.binary_template_float64(math_hypot, np.hypot)
self.binary_template_int64(math_hypot, np.hypot)
self.binary_template_uint64(math_hypot, np.hypot)
#---------------------------------------------------------------------------
# test_math_pow
def pow_template_int32(self, npdtype):
nelem = 50
A = np.linspace(0, 25, nelem).astype(npdtype)
B = np.arange(nelem, dtype=np.int32)
C = np.empty_like(A)
arytype = numpy_support.from_dtype(npdtype)[::1]
cfunc = cuda.jit((arytype, int32[::1], arytype))(math_pow)
cfunc[1, nelem](A, B, C)
# NumPy casting rules result in a float64 output always, which doesn't
# match the overflow to inf of math.pow and libdevice.powi for large
# values of float32, so we compute the reference result with math.pow.
Cref = np.empty_like(A)
for i in range(len(A)):
Cref[i] = math.pow(A[i], B[i])
np.testing.assert_allclose(np.power(A, B).astype(npdtype), C, rtol=1e-6)
def test_math_pow(self):
self.binary_template_float32(math_pow, np.power)
self.binary_template_float64(math_pow, np.power)
self.pow_template_int32(np.float32)
self.pow_template_int32(np.float64)
#---------------------------------------------------------------------------
# test_math_pow_binop
def test_math_pow_binop(self):
self.binary_template_float32(math_pow_binop, np.power)
self.binary_template_float64(math_pow_binop, np.power)
#---------------------------------------------------------------------------
# test_math_ceil
def test_math_ceil(self):
self.unary_template_float32(math_ceil, np.ceil)
self.unary_template_float64(math_ceil, np.ceil)
self.unary_template_int64(math_ceil, np.ceil)
self.unary_template_uint64(math_ceil, np.ceil)
#---------------------------------------------------------------------------
# test_math_floor
def test_math_floor(self):
self.unary_template_float32(math_floor, np.floor)
self.unary_template_float64(math_floor, np.floor)
self.unary_template_int64(math_floor, np.floor)
self.unary_template_uint64(math_floor, np.floor)
#---------------------------------------------------------------------------
# test_math_trunc
#
# Note that math.trunc() is only supported on NumPy float64s, and not
# other float types or int types. See NumPy Issue #13375:
#
# - https://github.com/numpy/numpy/issues/13375 - "Add methods from the
# builtin float types to the numpy floating point types"
def test_math_trunc(self):
self.unary_template_float64(math_trunc, np.trunc)
@skip_on_cudasim('trunc only supported on NumPy float64')
def test_math_trunc_non_float64(self):
self.unary_template_float32(math_trunc, np.trunc)
self.unary_template_int64(math_trunc, np.trunc)
self.unary_template_uint64(math_trunc, np.trunc)
#---------------------------------------------------------------------------
# test_math_copysign
def test_math_copysign(self):
self.binary_template_float32(math_copysign, np.copysign, start=-1)
self.binary_template_float64(math_copysign, np.copysign, start=-1)
#---------------------------------------------------------------------------
# test_math_modf
def test_math_modf(self):
def modf_template_nan(dtype, arytype):
A = np.array([np.nan], dtype=dtype)
B = np.zeros_like(A)
C = np.zeros_like(A)
cfunc = cuda.jit((arytype, arytype, arytype))(math_modf)
cfunc[1, len(A)](A, B, C)
self.assertTrue(np.isnan(B))
self.assertTrue(np.isnan(C))
def modf_template_compare(A, dtype, arytype):
A = A.astype(dtype)
B = np.zeros_like(A)
C = np.zeros_like(A)
cfunc = cuda.jit((arytype, arytype, arytype))(math_modf)
cfunc[1, len(A)](A, B, C)
D, E = np.modf(A)
self.assertTrue(np.array_equal(B,D))
self.assertTrue(np.array_equal(C,E))
nelem = 50
#32 bit float
with self.subTest("float32 modf on simple float"):
modf_template_compare(np.linspace(0, 10, nelem), dtype=np.float32,
arytype=float32[:])
with self.subTest("float32 modf on +- infinity"):
modf_template_compare(np.array([np.inf, -np.inf]), dtype=np.float32,
arytype=float32[:])
with self.subTest("float32 modf on nan"):
modf_template_nan(dtype=np.float32, arytype=float32[:])
#64 bit float
with self.subTest("float64 modf on simple float"):
modf_template_compare(np.linspace(0, 10, nelem), dtype=np.float64,
arytype=float64[:])
with self.subTest("float64 modf on +- infinity"):
modf_template_compare(np.array([np.inf, -np.inf]), dtype=np.float64,
arytype=float64[:])
with self.subTest("float64 modf on nan"):
modf_template_nan(dtype=np.float64, arytype=float64[:])
#---------------------------------------------------------------------------
# test_math_fmod
def test_math_fmod(self):
self.binary_template_float32(math_fmod, np.fmod, start=1)
self.binary_template_float64(math_fmod, np.fmod, start=1)
#---------------------------------------------------------------------------
# test_math_mod_binop
def test_math_mod_binop(self):
self.binary_template_float32(math_mod_binop, np.fmod, start=1)
self.binary_template_float64(math_mod_binop, np.fmod, start=1)
#---------------------------------------------------------------------------
# test_math_isnan
def test_math_isnan(self):
self.unary_bool_template_float32(math_isnan, np.isnan)
self.unary_bool_template_float64(math_isnan, np.isnan)
self.unary_bool_template_int32(math_isnan, np.isnan)
self.unary_bool_template_int64(math_isnan, np.isnan)
self.unary_bool_special_values_float32(math_isnan, np.isnan)
self.unary_bool_special_values_float64(math_isnan, np.isnan)
#---------------------------------------------------------------------------
# test_math_isinf
def test_math_isinf(self):
self.unary_bool_template_float32(math_isinf, np.isinf)
self.unary_bool_template_float64(math_isinf, np.isinf)
self.unary_bool_template_int32(math_isinf, np.isinf)
self.unary_bool_template_int64(math_isinf, np.isinf)
self.unary_bool_special_values_float32(math_isinf, np.isinf)
self.unary_bool_special_values_float64(math_isinf, np.isinf)
#---------------------------------------------------------------------------
# test_math_isfinite
def test_math_isfinite(self):
self.unary_bool_template_float32(math_isfinite, np.isfinite)
self.unary_bool_template_float64(math_isfinite, np.isfinite)
self.unary_bool_template_int32(math_isfinite, np.isfinite)
self.unary_bool_template_int64(math_isfinite, np.isfinite)
self.unary_bool_special_values_float32(math_isfinite, np.isfinite)
self.unary_bool_special_values_float64(math_isfinite, np.isfinite)
#---------------------------------------------------------------------------
# test_math_degrees
def test_math_degrees(self):
self.unary_bool_template_float32(math_degrees, np.degrees)
self.unary_bool_template_float64(math_degrees, np.degrees)
#---------------------------------------------------------------------------
# test_math_radians
def test_math_radians(self):
self.unary_bool_template_float32(math_radians, np.radians)
self.unary_bool_template_float64(math_radians, np.radians)
if __name__ == '__main__':
unittest.main()
|
TestCudaMath
|
python
|
pytorch__pytorch
|
test/test_proxy_tensor.py
|
{
"start": 29785,
"end": 29871
}
|
class ____(TestGenericProxyTensor):
tracing_mode = "fake"
|
TestGenericProxyTensorFake
|
python
|
huggingface__transformers
|
src/transformers/models/qwen2_moe/modular_qwen2_moe.py
|
{
"start": 2123,
"end": 2735
}
|
class ____(GemmaMLP):
def __init__(self, config, intermediate_size=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
|
Qwen2MoeMLP
|
python
|
huggingface__transformers
|
src/transformers/models/falcon_mamba/modular_falcon_mamba.py
|
{
"start": 25556,
"end": 25779
}
|
class ____(MambaRMSNorm):
def forward(self, hidden_states):
return self.weight.to(hidden_states.device) * rms_forward(
hidden_states, variance_epsilon=self.variance_epsilon
)
|
FalconMambaRMSNorm
|
python
|
doocs__leetcode
|
solution/0400-0499/0405.Convert a Number to Hexadecimal/Solution.py
|
{
"start": 0,
"end": 311
}
|
class ____:
def toHex(self, num: int) -> str:
if num == 0:
return '0'
chars = '0123456789abcdef'
s = []
for i in range(7, -1, -1):
x = (num >> (4 * i)) & 0xF
if s or x != 0:
s.append(chars[x])
return ''.join(s)
|
Solution
|
python
|
dagster-io__dagster
|
examples/docs_projects/project_ask_ai_dagster/src/project_ask_ai_dagster/defs/io_managers.py
|
{
"start": 111,
"end": 1322
}
|
class ____(dg.IOManager):
def __init__(self, base_dir):
self.base_dir = base_dir
os.makedirs(base_dir, exist_ok=True)
def handle_output(self, context, obj):
# Convert documents to simple dicts
file_path = os.path.join(self.base_dir, f"{context.asset_key.path[-1]}.json")
# Convert documents to simple dicts
serialized_docs = [
{"page_content": doc.page_content, "metadata": doc.metadata} for doc in obj
]
# Save as JSON
with open(file_path, "w") as f:
json.dump(serialized_docs, f)
def load_input(self, context):
file_path = os.path.join(self.base_dir, f"{context.asset_key.path[-1]}.json")
if not os.path.exists(file_path):
return []
# Load and reconstruct Documents
with open(file_path) as f:
data = json.load(f)
return [
Document(page_content=doc["page_content"], metadata=doc["metadata"]) for doc in data
]
# end_io_manager
@dg.io_manager(config_schema={"base_dir": str})
def document_io_manager(init_context):
return DocumentIOManager(base_dir=init_context.resource_config["base_dir"])
|
DocumentIOManager
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 137092,
"end": 137499
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(SecurityAdvisoryOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
|
SecurityAdvisoryOrder
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/mro3.py
|
{
"start": 285,
"end": 357
}
|
class ____(DerivableObject, SubclassableObject):
pass
|
InheritingObject
|
python
|
ray-project__ray
|
doc/source/train/doc_code/metric_logging.py
|
{
"start": 3148,
"end": 3705
}
|
class ____(ray.train.UserCallback):
def after_report(
self,
run_context,
metrics: List[Dict[str, Any]],
checkpoint: Optional[ray.train.Checkpoint],
):
rank_0_metrics = metrics[0]
print(rank_0_metrics)
# Ex: Write metrics to a file...
trainer = ray.train.torch.TorchTrainer(
train_fn_per_worker,
scaling_config=ray.train.ScalingConfig(num_workers=2),
run_config=ray.train.RunConfig(callbacks=[CustomMetricsCallback()]),
)
trainer.fit()
# __report_callback_end__
|
CustomMetricsCallback
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/torch_function.py
|
{
"start": 8367,
"end": 11615
}
|
class ____:
def __init__(self, py_stack: Iterable[Any]) -> None:
# This is annoyingly complicated because of how the torch function subclass + mode C API was designed
# There are two exposed C knobs here as contexts: torch._C.DisableTorchFunction and torch._C.DisableTorchFunctionSubclass
# These are their definitions:
# 1) torch._C._is_torch_function_enabled indicates that neither of the above knobs have been entered
# (if either are entered, this will be False)
# 2) torch._C._is_torch_function_mode_enabled indicates that either the torch mode stack is empty OR
# torch._C.DisableTorchFunction has been entered
# To disambiguate these and keep myself sane I added a C API to check whether all torch function
# concepts (modes and subclasses) are enabled.
# This only returns true iff we have not entered torch._C.DisableTorchFunction and allows us to separate
# the stack length from the enablement state of torch function modes.
# This is important because now if a mode is pushed while dynamo is tracing, we know whether
# or not torch function modes are enabled and whether we should trace it.
self.torch_function_subclass_enabled = torch._C._is_torch_function_enabled()
# This differs from the C API of the same name
# this will only be false iff we have entered torch._C.DisableTorchFunction
# and does not take into account the mode stack length, while the C API bundles these
# two concepts
self.torch_function_mode_enabled = (
not torch._C._is_torch_function_all_disabled()
)
self.cur_mode = None
TorchFunctionModeStackVariable.reset()
self.mode_stack: collections.deque[TorchFunctionModeVariable] = (
collections.deque()
)
for i, val in enumerate(py_stack):
self.mode_stack.append(
LazyVariableTracker.create(val, source=TorchFunctionModeStackSource(i)) # type: ignore[arg-type]
)
def in_torch_function_mode(self) -> bool:
return len(self.mode_stack) > 0
def pop_torch_function_mode(self) -> TorchFunctionModeVariable:
return self.mode_stack.pop()
def push_torch_function_mode(self, mode_var: TorchFunctionModeVariable) -> None:
self.mode_stack.append(mode_var)
def call_torch_function_mode(
self,
tx: "InstructionTranslator",
fn: VariableTracker,
types: TupleVariable,
args: Iterable[Any],
kwargs: dict[str, Any],
) -> Any:
with self._pop_mode_for_inlining() as cur_mode:
return cur_mode.call_torch_function(tx, fn, types, args, kwargs)
@contextlib.contextmanager
def _pop_mode_for_inlining(
self,
) -> Generator[TorchFunctionModeVariable, None, None]:
old_mode = self.cur_mode
self.cur_mode = self.pop_torch_function_mode() # type: ignore[assignment]
try:
yield self.cur_mode # type: ignore[misc]
finally:
mode = self.cur_mode
self.cur_mode = old_mode
self.push_torch_function_mode(mode) # type: ignore[arg-type]
|
SymbolicTorchFunctionState
|
python
|
getsentry__sentry
|
tests/sentry/middleware/test_ratelimit_middleware.py
|
{
"start": 1104,
"end": 10603
}
|
class ____(TestCase, BaseTestCase):
middleware = RatelimitMiddleware(lambda request: sentinel.response)
@cached_property
def factory(self):
return RequestFactory()
class TestEndpoint(Endpoint):
enforce_rate_limit = True
def get(self):
raise NotImplementedError
class TestEndpointNoRateLimits(Endpoint):
enforce_rate_limit = False
def get(self):
raise NotImplementedError
_test_endpoint = TestEndpoint.as_view()
_test_endpoint_no_rate_limits = TestEndpointNoRateLimits.as_view()
def _populate_public_integration_request(self, request) -> None:
install = self.create_sentry_app_installation(organization=self.organization)
token = install.api_token
with assume_test_silo_mode_of(User):
request.user = User.objects.get(id=install.sentry_app.proxy_user_id)
request.auth = token
def _populate_internal_integration_request(self, request) -> None:
internal_integration = self.create_internal_integration(
name="my_app",
organization=self.organization,
scopes=("project:read",),
webhook_url="http://example.com",
)
token = self.create_internal_integration_token(
user=self.user,
internal_integration=internal_integration,
)
with assume_test_silo_mode_of(User):
request.user = User.objects.get(id=internal_integration.proxy_user_id)
request.auth = token
@patch("sentry.middleware.ratelimit.get_rate_limit_value", side_effect=Exception)
def test_fails_open(self, default_rate_limit_mock: MagicMock) -> None:
"""Test that if something goes wrong in the rate limit middleware,
the request still goes through"""
request = self.factory.get("/")
with freeze_time("2000-01-01"):
default_rate_limit_mock.return_value = RateLimit(limit=0, window=100)
self.middleware.process_view(request, self._test_endpoint, [], {})
def test_process_response_fails_open(self) -> None:
request = self.factory.get("/")
bad_response = sentinel.response
assert self.middleware.process_response(request, bad_response) is bad_response
class BadRequest(HttpRequest):
def __getattr__(self, attr):
raise Exception("nope")
bad_request = BadRequest()
assert self.middleware.process_response(bad_request, bad_response) is bad_response
@patch("sentry.middleware.ratelimit.get_rate_limit_value")
def test_positive_rate_limit_check(self, default_rate_limit_mock: MagicMock) -> None:
request = self.factory.get("/")
with freeze_time("2000-01-01"):
default_rate_limit_mock.return_value = RateLimit(limit=0, window=100)
self.middleware.process_view(request, self._test_endpoint, [], {})
assert request.will_be_rate_limited
with freeze_time("2000-01-02"):
# 10th request in a 10 request window should get rate limited
default_rate_limit_mock.return_value = RateLimit(limit=10, window=100)
for _ in range(10):
self.middleware.process_view(request, self._test_endpoint, [], {})
assert not request.will_be_rate_limited
self.middleware.process_view(request, self._test_endpoint, [], {})
assert request.will_be_rate_limited
@patch("sentry.middleware.ratelimit.get_rate_limit_value")
def test_positive_rate_limit_response_headers(self, default_rate_limit_mock: MagicMock) -> None:
request = self.factory.get("/")
with (
freeze_time("2000-01-01"),
patch.object(RatelimitMiddlewareTest.TestEndpoint, "enforce_rate_limit", True),
):
default_rate_limit_mock.return_value = RateLimit(limit=0, window=100)
response = self.middleware.process_view(request, self._test_endpoint, [], {})
assert request.will_be_rate_limited
assert response
assert isinstance(response, HttpResponse)
assert orjson.loads(response.content) == {
"detail": "You are attempting to use this endpoint too frequently. Limit is 0 requests in 100 seconds"
}
assert response["Access-Control-Allow-Methods"] == "GET"
assert response["Access-Control-Allow-Origin"] == "*"
assert response["Access-Control-Allow-Headers"]
assert response["Access-Control-Expose-Headers"]
@patch("sentry.middleware.ratelimit.get_rate_limit_value")
@patch("sentry.ratelimits.utils.ratelimiter.is_limited_with_value")
@override_settings(ENFORCE_CONCURRENT_RATE_LIMITS=True)
def test_positive_concurrent_rate_limit_response_headers(
self, is_limited_with_value, default_rate_limit_mock
):
request = self.factory.get("/")
with (
freeze_time("2000-01-01"),
patch.object(RatelimitMiddlewareTest.TestEndpoint, "enforce_rate_limit", True),
patch("sentry.ratelimits.concurrent.rate_limit_info") as rate_limit_info,
):
rate_limit_info.return_value = (1, False, 0)
default_rate_limit_mock.return_value = RateLimit(
limit=0, window=100, concurrent_limit=1
)
is_limited_with_value.return_value = (False, 0, 0)
response = self.middleware.process_view(request, self._test_endpoint, [], {})
assert request.will_be_rate_limited
assert response
assert isinstance(response, HttpResponse)
assert orjson.loads(response.content) == {
"detail": "You are attempting to go above the allowed concurrency for this endpoint. Concurrency limit is 1"
}
assert response["Access-Control-Allow-Methods"] == "GET"
assert response["Access-Control-Allow-Origin"] == "*"
assert response["Access-Control-Allow-Headers"]
assert response["Access-Control-Expose-Headers"]
@patch("sentry.middleware.ratelimit.get_rate_limit_value")
def test_negative_rate_limit_check(self, default_rate_limit_mock: MagicMock) -> None:
request = self.factory.get("/")
default_rate_limit_mock.return_value = RateLimit(limit=10, window=100)
self.middleware.process_view(request, self._test_endpoint, [], {})
assert not request.will_be_rate_limited
# Requests outside the current window should not be rate limited
default_rate_limit_mock.return_value = RateLimit(limit=1, window=1)
with freeze_time("2000-01-01") as frozen_time:
self.middleware.process_view(request, self._test_endpoint, [], {})
assert not request.will_be_rate_limited
frozen_time.shift(1)
self.middleware.process_view(request, self._test_endpoint, [], {})
assert not request.will_be_rate_limited
@patch("sentry.middleware.ratelimit.get_rate_limit_value")
@override_settings(SENTRY_SELF_HOSTED=True)
def test_self_hosted_rate_limit_check(self, default_rate_limit_mock: MagicMock) -> None:
"""Check that for self hosted installs we don't rate limit"""
request = self.factory.get("/")
default_rate_limit_mock.return_value = RateLimit(limit=10, window=100)
self.middleware.process_view(request, self._test_endpoint, [], {})
assert not request.will_be_rate_limited
default_rate_limit_mock.return_value = RateLimit(limit=1, window=1)
with freeze_time("2000-01-01") as frozen_time:
self.middleware.process_view(request, self._test_endpoint, [], {})
assert not request.will_be_rate_limited
frozen_time.shift(1)
self.middleware.process_view(request, self._test_endpoint, [], {})
assert not request.will_be_rate_limited
def test_rate_limit_category(self) -> None:
request = self.factory.get("/")
request.META["REMOTE_ADDR"] = None
self.middleware.process_view(request, self._test_endpoint, [], {})
assert request.rate_limit_category is None
request = self.factory.get("/")
self.middleware.process_view(request, self._test_endpoint, [], {})
assert request.rate_limit_category == RateLimitCategory.IP
request.session = {}
request.user = self.user
self.middleware.process_view(request, self._test_endpoint, [], {})
assert request.rate_limit_category == RateLimitCategory.USER
self._populate_public_integration_request(request)
self.middleware.process_view(request, self._test_endpoint, [], {})
assert request.rate_limit_category == RateLimitCategory.ORGANIZATION
self._populate_internal_integration_request(request)
self.middleware.process_view(request, self._test_endpoint, [], {})
assert request.rate_limit_category == RateLimitCategory.ORGANIZATION
def test_enforce_rate_limit_is_false(self) -> None:
request = self.factory.get("/")
self.middleware.process_view(request, self._test_endpoint_no_rate_limits, [], {})
assert request.will_be_rate_limited is False
assert request.rate_limit_category is None
assert hasattr(request, "rate_limit_key") is False
assert hasattr(request, "rate_limit_metadata") is False
@override_settings(SENTRY_SELF_HOSTED=False)
|
RatelimitMiddlewareTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py
|
{
"start": 10031,
"end": 35543
}
|
class ____(base_delegate.DelegatingTrackableMixin,
optimizer_v2.OptimizerV2):
"""An optimizer that applies loss scaling to prevent numeric underflow.
Loss scaling is a technique to prevent numeric underflow in intermediate
gradients when float16 is used. To prevent underflow, the loss is multiplied
(or "scaled") by a certain factor called the "loss scale", which causes
intermediate gradients to be scaled by the loss scale as well. The final
gradients are divided (or "unscaled") by the loss scale to bring them back to
their original value.
`LossScaleOptimizer` wraps another optimizer and applies loss scaling to it.
By default, the loss scale is dynamically updated over time so you do not have
to choose the loss scale. The `minimize` method automatically scales the loss,
unscales the gradients, and updates the loss scale so all you have to do is
wrap your optimizer with a `LossScaleOptimizer` if you use `minimize`. For
example:
>>> opt = tf.keras.optimizers.SGD(0.25)
>>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
>>> var = tf.Variable(1.)
>>> loss_fn = lambda: var ** 2
>>> # 'minimize' applies loss scaling and updates the loss sale.
>>> opt.minimize(loss_fn, var_list=var)
>>> var.numpy()
0.5
If a `tf.GradientTape` is used to compute gradients instead of `minimize`, you
must scale the loss and gradients manually. This can be done with the
`LossScaleOptimizer.get_scaled_loss` and
`LossScaleOptimizer.get_unscaled_gradients` methods. For example:
>>> with tf.GradientTape() as tape:
... loss = loss_fn()
... scaled_loss = opt.get_scaled_loss(loss)
>>> scaled_grad = tape.gradient(scaled_loss, var)
>>> (grad,) = opt.get_unscaled_gradients([scaled_grad])
>>> opt.apply_gradients([(grad, var)]) # Loss scale is updated here
>>> var.numpy()
0.25
Warning: If you forget to call `get_scaled_loss` or `get_unscaled_gradients`
(or both) when using a `tf.GradientTape`, the model will likely converge to a
worse quality. Please make sure you call each function exactly once.
When mixed precision with float16 is used, there is typically no risk of
underflow affecting model quality if loss scaling is properly used. See
[the mixed precision guide](
https://www.tensorflow.org/guide/keras/mixed_precision) for more information
on how to use mixed precision.
Args:
inner_optimizer: The `tf.keras.optimizers.Optimizer` instance to wrap.
dynamic: Bool indicating whether dynamic loss scaling is used. Defaults to
True. If True, the loss scale will be dynamically updated over time using
an algorithm that keeps the loss scale at approximately its optimal value.
If False, a single fixed loss scale is used and `initial_scale` must be
specified, which is used as the loss scale. Recommended to keep as True,
as choosing a fixed loss scale can be tricky. Currently, there is a small
performance overhead to dynamic loss scaling compared to fixed loss
scaling.
initial_scale: The initial loss scale. If `dynamic` is True, this defaults
to `2 ** 15`. If `dynamic` is False, this must be specified and acts as
the sole loss scale, as the loss scale does not change over time. When
dynamic loss scaling is used, is better for this to be a very high number,
because a loss scale that is too high gets lowered far more quickly than a
loss scale that is too low gets raised.
dynamic_growth_steps: With dynamic loss scaling, every
`dynamic_growth_steps` steps with finite gradients, the loss scale is
doubled. Defaults to 2000. If a nonfinite gradient is encountered, the
count is reset back to zero, gradients are skipped that step, and the loss
scale is halved. The count can be queried with
`LossScaleOptimizer.dynamic_counter`. This argument can only be specified
if `dynamic` is True.
`LossScaleOptimizer` will occasionally skip applying gradients to the
variables, in which case the trainable variables will not change that step.
This is done because the dynamic loss scale will sometimes be raised too
high, causing overflow in the gradients. Typically, the first 2 to 15 steps of
the model are skipped as the initial loss scale is very high, but afterwards
steps will only be skipped on average 0.05% of the time (the fraction of steps
skipped is `1 / dynamic_growth_steps`).
`LossScaleOptimizer` delegates all public `Optimizer` methods to the inner
optimizer. Additionally, in methods `minimize` and `get_gradients`, it scales
the loss and unscales the gradients. In methods `minimize` and
`apply_gradients`, it additionally updates the loss scale and skips applying
gradients if any gradient has a nonfinite value.
### Hyperparameters
Hyperparameters can be accessed and set on the LossScaleOptimizer, which will
be delegated to the wrapped optimizer.
>>> opt = tf.keras.optimizers.Adam(beta_1=0.8, epsilon=1e-5)
>>> opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
>>> opt.beta_1 # Equivalent to `opt.inner_optimizer.beta_1`
0.8
>>> opt.beta_1 = 0.7 # Equivalent to `opt.inner_optimizer.beta_1 = 0.7`
>>> opt.beta_1
0.7
>>> opt.inner_optimizer.beta_1
0.7
However, accessing or setting non-hyperparameters is not delegated to the
LossScaleOptimizer. In an Adam optimizer, `beta_1` is a hyperparameter but
`epsilon` is not, as the Adam optimizer only calls `Optimizer._set_hyper` on
`beta_1`.
>>> opt.inner_optimizer.epsilon
1e-5
>>> opt.epsilon
Traceback (most recent call last):
...
AttributeError: 'LossScaleOptimizer' object has no attribute 'epsilon'
>>> opt.epsilon = 1e-4 # This does NOT set epsilon on `opt.inner_optimizer`
>>> opt.inner_optimizer.epsilon
>>> 1e-5
In the above example, despite epsilon being set on the LossScaleOptimizer, the
old epsilon value will still be used when training as epsilon was not set on
the inner optimizer.
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self, inner_optimizer, dynamic=True, initial_scale=None,
dynamic_growth_steps=None):
if not isinstance(inner_optimizer, optimizer_v2.OptimizerV2):
raise TypeError('"inner_optimizer" must be an instance of OptimizerV2, '
'but got: %s' % inner_optimizer)
if not isinstance(dynamic, bool):
# Catch errors if a user incorrectly passes a string or float to the
# second argument argument, as this is commonly done for
# LossScaleOptimizerV1.
raise TypeError('"dynamic" argument to LossScaleOptimizer.__init__ must '
'be a bool, but got: %r' % (dynamic,))
if isinstance(inner_optimizer, LossScaleOptimizer):
raise TypeError('LossScaleOptimizer cannot wrap another '
'LossScaleOptimizer, but got: %s' % (inner_optimizer,))
self._raise_if_strategy_unsupported()
if getattr(inner_optimizer, '_is_wrapped_by_loss_scale_optimizer', False):
# TODO(reedwm): Maybe support this. The difficulty is that LSO has the
# same checkpoint format as the inner optimizer, so multiple LSOs wrapping
# the same optimizer causes the checkpointing logic to become confused.
raise ValueError('"inner_optimizer" is already wrapped by a '
'LossScaleOptimizer. An optimizer can only be wrapped '
'by a single LossScaleOptimizer')
self._optimizer = inner_optimizer
self._optimizer._is_wrapped_by_loss_scale_optimizer = True
# We don't call super().__init__, since we do not want to call OptimizerV2's
# constructor.
base_delegate.DelegatingTrackableMixin.__init__(self, self._optimizer)
if dynamic:
if initial_scale is None:
initial_scale = _DEFAULT_INITIAL_SCALE
if dynamic_growth_steps is None:
dynamic_growth_steps = _DEFAULT_GROWTH_STEPS
self._loss_scale = _DynamicLossScaleState(
initial_scale, dynamic_growth_steps, multiplier=2)
self._track_trackable(self._loss_scale, 'loss_scale')
else:
if initial_scale is None:
raise ValueError('"initial_scale" must be specified if "dynamic" is '
'False')
self._loss_scale = float(initial_scale)
if dynamic_growth_steps is not None:
raise ValueError('"dynamic_growth_steps" must be None if "dynamic" '
'is False, but got: %s' % (dynamic_growth_steps,))
# To support restoring TensorFlow 2.2 checkpoints.
self._track_trackable(FakeOptimizerForRestoration(self._optimizer),
'base_optimizer')
@property
def dynamic(self):
"""Bool indicating whether dynamic loss scaling is used."""
return isinstance(self._loss_scale, _DynamicLossScaleState)
@property
def loss_scale(self):
"""The current loss scale as a float32 scalar tensor."""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return tensor_conversion.convert_to_tensor_v2_with_dispatch(
self._loss_scale.current_loss_scale
)
else:
return tensor_conversion.convert_to_tensor_v2_with_dispatch(
self._loss_scale
)
@property
def dynamic_counter(self):
"""The number of steps since the loss scale was last increased or decreased.
This is None if `LossScaleOptimizer.dynamic` is False.
The counter is incremented every step. Once it reaches
`LossScaleOptimizer.dynamic_growth_steps`, the loss scale will be doubled
and the counter will be reset back to zero. If nonfinite gradients are
encountered, the loss scale will be halved and the counter will be reset
back to zero.
"""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.counter
else:
return None
@property
def initial_scale(self):
"""The initial loss scale.
If `LossScaleOptimizer.dynamic` is False, this is the same number as
`LossScaleOptimizer.loss_scale`, as the loss scale never changes.
"""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.initial_loss_scale
else:
return self._loss_scale
@property
def dynamic_growth_steps(self):
"""The number of steps it takes to increase the loss scale.
This is None if `LossScaleOptimizer.dynamic` is False.
Every `dynamic_growth_steps` consecutive steps with finite gradients, the
loss scale is increased.
"""
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.growth_steps
else:
return None
@property
def inner_optimizer(self):
"""The optimizer that this LossScaleOptimizer is wrapping."""
return self._optimizer
def get_scaled_loss(self, loss):
"""Scales the loss by the loss scale.
This method is only needed if you compute gradients manually, e.g. with
`tf.GradientTape`. In that case, call this method to scale the loss before
passing the loss to `tf.GradientTape`. If you use
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
scaling is automatically applied and this method is unneeded.
If this method is called, `get_unscaled_gradients` should also be called.
See the `tf.keras.mixed_precision.LossScaleOptimizer` doc for
an example.
Args:
loss: The loss, which will be multiplied by the loss scale. Can either be
a tensor or a callable returning a tensor.
Returns:
`loss` multiplied by `LossScaleOptimizer.loss_scale`.
"""
if callable(loss):
def new_loss():
loss_val = loss()
return loss_val * math_ops.cast(self.loss_scale, loss_val.dtype)
return new_loss
else:
return loss * math_ops.cast(self.loss_scale, loss.dtype)
def get_unscaled_gradients(self, grads):
"""Unscales the gradients by the loss scale.
This method is only needed if you compute gradients manually, e.g. with
`tf.GradientTape`. In that case, call this method to unscale the gradients
after computing them with `tf.GradientTape`. If you use
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
scaling is automatically applied and this method is unneeded.
If this method is called, `get_scaled_loss` should also be called. See
the `tf.keras.mixed_precision.LossScaleOptimizer` doc for an
example.
Args:
grads: A list of tensors, each which will be divided by the loss scale.
Can have None values, which are ignored.
Returns:
A new list the same size as `grads`, where every non-None value in `grads`
is divided by `LossScaleOptimizer.loss_scale`.
"""
loss_scale_reciprocal = 1. / self.loss_scale
return [
_multiply_gradient(g, loss_scale_reciprocal) if g is not None else None
for g in grads
]
def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):
tape = backprop.GradientTape() if tape is None else tape
with tape:
loss = self.get_scaled_loss(loss)
grads_and_vars = self._optimizer._compute_gradients( # pylint: disable=protected-access
loss,
var_list,
grad_loss,
tape=tape)
grads = [g for g, _ in grads_and_vars]
weights = [v for _, v in grads_and_vars]
unscaled_grads = self.get_unscaled_gradients(grads)
return list(zip(unscaled_grads, weights))
def get_gradients(self, loss, params):
loss = self.get_scaled_loss(loss)
grads = self._optimizer.get_gradients(loss, params)
return self.get_unscaled_gradients(grads)
def _create_all_weights(self, var_list):
self._optimizer._create_all_weights(var_list) # pylint: disable=protected-access
def apply_gradients(self,
grads_and_vars,
name=None,
experimental_aggregate_gradients=True):
if distribute_lib.in_cross_replica_context():
raise ValueError('apply_gradients() must be called in a replica context.')
# We check for the strategy here despite already checking in the constructor
# as frequently the optimizer is created outside the strategy's scope.
self._raise_if_strategy_unsupported()
grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
if experimental_aggregate_gradients:
# We must aggregate the gradients here instead of in
# self.optimizer.apply_gradients, so that any NaN or Inf gradients are
# propagated to each replica. If any replica has a NaN or Inf gradient,
# they must all have a NaN or Inf gradient so that they all skip the step.
# pylint: disable=protected-access
grads_and_vars = self._optimizer._transform_unaggregated_gradients(
grads_and_vars)
grads_and_vars = self._optimizer._aggregate_gradients(grads_and_vars)
# pylint: enable=protected-access
grads_and_vars = tuple(grads_and_vars)
grads = [g for g, _ in grads_and_vars]
# We do not want DistributionStrategy to unwrap any MirroredVariables in
# grads_and_vars, because even in a replica context, the wrapped
# optimizer expects mirrored variables. So we wrap the variables with an
# _UnwrapPreventer, preventing DistributionStrategy from unwrapping the
# MirroredVariables.
wrapped_vars = _UnwrapPreventer([v for _, v in grads_and_vars])
def do_not_apply_fn():
# Normally self._optimizer.iterations is incremented in
# self._optimizer.apply_gradients(). Since that is not called in this
# branch, we increment it here instead.
return self._optimizer.iterations.assign_add(1, read_value=False)
def _if_should_apply_grads(grads):
if isinstance(self._loss_scale, _DynamicLossScaleState):
return self._loss_scale.update(grads)
else:
return (control_flow_ops.no_op(), True)
if optimizer_utils.strategy_supports_no_merge_call():
loss_scale_update_op, should_apply_grads = _if_should_apply_grads(grads)
def apply_fn():
return self._apply_gradients(grads, wrapped_vars, name)
maybe_apply_op = smart_cond.smart_cond(should_apply_grads, apply_fn,
do_not_apply_fn)
return control_flow_ops.group(maybe_apply_op, loss_scale_update_op)
else:
def _apply_gradients_cross_replica(distribution, grads, wrapped_vars,
name):
loss_scale_update_op, should_apply_grads = _if_should_apply_grads(grads)
def apply_fn():
return distribution.extended.call_for_each_replica(
self._apply_gradients,
args=(grads, wrapped_vars, name))
# Note: We must call this cond() in a cross-replica context.
# DistributionStrategy does not support having a cond in a replica
# context with a branch that calls `merge_call`, and
# self._optimizer.apply_gradients calls `merge_call`.
maybe_apply_op = smart_cond.smart_cond(should_apply_grads, apply_fn,
do_not_apply_fn)
return control_flow_ops.group(maybe_apply_op, loss_scale_update_op)
return distribute_lib.get_replica_context().merge_call(
_apply_gradients_cross_replica,
args=(grads, wrapped_vars, name))
def _apply_gradients(self, grads, wrapped_vars, name):
# Pass experimental_aggregate_gradients=False since LossScaleOptimizer
# already aggregated the gradients.
# TODO(reedwm): This will raise a fairly cryptic error message if
# self._optimizer.apply_gradients does not take
# experimental_aggregate_gradients.
return self._optimizer.apply_gradients(
list(zip(grads, wrapped_vars.value)), name,
experimental_aggregate_gradients=False)
def get_config(self):
serialized_optimizer = optimizers.serialize(self._optimizer)
return {
'inner_optimizer': serialized_optimizer,
'dynamic': self.dynamic,
'initial_scale': self.initial_scale,
'dynamic_growth_steps': self.dynamic_growth_steps,
}
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy() # Make a copy, since we mutate config
if 'loss_scale' in config:
# If loss_scale is in config, we assume we are deserializing a
# LossScaleOptimizer from TF 2.3 or below. We convert the config so it
# can be deserialized in the current LossScaleOptimizer.
loss_scale = keras_loss_scale_module.deserialize(
config.pop('loss_scale'))
if isinstance(loss_scale, loss_scale_module.FixedLossScale):
config['dynamic'] = False
config['initial_scale'] = loss_scale._loss_scale_value # pylint: disable=protected-access
elif isinstance(loss_scale, loss_scale_module.DynamicLossScale):
config['dynamic'] = True
config['initial_scale'] = loss_scale.initial_loss_scale
config['dynamic_growth_steps'] = loss_scale.increment_period
if loss_scale.multiplier != 2:
raise ValueError('Cannot deserialize LossScaleOptimizer with a '
'DynamicLossScale whose multiplier is not 2. Got '
'DynamicLossScale: %s' % (loss_scale,))
else:
raise ValueError(
'Serialized LossScaleOptimizers with a LossScale that is neither a '
'FixedLossScale nor a DynamicLossScale can no longer be '
'deserialized')
config['inner_optimizer'] = config.pop('optimizer')
config['inner_optimizer'] = optimizers.deserialize(
config['inner_optimizer'], custom_objects=custom_objects)
return cls(**config)
def _raise_if_strategy_unsupported(self):
if not strategy_supports_loss_scaling():
strategy = distribute_lib.get_strategy()
if isinstance(strategy,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1,
tpu_strategy.TPUStrategyV2)):
raise ValueError(
'Loss scaling is not supported with TPUStrategy. Loss scaling is '
'unnecessary with TPUs, since they support bfloat16 instead of '
'float16 and bfloat16 does not require loss scaling. You should '
'remove the use of the LossScaleOptimizer when TPUs are used.')
else:
raise ValueError('Loss scaling is not supported with the '
'tf.distribute.Strategy: %s. Try using a different '
'Strategy, e.g. a MirroredStrategy' %
strategy.__class__.__name__)
# Delegations: We delegate most OptimizerV2 methods to the wrapped optimizer
# below.
@property
def iterations(self):
return self._optimizer.iterations
@iterations.setter
def iterations(self, variable):
self._optimizer.iterations = variable
def get_slot_names(self):
return self._optimizer.get_slot_names()
def variables(self):
return self._optimizer.variables()
@property
def weights(self):
return self._optimizer.weights
def get_weights(self):
return self._optimizer.get_weights()
def set_weights(self, weights):
return self._optimizer.set_weights(weights)
@property
def clipnorm(self):
return self._optimizer.clipnorm
@clipnorm.setter
def clipnorm(self, val):
self._optimizer.clipnorm = val
@property
def global_clipnorm(self):
return self._optimizer.global_clipnorm
@global_clipnorm.setter
def global_clipnorm(self, val):
self._optimizer.global_clipnorm = val
@property
def clipvalue(self):
return self._optimizer.clipvalue
@clipvalue.setter
def clipvalue(self, val):
self._optimizer.clipvalue = val
def _aggregate_gradients(self, grads_and_vars):
return self._optimizer._aggregate_gradients(grads_and_vars) # pylint: disable=protected-access
def _restore_slot_variable(self, slot_name, variable, slot_variable):
return self._optimizer._restore_slot_variable(slot_name, variable, # pylint: disable=protected-access
slot_variable)
def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,
variable):
return self._optimizer._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position, slot_name, variable)
def get_slot(self, var, slot_name):
return self._optimizer.get_slot(var, slot_name)
def add_slot(self, var, slot_name, initializer='zeros'):
return self._optimizer.add_slot(var, slot_name, initializer)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError as e:
if name == '_optimizer' or name == '_hyper':
# Avoid infinite recursion
raise e
# Delegate hyperparameter accesses to inner optimizer.
if name == 'lr':
name = 'learning_rate'
if name in self._optimizer._hyper:
return self._optimizer._get_hyper(name)
raise e
def __dir__(self):
result = set(super(LossScaleOptimizer, self).__dir__())
if '_optimizer' in result:
result |= self._optimizer._hyper.keys()
if 'learning_rate' in self._optimizer._hyper.keys():
result.add('lr')
return list(result)
def __setattr__(self, name, value):
if name == 'lr':
name = 'learning_rate'
# Delegate setting hyperparameter to inner optimizer if the attribute does
# not exist on the LossScaleOptimizer
try:
# We cannot check for the 'iterations' attribute as it cannot be set after
# it is accessed.
if name != 'iterations':
object.__getattribute__(self, name)
has_attribute = True
except AttributeError:
has_attribute = False
if (name != '_optimizer' and name in self._optimizer._hyper
and not has_attribute):
self._optimizer._set_hyper(name, value)
else:
super(LossScaleOptimizer, self).__setattr__(name, value)
# Explicitly delegate learning_rate. Normally hyperparameters are delegated in
# __getattribute__, but if a hyperparameter is not in self._optimizer._hyper
# (e.g. because self._optimizer itself wraps another optimizer), then it won't
# be delegated. Since learning_rate is a very commonly accessed
# hyperparameter, we delegate it here.
@property
def learning_rate(self):
return self._optimizer.learning_rate
@learning_rate.setter
def learning_rate(self, value):
self._optimizer.learning_rate = value
@property
def lr(self):
return self._optimizer.learning_rate
@lr.setter
def lr(self, value):
self._optimizer.lr = value
# We do not override some OptimizerV2 methods. For each, we describe why we do
# not delegate them to self._optimizer:
# * get_updates: get_updates() calls get_gradients(). Since we override
# get_gradients(), we cannot delegate get_updates() to self._optimizer,
# otherwise the overridden get_gradients() method would not be called.
# Luckily, get_updates() does not access any OptimizerV2 fields, so
# inheriting the OptimizerV2 version works fine.
# * minimize: We don't delegate for a similar as get_updates(): it calls
# both self._compute_gradients() and self.apply_gradients(), and both need
# to have the LossScaleOptimizer version called.
# TODO(reedwm): Maybe throw an error if mixed precision is used without this
# optimizer being used.
|
LossScaleOptimizer
|
python
|
psf__black
|
tests/data/cases/dummy_implementations.py
|
{
"start": 1485,
"end": 1575
}
|
class ____:
def f(self):
...
def f2(self):
print(10)
|
ClassE
|
python
|
gevent__gevent
|
src/gevent/tests/test__hub.py
|
{
"start": 12343,
"end": 13126
}
|
class ____(unittest.TestCase):
def test_implemensts_ILoop(self):
from gevent.testing import verify
from gevent._interfaces import ILoop
loop = get_hub().loop
verify.verifyObject(ILoop, loop)
def test_callback_implements_ICallback(self):
from gevent.testing import verify
from gevent._interfaces import ICallback
loop = get_hub().loop
cb = loop.run_callback(lambda: None)
verify.verifyObject(ICallback, cb)
def test_callback_ts_implements_ICallback(self):
from gevent.testing import verify
from gevent._interfaces import ICallback
loop = get_hub().loop
cb = loop.run_callback_threadsafe(lambda: None)
verify.verifyObject(ICallback, cb)
|
TestLoopInterface
|
python
|
dagster-io__dagster
|
examples/docs_projects/project_dspy/config.py
|
{
"start": 248,
"end": 5375
}
|
class ____(BaseSettings):
"""Project settings with environment variable support."""
model_config = SettingsConfigDict(
env_file=".env", env_file_encoding="utf-8", case_sensitive=False, extra="ignore"
)
# Google Gemini Configuration
gemini_api_key: str = Field(default="", env="GEMINI_API_KEY")
gemini_model: str = Field(default="gemini-2.0-flash-exp", env="GEMINI_MODEL")
gemini_judge_model: str = Field(
default="gemini-2.0-flash-exp", env="GEMINI_JUDGE_MODEL"
)
gemini_max_tokens: int = Field(default=8192, env="GEMINI_MAX_TOKENS")
gemini_temperature: float = Field(default=0.7, env="GEMINI_TEMPERATURE")
# Connections Data Configuration
connections_data_path: str = Field(
default="./data/Connections_Data.csv", env="CONNECTIONS_DATA_PATH"
)
# Dagster Configuration
dagster_home: str = Field(default="/tmp/dagster_home", env="DAGSTER_HOME")
dagster_host: str = Field(default="localhost", env="DAGSTER_HOST")
dagster_port: int = Field(default=3000, env="DAGSTER_PORT")
# DSPy Configuration
dspy_optimizer: Literal[
"MIPROv2", "BootstrapFewShot", "BootstrapFewShotWithRandomSearch"
] = Field(default="MIPROv2", env="DSPY_OPTIMIZER")
# start_dspy_auto_mode
dspy_auto_mode: Literal["light", "medium", "heavy"] = Field(
default="light", env="DSPY_AUTO_MODE"
)
# end_dspy_auto_mode
dspy_min_samples: int = Field(default=10, env="DSPY_MIN_SAMPLES")
# start_dspy_performance_threshold
dspy_performance_threshold: float = Field(
default=0.3, env="DSPY_PERFORMANCE_THRESHOLD"
)
dspy_improvement_threshold: float = Field(
default=0.05, env="DSPY_IMPROVEMENT_THRESHOLD"
)
# end_dspy_performance_threshold
# Connections Puzzle Configuration
train_test_split: float = Field(default=0.25, env="TRAIN_TEST_SPLIT")
# start_eval_subset_size
eval_subset_size: int = Field(default=50, env="EVAL_SUBSET_SIZE")
max_puzzle_attempts: int = Field(default=6, env="MAX_PUZZLE_ATTEMPTS")
# end_eval_subset_size
max_invalid_responses: int = Field(default=3, env="MAX_INVALID_RESPONSES")
# Monitoring & Optimization
# start_enable_optimization
enable_optimization: bool = Field(default=True, env="ENABLE_OPTIMIZATION")
optimization_schedule: str = Field(default="0 2 * * *", env="OPTIMIZATION_SCHEDULE")
# end_enable_optimization
# start_accuracy_alert_threshold
accuracy_alert_threshold: float = Field(
default=0.65, env="ACCURACY_ALERT_THRESHOLD"
)
# end_accuracy_alert_threshold
# Streamlit Configuration
streamlit_port: int = Field(default=8501, env="STREAMLIT_PORT")
streamlit_theme: Literal["light", "dark"] = Field(
default="light", env="STREAMLIT_THEME"
)
# Demo Mode
demo_mode: bool = Field(default=False, env="DEMO_MODE")
use_cached_results: bool = Field(default=True, env="USE_CACHED_RESULTS")
mock_api_calls: bool = Field(default=False, env="MOCK_API_CALLS")
# Paths
data_dir: Path = Field(default=Path("./data"), env="DATA_DIR")
models_dir: Path = Field(default=Path("./models"), env="MODELS_DIR")
logs_dir: Path = Field(default=Path("./logs"), env="LOGS_DIR")
@field_validator("data_dir", "models_dir", "logs_dir", mode="before")
@classmethod
def create_directories(cls, v):
"""Ensure directories exist."""
path = Path(v)
path.mkdir(parents=True, exist_ok=True)
return path
@field_validator("gemini_api_key")
@classmethod
def validate_api_key(cls, v):
"""Validate Gemini API key is provided."""
if not v and not os.getenv("MOCK_API_CALLS", "false").lower() == "true":
raise ValueError(
"GEMINI_API_KEY must be set in environment variables or .env file"
)
return v
@property
def connections_csv_path(self) -> Path:
"""Path to Connections CSV data file."""
return Path(self.connections_data_path)
@property
def model_checkpoint_dir(self) -> Path:
"""Directory for DSPy model checkpoints."""
return self.models_dir / "checkpoints"
# start_get_dspy_lm_config
def get_dspy_lm_config(self) -> dict:
"""Get DSPy language model configuration for Gemini."""
return {
"model": f"gemini/{self.gemini_model}",
"api_key": self.gemini_api_key,
"max_tokens": self.gemini_max_tokens,
"temperature": self.gemini_temperature,
}
# end_get_dspy_lm_config
# start_get_optimizer_config
def get_optimizer_config(self) -> dict:
"""Get DSPy optimizer configuration."""
return {
"optimizer": self.dspy_optimizer,
"auto_mode": self.dspy_auto_mode,
"min_samples": self.dspy_min_samples,
"performance_threshold": self.dspy_performance_threshold,
"improvement_threshold": self.dspy_improvement_threshold,
}
# end_get_optimizer_config
# Create global settings instance
settings = Settings()
|
Settings
|
python
|
spyder-ide__spyder
|
spyder/widgets/collectionseditor.py
|
{
"start": 75533,
"end": 77230
}
|
class ____(CollectionsDelegate):
"""CollectionsEditor Item Delegate"""
def __init__(self, parent=None, namespacebrowser=None):
CollectionsDelegate.__init__(self, parent, namespacebrowser)
def get_value(self, index):
if index.isValid():
source_index = index.model().mapToSource(index)
name = source_index.model().keys[source_index.row()]
return self.parent().get_value(name)
def set_value(self, index, value):
if index.isValid():
source_index = index.model().mapToSource(index)
name = source_index.model().keys[source_index.row()]
self.parent().new_value(name, value)
def make_data_function(
self,
index: QModelIndex
) -> Optional[Callable[[], Any]]:
"""
Construct function which returns current value of data.
The returned function uses the associated console to retrieve the
current value of the variable. This is used to refresh editors created
from that variable.
Parameters
----------
index : QModelIndex
Index of item whose current value is to be returned by the
function constructed here.
Returns
-------
Optional[Callable[[], Any]]
Function which returns the current value of the data, or None if
such a function cannot be constructed.
"""
source_index = index.model().mapToSource(index)
name = source_index.model().keys[source_index.row()]
parent = self.parent()
def get_data():
return parent.get_value(name)
return get_data
|
RemoteCollectionsDelegate
|
python
|
eventlet__eventlet
|
tests/pools_test.py
|
{
"start": 5712,
"end": 6104
}
|
class ____(TestCase):
mode = 'static'
def setUp(self):
self.pool = IntPool(min_size=3, max_size=3)
def test_something(self):
self.assertEqual(len(self.pool.free_items), 3)
# Cover the clause in get where we get from the free list instead of creating
# an item on get
gotten = self.pool.get()
self.assertEqual(gotten, 1)
|
TestIntPool2
|
python
|
great-expectations__great_expectations
|
docs/sphinx_api_docs_source/public_api_report.py
|
{
"start": 3155,
"end": 3886
}
|
class ____:
"""File contents as a string and file path.
Args:
filepath: Absolute path to the file.
contents: String of the file contents.
"""
def __init__(self, filepath: pathlib.Path, contents: str):
self.filepath = filepath
self.contents = contents
@classmethod
def create_from_local_file(cls, filepath: pathlib.Path) -> FileContents:
with open(filepath) as f:
file_contents: str = f.read()
return cls(filepath=filepath, contents=file_contents)
@classmethod
def create_from_local_files(cls, filepaths: Set[pathlib.Path]) -> Set[FileContents]:
return {cls.create_from_local_file(filepath) for filepath in filepaths}
|
FileContents
|
python
|
apache__avro
|
lang/py/avro/schema.py
|
{
"start": 4971,
"end": 5639
}
|
class ____(collections.abc.Hashable):
"""A mixin that defines equality as equal if the json deserializations are equal."""
fingerprint: Callable[..., bytes]
def __eq__(self, that: object) -> bool:
try:
that_obj = json.loads(str(that))
except json.decoder.JSONDecodeError:
return False
return cast(bool, json.loads(str(self)) == that_obj)
def __hash__(self) -> int:
"""Make it so a schema can be in a set or a key in a dictionary.
NB: Python has special rules for this method being defined in the same class as __eq__.
"""
return hash(self.fingerprint())
|
EqualByJsonMixin
|
python
|
huggingface__transformers
|
src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py
|
{
"start": 2992,
"end": 5396
}
|
class ____(nn.Module):
"""Relative positional encoding module."""
def __init__(self, config):
super().__init__()
self.max_len = config.max_source_positions
self.d_model = config.hidden_size
self.pe = None
self.extend_pe(torch.tensor(0.0).expand(1, self.max_len))
def extend_pe(self, x):
# Reset the positional encodings
if self.pe is not None:
# self.pe contains both positive and negative parts
# the length of self.pe is 2 * input_len - 1
if self.pe.size(1) >= x.size(1) * 2 - 1:
if self.pe.dtype != x.dtype or self.pe.device != x.device:
self.pe = self.pe.to(dtype=x.dtype, device=x.device)
return
# Suppose `i` is the position of query vector and `j` is the
# position of key vector. We use positive relative positions when keys
# are to the left (i>j) and negative relative positions otherwise (i<j).
pe_positive = torch.zeros(x.size(1), self.d_model)
pe_negative = torch.zeros(x.size(1), self.d_model)
position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1)
div_term = torch.exp(
torch.arange(0, self.d_model, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.d_model)
)
pe_positive[:, 0::2] = torch.sin(position * div_term)
pe_positive[:, 1::2] = torch.cos(position * div_term)
pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
# Reverse the order of positive indices and concat both positive and
# negative indices. This is used to support the shifting trick
# as in https://huggingface.co/papers/1901.02860
pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
pe_negative = pe_negative[1:].unsqueeze(0)
pe = torch.cat([pe_positive, pe_negative], dim=1)
self.pe = pe.to(device=x.device, dtype=x.dtype)
def forward(self, hidden_states: torch.Tensor):
self.extend_pe(hidden_states)
start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1
end_idx = self.pe.size(1) // 2 + hidden_states.size(1)
relative_position_embeddings = self.pe[:, start_idx:end_idx]
return relative_position_embeddings
|
Wav2Vec2BertRelPositionalEmbedding
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/context.py
|
{
"start": 97253,
"end": 102666
}
|
class ____(_QueryEntity):
"""mapper/class/AliasedClass entity"""
__slots__ = (
"expr",
"mapper",
"entity_zero",
"is_aliased_class",
"path",
"_extra_entities",
"_label_name",
"_with_polymorphic_mappers",
"selectable",
"_polymorphic_discriminator",
)
expr: _InternalEntityType
mapper: Mapper[Any]
entity_zero: _InternalEntityType
is_aliased_class: bool
path: PathRegistry
_label_name: str
def __init__(
self, compile_state, entity, entities_collection, is_current_entities
):
entities_collection.append(self)
if is_current_entities:
if compile_state._primary_entity is None:
compile_state._primary_entity = self
compile_state._has_mapper_entities = True
compile_state._has_orm_entities = True
entity = entity._annotations["parententity"]
entity._post_inspect
ext_info = self.entity_zero = entity
entity = ext_info.entity
self.expr = entity
self.mapper = mapper = ext_info.mapper
self._extra_entities = (self.expr,)
if ext_info.is_aliased_class:
self._label_name = ext_info.name
else:
self._label_name = mapper.class_.__name__
self.is_aliased_class = ext_info.is_aliased_class
self.path = ext_info._path_registry
self.selectable = ext_info.selectable
self._with_polymorphic_mappers = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = ext_info.polymorphic_on
if mapper._should_select_with_poly_adapter:
compile_state._create_with_polymorphic_adapter(
ext_info, self.selectable
)
supports_single_entity = True
_non_hashable_value = True
use_id_for_hash = True
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
return _entity_corresponds_to(self.entity_zero, entity)
def _get_entity_clauses(self, compile_state):
adapter = None
if not self.is_aliased_class:
if compile_state._polymorphic_adapters:
adapter = compile_state._polymorphic_adapters.get(
self.mapper, None
)
else:
adapter = self.entity_zero._adapter
if adapter:
if compile_state._from_obj_alias:
ret = adapter.wrap(compile_state._from_obj_alias)
else:
ret = adapter
else:
ret = compile_state._from_obj_alias
return ret
def row_processor(self, context, result):
compile_state = context.compile_state
adapter = self._get_entity_clauses(compile_state)
if compile_state.compound_eager_adapter and adapter:
adapter = adapter.wrap(compile_state.compound_eager_adapter)
elif not adapter:
adapter = compile_state.compound_eager_adapter
if compile_state._primary_entity is self:
only_load_props = compile_state.compile_options._only_load_props
refresh_state = context.refresh_state
else:
only_load_props = refresh_state = None
_instance = loading._instance_processor(
self,
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=only_load_props,
refresh_state=refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator,
)
return _instance, self._label_name, self._extra_entities
def setup_dml_returning_compile_state(
self,
compile_state: _ORMCompileState,
adapter: Optional[_DMLReturningColFilter],
) -> None:
loading._setup_entity_query(
compile_state,
self.mapper,
self,
self.path,
adapter,
compile_state.primary_columns,
with_polymorphic=self._with_polymorphic_mappers,
only_load_props=compile_state.compile_options._only_load_props,
polymorphic_discriminator=self._polymorphic_discriminator,
)
def setup_compile_state(self, compile_state):
adapter = self._get_entity_clauses(compile_state)
single_table_crit = self.mapper._single_table_criterion
if (
single_table_crit is not None
or ("additional_entity_criteria", self.mapper)
in compile_state.global_attributes
):
ext_info = self.entity_zero
compile_state.extra_criteria_entities[ext_info] = (
ext_info,
ext_info._adapter if ext_info.is_aliased_class else None,
)
loading._setup_entity_query(
compile_state,
self.mapper,
self,
self.path,
adapter,
compile_state.primary_columns,
with_polymorphic=self._with_polymorphic_mappers,
only_load_props=compile_state.compile_options._only_load_props,
polymorphic_discriminator=self._polymorphic_discriminator,
)
compile_state._fallback_from_clauses.append(self.selectable)
|
_MapperEntity
|
python
|
huggingface__transformers
|
src/transformers/models/lfm2/modular_lfm2.py
|
{
"start": 2879,
"end": 8467
}
|
class ____:
    """
    Attention and conv cache for Lfm2.

    It stores the Key and Value states as a list of tensors, one for each layer.
    Attention layer cache shape: `[batch_size, num_heads, seq_len, head_dim]`.
    Conv layer cache shape: `[batch_size, hidden_size, L_cache-1]`.

    NOTE(review): the allocation in ``__init__`` uses ``conv_L_cache`` (not
    ``L_cache - 1``) as the last dimension — the shape stated above may be
    off by one; confirm against the conv layer that consumes this cache.
    """

    # Override @property existing in Cache
    max_batch_size = None
    is_compileable = False
    key_cache = None
    value_cache = None

    def __init__(
        self,
        config: Lfm2Config,
        max_batch_size: int,
        dtype: torch.dtype = torch.float32,
        device: Union[torch.device, str, None] = None,
    ):
        # Per-layer attention caches; an empty tensor marks "no tokens cached
        # yet" for that layer.
        self.key_cache = []
        self.value_cache = []
        self.max_batch_size = max_batch_size
        self.layer_types = config.layer_types
        # Canonical attention layer used for sequence-length queries when the
        # requested layer is a conv layer (which has no KV cache).
        self.first_attention_layer = self.layer_types.index("full_attention")
        self.conv_L_cache = config.conv_L_cache
        self._dtype = dtype

        self.conv_cache: list[torch.Tensor] = []
        device = torch.device(device) if device is not None else None
        for _ in range(config.num_hidden_layers):
            # Pre-allocated, fixed-size buffer for the short-convolution
            # state; updated in place by the model, never grown.
            conv_state = torch.zeros(
                self.max_batch_size,
                config.hidden_size,
                self.conv_L_cache,
                dtype=self._dtype,
                device=device,
            )
            self.conv_cache.append(conv_state)
            self.key_cache.append(torch.tensor([]))
            self.value_cache.append(torch.tensor([]))

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[dict[str, Any]] = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.

        Parameters:
            key_states (`torch.Tensor`):
                The new key states to cache.
            value_states (`torch.Tensor`):
                The new value states to cache.
            layer_idx (`int`):
                The index of the layer to cache the states for.
            cache_kwargs (`Dict[str, Any]`, `optional`):
                Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.

        Return:
            A tuple containing the updated key and value states.
        """
        # Update the cache: first write replaces the empty placeholder,
        # subsequent writes concatenate along the sequence dimension (-2).
        if self.key_cache[layer_idx].numel() == 0:
            self.key_cache[layer_idx] = key_states
            self.value_cache[layer_idx] = value_states
        else:
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)

        return self.key_cache[layer_idx], self.value_cache[layer_idx]

    def reorder_cache(self, beam_idx: torch.LongTensor):
        """Reorders the cache for beam search, given the selected beam indices."""
        # Both the KV caches and the conv state are batch-major, so beam
        # reordering is an index_select on dim 0; empty placeholders are
        # skipped.
        for layer_idx in range(len(self.key_cache)):
            if self.key_cache[layer_idx].numel():
                device = self.key_cache[layer_idx].device
                self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
                device = self.value_cache[layer_idx].device
                self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
            if self.conv_cache[layer_idx].numel():
                device = self.conv_cache[layer_idx].device
                self.conv_cache[layer_idx] = self.conv_cache[layer_idx].index_select(0, beam_idx.to(device))

    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
        """Returns the sequence length of the cached states. A layer index can be optionally passed."""
        # take any layer that contains cache and not empty tensor
        layer_idx = self.first_attention_layer if self.layer_types[layer_idx] != "full_attention" else layer_idx
        if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0:
            return 0
        return self.key_cache[layer_idx].shape[-2]

    def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
        """
        Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for
        the given layer at `layer_idx`.
        The masks are then prepared according to the given lengths (kv_length, kv_offset) and patterns (i.e. sliding_window, chunk_size),
        for each layer.
        """
        # Full attention over everything cached so far: offset is always 0
        # and `layer_idx` does not affect the result here.
        full_mask_kv_offset = 0
        query_length = cache_position.shape[0]
        past_seen_tokens = self.get_seq_length()
        kv_length = query_length + past_seen_tokens
        return kv_length, full_mask_kv_offset

    def crop(self, max_length: int):
        """Crop the cache to the given length"""
        # A negative max_length means "drop that many tokens from the end".
        if max_length < 0:
            max_length = self.get_seq_length() - abs(max_length)
        if self.get_seq_length() <= max_length:
            return
        for idx in range(len(self.key_cache)):
            if self.key_cache[idx].numel():
                self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
                self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]

    def __len__(self) -> int:
        # Number of layers tracked by the cache.
        return len(self.key_cache)

    def reset(self):
        # Only the conv buffers are cleared here; the attention key/value
        # lists are left untouched by this method.
        for layer_idx in range(len(self.conv_cache)):
            # In-place ops prevent breaking the static address
            self.conv_cache[layer_idx].zero_()
|
Lfm2HybridConvCache
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 83997,
"end": 85206
}
|
class ____(Response):
    """
    Response of tasks.add_or_update_model endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    """

    _service = "tasks"
    _action = "add_or_update_model"
    _version = "2.20"
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
        super(AddOrUpdateModelResponse, self).__init__(**kwargs)
        # Route through the property setter so validation applies.
        self.updated = updated

    @schema_property("updated")
    def updated(self) -> Optional[int]:
        return self._property_updated

    @updated.setter
    def updated(self, value: Optional[int]) -> None:
        # None clears the field; otherwise coerce integral floats to int and
        # validate the type before storing.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value
|
AddOrUpdateModelResponse
|
python
|
huggingface__transformers
|
src/transformers/models/esm/modeling_esmfold.py
|
{
"start": 60235,
"end": 66912
}
|
class ____(nn.Module):
    """Iterative structure module in the AlphaFold2 style.

    Refines per-residue rigid frames over ``config.num_blocks`` iterations
    using invariant point attention (IPA), then predicts torsion angles and
    converts them to per-frame transforms and atom14 coordinates.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        # Buffers to be lazily initialized later
        # self.default_frames
        # self.group_idx
        # self.atom_mask
        # self.lit_positions

        self.layer_norm_s = LayerNorm(config.sequence_dim)
        self.layer_norm_z = LayerNorm(config.pairwise_dim)

        self.linear_in = EsmFoldLinear(config.sequence_dim, config.sequence_dim)

        self.ipa = EsmFoldInvariantPointAttention(config)

        self.ipa_dropout = nn.Dropout(config.dropout_rate)
        self.layer_norm_ipa = LayerNorm(config.sequence_dim)

        self.transition = EsmFoldStructureModuleTransition(config)
        self.bb_update = EsmFoldBackboneUpdate(config)
        self.angle_resnet = EsmFoldAngleResnet(config)

    def forward(
        self,
        evoformer_output_dict,
        aatype,
        mask=None,
        _offload_inference=False,
    ):
        """
        Args:
            evoformer_output_dict:
                Dictionary containing:
                    "single":
                        [*, N_res, C_s] single representation
                    "pair":
                        [*, N_res, N_res, C_z] pair representation
            aatype:
                [*, N_res] amino acid indices
            mask:
                Optional [*, N_res] sequence mask

        Returns:
            A dictionary of outputs
        """
        s = evoformer_output_dict["single"]

        if mask is None:
            # [*, N]
            mask = s.new_ones(s.shape[:-1])

        # [*, N, C_s]
        s = self.layer_norm_s(s)

        # [*, N, N, C_z]
        z = self.layer_norm_z(evoformer_output_dict["pair"])

        z_reference_list = None
        if _offload_inference:
            # Move the large pair tensor to CPU to save device memory; IPA
            # receives it via the reference list instead. The refcount assert
            # guards that no other holder keeps the device copy alive.
            assert sys.getrefcount(evoformer_output_dict["pair"]) == 2
            evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu()
            z_reference_list = [z]
            z = None

        # [*, N, C_s]
        s_initial = s
        s = self.linear_in(s)

        # [*, N]
        # Start from identity frames (quaternion format) for every residue.
        rigids = Rigid.identity(
            s.shape[:-1],
            s.dtype,
            s.device,
            self.training,
            fmt="quat",
        )
        outputs = []
        for i in range(self.config.num_blocks):
            # [*, N, C_s]
            s = s + self.ipa(
                s,
                z,
                rigids,
                mask,
                _offload_inference=_offload_inference,
                _z_reference_list=z_reference_list,
            )
            s = self.ipa_dropout(s)
            s = self.layer_norm_ipa(s)
            s = self.transition(s)

            # [*, N]
            rigids = rigids.compose_q_update_vec(self.bb_update(s))

            # To hew as closely as possible to AlphaFold, we convert our
            # quaternion-based transformations to rotation-matrix ones
            # here
            backb_to_global = Rigid(
                Rotation(rot_mats=rigids.get_rots().get_rot_mats(), quats=None),
                rigids.get_trans(),
            )

            backb_to_global = backb_to_global.scale_translation(self.config.trans_scale_factor)

            # [*, N, 7, 2]
            unnormalized_angles, angles = self.angle_resnet(s, s_initial)

            all_frames_to_global = self.torsion_angles_to_frames(backb_to_global, angles, aatype)

            pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype)

            scaled_rigids = rigids.scale_translation(self.config.trans_scale_factor)

            preds = {
                "frames": scaled_rigids.to_tensor_7(),
                "sidechain_frames": all_frames_to_global.to_tensor_4x4(),
                "unnormalized_angles": unnormalized_angles,
                "angles": angles,
                "positions": pred_xyz,
                "states": s,
            }

            outputs.append(preds)

            # Detach rotation gradients between blocks (translation gradients
            # still flow).
            rigids = rigids.stop_rot_gradient()

        del z, z_reference_list

        if _offload_inference:
            # Restore the pair representation to the compute device.
            evoformer_output_dict["pair"] = evoformer_output_dict["pair"].to(s.device)

        # Stack per-block predictions along a new leading dimension.
        outputs = dict_multimap(torch.stack, outputs)
        outputs["single"] = s

        return outputs

    def _init_residue_constants(self, float_dtype, device):
        """Lazily register residue-constant buffers on first use.

        Buffers are non-persistent so they are rebuilt (on the right device)
        rather than serialized with the state dict.
        """
        if not hasattr(self, "default_frames"):
            self.register_buffer(
                "default_frames",
                torch.tensor(
                    residue_constants.restype_rigid_group_default_frame,
                    dtype=float_dtype,
                    device=device,
                    requires_grad=False,
                ),
                persistent=False,
            )
        if not hasattr(self, "group_idx"):
            self.register_buffer(
                "group_idx",
                torch.tensor(
                    residue_constants.restype_atom14_to_rigid_group,
                    device=device,
                    requires_grad=False,
                ),
                persistent=False,
            )
        if not hasattr(self, "atom_mask"):
            self.register_buffer(
                "atom_mask",
                torch.tensor(
                    residue_constants.restype_atom14_mask,
                    dtype=float_dtype,
                    device=device,
                    requires_grad=False,
                ),
                persistent=False,
            )
        if not hasattr(self, "lit_positions"):
            self.register_buffer(
                "lit_positions",
                torch.tensor(
                    residue_constants.restype_atom14_rigid_group_positions,
                    dtype=float_dtype,
                    device=device,
                    requires_grad=False,
                ),
                persistent=False,
            )

    def torsion_angles_to_frames(self, r, alpha, f):
        """Map backbone frames + torsion angles to all rigid-group frames."""
        # Lazily initialize the residue constants on the correct device
        self._init_residue_constants(alpha.dtype, alpha.device)
        # Separated purely to make testing less annoying
        return torsion_angles_to_frames(r, alpha, f, self.default_frames)

    def frames_and_literature_positions_to_atom14_pos(self, r, f):  # [*, N, 8]  # [*, N]
        """Place literature atom positions into global atom14 coordinates."""
        # Lazily initialize the residue constants on the correct device
        self._init_residue_constants(r.get_rots().dtype, r.get_rots().device)
        return frames_and_literature_positions_to_atom14_pos(
            r,
            f,
            self.default_frames,
            self.group_idx,
            self.atom_mask,
            self.lit_positions,
        )
|
EsmFoldStructureModule
|
python
|
celery__celery
|
t/integration/tasks.py
|
{
"start": 12181,
"end": 12241
}
|
class ____(BaseModel):
    """Structured payload carrying two integer fields.

    Presumably the two operands of an "add" integration-test task (the field
    names suggest it) — confirm against the task that consumes this model.
    """

    x: int  # first operand
    y: int  # second operand
|
AddParameterModel
|
python
|
keon__algorithms
|
algorithms/tree/b_tree.py
|
{
"start": 1032,
"end": 9572
}
|
class ____:
    """B-tree of minimum degree ``t_val``.

    Every node holds between ``t_val - 1`` and ``2 * t_val - 1`` keys
    (the root may hold fewer). Supports insert, find, remove, and an
    in-order traversal that prints the keys.
    """

    def __init__(self, t_val=2):
        # Per-node key-count bounds derived from the minimum degree t.
        self.min_numbers_of_keys = t_val - 1
        self.max_number_of_keys = 2 * t_val - 1

        self.root = Node()

    def _split_child(self, parent: Node, child_index: int):
        """Split the full child at ``child_index``: the median key moves up
        into ``parent`` and the upper half moves into a new right sibling."""
        new_right_child = Node()
        half_max = self.max_number_of_keys // 2
        child = parent.children[child_index]
        middle_key = child.keys[half_max]
        new_right_child.keys = child.keys[half_max + 1:]
        child.keys = child.keys[:half_max]
        # child is left child of parent after splitting

        if not child.is_leaf:
            new_right_child.children = child.children[half_max + 1:]
            child.children = child.children[:half_max + 1]

        parent.keys.insert(child_index, middle_key)
        parent.children.insert(child_index + 1, new_right_child)

    def insert_key(self, key):
        """ overflow, tree increases in height """
        # If the root is full, grow the tree by one level before descending.
        if len(self.root.keys) >= self.max_number_of_keys:
            new_root = Node()
            new_root.children.append(self.root)
            self.root = new_root
            self._split_child(new_root, 0)
            self._insert_to_nonfull_node(self.root, key)
        else:
            self._insert_to_nonfull_node(self.root, key)

    def _insert_to_nonfull_node(self, node: Node, key):
        """Insert ``key`` into the subtree rooted at a non-full ``node``."""
        i = len(node.keys) - 1
        while i >= 0 and node.keys[i] >= key:  # find position where insert key
            i -= 1

        if node.is_leaf:
            node.keys.insert(i + 1, key)
        else:
            # overflow
            if len(node.children[i + 1].keys) >= self.max_number_of_keys:
                self._split_child(node, i + 1)
                # decide which child is going to have a new key
                if node.keys[i + 1] < key:
                    i += 1

            self._insert_to_nonfull_node(node.children[i + 1], key)

    def find(self, key) -> bool:
        """ Finds key """
        # Iterative descent: at each node scan right-to-left for the
        # position, then either match, fail at a leaf, or descend.
        current_node = self.root
        while True:
            i = len(current_node.keys) - 1
            while i >= 0 and current_node.keys[i] > key:
                i -= 1
            if i >= 0 and current_node.keys[i] == key:
                return True
            if current_node.is_leaf:
                return False
            current_node = current_node.children[i + 1]

    def remove_key(self, key):
        """Remove ``key`` from the tree (prints a message if absent)."""
        self._remove_key(self.root, key)

    def _remove_key(self, node: Node, key) -> bool:
        """Remove ``key`` from the subtree rooted at ``node``.

        Returns True on success, False when the key is not present.
        """
        try:
            key_index = node.keys.index(key)
            if node.is_leaf:
                node.keys.remove(key)
            else:
                self._remove_from_nonleaf_node(node, key_index)
            return True

        except ValueError:  # key not found in node
            if node.is_leaf:
                print("Key not found.")
                return False  # key not found
            else:
                i = 0
                number_of_keys = len(node.keys)
                # decide in which subtree may be key
                while i < number_of_keys and key > node.keys[i]:
                    i += 1

                # Top up the target child first; if the repair changed the
                # structure, restart from this node so indices stay valid.
                action_performed = self._repair_tree(node, i)
                if action_performed:
                    return self._remove_key(node, key)
                else:
                    return self._remove_key(node.children[i], key)

    def _repair_tree(self, node: Node, child_index: int) -> bool:
        """Ensure the child at ``child_index`` has enough keys to lose one.

        Borrows from a sibling via rotation when possible, otherwise merges
        with a sibling. Returns True if the tree was modified.
        """
        child = node.children[child_index]

        # The leaf/node is correct
        if self.min_numbers_of_keys < len(child.keys) <= self.max_number_of_keys:
            return False

        if child_index > 0 and len(node.children[child_index - 1].keys) > self.min_numbers_of_keys:
            self._rotate_right(node, child_index)
            return True

        if (child_index < len(node.children) - 1
                and len(node.children[child_index + 1].keys) > self.min_numbers_of_keys):  # 0 <-- 1
            self._rotate_left(node, child_index)
            return True

        if child_index > 0:
            # merge child with brother on the left
            self._merge(node, child_index - 1, child_index)
        else:
            # merge child with brother on the right
            self._merge(node, child_index, child_index + 1)

        return True

    def _rotate_left(self, parent_node: Node, child_index: int):
        """
        Take key from right brother of the child and transfer to the child
        """
        new_child_key = parent_node.keys[child_index]
        new_parent_key = parent_node.children[child_index + 1].keys.pop(0)
        parent_node.children[child_index].keys.append(new_child_key)
        parent_node.keys[child_index] = new_parent_key

        if not parent_node.children[child_index + 1].is_leaf:
            ownerless_child = parent_node.children[child_index
                                                   + 1].children.pop(0)
            # make ownerless_child as a new biggest child (with highest key)
            # -> transfer from right subtree to left subtree
            parent_node.children[child_index].children.append(ownerless_child)

    def _rotate_right(self, parent_node: Node, child_index: int):
        """
        Take key from left brother of the child and transfer to the child
        """
        parent_key = parent_node.keys[child_index - 1]
        new_parent_key = parent_node.children[child_index - 1].keys.pop()
        parent_node.children[child_index].keys.insert(0, parent_key)
        parent_node.keys[child_index - 1] = new_parent_key

        if not parent_node.children[child_index - 1].is_leaf:
            ownerless_child = parent_node.children[child_index
                                                   - 1].children.pop()
            # make ownerless_child as a new lowest child (with lowest key)
            # -> transfer from left subtree to right subtree
            parent_node.children[child_index].children.insert(
                0, ownerless_child)

    def _merge(self, parent_node: Node, to_merge_index: int, transfered_child_index: int):
        """Merge two adjacent children and the separating parent key into one
        node; may make the merged node the new root."""
        from_merge_node = parent_node.children.pop(transfered_child_index)
        parent_key_to_merge = parent_node.keys.pop(to_merge_index)
        to_merge_node = parent_node.children[to_merge_index]
        to_merge_node.keys.append(parent_key_to_merge)
        to_merge_node.keys.extend(from_merge_node.keys)

        if not to_merge_node.is_leaf:
            to_merge_node.children.extend(from_merge_node.children)

        if parent_node == self.root and not parent_node.keys:
            self.root = to_merge_node

    def _remove_from_nonleaf_node(self, node: Node, key_index: int):
        """Remove the key at ``key_index`` from an internal node by replacing
        it with its in-order predecessor or successor, or by merging."""
        key = node.keys[key_index]
        left_subtree = node.children[key_index]
        if len(left_subtree.keys) > self.min_numbers_of_keys:
            # Replace with the predecessor (largest key of the left subtree).
            largest_key = self._find_largest_and_delete_in_left_subtree(
                left_subtree)
        elif len(node.children[key_index + 1].keys) > self.min_numbers_of_keys:
            # Replace with the successor (smallest key of the right subtree).
            largest_key = self._find_largest_and_delete_in_right_subtree(
                node.children[key_index + 1])
        else:
            # Neither side can spare a key: merge and retry the removal.
            self._merge(node, key_index, key_index + 1)
            return self._remove_key(node, key)

        node.keys[key_index] = largest_key

    def _find_largest_and_delete_in_left_subtree(self, node: Node):
        """Remove and return the largest key of this subtree (the in-order
        predecessor), repairing nodes on the way down."""
        if node.is_leaf:
            return node.keys.pop()
        else:
            ch_index = len(node.children) - 1
            self._repair_tree(node, ch_index)
            largest_key_in_subtree = self._find_largest_and_delete_in_left_subtree(
                node.children[len(node.children) - 1])
            # self._repair_tree(node, ch_index)
            return largest_key_in_subtree

    def _find_largest_and_delete_in_right_subtree(self, node: Node):
        """Remove and return the smallest key of this subtree.

        Note: despite the name, ``pop(0)`` removes the *smallest* key — this
        is the in-order successor of the key being replaced.
        """
        if node.is_leaf:
            return node.keys.pop(0)
        else:
            ch_index = 0
            self._repair_tree(node, ch_index)
            largest_key_in_subtree = self._find_largest_and_delete_in_right_subtree(
                node.children[0])
            # self._repair_tree(node, ch_index)
            return largest_key_in_subtree

    def traverse_tree(self):
        """Print all keys in sorted (in-order) order."""
        self._traverse_tree(self.root)
        print()

    def _traverse_tree(self, node: Node):
        # In-order walk: child_i, key_i, child_i+1, ...
        if node.is_leaf:
            print(node.keys, end=" ")
        else:
            for i, key in enumerate(node.keys):
                self._traverse_tree(node.children[i])
                print(key, end=" ")
            self._traverse_tree(node.children[-1])
|
BTree
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/custom_job.py
|
{
"start": 9856,
"end": 36914
}
|
class ____(CustomTrainingJobBaseOperator):
"""
Create Custom Container Training job.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param display_name: Required. The user-defined name of this TrainingPipeline.
:param command: The command to be invoked when the container is started.
It overrides the entrypoint instruction in Dockerfile when provided
:param container_uri: Required: Uri of the training container image in the GCR.
:param model_serving_container_image_uri: If the training produces a managed Vertex AI Model, the URI
of the Model serving container suitable for serving the model produced by the
training script.
:param model_serving_container_predict_route: If the training produces a managed Vertex AI Model, An
HTTP path to send prediction requests to the container, and which must be supported
by it. If not specified a default HTTP path will be used by Vertex AI.
:param model_serving_container_health_route: If the training produces a managed Vertex AI Model, an
HTTP path to send health check requests to the container, and which must be supported
by it. If not specified a standard HTTP path will be used by AI Platform.
:param model_serving_container_command: The command with which the container is run. Not executed
within a shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
with a double $$, ie: $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
:param model_serving_container_args: The arguments to the command. The Docker image's CMD is used if
this is not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
escaped with a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
:param model_serving_container_environment_variables: The environment variables that are to be
present in the container. Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
:param model_serving_container_ports: Declaration of ports that are exposed by the container. This
field is primarily informational, it gives Vertex AI information about the
network connections the container uses. Listing or not a port here has
no impact on whether the port is actually exposed, any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
:param model_description: The description of the Model.
:param model_instance_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param model_parameters_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has a read access.
:param model_prediction_schema_uri: Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has a read access.
:param parent_model: Optional. The resource name or model ID of an existing model.
The new model uploaded by this job will be a version of `parent_model`.
Only set this field when training a new version of an existing model.
:param is_default_version: Optional. When set to True, the newly uploaded model version will
automatically have alias "default" included. Subsequent uses of
the model produced by this job without a version specified will
use this "default" version.
When set to False, the "default" alias will not be moved.
Actions targeting the model version produced by this job will need
to specifically reference this version by ID or alias.
New model uploads, i.e. version 1, will always be "default" aliased.
:param model_version_aliases: Optional. User provided version aliases so that the model version
uploaded by this job can be referenced via alias instead of
auto-generated version ID. A default version alias will be created
for the first version of the model.
The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]
:param model_version_description: Optional. The description of the model version
being uploaded by this job.
:param project_id: Project to run training in.
:param region: Location to run training in.
:param labels: Optional. The labels with user-defined metadata to
organize TrainingPipelines.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param training_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the training pipeline. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this TrainingPipeline will be secured by this key.
Note: Model trained by this TrainingPipeline is also secured
by this key if ``model_to_upload`` is not set separately.
:param model_encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, the trained Model will be secured by this key.
:param staging_bucket: Bucket used to stage source and training artifacts.
:param dataset: Vertex AI to fit this training against.
:param annotation_schema_uri: Google Cloud Storage URI points to a YAML file describing
annotation schema. The schema is defined as an OpenAPI 3.0.2
[Schema Object]
(https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object)
Only Annotations that both match this schema and belong to
DataItems not ignored by the split method are used in
respectively training, validation or test role, depending on
the role of the DataItem they are on.
When used in conjunction with
``annotations_filter``,
the Annotations used for training are filtered by both
``annotations_filter``
and
``annotation_schema_uri``.
:param model_display_name: If the script produces a managed Vertex AI Model. The display name of
the Model. The name can be up to 128 characters long and can be consist
of any UTF-8 characters.
If not provided upon creation, the job's display_name is used.
:param model_labels: Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters, can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
:param base_output_dir: GCS output directory of job. If not provided a timestamped directory in the
staging directory will be used.
Vertex AI sets the following environment variables when it runs your training code:
- AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts,
i.e. <base_output_dir>/model/
- AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints,
i.e. <base_output_dir>/checkpoints/
- AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard
logs, i.e. <base_output_dir>/logs/
:param service_account: Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
:param network: The full name of the Compute Engine network to which the job
should be peered.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
:param bigquery_destination: Provide this field if `dataset` is a BiqQuery dataset.
The BigQuery project location where the training data is to
be written to. In the given project a new dataset is created
with name
``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
training input data will be written into that dataset. In
the dataset three tables will be created, ``training``,
``validation`` and ``test``.
- AIP_DATA_FORMAT = "bigquery".
- AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training"
- AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation"
- AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
:param args: Command line arguments to be passed to the Python script.
:param environment_variables: Environment variables to be passed to the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
At most 10 environment variables can be specified.
The Name of the environment variable must be unique.
:param replica_count: The number of worker replicas. If replica count = 1 then one chief
replica will be provisioned. If replica_count > 1 the remainder will be
provisioned as a worker replica pool.
:param machine_type: The type of machine to use for training.
:param accelerator_type: Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
:param accelerator_count: The number of accelerators to attach to a worker replica.
:param boot_disk_type: Type of the boot disk, default is `pd-ssd`.
Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or
`pd-standard` (Persistent Disk Hard Disk Drive).
:param boot_disk_size_gb: Size in GB of the boot disk, default is 100GB.
boot disk size must be within the range of [100, 64000].
:param training_fraction_split: Optional. The fraction of the input data that is to be used to train
the Model. This is ignored if Dataset is not provided.
:param validation_fraction_split: Optional. The fraction of the input data that is to be used to
validate the Model. This is ignored if Dataset is not provided.
:param test_fraction_split: Optional. The fraction of the input data that is to be used to evaluate
the Model. This is ignored if Dataset is not provided.
:param training_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to train the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param validation_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to validate the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param test_filter_split: Optional. A filter on DataItems of the Dataset. DataItems that match
this filter are used to test the Model. A filter with same syntax
as the one used in DatasetService.ListDataItems may be used. If a
single DataItem is matched by more than one of the FilterSplit filters,
then it is assigned to the first set that applies to it in the training,
validation, test order. This is ignored if Dataset is not provided.
:param predefined_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key (either the label's value or
value in the column) must be one of {``training``,
``validation``, ``test``}, and it defines to which set the
given piece of data is assigned. If for a piece of data the
key is not present or has an invalid value, that piece is
ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param timestamp_split_column_name: Optional. The key is a name of one of the Dataset's data
columns. The value of the key values of the key (the values in
the column) must be in RFC 3339 `date-time` format, where
`time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a
piece of data the key is not present or has an invalid value,
that piece is ignored by the pipeline.
Supported only for tabular and time series Datasets.
:param tensorboard: Optional. The name of a Vertex AI resource to which this CustomJob will upload
logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
For more information on configuring your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param psc_interface_config: Optional. Configuration for Private Service Connect interface used for
training.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: If True, run the task in the deferrable mode.
:param poll_interval: Time (seconds) to wait between two consecutive calls to check the job.
The default is 60 seconds.
"""
template_fields = (
"region",
"command",
"parent_model",
"dataset_id",
"impersonation_chain",
"display_name",
"model_display_name",
)
operator_extra_links = (
VertexAIModelLink(),
VertexAITrainingLink(),
)
def __init__(
self,
*,
command: Sequence[str] = [],
region: str,
display_name: str,
model_display_name: str | None = None,
parent_model: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
dataset_id: str | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 60,
**kwargs,
) -> None:
super().__init__(
display_name=display_name,
model_display_name=model_display_name,
region=region,
parent_model=parent_model,
impersonation_chain=impersonation_chain,
dataset_id=dataset_id,
**kwargs,
)
self.command = command
self.deferrable = deferrable
self.poll_interval = poll_interval
def execute(self, context: Context):
self.parent_model = self.parent_model.split("@")[0] if self.parent_model else None
if self.deferrable:
self.invoke_defer(context=context)
model, training_id, custom_job_id = self.hook.create_custom_container_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
container_uri=self.container_uri,
command=self.command,
model_serving_container_image_uri=self.model_serving_container_image_uri,
model_serving_container_predict_route=self.model_serving_container_predict_route,
model_serving_container_health_route=self.model_serving_container_health_route,
model_serving_container_command=self.model_serving_container_command,
model_serving_container_args=self.model_serving_container_args,
model_serving_container_environment_variables=self.model_serving_container_environment_variables,
model_serving_container_ports=self.model_serving_container_ports,
model_description=self.model_description,
model_instance_schema_uri=self.model_instance_schema_uri,
model_parameters_schema_uri=self.model_parameters_schema_uri,
model_prediction_schema_uri=self.model_prediction_schema_uri,
parent_model=self.parent_model,
is_default_version=self.is_default_version,
model_version_aliases=self.model_version_aliases,
model_version_description=self.model_version_description,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
staging_bucket=self.staging_bucket,
# RUN
dataset=Dataset(name=self.dataset_id) if self.dataset_id else None,
annotation_schema_uri=self.annotation_schema_uri,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
base_output_dir=self.base_output_dir,
service_account=self.service_account,
network=self.network,
bigquery_destination=self.bigquery_destination,
args=self.args,
environment_variables=self.environment_variables,
replica_count=self.replica_count,
machine_type=self.machine_type,
accelerator_type=self.accelerator_type,
accelerator_count=self.accelerator_count,
boot_disk_type=self.boot_disk_type,
boot_disk_size_gb=self.boot_disk_size_gb,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
training_filter_split=self.training_filter_split,
validation_filter_split=self.validation_filter_split,
test_filter_split=self.test_filter_split,
predefined_split_column_name=self.predefined_split_column_name,
timestamp_split_column_name=self.timestamp_split_column_name,
tensorboard=self.tensorboard,
sync=True,
psc_interface_config=self.psc_interface_config,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
context["ti"].xcom_push(key="model_id", value=model_id)
VertexAIModelLink.persist(context=context, model_id=model_id)
else:
result = model # type: ignore
context["ti"].xcom_push(key="training_id", value=training_id)
context["ti"].xcom_push(key="custom_job_id", value=custom_job_id)
VertexAITrainingLink.persist(context=context, training_id=training_id)
return result
def invoke_defer(self, context: Context) -> None:
custom_container_training_job_obj: CustomContainerTrainingJob = self.hook.submit_custom_container_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
command=self.command,
container_uri=self.container_uri,
model_serving_container_image_uri=self.model_serving_container_image_uri,
model_serving_container_predict_route=self.model_serving_container_predict_route,
model_serving_container_health_route=self.model_serving_container_health_route,
model_serving_container_command=self.model_serving_container_command,
model_serving_container_args=self.model_serving_container_args,
model_serving_container_environment_variables=self.model_serving_container_environment_variables,
model_serving_container_ports=self.model_serving_container_ports,
model_description=self.model_description,
model_instance_schema_uri=self.model_instance_schema_uri,
model_parameters_schema_uri=self.model_parameters_schema_uri,
model_prediction_schema_uri=self.model_prediction_schema_uri,
parent_model=self.parent_model,
is_default_version=self.is_default_version,
model_version_aliases=self.model_version_aliases,
model_version_description=self.model_version_description,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
staging_bucket=self.staging_bucket,
# RUN
dataset=Dataset(name=self.dataset_id) if self.dataset_id else None,
annotation_schema_uri=self.annotation_schema_uri,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
base_output_dir=self.base_output_dir,
service_account=self.service_account,
network=self.network,
bigquery_destination=self.bigquery_destination,
args=self.args,
environment_variables=self.environment_variables,
replica_count=self.replica_count,
machine_type=self.machine_type,
accelerator_type=self.accelerator_type,
accelerator_count=self.accelerator_count,
boot_disk_type=self.boot_disk_type,
boot_disk_size_gb=self.boot_disk_size_gb,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
training_filter_split=self.training_filter_split,
validation_filter_split=self.validation_filter_split,
test_filter_split=self.test_filter_split,
predefined_split_column_name=self.predefined_split_column_name,
timestamp_split_column_name=self.timestamp_split_column_name,
tensorboard=self.tensorboard,
psc_interface_config=self.psc_interface_config,
)
custom_container_training_job_obj.wait_for_resource_creation()
training_pipeline_id: str = custom_container_training_job_obj.name
context["ti"].xcom_push(key="training_id", value=training_pipeline_id)
VertexAITrainingLink.persist(context=context, training_id=training_pipeline_id)
self.defer(
trigger=CustomContainerTrainingJobTrigger(
conn_id=self.gcp_conn_id,
project_id=self.project_id,
location=self.region,
job_id=training_pipeline_id,
poll_interval=self.poll_interval,
impersonation_chain=self.impersonation_chain,
),
method_name="execute_complete",
)
|
CreateCustomContainerTrainingJobOperator
|
python
|
walkccc__LeetCode
|
solutions/2278. Percentage of Letter in String/2278.py
|
{
"start": 0,
"end": 117
}
|
class ____:
def percentageLetter(self, s: str, letter: str) -> int:
return 100 * s.count(letter) // len(s)
|
Solution
|
python
|
numpy__numpy
|
numpy/distutils/system_info.py
|
{
"start": 108157,
"end": 108306
}
|
class ____(_pkg_config_info):
section = 'gdk_x11_2'
append_config_exe = 'gdk-x11-2.0'
version_macro_name = 'GDK_X11_VERSION'
|
gdk_x11_2_info
|
python
|
pallets__werkzeug
|
src/werkzeug/sansio/multipart.py
|
{
"start": 684,
"end": 743
}
|
class ____(Event):
pass
NEED_DATA = NeedData()
|
NeedData
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/bitly/provider.py
|
{
"start": 217,
"end": 436
}
|
class ____(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get("profile_url")
def get_avatar_url(self):
return self.account.extra_data.get("profile_image")
|
BitlyAccount
|
python
|
vyperlang__vyper
|
tests/evm_backends/abi_contract.py
|
{
"start": 605,
"end": 783
}
|
class ____:
"""Represents a parsed log entry from a contract."""
address: ChecksumAddress
args: tuple
event: str
topics: list[bytes]
raw_data: bytes
|
ABILog
|
python
|
keon__algorithms
|
tests/test_matrix.py
|
{
"start": 7501,
"end": 7842
}
|
class ____(unittest.TestCase):
"""[summary]
Test for the file rotate_image.py
Arguments:
unittest {[type]} -- [description]
"""
def test_rotate_image(self):
self.assertEqual(rotate_image.rotate(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
[[7, 4, 1], [8, 5, 2], [9, 6, 3]])
|
TestRotateImage
|
python
|
etianen__django-reversion
|
reversion/admin.py
|
{
"start": 1218,
"end": 13397
}
|
class ____(admin.ModelAdmin):
object_history_template = "reversion/object_history.html"
change_list_template = "reversion/change_list.html"
revision_form_template = None
recover_list_template = None
recover_form_template = None
history_latest_first = False
def reversion_register(self, model, **kwargs):
"""Registers the model with reversion."""
register(model, **kwargs)
@contextmanager
def create_revision(self, request):
with create_revision():
set_user(request.user)
yield
# Revision helpers.
def _reversion_get_template_list(self, template_name):
opts = self.model._meta
return (
f"reversion/{opts.app_label}/{opts.object_name.lower()}/{template_name}",
f"reversion/{opts.app_label}/{template_name}",
"reversion/%s" % template_name,
)
def _reversion_order_version_queryset(self, queryset):
"""Applies the correct ordering to the given version queryset."""
if not self.history_latest_first:
queryset = queryset.order_by("pk")
return queryset
# Messages.
def log_addition(self, request, object, message):
change_message = message or _("Initial version.")
entry = super().log_addition(request, object, change_message)
if is_active():
set_comment(entry.get_change_message())
return entry
def log_change(self, request, object, message):
entry = super().log_change(request, object, message)
if is_active():
set_comment(entry.get_change_message())
return entry
# Auto-registration.
def _reversion_autoregister(self, model, follow):
if not is_registered(model):
for parent_model, field in model._meta.concrete_model._meta.parents.items():
follow += (field.name,)
self._reversion_autoregister(parent_model, ())
self.reversion_register(model, follow=follow)
def _reversion_introspect_inline_admin(self, inline):
inline_model = None
follow_field = None
fk_name = None
if issubclass(inline, GenericInlineModelAdmin):
inline_model = inline.model
ct_field = inline.ct_field
fk_name = inline.ct_fk_field
for field in self.model._meta.private_fields:
if (
isinstance(field, GenericRelation) and
field.remote_field.model == inline_model and
field.object_id_field_name == fk_name and
field.content_type_field_name == ct_field
):
follow_field = field.name
break
elif issubclass(inline, options.InlineModelAdmin):
inline_model = inline.model
fk_name = inline.fk_name
if not fk_name:
for field in inline_model._meta.get_fields():
if (
isinstance(field, (models.ForeignKey, models.OneToOneField)) and
issubclass(self.model, field.remote_field.model)
):
fk_name = field.name
break
if fk_name and not inline_model._meta.get_field(fk_name).remote_field.hidden:
field = inline_model._meta.get_field(fk_name)
accessor = field.remote_field.get_accessor_name()
follow_field = accessor
return inline_model, follow_field
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Automatically register models if required.
if not is_registered(self.model):
inline_fields = ()
for inline in self.inlines:
inline_model, follow_field = self._reversion_introspect_inline_admin(inline)
if inline_model:
self._reversion_autoregister(inline_model, ())
if follow_field:
inline_fields += (follow_field,)
self._reversion_autoregister(self.model, inline_fields)
def get_urls(self):
urls = super().get_urls()
admin_site = self.admin_site
opts = self.model._meta
info = opts.app_label, opts.model_name,
reversion_urls = [
re_path(r"^recover/$", admin_site.admin_view(self.recoverlist_view), name='%s_%s_recoverlist' % info),
re_path(r"^recover/(\d+)/$", admin_site.admin_view(self.recover_view), name='%s_%s_recover' % info),
re_path(
r"^([^/]+)/history/(\d+)/$",
admin_site.admin_view(self.revision_view),
name='%s_%s_revision' % info,
),
]
return reversion_urls + urls
# Views.
def add_view(self, request, form_url='', extra_context=None):
with self.create_revision(request):
return super().add_view(request, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
with self.create_revision(request):
return super().change_view(request, object_id, form_url, extra_context)
def _reversion_revisionform_view(self, request, version, template_name, extra_context=None):
# Check that database transactions are supported.
if not connections[version.db].features.uses_savepoints:
raise ImproperlyConfigured("Cannot use VersionAdmin with a database that does not support savepoints.")
# Determine whether to mute signals based on request method
if request.method == "GET":
# For GET requests (viewing revisions), mute all Django model signals
# to prevent unintended side effects from signal handlers
signal_context = mute_signals(pre_save, post_save, pre_delete, post_delete, m2m_changed)
else:
# For POST requests (actual reverts), allow signals to fire normally
signal_context = nullcontext()
# Run the view.
try:
with transaction.atomic(using=version.db), signal_context:
# Revert the revision.
version.revision.revert(delete=True)
# Run the normal changeform view.
with self.create_revision(request):
response = self.changeform_view(request, quote(version.object_id), request.path, extra_context)
# Decide on whether the keep the changes.
if request.method == "POST" and response.status_code == 302:
set_comment(_("Reverted to previous version, saved on %(datetime)s") % {
"datetime": localize(template_localtime(version.revision.date_created)),
})
elif response.status_code == 200:
response.template_name = template_name # Set the template name to the correct template.
response.render() # Eagerly render the response, so it's using the latest version.
raise _RollBackRevisionView(response) # Raise exception to undo the transaction and revision.
else:
raise RevertError(_("Could not load %(object_repr)s version - not found") % {
"object_repr": version.object_repr,
})
except (RevertError, models.ProtectedError) as ex:
opts = self.model._meta
messages.error(request, force_str(ex))
return redirect(f"{self.admin_site.name}:{opts.app_label}_{opts.model_name}_changelist")
except _RollBackRevisionView as ex:
return ex.response
return response
def recover_view(self, request, version_id, extra_context=None):
"""Displays a form that can recover a deleted model."""
# The revisionform view will check for change permission (via changeform_view),
# but we also need to check for add permissions here.
if not self.has_add_permission(request):
raise PermissionDenied
# Render the recover view.
version = get_object_or_404(Version, pk=version_id)
context = {
"title": _("Recover %(name)s") % {"name": version.object_repr},
"recover": True,
}
context.update(extra_context or {})
return self._reversion_revisionform_view(
request,
version,
self.recover_form_template or self._reversion_get_template_list("recover_form.html"),
context,
)
def revision_view(self, request, object_id, version_id, extra_context=None):
"""Displays the contents of the given revision."""
object_id = unquote(object_id) # Underscores in primary key get quoted to "_5F"
version = get_object_or_404(Version, pk=version_id, object_id=object_id)
context = {
"title": _("Revert %(name)s") % {"name": version.object_repr},
"revert": True,
}
context.update(extra_context or {})
return self._reversion_revisionform_view(
request,
version,
self.revision_form_template or self._reversion_get_template_list("revision_form.html"),
context,
)
def changelist_view(self, request, extra_context=None):
with self.create_revision(request):
context = {
"has_change_permission": self.has_change_permission(request),
}
context.update(extra_context or {})
return super().changelist_view(request, context)
def recoverlist_view(self, request, extra_context=None):
"""Displays a deleted model to allow recovery."""
# Check if user has change and add permissions for model
if not self.has_change_permission(request) or not self.has_add_permission(request):
raise PermissionDenied
model = self.model
opts = model._meta
deleted = self._reversion_order_version_queryset(
Version.objects.get_deleted(self.model).select_related("revision")
)
# Set the app name.
request.current_app = self.admin_site.name
# Get the rest of the context.
context = dict(
self.admin_site.each_context(request),
opts=opts,
app_label=opts.app_label,
module_name=capfirst(opts.verbose_name),
title=_("Recover deleted %(name)s") % {"name": force_str(opts.verbose_name_plural)},
deleted=deleted,
)
context.update(extra_context or {})
return render(
request,
self.recover_list_template or self._reversion_get_template_list("recover_list.html"),
context,
)
def history_view(self, request, object_id, extra_context=None):
"""Renders the history view."""
# Check if user has view or change permissions for model
if hasattr(self, 'has_view_or_change_permission'): # for Django >= 2.1
if not self.has_view_or_change_permission(request):
raise PermissionDenied
else:
if not self.has_change_permission(request):
raise PermissionDenied
opts = self.model._meta
action_list = [
{
"revision": version.revision,
"url": reverse(
f"{self.admin_site.name}:{opts.app_label}_{opts.model_name}_revision",
args=(quote(version.object_id), version.id)
),
}
for version
in self._reversion_order_version_queryset(Version.objects.get_for_object_reference(
self.model,
unquote(object_id), # Underscores in primary key get quoted to "_5F"
).select_related("revision__user"))
]
# Compile the context.
context = {"action_list": action_list}
context.update(extra_context or {})
return super().history_view(request, object_id, context)
|
VersionAdmin
|
python
|
getsentry__sentry
|
tests/sentry/core/endpoints/test_organization_users.py
|
{
"start": 136,
"end": 2174
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-organization-users"
def setUp(self) -> None:
self.owner_user = self.create_user("foo@localhost", username="foo")
self.user_2 = self.create_user("bar@localhost", username="bar")
self.user_3 = self.create_user("unrelated@localhost", username="unrelated")
self.org = self.create_organization(owner=self.owner_user)
self.org.member_set.create(user_id=self.user_2.id)
self.team_1 = self.create_team(
organization=self.org, members=[self.owner_user, self.user_2]
)
self.team_2 = self.create_team(organization=self.org, members=[self.user_2])
self.team_3 = self.create_team(organization=self.org, members=[self.user_3])
self.project_1 = self.create_project(teams=[self.team_1])
self.project_2 = self.create_project(teams=[self.team_2])
self.project_3 = self.create_project(teams=[self.team_3])
self.login_as(user=self.user_2)
def test_simple(self) -> None:
projects = [self.project_1, self.project_2]
projects_ids = [p.id for p in projects]
response = self.get_success_response(self.org.slug, project=projects_ids)
expected = serialize(
list(
self.org.member_set.filter(
user_id__in=[self.owner_user.id, self.user_2.id]
).order_by("user_email")
),
self.user_2,
OrganizationMemberWithProjectsSerializer(projects=projects),
)
assert response.data == expected
projects = [self.project_2]
projects_ids = [p.id for p in projects]
response = self.get_success_response(self.org.slug, project=projects_ids)
expected = serialize(
list(self.org.member_set.filter(user_id__in=[self.user_2.id]).order_by("id")),
self.user_2,
OrganizationMemberWithProjectsSerializer(projects=projects),
)
assert response.data == expected
|
OrganizationMemberListTest
|
python
|
django__django
|
tests/inspectdb/models.py
|
{
"start": 4241,
"end": 4725
}
|
class ____(models.Model):
field1 = models.IntegerField()
field2 = models.CharField(max_length=10)
from_field = models.IntegerField(db_column="from")
non_unique = models.IntegerField(db_column="non__unique_column")
non_unique_0 = models.IntegerField(db_column="non_unique__column")
class Meta:
unique_together = [
("field1", "field2"),
("from_field", "field1"),
("non_unique", "non_unique_0"),
]
|
UniqueTogether
|
python
|
arrow-py__arrow
|
arrow/locales.py
|
{
"start": 103878,
"end": 105378
}
|
class ____(Locale):
names = ["lv", "lv-lv"]
past = "pirms {0}"
future = "pēc {0}"
and_word = "un"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "tagad",
"second": "sekundes",
"seconds": "{0} sekundēm",
"minute": "minūtes",
"minutes": "{0} minūtēm",
"hour": "stundas",
"hours": "{0} stundām",
"day": "dienas",
"days": "{0} dienām",
"week": "nedēļas",
"weeks": "{0} nedēļām",
"month": "mēneša",
"months": "{0} mēnešiem",
"year": "gada",
"years": "{0} gadiem",
}
month_names = [
"",
"janvāris",
"februāris",
"marts",
"aprīlis",
"maijs",
"jūnijs",
"jūlijs",
"augusts",
"septembris",
"oktobris",
"novembris",
"decembris",
]
month_abbreviations = [
"",
"jan",
"feb",
"marts",
"apr",
"maijs",
"jūnijs",
"jūlijs",
"aug",
"sept",
"okt",
"nov",
"dec",
]
day_names = [
"",
"pirmdiena",
"otrdiena",
"trešdiena",
"ceturtdiena",
"piektdiena",
"sestdiena",
"svētdiena",
]
day_abbreviations = [
"",
"pi",
"ot",
"tr",
"ce",
"pi",
"se",
"sv",
]
|
LatvianLocale
|
python
|
ray-project__ray
|
python/ray/tune/experimental/output.py
|
{
"start": 24809,
"end": 27752
}
|
class ____(ProgressReporter):
_heartbeat_threshold = AirVerbosity.DEFAULT
_wrap_headers = False
_intermediate_result_verbosity = AirVerbosity.VERBOSE
_start_end_verbosity = AirVerbosity.DEFAULT
_addressing_tmpl = "Trial {}"
def __init__(
self,
verbosity: AirVerbosity,
num_samples: int = 0,
metric: Optional[str] = None,
mode: Optional[str] = None,
config: Optional[Dict] = None,
progress_metrics: Optional[Union[List[str], List[Dict[str, str]]]] = None,
):
self._num_samples = num_samples
self._metric = metric
self._mode = mode
# will be populated when first result comes in.
self._inferred_metric = None
self._inferred_params = _infer_params(config or {})
super(TuneReporterBase, self).__init__(
verbosity=verbosity, progress_metrics=progress_metrics
)
def setup(
self,
start_time: Optional[float] = None,
total_samples: Optional[int] = None,
**kwargs,
):
super().setup(start_time=start_time)
self._num_samples = total_samples
def _get_overall_trial_progress_str(self, trials):
result = " | ".join(
[
f"{len(trials)} {status}"
for status, trials in _get_trials_by_state(trials).items()
]
)
return f"Trial status: {result}"
# TODO: Return a more structured type to share code with Jupyter flow.
def _get_heartbeat(
self, trials, *sys_args, force_full_output: bool = False
) -> Tuple[List[str], _TrialTableData]:
result = list()
# Trial status: 1 RUNNING | 7 PENDING
result.append(self._get_overall_trial_progress_str(trials))
# Current time: 2023-02-24 12:35:39 (running for 00:00:37.40)
result.append(self._time_heartbeat_str)
# Logical resource usage: 8.0/64 CPUs, 0/0 GPUs
result.extend(sys_args)
# Current best trial: TRIAL NAME, metrics: {...}, parameters: {...}
current_best_trial, metric = _current_best_trial(
trials, self._metric, self._mode
)
if current_best_trial:
result.append(_best_trial_str(current_best_trial, metric))
# Now populating the trial table data.
if not self._inferred_metric:
# try inferring again.
self._inferred_metric = _infer_user_metrics(trials)
all_metrics = list(DEFAULT_COLUMNS.keys()) + self._inferred_metric
trial_table_data = _get_trial_table_data(
trials,
param_keys=self._inferred_params,
metric_keys=all_metrics,
all_rows=force_full_output,
wrap_headers=self._wrap_headers,
)
return result, trial_table_data
def _print_heartbeat(self, trials, *sys_args, force: bool = False):
raise NotImplementedError
|
TuneReporterBase
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/auth/managers/models/resource_details.py
|
{
"start": 1208,
"end": 1342
}
|
class ____:
"""Represents the details of a DAG."""
id: str | None = None
team_name: str | None = None
@dataclass
|
DagDetails
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeVarTuple14.py
|
{
"start": 1186,
"end": 2104
}
|
class ____:
@classmethod
def method1(cls, *shape: *Ts) -> tuple[*Ts]: ...
def func1(target: Callable[[*Ts], int]) -> tuple[*Ts]: ...
def func2(a: int, b: str, /) -> int: ...
def func3(action: Callable[[int, str], int]):
v1 = func1(func2)
reveal_type(v1, expected_text="tuple[int, str]")
v2 = func1(action)
reveal_type(v2, expected_text="tuple[int, str]")
def func4(*args: *tuple[int, str]): ...
func4(1, "")
# This should generate an error.
func4()
# This should generate an error.
func4(1)
# This should generate an error.
func4(1, "", "")
def func5(*args: *tuple[int, *tuple[str, ...], int]): ...
func5(1, 1)
func5(1, "", 1)
func5(1, "", "", 1)
# This should generate an error.
func5()
# This should generate an error.
func5(1)
# This should generate an error.
func5("")
# This should generate an error.
func5(1, "")
# This should generate an error.
func5(1, "", "")
|
ClassA
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.