language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/llm/tests/batch/cpu/processor/test_processor_base.py | {
"start": 12342,
"end": 16860
} | class ____:
"""Tests for preprocess_map_kwargs and postprocess_map_kwargs."""
def test_map_kwargs_stored_in_processor(self):
"""Test that map kwargs are correctly stored in Processor."""
preprocess_kwargs = {"num_cpus": 0.5}
postprocess_kwargs = {"num_cpus": 0.25, "memory": 1024}
processor = Processor(
config=ProcessorConfig(batch_size=64),
stages=[],
preprocess=lambda row: {"val": row["id"]},
postprocess=lambda row: {"result": row["val"]},
preprocess_map_kwargs=preprocess_kwargs,
postprocess_map_kwargs=postprocess_kwargs,
)
assert processor.preprocess_map_kwargs == preprocess_kwargs
assert processor.postprocess_map_kwargs == postprocess_kwargs
def test_map_kwargs_defaults_to_empty_dict(self):
"""Test that map kwargs default to empty dict when None."""
processor = Processor(
config=ProcessorConfig(batch_size=64),
stages=[],
)
assert processor.preprocess_map_kwargs == {}
assert processor.postprocess_map_kwargs == {}
def test_map_kwargs_passthrough_via_builder(self):
"""Test that map kwargs are passed through ProcessorBuilder."""
def build_processor_simple(
config: ProcessorConfig,
preprocess=None,
postprocess=None,
preprocess_map_kwargs=None,
postprocess_map_kwargs=None,
) -> Processor:
return Processor(
config,
[],
preprocess=preprocess,
postprocess=postprocess,
preprocess_map_kwargs=preprocess_map_kwargs,
postprocess_map_kwargs=postprocess_map_kwargs,
)
ProcessorBuilder.clear_registry()
ProcessorBuilder.register(DummyProcessorConfig, build_processor_simple)
config = DummyProcessorConfig(batch_size=64)
# Test through ProcessorBuilder which is called by build_llm_processor
processor = ProcessorBuilder.build(
config,
preprocess=lambda row: {"val": row["id"]},
postprocess=lambda row: {"result": row["val"]},
preprocess_map_kwargs={"num_cpus": 0.5},
postprocess_map_kwargs={"num_cpus": 0.25},
)
assert processor.preprocess_map_kwargs == {"num_cpus": 0.5}
assert processor.postprocess_map_kwargs == {"num_cpus": 0.25}
def test_builder_kwargs_conflict_with_map_kwargs(self):
"""Test that builder_kwargs validation rejects map kwargs."""
# Test the validation that build_llm_processor calls
with pytest.raises(ValueError, match="builder_kwargs cannot contain"):
ProcessorBuilder.validate_builder_kwargs(
{"preprocess_map_kwargs": {"num_cpus": 0.5}}
)
with pytest.raises(ValueError, match="builder_kwargs cannot contain"):
ProcessorBuilder.validate_builder_kwargs(
{"postprocess_map_kwargs": {"num_cpus": 0.5}}
)
def test_end_to_end_with_map_kwargs(self):
"""Test end-to-end execution with map kwargs."""
processor = Processor(
config=ProcessorConfig(batch_size=64),
stages=[],
preprocess=lambda row: {"val": row["id"] * 2},
postprocess=lambda row: {"result": row["val"] + 1, "id": row["id"]},
preprocess_map_kwargs={"num_cpus": 0.5},
postprocess_map_kwargs={"num_cpus": 0.25},
)
ds = ray.data.range(5)
result = processor(ds).take_all()
for row in result:
# Verify the computation: val = id * 2, result = val + 1
assert row["result"] == row["id"] * 2 + 1
def test_backward_compatibility_without_map_kwargs(self):
"""Test that existing code without map kwargs still works."""
processor = Processor(
config=ProcessorConfig(batch_size=64),
stages=[],
preprocess=lambda row: {"val": row["id"]},
postprocess=lambda row: {"result": row["val"]},
)
ds = ray.data.range(5)
result = processor(ds).take_all()
# Sort results by result value since order is not guaranteed
result = sorted(result, key=lambda x: x["result"])
for i, row in enumerate(result):
assert row["result"] == i
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestMapKwargs |
python | sqlalchemy__sqlalchemy | test/orm/test_deprecations.py | {
"start": 19826,
"end": 26195
} | class ____(
fixtures.RemovesEvents, _fixtures.FixtureTest, AssertsCompiledSQL
):
__dialect__ = "default"
def test_listen_on_mapper_mapper_event_fn(self, registry):
from sqlalchemy.orm import mapper
m1 = Mock()
with expect_deprecated(
r"The `sqlalchemy.orm.mapper\(\)` symbol is deprecated and "
"will be removed"
):
@event.listens_for(mapper, "before_configured")
def go():
m1()
@registry.mapped
class MyClass:
__tablename__ = "t1"
id = Column(Integer, primary_key=True)
registry.configure()
eq_(m1.mock_calls, [call()])
def test_listen_on_mapper_instrumentation_event_fn(self, registry):
from sqlalchemy.orm import mapper
m1 = Mock()
with expect_deprecated(
r"The `sqlalchemy.orm.mapper\(\)` symbol is deprecated and "
"will be removed"
):
@event.listens_for(mapper, "init")
def go(target, args, kwargs):
m1(target, args, kwargs)
@registry.mapped
class MyClass:
__tablename__ = "t1"
id = Column(Integer, primary_key=True)
mc = MyClass(id=5)
eq_(m1.mock_calls, [call(mc, (), {"id": 5})])
def test_we_couldnt_remove_mapper_yet(self):
"""test that the mapper() function is present but raises an
informative error when used.
The function itself was to be removed as of 2.0, however we forgot
to mark deprecated the use of the function as an event target,
so it needs to stay around for another cycle at least.
"""
class MyClass:
pass
t1 = Table("t1", MetaData(), Column("id", Integer, primary_key=True))
from sqlalchemy.orm import mapper
with assertions.expect_raises_message(
sa_exc.InvalidRequestError,
r"The 'sqlalchemy.orm.mapper\(\)' function is removed as of "
"SQLAlchemy 2.0.",
):
mapper(MyClass, t1)
def test_deferred_scalar_loader_name_change(self):
class Foo:
pass
def myloader(*arg, **kw):
pass
instrumentation.register_class(Foo)
manager = instrumentation.manager_of_class(Foo)
with testing.expect_deprecated(
"The ClassManager.deferred_scalar_loader attribute is now named "
"expired_attribute_loader"
):
manager.deferred_scalar_loader = myloader
is_(manager.expired_attribute_loader, myloader)
with testing.expect_deprecated(
"The ClassManager.deferred_scalar_loader attribute is now named "
"expired_attribute_loader"
):
is_(manager.deferred_scalar_loader, myloader)
def test_comparable_column(self):
users, User = self.tables.users, self.classes.User
class MyComparator(sa.orm.properties.ColumnProperty.Comparator):
__hash__ = None
def __eq__(self, other):
# lower case comparison
return func.lower(self.__clause_element__()) == func.lower(
other
)
def intersects(self, other):
# non-standard comparator
return self.__clause_element__().op("&=")(other)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"name": sa.orm.column_property(
users.c.name, comparator_factory=MyComparator
)
},
)
assert_raises_message(
AttributeError,
"Neither 'InstrumentedAttribute' object nor "
"'MyComparator' object associated with User.name has "
"an attribute 'nonexistent'",
getattr,
User.name,
"nonexistent",
)
eq_(
str(
(User.name == "ed").compile(
dialect=sa.engine.default.DefaultDialect()
)
),
"lower(users.name) = lower(:lower_1)",
)
eq_(
str(
(User.name.intersects("ed")).compile(
dialect=sa.engine.default.DefaultDialect()
)
),
"users.name &= :name_1",
)
def test_add_property(self):
users = self.tables.users
assert_col = []
class User(ComparableEntity):
def _get_name(self):
assert_col.append(("get", self._name))
return self._name
def _set_name(self, name):
assert_col.append(("set", name))
self._name = name
name = property(_get_name, _set_name)
m = self.mapper_registry.map_imperatively(User, users)
m.add_property("_name", deferred(users.c.name))
m.add_property("name", synonym("_name"))
sess = fixture_session()
assert sess.get(User, 7)
u = sess.query(User).filter_by(name="jack").one()
def go():
eq_(u.name, "jack")
eq_(assert_col, [("get", "jack")], str(assert_col))
self.sql_count_(1, go)
@testing.variation("prop_type", ["relationship", "col_prop"])
def test_prop_replacement_warns(self, prop_type: testing.Variation):
users, User = self.tables.users, self.classes.User
addresses, Address = self.tables.addresses, self.classes.Address
m = self.mapper(
User,
users,
properties={
"foo": column_property(users.c.name),
"addresses": relationship(Address),
},
)
self.mapper(Address, addresses)
if prop_type.relationship:
key = "addresses"
new_prop = relationship(Address)
elif prop_type.col_prop:
key = "foo"
new_prop = column_property(users.c.name)
else:
prop_type.fail()
with expect_deprecated(
f"Property User.{key} on Mapper|User|users being replaced "
f"with new property User.{key}; the old property will "
"be discarded",
):
m.add_property(key, new_prop)
| DeprecatedMapperTest |
python | apache__airflow | providers/apache/hive/src/airflow/providers/apache/hive/transfers/vertica_to_hive.py | {
"start": 1296,
"end": 5590
} | class ____(BaseOperator):
"""
Moves data from Vertica to Hive.
The operator runs your query against Vertica, stores the file
locally before loading it into a Hive table. If the ``create``
or ``recreate`` arguments are set to ``True``,
a ``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
Hive data types are inferred from the cursor's metadata.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param sql: SQL query to execute against the Vertica database. (templated)
:param hive_table: target Hive table, use dot notation to target a
specific database. (templated)
:param create: whether to create the table if it doesn't exist
:param recreate: whether to drop and recreate the table at every execution
:param partition: target partition as a dict of partition columns
and values. (templated)
:param delimiter: field delimiter in the file
:param vertica_conn_id: source Vertica connection
:param hive_cli_conn_id: Reference to the
:ref:`Hive CLI connection id <howto/connection:hive_cli>`.
:param hive_auth: optional authentication option passed for the Hive connection
"""
template_fields: Sequence[str] = ("sql", "partition", "hive_table")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
ui_color = "#b4e0ff"
def __init__(
self,
*,
sql: str,
hive_table: str,
create: bool = True,
recreate: bool = False,
partition: dict | None = None,
delimiter: str = chr(1),
vertica_conn_id: str = "vertica_default",
hive_cli_conn_id: str = "hive_cli_default",
hive_auth: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.hive_table = hive_table
self.partition = partition
self.create = create
self.recreate = recreate
self.delimiter = str(delimiter)
self.vertica_conn_id = vertica_conn_id
self.hive_cli_conn_id = hive_cli_conn_id
self.partition = partition or {}
self.hive_auth = hive_auth
@classmethod
def type_map(cls, vertica_type):
"""
Manually hack Vertica-Python type mapping.
The stock datatype.py does not provide the full type mapping access.
Reference:
https://github.com/uber/vertica-python/blob/master/vertica_python/vertica/column.py
"""
type_map = {
5: "BOOLEAN",
6: "INT",
7: "FLOAT",
8: "STRING",
9: "STRING",
16: "FLOAT",
}
return type_map.get(vertica_type, "STRING")
def execute(self, context: Context):
hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id, auth=self.hive_auth)
vertica = VerticaHook(vertica_conn_id=self.vertica_conn_id)
self.log.info("Dumping Vertica query results to local file")
conn = vertica.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
with NamedTemporaryFile(mode="w", encoding="utf-8") as f:
csv_writer = csv.writer(f, delimiter=self.delimiter)
field_dict = {}
for col_count, field in enumerate(cursor.description, start=1):
col_position = f"Column{col_count}"
field_dict[col_position if field[0] == "" else field[0]] = self.type_map(field[1])
csv_writer.writerows(cursor.iterate())
f.flush()
cursor.close()
conn.close()
self.log.info("Loading file into Hive")
hive.load_file(
f.name,
self.hive_table,
field_dict=field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
)
| VerticaToHiveOperator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-intercom/components.py | {
"start": 9548,
"end": 10334
} | class ____(DefaultErrorHandler):
"""
Custom error handler that triggers a reset on HTTP 500 errors.
"""
def interpret_response(self, response_or_exception: Optional[Union[requests.Response, Exception]]) -> ErrorResolution:
if isinstance(response_or_exception, requests.Response) and response_or_exception.status_code == 500:
reset_signal = ResetCursorSignal()
reset_signal.trigger_reset()
return ErrorResolution(
response_action=ResponseAction.RETRY,
failure_type=FailureType.transient_error,
error_message="HTTP 500 encountered. Triggering reset to retry from the beginning...",
)
return super().interpret_response(response_or_exception)
| IntercomErrorHandler |
python | huggingface__transformers | src/transformers/models/flaubert/modeling_flaubert.py | {
"start": 29624,
"end": 31776
} | class ____(PreTrainedModel):
config: FlaubertConfig
base_model_prefix = "transformer"
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
@property
def dummy_inputs(self):
inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
if self.config.use_lang_emb and self.config.n_langs > 1:
langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
else:
langs_list = None
return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Embedding):
if self.config is not None and self.config.embed_init_std is not None:
init.normal_(module.weight, mean=0, std=self.config.embed_init_std)
# Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag
if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False):
init.zeros_(module.weight[module.padding_idx])
if isinstance(module, nn.Linear):
if self.config is not None and self.config.init_std is not None:
init.normal_(module.weight, mean=0, std=self.config.init_std)
if module.bias is not None:
init.constant_(module.bias, 0.0)
if isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
if isinstance(module, FlaubertModel) and self.config.sinusoidal_embeddings:
init.copy_(
module.position_embeddings.weight,
create_sinusoidal_embeddings(
self.config.max_position_embeddings,
self.config.emb_dim,
out=torch.empty_like(module.position_embeddings.weight),
),
)
@auto_docstring
| FlaubertPreTrainedModel |
python | readthedocs__readthedocs.org | readthedocs/audit/serializers.py | {
"start": 631,
"end": 769
} | class ____(serializers.ModelSerializer):
class Meta:
model = Organization
fields = ["id", "slug"]
| OrganizationSerializer |
python | getsentry__sentry-python | sentry_sdk/profiler/continuous_profiler.py | {
"start": 5344,
"end": 5471
} | class ____:
active: bool = True
def stop(self):
# type: () -> None
self.active = False
| ContinuousProfile |
python | huggingface__transformers | src/transformers/models/edgetam_video/modeling_edgetam_video.py | {
"start": 2221,
"end": 3530
} | class ____(nn.LayerNorm):
r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, *, eps=1e-6, data_format="channels_last", **kwargs):
super().__init__(normalized_shape, eps=eps, **kwargs)
if data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError(f"Unsupported data format: {data_format}")
self.data_format = data_format
def forward(self, features: torch.Tensor) -> torch.Tensor:
"""
Args:
features: Tensor of shape (batch_size, channels, height, width) OR (batch_size, height, width, channels)
"""
if self.data_format == "channels_first":
features = features.permute(0, 2, 3, 1)
features = super().forward(features)
features = features.permute(0, 3, 1, 2)
else:
features = super().forward(features)
return features
# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
| EdgeTamVideoLayerNorm |
python | gevent__gevent | src/gevent/_util.py | {
"start": 5100,
"end": 5691
} | class ____(object):
"""
A non-data descriptor used just like @property. The
difference is the function value is assigned to the instance
dict the first time it is accessed and then the function is never
called again.
Contrast with `readproperty`.
"""
def __init__(self, func):
self.data = (func, func.__name__)
update_wrapper(self, func)
def __get__(self, inst, class_):
if inst is None:
return self
func, name = self.data
value = func(inst)
inst.__dict__[name] = value
return value
| Lazy |
python | doocs__leetcode | solution/0300-0399/0316.Remove Duplicate Letters/Solution.py | {
"start": 0,
"end": 413
} | class ____:
def removeDuplicateLetters(self, s: str) -> str:
last = {c: i for i, c in enumerate(s)}
stk = []
vis = set()
for i, c in enumerate(s):
if c in vis:
continue
while stk and stk[-1] > c and last[stk[-1]] > i:
vis.remove(stk.pop())
stk.append(c)
vis.add(c)
return ''.join(stk)
| Solution |
python | wandb__wandb | wandb/vendor/pygments/lexers/dotnet.py | {
"start": 20409,
"end": 21059
} | class ____(DelegatingLexer):
"""
Lexer for highlighting C# within ASP.NET pages.
"""
name = 'aspx-cs'
aliases = ['aspx-cs']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
return 0.15
| CSharpAspxLexer |
python | urllib3__urllib3 | src/urllib3/util/retry.py | {
"start": 651,
"end": 822
} | class ____(typing.NamedTuple):
method: str | None
url: str | None
error: Exception | None
status: int | None
redirect_location: str | None
| RequestHistory |
python | celery__celery | t/smoke/tests/stamping/workers/legacy.py | {
"start": 198,
"end": 1491
} | class ____(CeleryWorkerContainer):
@property
def client(self) -> Any:
return self
@classmethod
def version(cls) -> str:
return "4.4.7" # Last version of 4.x
@classmethod
def log_level(cls) -> str:
return "INFO"
@classmethod
def worker_name(cls) -> str:
return "celery_legacy_tests_worker"
@classmethod
def worker_queue(cls) -> str:
return "celery_legacy_tests_queue"
celery_legacy_worker_image = build(
path=".",
dockerfile="t/smoke/workers/docker/pypi",
tag="t/smoke/worker:legacy",
buildargs=LegacyWorkerContainer.buildargs(),
)
celery_legacy_worker_container = container(
image="{celery_legacy_worker_image.id}",
environment=fxtr("default_worker_env"),
network="{default_pytest_celery_network.name}",
volumes={"{default_worker_volume.name}": defaults.DEFAULT_WORKER_VOLUME},
wrapper_class=LegacyWorkerContainer,
timeout=defaults.DEFAULT_WORKER_CONTAINER_TIMEOUT,
)
@pytest.fixture
def celery_legacy_worker(
celery_legacy_worker_container: LegacyWorkerContainer,
celery_setup_app: Celery,
) -> CeleryTestWorker:
worker = CeleryTestWorker(celery_legacy_worker_container, app=celery_setup_app)
yield worker
worker.teardown()
| LegacyWorkerContainer |
python | run-llama__llama_index | llama-index-integrations/graph_stores/llama-index-graph-stores-memgraph/llama_index/graph_stores/memgraph/kg_base.py | {
"start": 1030,
"end": 5662
} | class ____(GraphStore):
def __init__(
self,
username: str,
password: str,
url: str,
database: str = "memgraph",
node_label: str = "Entity",
**kwargs: Any,
) -> None:
try:
import neo4j
except ImportError:
raise ImportError("Please install neo4j: pip install neo4j")
self.node_label = node_label
self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password))
self._database = database
self.schema = ""
# verify connection
try:
self._driver.verify_connectivity()
except neo4j.exceptions.ServiceUnavailable:
raise ValueError(
"Could not connect to Memgraph database. "
"Please ensure that the url is correct"
)
except neo4j.exceptions.AuthError:
raise ValueError(
"Could not connect to Memgraph database. "
"Please ensure that the username and password are correct"
)
# set schema
self.refresh_schema()
# create constraint
self.query(
"""
CREATE CONSTRAINT ON (n:%s) ASSERT n.id IS UNIQUE;
"""
% (self.node_label)
)
# create index
self.query(
"""
CREATE INDEX ON :%s(id);
"""
% (self.node_label)
)
@property
def client(self) -> Any:
return self._driver
def query(self, query: str, param_map: Optional[Dict[str, Any]] = {}) -> Any:
"""Execute a Cypher query."""
with self._driver.session(database=self._database) as session:
result = session.run(query, param_map)
return [record.data() for record in result]
def get(self, subj: str) -> List[List[str]]:
"""Get triplets."""
query = f"""
MATCH (n1:{self.node_label})-[r]->(n2:{self.node_label})
WHERE n1.id = $subj
RETURN type(r), n2.id;
"""
with self._driver.session(database=self._database) as session:
data = session.run(query, {"subj": subj})
return [record.values() for record in data]
def get_rel_map(
self, subjs: Optional[List[str]] = None, depth: int = 2
) -> Dict[str, List[List[str]]]:
"""Get flat relation map."""
rel_map: Dict[Any, List[Any]] = {}
if subjs is None or len(subjs) == 0:
return rel_map
query = (
f"""MATCH p=(n1:{self.node_label})-[*1..{depth}]->() """
f"""{"WHERE n1.id IN $subjs" if subjs else ""} """
"UNWIND relationships(p) AS rel "
"WITH n1.id AS subj, collect([type(rel), endNode(rel).id]) AS rels "
"RETURN subj, rels"
)
data = list(self.query(query, {"subjs": subjs}))
if not data:
return rel_map
for record in data:
rel_map[record["subj"]] = record["rels"]
return rel_map
def upsert_triplet(self, subj: str, rel: str, obj: str) -> None:
"""Add triplet."""
query = f"""
MERGE (n1:`{self.node_label}` {{id:$subj}})
MERGE (n2:`{self.node_label}` {{id:$obj}})
MERGE (n1)-[:`{rel.replace(" ", "_").upper()}`]->(n2)
"""
self.query(query, {"subj": subj, "obj": obj})
def delete(self, subj: str, rel: str, obj: str) -> None:
"""Delete triplet."""
query = f"""
MATCH (n1:`{self.node_label}`)-[r:`{rel}`]->(n2:`{self.node_label}`)
WHERE n1.id = $subj AND n2.id = $obj
DELETE r
"""
self.query(query, {"subj": subj, "obj": obj})
def refresh_schema(self) -> None:
"""
Refreshes the Memgraph graph schema information.
"""
node_properties = self.query(node_properties_query)
relationships_properties = self.query(rel_properties_query)
relationships = self.query(rel_query)
self.schema = f"""
Node properties are the following:
{node_properties}
Relationship properties are the following:
{relationships_properties}
The relationships are the following:
{relationships}
"""
def get_schema(self, refresh: bool = False) -> str:
"""Get the schema of the MemgraphGraph store."""
if self.schema and not refresh:
return self.schema
self.refresh_schema()
logger.debug(f"get_schema() schema:\n{self.schema}")
return self.schema
| MemgraphGraphStore |
python | huggingface__transformers | src/transformers/models/informer/modeling_informer.py | {
"start": 31781,
"end": 37264
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: InformerConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = InformerAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
config=config,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
if config.attention_type == "prob":
self.self_attn = InformerProbSparseAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
sampling_factor=config.sampling_factor,
is_decoder=True,
layer_idx=layer_idx,
)
else:
self.self_attn = InformerAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
config=config,
layer_idx=layer_idx,
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
| InformerDecoderLayer |
python | great-expectations__great_expectations | great_expectations/core/config_substitutor.py | {
"start": 545,
"end": 19604
} | class ____:
"""
Responsible for encapsulating all logic around $VARIABLE (or ${VARIABLE}) substitution.
While the config variables utilized for substitution are provided at runtime, all the
behavior necessary to actually update config objects with their appropriate runtime values
should be defined herein.
"""
AWS_PATTERN = r"^secret\|arn:aws:secretsmanager:([a-z\-0-9]+):([0-9]{12}):secret:([a-zA-Z0-9\/_\+=\.@\-]+)" # noqa: E501 # FIXME CoP
AWS_SSM_PATTERN = (
r"^secret\|arn:aws:ssm:([a-z\-0-9]+):([0-9]{12}):parameter\/([a-zA-Z0-9\/_\+=\.@\-]+)"
)
GCP_PATTERN = r"^secret\|projects\/([a-z0-9\_\-]{6,30})\/secrets/([a-zA-Z\_\-]{1,255})"
AZURE_PATTERN = (
r"^secret\|(https:\/\/[a-zA-Z0-9\-]{3,24}\.vault\.azure\.net)\/secrets\/([0-9a-zA-Z-]+)"
)
def __init__(self) -> None:
# Using the @lru_cache decorator on method calls can create memory leaks - an attr is preferred here. # noqa: E501 # FIXME CoP
# Ref: https://stackoverflow.com/a/68550238
self._secret_store_cache = lru_cache(maxsize=None)(self._substitute_value_from_secret_store)
def substitute_all_config_variables(
self,
data: Any,
replace_variables_dict: Dict[str, str],
dollar_sign_escape_string: str = r"\$",
) -> Any:
"""
Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like
config object for their values.
The method traverses the dictionary recursively.
:param data:
:param replace_variables_dict:
:param dollar_sign_escape_string: a reserved character for specifying parameters
:return: a dictionary with all the variables replaced with their values
"""
if isinstance(data, BaseYamlConfig):
data = (data.__class__.get_schema_class())().dump(data)
if isinstance(data, (dict, OrderedDict)):
return {
k: self.substitute_all_config_variables(v, replace_variables_dict)
for k, v in data.items()
}
elif isinstance(data, list):
return [self.substitute_all_config_variables(v, replace_variables_dict) for v in data]
return self.substitute_config_variable(
data, replace_variables_dict, dollar_sign_escape_string
)
def substitute_config_variable(
self,
template_str: str,
config_variables_dict: Dict[str, str],
dollar_sign_escape_string: str = r"\$",
) -> Optional[str]:
"""
This method takes a string, and if it contains a pattern ${SOME_VARIABLE} or $SOME_VARIABLE,
returns a string where the pattern is replaced with the value of SOME_VARIABLE,
otherwise returns the string unchanged. These patterns are case sensitive. There can be multiple
patterns in a string, e.g. all 3 will be substituted in the following:
$SOME_VARIABLE${some_OTHER_variable}$another_variable
If the environment variable SOME_VARIABLE is set, the method uses its value for substitution.
If it is not set, the value of SOME_VARIABLE is looked up in the config variables store (file).
If it is not found there, the input string is returned as is.
If the value to substitute is not a string, it is returned as-is.
If the value to substitute begins with dollar_sign_escape_string it is not substituted.
If the value starts with the keyword `secret|`, it tries to apply secret store substitution.
:param template_str: a string that might or might not be of the form ${SOME_VARIABLE}
or $SOME_VARIABLE
:param config_variables_dict: a dictionary of config variables. It is loaded from the
config variables store (by default, "uncommitted/config_variables.yml file)
:param dollar_sign_escape_string: a string that will be used in place of a `$` when substitution
is not desired.
:return: a string with values substituted, or the same object if template_str is not a string.
""" # noqa: E501 # FIXME CoP
if template_str is None:
return template_str
# 1. Make substitutions for non-escaped patterns
try:
match = re.finditer(TEMPLATE_STR_REGEX, template_str)
except TypeError:
# If the value is not a string (e.g., a boolean), we should return it as is
return template_str
for m in match:
# Match either the first group e.g. ${Variable} or the second e.g. $Variable
config_variable_name = m.group(1) or m.group(2)
config_variable_value = config_variables_dict.get(config_variable_name)
if config_variable_value is not None:
if not isinstance(config_variable_value, str):
return config_variable_value
template_str = template_str.replace(m.group(), config_variable_value)
else:
raise gx_exceptions.MissingConfigVariableError( # noqa: TRY003 # FIXME CoP
f"""\n\nUnable to find a match for config substitution variable: `{config_variable_name}`.
Please add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.
See https://docs.greatexpectations.io/docs/core/configure_project_settings/configure_credentials""", # noqa: E501 # FIXME CoP
missing_config_variable=config_variable_name,
)
# 2. Replace the "$"'s that had been escaped
template_str = template_str.replace(dollar_sign_escape_string, "$")
template_str = self._secret_store_cache(template_str)
return template_str
def _substitute_value_from_secret_store(self, value: str) -> str:
"""
This method takes a value, tries to parse the value to fetch a secret from a secret manager
and returns the secret's value only if the input value is a string and contains one of the following patterns:
- AWS Secrets Manager: the input value starts with ``secret|arn:aws:secretsmanager``
- GCP Secret Manager: the input value matches the following regex ``^secret\\|projects\\/[a-z0-9\\_\\-]{6,30}\\/secrets``
- Azure Key Vault: the input value matches the following regex ``^secret\\|https:\\/\\/[a-zA-Z0-9\\-]{3,24}\\.vault\\.azure\\.net``
Input value examples:
- AWS Secrets Manager: ``secret|arn:aws:secretsmanager:eu-west-3:123456789012:secret:my_secret``
- GCP Secret Manager: ``secret|projects/gcp_project_id/secrets/my_secret``
- Azure Key Vault: ``secret|https://vault-name.vault.azure.net/secrets/my-secret``
:param value: a string that might or might not start with `secret|`
:return: a string with the value substituted by the secret from the secret store,
or the same object if value is not a string.
""" # noqa: E501 # FIXME CoP
if isinstance(value, str):
if re.match(self.AWS_PATTERN, value):
return self._substitute_value_from_aws_secrets_manager(value)
elif re.match(self.AWS_SSM_PATTERN, value):
return self._substitute_value_from_aws_ssm(value)
elif re.match(self.GCP_PATTERN, value):
return self._substitute_value_from_gcp_secret_manager(value)
elif re.match(self.AZURE_PATTERN, value):
return self._substitute_value_from_azure_keyvault(value)
return value
def _substitute_value_from_aws_secrets_manager(self, value: str) -> str:
"""
This methods uses a boto3 client and the secretsmanager service to try to retrieve the secret value
from the elements it is able to parse from the input value.
- value: string with pattern ``secret|arn:aws:secretsmanager:${region_name}:${account_id}:secret:${secret_name}``
optional : after the value above, a secret version can be added ``:${secret_version}``
optional : after the value above, a secret key can be added ``|${secret_key}``
- region_name: `AWS region used by the secrets manager <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_
- account_id: `Account ID for the AWS account used by the secrets manager <https://docs.aws.amazon.com/en_us/IAM/latest/UserGuide/console_account-alias.html>`_
This value is currently not used.
- secret_name: Name of the secret
- secret_version: UUID of the version of the secret
- secret_key: Only if the secret's data is a JSON string, which key of the dict should be retrieve
:param value: a string that starts with ``secret|arn:aws:secretsmanager``
:return: a string with the value substituted by the secret from the AWS Secrets Manager store
:raises: ImportError, ValueError
""" # noqa: E501 # FIXME CoP
regex = re.compile(
rf"{self.AWS_PATTERN}(?:\:([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}))?(?:\|([^\|]+))?$"
)
if not aws.boto3:
logger.error(
"boto3 is not installed, please install great_expectations with aws_secrets extra > " # noqa: E501 # FIXME CoP
"pip install great_expectations[aws_secrets]"
)
raise ImportError("Could not import boto3") # noqa: TRY003 # FIXME CoP
matches = regex.match(value)
if not matches:
raise ValueError(f"Could not match the value with regex {regex}") # noqa: TRY003 # FIXME CoP
region_name = matches.group(1)
secret_name = matches.group(3)
secret_version = matches.group(4)
secret_key = matches.group(5)
# Create a Secrets Manager client
session = aws.boto3.session.Session()
client = session.client(service_name="secretsmanager", region_name=region_name)
if secret_version:
secret_response = client.get_secret_value(
SecretId=secret_name, VersionId=secret_version
)
else:
secret_response = client.get_secret_value(SecretId=secret_name)
# Decrypts secret using the associated KMS CMK.
# Depending on whether the secret is a string or binary, one of these fields will be populated. # noqa: E501 # FIXME CoP
if "SecretString" in secret_response:
secret = secret_response["SecretString"]
else:
secret = base64.b64decode(secret_response["SecretBinary"]).decode("utf-8")
if secret_key:
secret = json.loads(secret)[secret_key]
return secret
def _substitute_value_from_aws_ssm(self, value: str) -> str:
"""
This methods uses a boto3 client and the systemmanager service to try to retrieve the secret value
from the elements it is able to parse from the input value.
- value: string with pattern ``secret|arn:aws:ssm:${region_name}:${account_id}:parameter:${secret_name}``
optional : after the value above, a secret version can be added ``:${secret_version}``
optional : after the value above, a secret key can be added ``|${secret_key}``
- region_name: `AWS region used by the System Manager Parameter Store <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_
- account_id: `Account ID for the AWS account used by the parameter store <https://docs.aws.amazon.com/en_us/IAM/latest/UserGuide/console_account-alias.html>`_
This value is currently not used.
- secret_name: Name of the secret
- secret_version: UUID of the version of the secret
- secret_key: Only if the secret's data is a JSON string, which key of the dict should be retrieve
:param value: a string that starts with ``secret|arn:aws:ssm``
:return: a string with the value substituted by the secret from the AWS Secrets Manager store
:raises: ImportError, ValueError
""" # noqa: E501 # FIXME CoP
regex = re.compile(
rf"{self.AWS_SSM_PATTERN}(?:\:([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}))?(?:\|([^\|]+))?$"
)
if not aws.boto3:
logger.error(
"boto3 is not installed, please install great_expectations with aws_secrets extra > " # noqa: E501 # FIXME CoP
"pip install great_expectations[aws_secrets]"
)
raise ImportError("Could not import boto3") # noqa: TRY003 # FIXME CoP
matches = regex.match(value)
if not matches:
raise ValueError(f"Could not match the value with regex {regex}") # noqa: TRY003 # FIXME CoP
region_name = matches.group(1)
secret_name = matches.group(3)
secret_version = matches.group(4)
secret_key = matches.group(5)
# Create a Secrets Manager client
session = aws.boto3.session.Session()
client = session.client(service_name="ssm", region_name=region_name)
if secret_version:
secret_response = client.get_parameter(
Name=secret_name, WithDecryption=True, Version=secret_version
)
else:
secret_response = client.get_parameter(Name=secret_name, WithDecryption=True)
# Decrypts secret using the associated KMS CMK.
# Depending on whether the secret is a string or binary, one of these fields will be populated. # noqa: E501 # FIXME CoP
secret = secret_response["Parameter"]["Value"]
if secret_key:
secret = json.loads(secret_response["Parameter"]["Value"])[secret_key]
return secret
def _substitute_value_from_gcp_secret_manager(self, value: str) -> str:
"""
This methods uses a google.cloud.secretmanager.SecretManagerServiceClient to try to retrieve the secret value
from the elements it is able to parse from the input value.
value: string with pattern ``secret|projects/${project_id}/secrets/${secret_name}``
optional : after the value above, a secret version can be added ``/versions/${secret_version}``
optional : after the value above, a secret key can be added ``|${secret_key}``
- project_id: `Project ID of the GCP project on which the secret manager is implemented <https://cloud.google.com/resource-manager/docs/creating-managing-projects#before_you_begin>`_
- secret_name: Name of the secret
- secret_version: ID of the version of the secret
- secret_key: Only if the secret's data is a JSON string, which key of the dict should be retrieve
:param value: a string that matches the following regex ``^secret|projects/[a-z0-9_-]{6,30}/secrets``
:return: a string with the value substituted by the secret from the GCP Secret Manager store
:raises: ImportError, ValueError
""" # noqa: E501 # FIXME CoP
regex = re.compile(rf"{self.GCP_PATTERN}(?:\/versions\/([a-z0-9]+))?(?:\|([^\|]+))?$")
if not google.secretmanager:
logger.error(
"secretmanager is not installed, please install great_expectations with gcp extra > " # noqa: E501 # FIXME CoP
"pip install great_expectations[gcp]"
)
raise ImportError("Could not import secretmanager from google.cloud") # noqa: TRY003 # FIXME CoP
client = google.secretmanager.SecretManagerServiceClient()
matches = regex.match(value)
if not matches:
raise ValueError(f"Could not match the value with regex {regex}") # noqa: TRY003 # FIXME CoP
project_id = matches.group(1)
secret_id = matches.group(2)
secret_version = matches.group(3)
secret_key = matches.group(4)
if not secret_version:
secret_version = "latest"
name = f"projects/{project_id}/secrets/{secret_id}/versions/{secret_version}"
try:
secret = client.access_secret_version(name=name)._pb.payload.data.decode("utf-8")
except AttributeError:
secret = client.access_secret_version(name=name).payload.data.decode(
"utf-8"
) # for google-cloud-secret-manager < 2.0.0
if secret_key:
secret = json.loads(secret)[secret_key]
return secret
def _substitute_value_from_azure_keyvault(self, value: str) -> str:
"""
This methods uses a azure.identity.DefaultAzureCredential to authenticate to the Azure SDK for Python
and a azure.keyvault.secrets.SecretClient to try to retrieve the secret value from the elements
it is able to parse from the input value.
- value: string with pattern ``secret|https://${vault_name}.vault.azure.net/secrets/${secret_name}``
optional : after the value above, a secret version can be added ``/${secret_version}``
optional : after the value above, a secret key can be added ``|${secret_key}``
- vault_name: `Vault name of the secret manager <https://docs.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates#objects-identifiers-and-versioning>`_
- secret_name: Name of the secret
- secret_version: ID of the version of the secret
- secret_key: Only if the secret's data is a JSON string, which key of the dict should be retrieve
:param value: a string that matches the following regex ``^secret|https://[a-zA-Z0-9-]{3,24}.vault.azure.net``
:return: a string with the value substituted by the secret from the Azure Key Vault store
:raises: ImportError, ValueError
""" # noqa: E501 # FIXME CoP
regex = re.compile(rf"{self.AZURE_PATTERN}(?:\/([a-f0-9]{32}))?(?:\|([^\|]+))?$")
if not azure.SecretClient: # type: ignore[truthy-function] # False if NotImported
logger.error(
"SecretClient is not installed, please install great_expectations with azure_secrets extra > " # noqa: E501 # FIXME CoP
"pip install great_expectations[azure_secrets]"
)
raise ImportError("Could not import SecretClient from azure.keyvault.secrets") # noqa: TRY003 # FIXME CoP
matches = regex.match(value)
if not matches:
raise ValueError(f"Could not match the value with regex {regex}") # noqa: TRY003 # FIXME CoP
keyvault_uri = matches.group(1)
secret_name = matches.group(2)
secret_version = matches.group(3)
secret_key = matches.group(4)
credential = azure.DefaultAzureCredential()
client = azure.SecretClient(vault_url=keyvault_uri, credential=credential)
secret = client.get_secret(name=secret_name, version=secret_version).value
if secret_key:
secret = json.loads(secret)[secret_key] # type: ignore[arg-type] # secret could be None
return secret # type: ignore[return-value] # secret could be None
| _ConfigurationSubstitutor |
python | apache__airflow | providers/google/src/airflow/providers/google/common/hooks/base_google.py | {
"start": 32008,
"end": 33491
} | class ____(BaseHook):
"""GoogleBaseAsyncHook inherits from BaseHook class, run on the trigger worker."""
sync_hook_class: Any = None
def __init__(self, **kwargs: Any) -> None:
# add default value to gcp_conn_id
if "gcp_conn_id" not in kwargs:
kwargs["gcp_conn_id"] = "google_cloud_default"
self._hook_kwargs = kwargs
self._sync_hook = None
async def get_sync_hook(self) -> Any:
"""Sync version of the Google Cloud Hook makes blocking calls in ``__init__``; don't inherit it."""
if not self._sync_hook:
self._sync_hook = await sync_to_async(self.sync_hook_class)(**self._hook_kwargs)
return self._sync_hook
async def get_token(self, *, session: ClientSession | None = None) -> _CredentialsToken:
"""Return a Token instance for use in [gcloud-aio](https://talkiq.github.io/gcloud-aio/) clients."""
sync_hook = await self.get_sync_hook()
return await _CredentialsToken.from_hook(sync_hook, session=session)
async def service_file_as_context(self) -> Any:
"""
Provide a Google Cloud credentials for Application Default Credentials (ADC) strategy support.
This is the async equivalent of the non-async GoogleBaseHook's `provide_gcp_credential_file_as_context` method.
"""
sync_hook = await self.get_sync_hook()
return await sync_to_async(sync_hook.provide_gcp_credential_file_as_context)()
| GoogleBaseAsyncHook |
python | pallets__jinja | tests/test_filters.py | {
"start": 604,
"end": 32741
} | class ____:
def test_filter_calling(self, env):
rv = env.call_filter("sum", [1, 2, 3])
assert rv == 6
def test_capitalize(self, env):
tmpl = env.from_string('{{ "foo bar"|capitalize }}')
assert tmpl.render() == "Foo bar"
def test_center(self, env):
tmpl = env.from_string('{{ "foo"|center(9) }}')
assert tmpl.render() == " foo "
def test_default(self, env):
tmpl = env.from_string(
"{{ missing|default('no') }}|{{ false|default('no') }}|"
"{{ false|default('no', true) }}|{{ given|default('no') }}"
)
assert tmpl.render(given="yes") == "no|False|no|yes"
@pytest.mark.parametrize(
"args,expect",
(
("", "[('aa', 0), ('AB', 3), ('b', 1), ('c', 2)]"),
("true", "[('AB', 3), ('aa', 0), ('b', 1), ('c', 2)]"),
('by="value"', "[('aa', 0), ('b', 1), ('c', 2), ('AB', 3)]"),
("reverse=true", "[('c', 2), ('b', 1), ('AB', 3), ('aa', 0)]"),
),
)
def test_dictsort(self, env, args, expect):
t = env.from_string(f"{{{{ foo|dictsort({args}) }}}}")
out = t.render(foo={"aa": 0, "b": 1, "c": 2, "AB": 3})
assert out == expect
def test_batch(self, env):
tmpl = env.from_string("{{ foo|batch(3)|list }}|{{ foo|batch(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == (
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|"
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]"
)
def test_slice(self, env):
tmpl = env.from_string("{{ foo|slice(3)|list }}|{{ foo|slice(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == (
"[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]"
)
def test_escape(self, env):
tmpl = env.from_string("""{{ '<">&'|escape }}""")
out = tmpl.render()
assert out == "<">&"
@pytest.mark.parametrize(
("chars", "expect"), [(None, "..stays.."), (".", " ..stays"), (" .", "stays")]
)
def test_trim(self, env, chars, expect):
tmpl = env.from_string("{{ foo|trim(chars) }}")
out = tmpl.render(foo=" ..stays..", chars=chars)
assert out == expect
def test_striptags(self, env):
tmpl = env.from_string("""{{ foo|striptags }}""")
out = tmpl.render(
foo=' <p>just a small \n <a href="#">'
"example</a> link</p>\n<p>to a webpage</p> "
"<!-- <p>and some commented stuff</p> -->"
)
assert out == "just a small example link to a webpage"
def test_filesizeformat(self, env):
tmpl = env.from_string(
"{{ 100|filesizeformat }}|"
"{{ 1000|filesizeformat }}|"
"{{ 1000000|filesizeformat }}|"
"{{ 1000000000|filesizeformat }}|"
"{{ 1000000000000|filesizeformat }}|"
"{{ 100|filesizeformat(true) }}|"
"{{ 1000|filesizeformat(true) }}|"
"{{ 1000000|filesizeformat(true) }}|"
"{{ 1000000000|filesizeformat(true) }}|"
"{{ 1000000000000|filesizeformat(true) }}"
)
out = tmpl.render()
assert out == (
"100 Bytes|1.0 kB|1.0 MB|1.0 GB|1.0 TB|100 Bytes|"
"1000 Bytes|976.6 KiB|953.7 MiB|931.3 GiB"
)
def test_filesizeformat_issue59(self, env):
tmpl = env.from_string(
"{{ 300|filesizeformat }}|"
"{{ 3000|filesizeformat }}|"
"{{ 3000000|filesizeformat }}|"
"{{ 3000000000|filesizeformat }}|"
"{{ 3000000000000|filesizeformat }}|"
"{{ 300|filesizeformat(true) }}|"
"{{ 3000|filesizeformat(true) }}|"
"{{ 3000000|filesizeformat(true) }}"
)
out = tmpl.render()
assert out == (
"300 Bytes|3.0 kB|3.0 MB|3.0 GB|3.0 TB|300 Bytes|2.9 KiB|2.9 MiB"
)
def test_first(self, env):
tmpl = env.from_string("{{ foo|first }}")
out = tmpl.render(foo=list(range(10)))
assert out == "0"
@pytest.mark.parametrize(
("value", "expect"), (("42", "42.0"), ("abc", "0.0"), ("32.32", "32.32"))
)
def test_float(self, env, value, expect):
t = env.from_string("{{ value|float }}")
assert t.render(value=value) == expect
def test_float_default(self, env):
t = env.from_string("{{ value|float(default=1.0) }}")
assert t.render(value="abc") == "1.0"
def test_format(self, env):
tmpl = env.from_string("{{ '%s|%s'|format('a', 'b') }}")
out = tmpl.render()
assert out == "a|b"
@staticmethod
def _test_indent_multiline_template(env, markup=False):
text = "\n".join(["", "foo bar", '"baz"', ""])
if markup:
text = Markup(text)
t = env.from_string("{{ foo|indent(2, false, false) }}")
assert t.render(foo=text) == '\n foo bar\n "baz"\n'
t = env.from_string("{{ foo|indent(2, false, true) }}")
assert t.render(foo=text) == '\n foo bar\n "baz"\n '
t = env.from_string("{{ foo|indent(2, true, false) }}")
assert t.render(foo=text) == ' \n foo bar\n "baz"\n'
t = env.from_string("{{ foo|indent(2, true, true) }}")
assert t.render(foo=text) == ' \n foo bar\n "baz"\n '
def test_indent(self, env):
self._test_indent_multiline_template(env)
t = env.from_string('{{ "jinja"|indent }}')
assert t.render() == "jinja"
t = env.from_string('{{ "jinja"|indent(first=true) }}')
assert t.render() == " jinja"
t = env.from_string('{{ "jinja"|indent(blank=true) }}')
assert t.render() == "jinja"
def test_indent_markup_input(self, env):
"""
Tests cases where the filter input is a Markup type
"""
self._test_indent_multiline_template(env, markup=True)
def test_indent_width_string(self, env):
t = env.from_string("{{ 'jinja\nflask'|indent(width='>>> ', first=True) }}")
assert t.render() == ">>> jinja\n>>> flask"
@pytest.mark.parametrize(
("value", "expect"),
(
("42", "42"),
("abc", "0"),
("32.32", "32"),
("12345678901234567890", "12345678901234567890"),
("1e10000", "0"),
),
)
def test_int(self, env, value, expect):
t = env.from_string("{{ value|int }}")
assert t.render(value=value) == expect
@pytest.mark.parametrize(
("value", "base", "expect"),
(("0x4d32", 16, "19762"), ("011", 8, "9"), ("0x33Z", 16, "0")),
)
def test_int_base(self, env, value, base, expect):
t = env.from_string("{{ value|int(base=base) }}")
assert t.render(value=value, base=base) == expect
def test_int_default(self, env):
t = env.from_string("{{ value|int(default=1) }}")
assert t.render(value="abc") == "1"
def test_int_special_method(self, env):
class IntIsh:
def __int__(self):
return 42
t = env.from_string("{{ value|int }}")
assert t.render(value=IntIsh()) == "42"
def test_join(self, env):
tmpl = env.from_string('{{ [1, 2, 3]|join("|") }}')
out = tmpl.render()
assert out == "1|2|3"
env2 = Environment(autoescape=True)
tmpl = env2.from_string('{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
assert tmpl.render() == "<foo><span>foo</span>"
def test_join_attribute(self, env):
User = namedtuple("User", "username")
tmpl = env.from_string("""{{ users|join(', ', 'username') }}""")
assert tmpl.render(users=map(User, ["foo", "bar"])) == "foo, bar"
def test_last(self, env):
tmpl = env.from_string("""{{ foo|last }}""")
out = tmpl.render(foo=list(range(10)))
assert out == "9"
def test_length(self, env):
tmpl = env.from_string("""{{ "hello world"|length }}""")
out = tmpl.render()
assert out == "11"
def test_lower(self, env):
tmpl = env.from_string("""{{ "FOO"|lower }}""")
out = tmpl.render()
assert out == "foo"
def test_items(self, env):
d = {i: c for i, c in enumerate("abc")}
tmpl = env.from_string("""{{ d|items|list }}""")
out = tmpl.render(d=d)
assert out == "[(0, 'a'), (1, 'b'), (2, 'c')]"
def test_items_undefined(self, env):
tmpl = env.from_string("""{{ d|items|list }}""")
out = tmpl.render()
assert out == "[]"
def test_pprint(self, env):
from pprint import pformat
tmpl = env.from_string("""{{ data|pprint }}""")
data = list(range(1000))
assert tmpl.render(data=data) == pformat(data)
def test_random(self, env, request):
# restore the random state when the test ends
state = random.getstate()
request.addfinalizer(lambda: random.setstate(state))
# generate the random values from a known seed
random.seed("jinja")
expected = [random.choice("1234567890") for _ in range(10)]
# check that the random sequence is generated again by a template
# ensures that filter result is not constant folded
random.seed("jinja")
t = env.from_string('{{ "1234567890"|random }}')
for value in expected:
assert t.render() == value
def test_reverse(self, env):
tmpl = env.from_string(
"{{ 'foobar'|reverse|join }}|{{ [1, 2, 3]|reverse|list }}"
)
assert tmpl.render() == "raboof|[3, 2, 1]"
def test_string(self, env):
x = [1, 2, 3, 4, 5]
tmpl = env.from_string("""{{ obj|string }}""")
assert tmpl.render(obj=x) == str(x)
def test_title(self, env):
tmpl = env.from_string("""{{ "foo bar"|title }}""")
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string("""{{ "foo's bar"|title }}""")
assert tmpl.render() == "Foo's Bar"
tmpl = env.from_string("""{{ "foo bar"|title }}""")
assert tmpl.render() == "Foo Bar"
tmpl = env.from_string("""{{ "f bar f"|title }}""")
assert tmpl.render() == "F Bar F"
tmpl = env.from_string("""{{ "foo-bar"|title }}""")
assert tmpl.render() == "Foo-Bar"
tmpl = env.from_string("""{{ "foo\tbar"|title }}""")
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string("""{{ "FOO\tBAR"|title }}""")
assert tmpl.render() == "Foo\tBar"
tmpl = env.from_string("""{{ "foo (bar)"|title }}""")
assert tmpl.render() == "Foo (Bar)"
tmpl = env.from_string("""{{ "foo {bar}"|title }}""")
assert tmpl.render() == "Foo {Bar}"
tmpl = env.from_string("""{{ "foo [bar]"|title }}""")
assert tmpl.render() == "Foo [Bar]"
tmpl = env.from_string("""{{ "foo <bar>"|title }}""")
assert tmpl.render() == "Foo <Bar>"
class Foo:
def __str__(self):
return "foo-bar"
tmpl = env.from_string("""{{ data|title }}""")
out = tmpl.render(data=Foo())
assert out == "Foo-Bar"
def test_truncate(self, env):
tmpl = env.from_string(
'{{ data|truncate(15, true, ">>>") }}|'
'{{ data|truncate(15, false, ">>>") }}|'
"{{ smalldata|truncate(15) }}"
)
out = tmpl.render(data="foobar baz bar" * 1000, smalldata="foobar baz bar")
assert out == "foobar baz b>>>|foobar baz>>>|foobar baz bar"
def test_truncate_very_short(self, env):
tmpl = env.from_string(
'{{ "foo bar baz"|truncate(9) }}|{{ "foo bar baz"|truncate(9, true) }}'
)
out = tmpl.render()
assert out == "foo bar baz|foo bar baz"
def test_truncate_end_length(self, env):
tmpl = env.from_string('{{ "Joel is a slug"|truncate(7, true) }}')
out = tmpl.render()
assert out == "Joel..."
def test_upper(self, env):
tmpl = env.from_string('{{ "foo"|upper }}')
assert tmpl.render() == "FOO"
def test_urlize(self, env):
tmpl = env.from_string('{{ "foo example.org bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="https://example.org" rel="noopener">example.org</a> bar'
)
tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="http://www.example.com/" rel="noopener">'
"http://www.example.com/</a> bar"
)
tmpl = env.from_string('{{ "foo mailto:email@example.com bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="mailto:email@example.com">email@example.com</a> bar'
)
tmpl = env.from_string('{{ "foo email@example.com bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="mailto:email@example.com">email@example.com</a> bar'
)
def test_urlize_rel_policy(self):
env = Environment()
env.policies["urlize.rel"] = None
tmpl = env.from_string('{{ "foo http://www.example.com/ bar"|urlize }}')
assert tmpl.render() == (
'foo <a href="http://www.example.com/">http://www.example.com/</a> bar'
)
def test_urlize_target_parameter(self, env):
tmpl = env.from_string(
'{{ "foo http://www.example.com/ bar"|urlize(target="_blank") }}'
)
assert (
tmpl.render()
== 'foo <a href="http://www.example.com/" rel="noopener" target="_blank">'
"http://www.example.com/</a> bar"
)
def test_urlize_extra_schemes_parameter(self, env):
tmpl = env.from_string(
'{{ "foo tel:+1-514-555-1234 ftp://localhost bar"|'
'urlize(extra_schemes=["tel:", "ftp:"]) }}'
)
assert tmpl.render() == (
'foo <a href="tel:+1-514-555-1234" rel="noopener">'
'tel:+1-514-555-1234</a> <a href="ftp://localhost" rel="noopener">'
"ftp://localhost</a> bar"
)
def test_wordcount(self, env):
tmpl = env.from_string('{{ "foo bar baz"|wordcount }}')
assert tmpl.render() == "3"
strict_env = Environment(undefined=StrictUndefined)
t = strict_env.from_string("{{ s|wordcount }}")
with pytest.raises(UndefinedError):
t.render()
def test_block(self, env):
tmpl = env.from_string("{% filter lower|escape %}<HEHE>{% endfilter %}")
assert tmpl.render() == "<hehe>"
def test_chaining(self, env):
tmpl = env.from_string("""{{ ['<foo>', '<bar>']|first|upper|escape }}""")
assert tmpl.render() == "<FOO>"
def test_sum(self, env):
tmpl = env.from_string("""{{ [1, 2, 3, 4, 5, 6]|sum }}""")
assert tmpl.render() == "21"
def test_sum_attributes(self, env):
tmpl = env.from_string("""{{ values|sum('value') }}""")
assert tmpl.render(values=[{"value": 23}, {"value": 1}, {"value": 18}]) == "42"
def test_sum_attributes_nested(self, env):
tmpl = env.from_string("""{{ values|sum('real.value') }}""")
assert (
tmpl.render(
values=[
{"real": {"value": 23}},
{"real": {"value": 1}},
{"real": {"value": 18}},
]
)
== "42"
)
def test_sum_attributes_tuple(self, env):
tmpl = env.from_string("""{{ values.items()|sum('1') }}""")
assert tmpl.render(values={"foo": 23, "bar": 1, "baz": 18}) == "42"
def test_abs(self, env):
tmpl = env.from_string("""{{ -1|abs }}|{{ 1|abs }}""")
assert tmpl.render() == "1|1", tmpl.render()
def test_round_positive(self, env):
tmpl = env.from_string(
"{{ 2.7|round }}|{{ 2.1|round }}|"
"{{ 2.1234|round(3, 'floor') }}|"
"{{ 2.1|round(0, 'ceil') }}"
)
assert tmpl.render() == "3.0|2.0|2.123|3.0", tmpl.render()
def test_round_negative(self, env):
tmpl = env.from_string(
"{{ 21.3|round(-1)}}|"
"{{ 21.3|round(-1, 'ceil')}}|"
"{{ 21.3|round(-1, 'floor')}}"
)
assert tmpl.render() == "20.0|30.0|20.0", tmpl.render()
def test_xmlattr(self, env):
tmpl = env.from_string(
"{{ {'foo': 42, 'bar': 23, 'fish': none, "
"'spam': missing, 'blub:blub': '<?>'}|xmlattr }}"
)
out = tmpl.render().split()
assert len(out) == 3
assert 'foo="42"' in out
assert 'bar="23"' in out
assert 'blub:blub="<?>"' in out
@pytest.mark.parametrize("sep", ("\t", "\n", "\f", " ", "/", ">", "="))
def test_xmlattr_key_invalid(self, env: Environment, sep: str) -> None:
with pytest.raises(ValueError, match="Invalid character"):
env.from_string("{{ {key: 'my_class'}|xmlattr }}").render(
key=f"class{sep}onclick=alert(1)"
)
def test_sort1(self, env):
tmpl = env.from_string("{{ [2, 3, 1]|sort }}|{{ [2, 3, 1]|sort(true) }}")
assert tmpl.render() == "[1, 2, 3]|[3, 2, 1]"
def test_sort2(self, env):
tmpl = env.from_string('{{ "".join(["c", "A", "b", "D"]|sort) }}')
assert tmpl.render() == "AbcD"
def test_sort3(self, env):
tmpl = env.from_string("""{{ ['foo', 'Bar', 'blah']|sort }}""")
assert tmpl.render() == "['Bar', 'blah', 'foo']"
def test_sort4(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value')|join }}""")
assert tmpl.render(items=map(Magic, [3, 2, 4, 1])) == "1234"
def test_sort5(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value.0')|join }}""")
assert tmpl.render(items=map(Magic, [[3], [2], [4], [1]])) == "[1][2][3][4]"
def test_sort6(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value1,value2')|join }}""")
assert (
tmpl.render(
items=map(
lambda x: Magic2(x[0], x[1]), [(3, 1), (2, 2), (2, 1), (2, 5)]
)
)
== "(2,1)(2,2)(2,5)(3,1)"
)
def test_sort7(self, env):
tmpl = env.from_string("""{{ items|sort(attribute='value2,value1')|join }}""")
assert (
tmpl.render(
items=map(
lambda x: Magic2(x[0], x[1]), [(3, 1), (2, 2), (2, 1), (2, 5)]
)
)
== "(2,1)(3,1)(2,2)(2,5)"
)
def test_sort8(self, env):
tmpl = env.from_string(
"""{{ items|sort(attribute='value1.0,value2.0')|join }}"""
)
assert (
tmpl.render(
items=map(
lambda x: Magic2(x[0], x[1]),
[([3], [1]), ([2], [2]), ([2], [1]), ([2], [5])],
)
)
== "([2],[1])([2],[2])([2],[5])([3],[1])"
)
def test_unique(self, env):
t = env.from_string('{{ "".join(["b", "A", "a", "b"]|unique) }}')
assert t.render() == "bA"
def test_unique_case_sensitive(self, env):
t = env.from_string('{{ "".join(["b", "A", "a", "b"]|unique(true)) }}')
assert t.render() == "bAa"
def test_unique_attribute(self, env):
t = env.from_string("{{ items|unique(attribute='value')|join }}")
assert t.render(items=map(Magic, [3, 2, 4, 1, 2])) == "3241"
@pytest.mark.parametrize(
"source,expect",
(
('{{ ["a", "B"]|min }}', "a"),
('{{ ["a", "B"]|min(case_sensitive=true) }}', "B"),
("{{ []|min }}", ""),
('{{ ["a", "B"]|max }}', "B"),
('{{ ["a", "B"]|max(case_sensitive=true) }}', "a"),
("{{ []|max }}", ""),
),
)
def test_min_max(self, env, source, expect):
t = env.from_string(source)
assert t.render() == expect
@pytest.mark.parametrize(("name", "expect"), [("min", "1"), ("max", "9")])
def test_min_max_attribute(self, env, name, expect):
t = env.from_string("{{ items|" + name + '(attribute="value") }}')
assert t.render(items=map(Magic, [5, 1, 9])) == expect
def test_groupby(self, env):
tmpl = env.from_string(
"""
{%- for grouper, list in [{'foo': 1, 'bar': 2},
{'foo': 2, 'bar': 3},
{'foo': 1, 'bar': 1},
{'foo': 3, 'bar': 4}]|groupby('foo') -%}
{{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render().split("|") == ["1: 1, 2: 1, 1", "2: 2, 3", "3: 3, 4", ""]
def test_groupby_tuple_index(self, env):
tmpl = env.from_string(
"""
{%- for grouper, list in [('a', 1), ('a', 2), ('b', 1)]|groupby(0) -%}
{{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render() == "a:1:2|b:1|"
def test_groupby_multidot(self, env):
Date = namedtuple("Date", "day,month,year")
Article = namedtuple("Article", "title,date")
articles = [
Article("aha", Date(1, 1, 1970)),
Article("interesting", Date(2, 1, 1970)),
Article("really?", Date(3, 1, 1970)),
Article("totally not", Date(1, 1, 1971)),
]
tmpl = env.from_string(
"""
{%- for year, list in articles|groupby('date.year') -%}
{{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
{%- endfor %}"""
)
assert tmpl.render(articles=articles).split("|") == [
"1970[aha][interesting][really?]",
"1971[totally not]",
"",
]
def test_groupby_default(self, env):
tmpl = env.from_string(
"{% for city, items in users|groupby('city', default='NY') %}"
"{{ city }}: {{ items|map(attribute='name')|join(', ') }}\n"
"{% endfor %}"
)
out = tmpl.render(
users=[
{"name": "emma", "city": "NY"},
{"name": "smith", "city": "WA"},
{"name": "john"},
]
)
assert out == "NY: emma, john\nWA: smith\n"
@pytest.mark.parametrize(
("case_sensitive", "expect"),
[
(False, "a: 1, 3\nb: 2\n"),
(True, "A: 3\na: 1\nb: 2\n"),
],
)
def test_groupby_case(self, env, case_sensitive, expect):
tmpl = env.from_string(
"{% for k, vs in data|groupby('k', case_sensitive=cs) %}"
"{{ k }}: {{ vs|join(', ', attribute='v') }}\n"
"{% endfor %}"
)
out = tmpl.render(
data=[{"k": "a", "v": 1}, {"k": "b", "v": 2}, {"k": "A", "v": 3}],
cs=case_sensitive,
)
assert out == expect
def test_filtertag(self, env):
tmpl = env.from_string(
"{% filter upper|replace('FOO', 'foo') %}foobar{% endfilter %}"
)
assert tmpl.render() == "fooBAR"
def test_replace(self, env):
env = Environment()
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string="<foo>") == "<f4242>"
env = Environment(autoescape=True)
tmpl = env.from_string('{{ string|replace("o", 42) }}')
assert tmpl.render(string="<foo>") == "<f4242>"
tmpl = env.from_string('{{ string|replace("<", 42) }}')
assert tmpl.render(string="<foo>") == "42foo>"
tmpl = env.from_string('{{ string|replace("o", ">x<") }}')
assert tmpl.render(string=Markup("foo")) == "f>x<>x<"
def test_forceescape(self, env):
tmpl = env.from_string("{{ x|forceescape }}")
assert tmpl.render(x=Markup("<div />")) == "<div />"
def test_safe(self, env):
env = Environment(autoescape=True)
tmpl = env.from_string('{{ "<div>foo</div>"|safe }}')
assert tmpl.render() == "<div>foo</div>"
tmpl = env.from_string('{{ "<div>foo</div>" }}')
assert tmpl.render() == "<div>foo</div>"
@pytest.mark.parametrize(
("value", "expect"),
[
("Hello, world!", "Hello%2C%20world%21"),
("Hello, world\u203d", "Hello%2C%20world%E2%80%BD"),
({"f": 1}, "f=1"),
([("f", 1), ("z", 2)], "f=1&z=2"),
({"\u203d": 1}, "%E2%80%BD=1"),
({0: 1}, "0=1"),
([("a b/c", "a b/c")], "a+b%2Fc=a+b%2Fc"),
("a b/c", "a%20b/c"),
],
)
def test_urlencode(self, value, expect):
e = Environment(autoescape=True)
t = e.from_string("{{ value|urlencode }}")
assert t.render(value=value) == expect
def test_simple_map(self, env):
env = Environment()
tmpl = env.from_string('{{ ["1", "2", "3"]|map("int")|sum }}')
assert tmpl.render() == "6"
def test_map_sum(self, env):
tmpl = env.from_string('{{ [[1,2], [3], [4,5,6]]|map("sum")|list }}')
assert tmpl.render() == "[3, 3, 15]"
def test_attribute_map(self, env):
User = namedtuple("User", "name")
env = Environment()
users = [
User("john"),
User("jane"),
User("mike"),
]
tmpl = env.from_string('{{ users|map(attribute="name")|join("|") }}')
assert tmpl.render(users=users) == "john|jane|mike"
def test_empty_map(self, env):
env = Environment()
tmpl = env.from_string('{{ none|map("upper")|list }}')
assert tmpl.render() == "[]"
def test_map_default(self, env):
Fullname = namedtuple("Fullname", "firstname,lastname")
Firstname = namedtuple("Firstname", "firstname")
env = Environment()
tmpl = env.from_string(
'{{ users|map(attribute="lastname", default="smith")|join(", ") }}'
)
test_list = env.from_string(
'{{ users|map(attribute="lastname", default=["smith","x"])|join(", ") }}'
)
test_str = env.from_string(
'{{ users|map(attribute="lastname", default="")|join(", ") }}'
)
users = [
Fullname("john", "lennon"),
Fullname("jane", "edwards"),
Fullname("jon", None),
Firstname("mike"),
]
assert tmpl.render(users=users) == "lennon, edwards, None, smith"
assert test_list.render(users=users) == "lennon, edwards, None, ['smith', 'x']"
assert test_str.render(users=users) == "lennon, edwards, None, "
def test_simple_select(self, env):
env = Environment()
tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|select("odd")|join("|") }}')
assert tmpl.render() == "1|3|5"
def test_bool_select(self, env):
env = Environment()
tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|select|join("|") }}')
assert tmpl.render() == "1|2|3|4|5"
def test_simple_reject(self, env):
env = Environment()
tmpl = env.from_string('{{ [1, 2, 3, 4, 5]|reject("odd")|join("|") }}')
assert tmpl.render() == "2|4"
def test_bool_reject(self, env):
env = Environment()
tmpl = env.from_string('{{ [none, false, 0, 1, 2, 3, 4, 5]|reject|join("|") }}')
assert tmpl.render() == "None|False|0"
def test_simple_select_attr(self, env):
User = namedtuple("User", "name,is_active")
env = Environment()
users = [
User("john", True),
User("jane", True),
User("mike", False),
]
tmpl = env.from_string(
'{{ users|selectattr("is_active")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "john|jane"
def test_simple_reject_attr(self, env):
User = namedtuple("User", "name,is_active")
env = Environment()
users = [
User("john", True),
User("jane", True),
User("mike", False),
]
tmpl = env.from_string(
'{{ users|rejectattr("is_active")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "mike"
def test_func_select_attr(self, env):
User = namedtuple("User", "id,name")
env = Environment()
users = [
User(1, "john"),
User(2, "jane"),
User(3, "mike"),
]
tmpl = env.from_string(
'{{ users|selectattr("id", "odd")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "john|mike"
def test_func_reject_attr(self, env):
User = namedtuple("User", "id,name")
env = Environment()
users = [
User(1, "john"),
User(2, "jane"),
User(3, "mike"),
]
tmpl = env.from_string(
'{{ users|rejectattr("id", "odd")|map(attribute="name")|join("|") }}'
)
assert tmpl.render(users=users) == "jane"
def test_json_dump(self):
env = Environment(autoescape=True)
t = env.from_string("{{ x|tojson }}")
assert t.render(x={"foo": "bar"}) == '{"foo": "bar"}'
assert t.render(x="\"ba&r'") == r'"\"ba\u0026r\u0027"'
assert t.render(x="<bar>") == r'"\u003cbar\u003e"'
def my_dumps(value, **options):
assert options == {"foo": "bar"}
return "42"
env.policies["json.dumps_function"] = my_dumps
env.policies["json.dumps_kwargs"] = {"foo": "bar"}
assert t.render(x=23) == "42"
def test_wordwrap(self, env):
env.newline_sequence = "\n"
t = env.from_string("{{ s|wordwrap(20) }}")
result = t.render(s="Hello!\nThis is Jinja saying something.")
assert result == "Hello!\nThis is Jinja saying\nsomething."
def test_filter_undefined(self, env):
with pytest.raises(TemplateAssertionError, match="No filter named 'f'"):
env.from_string("{{ var|f }}")
def test_filter_undefined_in_if(self, env):
t = env.from_string("{%- if x is defined -%}{{ x|f }}{%- else -%}x{% endif %}")
assert t.render() == "x"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(x=42)
def test_filter_undefined_in_elif(self, env):
t = env.from_string(
"{%- if x is defined -%}{{ x }}{%- elif y is defined -%}"
"{{ y|f }}{%- else -%}foo{%- endif -%}"
)
assert t.render() == "foo"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(y=42)
def test_filter_undefined_in_else(self, env):
t = env.from_string(
"{%- if x is not defined -%}foo{%- else -%}{{ x|f }}{%- endif -%}"
)
assert t.render() == "foo"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(x=42)
def test_filter_undefined_in_nested_if(self, env):
t = env.from_string(
"{%- if x is not defined -%}foo{%- else -%}{%- if y "
"is defined -%}{{ y|f }}{%- endif -%}{{ x }}{%- endif -%}"
)
assert t.render() == "foo"
assert t.render(x=42) == "42"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t.render(x=24, y=42)
def test_filter_undefined_in_condexpr(self, env):
t1 = env.from_string("{{ x|f if x is defined else 'foo' }}")
t2 = env.from_string("{{ 'foo' if x is not defined else x|f }}")
assert t1.render() == t2.render() == "foo"
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t1.render(x=42)
with pytest.raises(TemplateRuntimeError, match="No filter named 'f'"):
t2.render(x=42)
| TestFilter |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultFunction2.py | {
"start": 1270,
"end": 1383
} | class ____(Generic[T6, T7, T8]):
def __new__(cls, x: T7, /) -> Self: ...
def method1(self) -> T7: ...
| ClassC |
python | sympy__sympy | sympy/solvers/ode/single.py | {
"start": 97535,
"end": 101899
} | class ____(SingleODESolver):
r"""
Gives solution of the Bessel differential equation
.. math :: x^2 \frac{d^2y}{dx^2} + x \frac{dy}{dx} y(x) + (x^2-n^2) y(x)
if `n` is integer then the solution is of the form ``Eq(f(x), C0 besselj(n,x)
+ C1 bessely(n,x))`` as both the solutions are linearly independent else if
`n` is a fraction then the solution is of the form ``Eq(f(x), C0 besselj(n,x)
+ C1 besselj(-n,x))`` which can also transform into ``Eq(f(x), C0 besselj(n,x)
+ C1 bessely(n,x))``.
Examples
========
>>> from sympy.abc import x
>>> from sympy import Symbol
>>> v = Symbol('v', positive=True)
>>> from sympy import dsolve, Function
>>> f = Function('f')
>>> y = f(x)
>>> genform = x**2*y.diff(x, 2) + x*y.diff(x) + (x**2 - v**2)*y
>>> dsolve(genform)
Eq(f(x), C1*besselj(v, x) + C2*bessely(v, x))
References
==========
https://math24.net/bessel-differential-equation.html
"""
hint = "2nd_linear_bessel"
has_integral = False
def _matches(self):
eq = self.ode_problem.eq_high_order_free
f = self.ode_problem.func
order = self.ode_problem.order
x = self.ode_problem.sym
df = f.diff(x)
a = Wild('a', exclude=[f,df])
b = Wild('b', exclude=[x, f,df])
a4 = Wild('a4', exclude=[x,f,df])
b4 = Wild('b4', exclude=[x,f,df])
c4 = Wild('c4', exclude=[x,f,df])
d4 = Wild('d4', exclude=[x,f,df])
a3 = Wild('a3', exclude=[f, df, f.diff(x, 2)])
b3 = Wild('b3', exclude=[f, df, f.diff(x, 2)])
c3 = Wild('c3', exclude=[f, df, f.diff(x, 2)])
deq = a3*(f.diff(x, 2)) + b3*df + c3*f
r = collect(eq,
[f.diff(x, 2), df, f]).match(deq)
if order == 2 and r:
if not all(r[key].is_polynomial() for key in r):
n, d = eq.as_numer_denom()
eq = expand(n)
r = collect(eq,
[f.diff(x, 2), df, f]).match(deq)
if r and r[a3] != 0:
# leading coeff of f(x).diff(x, 2)
coeff = factor(r[a3]).match(a4*(x-b)**b4)
if coeff:
# if coeff[b4] = 0 means constant coefficient
if coeff[b4] == 0:
return False
point = coeff[b]
else:
return False
if point:
r[a3] = simplify(r[a3].subs(x, x+point))
r[b3] = simplify(r[b3].subs(x, x+point))
r[c3] = simplify(r[c3].subs(x, x+point))
# making a3 in the form of x**2
r[a3] = cancel(r[a3]/(coeff[a4]*(x)**(-2+coeff[b4])))
r[b3] = cancel(r[b3]/(coeff[a4]*(x)**(-2+coeff[b4])))
r[c3] = cancel(r[c3]/(coeff[a4]*(x)**(-2+coeff[b4])))
# checking if b3 is of form c*(x-b)
coeff1 = factor(r[b3]).match(a4*(x))
if coeff1 is None:
return False
# c3 maybe of very complex form so I am simply checking (a - b) form
# if yes later I will match with the standard form of bessel in a and b
# a, b are wild variable defined above.
_coeff2 = expand(r[c3]).match(a - b)
if _coeff2 is None:
return False
# matching with standard form for c3
coeff2 = factor(_coeff2[a]).match(c4**2*(x)**(2*a4))
if coeff2 is None:
return False
if _coeff2[b] == 0:
coeff2[d4] = 0
else:
coeff2[d4] = factor(_coeff2[b]).match(d4**2)[d4]
self.rn = {'n':coeff2[d4], 'a4':coeff2[c4], 'd4':coeff2[a4]}
self.rn['c4'] = coeff1[a4]
self.rn['b4'] = point
return True
return False
def _get_general_solution(self, *, simplify_flag: bool = True):
f = self.ode_problem.func.func
x = self.ode_problem.sym
n = self.rn['n']
a4 = self.rn['a4']
c4 = self.rn['c4']
d4 = self.rn['d4']
b4 = self.rn['b4']
n = sqrt(n**2 + Rational(1, 4)*(c4 - 1)**2)
(C1, C2) = self.ode_problem.get_numbered_constants(num=2)
return [Eq(f(x), ((x**(Rational(1-c4,2)))*(C1*besselj(n/d4,a4*x**d4/d4)
+ C2*bessely(n/d4,a4*x**d4/d4))).subs(x, x-b4))]
| SecondLinearBessel |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_clip_grad_norm_.py | {
"start": 769,
"end": 3888
} | class ____(FSDPTest):
def _test_clip_grad_norm(
self,
max_norm: Union[float, int],
norm_type: Union[float, int],
ref_model: nn.Module,
ref_optim: torch.optim.Optimizer,
model: nn.Module,
optim: torch.optim.Optimizer,
inp: torch.Tensor,
dp_mesh: Optional[DeviceMesh] = None,
):
vector_norm_fn = functools.partial(torch.linalg.vector_norm, ord=norm_type)
dp_mesh = dp_mesh or init_device_mesh(device_type.type, (self.world_size,))
torch.manual_seed(42 + dp_mesh.get_local_rank() + 1)
for _ in range(10):
ref_optim.zero_grad()
ref_model(inp).sum().backward()
optim.zero_grad()
model(inp).sum().backward()
ref_grads = [p.grad.detach().clone() for p in ref_model.parameters()]
local_grads = [
p.grad.to_local().detach().clone() for p in model.parameters()
]
for ref_grad, param in zip(ref_grads, model.parameters()):
self.assertEqual(ref_grad, param.grad.full_tensor())
# Check that at least one gradient has norm greater than the max
# norm before clipping to ensure the clipping is not vacuous
self.assertTrue(any(vector_norm_fn(g).item() > max_norm for g in ref_grads))
self.assertTrue(
any(vector_norm_fn(g).item() > max_norm for g in local_grads)
)
# Check gradient norm clipping via total norm and individual
# gradient norms post-clipping
ref_total_norm = torch.nn.utils.clip_grad_norm_(
ref_model.parameters(), max_norm=max_norm, norm_type=norm_type
)
comm_mode = CommDebugMode()
with comm_mode:
# foreach is default to turn on so we don't need to specify it.
total_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(),
max_norm=max_norm,
norm_type=norm_type,
)
self.assertEqual(ref_total_norm, total_norm.full_tensor())
# Expect one all-reduce per mesh dim for partial -> replicate
expected_all_reduces = len(total_norm.placements)
self.assertEqual(
comm_mode.get_comm_counts()[torch.ops.c10d_functional.all_reduce],
expected_all_reduces,
)
# For zero gradients, clipping has no effect
for param, grad in zip(ref_model.parameters(), ref_grads):
self.assertTrue(vector_norm_fn(param.grad).item() <= max_norm)
if torch.count_nonzero(grad):
self.assertFalse(torch.equal(param.grad, grad))
for param, grad in zip(model.parameters(), local_grads):
self.assertTrue(
vector_norm_fn(param.grad.to_local()).item() <= max_norm
)
if torch.count_nonzero(grad):
self.assertFalse(torch.equal(param.grad.to_local(), grad))
| _TestClipGradNormBase |
python | python-markdown__markdown | tests/test_syntax/extensions/test_toc.py | {
"start": 58296,
"end": 59971
} | class ____(TestCase):
def testStripElement(self):
self.assertEqual(
strip_tags('foo <em>bar</em>'),
'foo bar'
)
def testStripOpenElement(self):
self.assertEqual(
strip_tags('foo <em>bar'),
'foo bar'
)
def testStripEmptyElement(self):
self.assertEqual(
strip_tags('foo <br />bar'),
'foo bar'
)
def testDontStripOpenBracket(self):
self.assertEqual(
strip_tags('foo < bar'),
'foo < bar'
)
def testDontStripCloseBracket(self):
self.assertEqual(
strip_tags('foo > bar'),
'foo > bar'
)
def testStripCollapseWhitespace(self):
self.assertEqual(
strip_tags('foo <em>\tbar\t</em>'),
'foo bar'
)
def testStripElementWithNewlines(self):
self.assertEqual(
strip_tags('foo <meta content="tag\nwith\nnewlines"> bar'),
'foo bar'
)
def testStripComment(self):
self.assertEqual(
strip_tags('foo <!-- comment --> bar'),
'foo bar'
)
def testStripCommentWithInnerTags(self):
self.assertEqual(
strip_tags('foo <!-- comment with <em> --> bar'),
'foo bar'
)
def testStripCommentInElement(self):
self.assertEqual(
strip_tags('<em>foo <!-- comment --> bar<em>'),
'foo bar'
)
def testDontStripHTMLEntities(self):
self.assertEqual(
strip_tags('foo < & < bar'),
'foo < & < bar'
)
| testStripTags |
python | scipy__scipy | scipy/optimize/tests/test_zeros.py | {
"start": 5555,
"end": 8775
} | class ____(TestScalarRootFinders):
@pytest.mark.parametrize('method', bracket_methods)
@pytest.mark.parametrize('function', tstutils_functions)
def test_basic_root_scalar(self, method, function):
# Tests bracketing root finders called via `root_scalar` on a small
# set of simple problems, each of which has a root at `x=1`. Checks for
# converged status and that the root was found.
a, b = .5, sqrt(3)
r = root_scalar(function, method=method.__name__, bracket=[a, b], x0=a,
xtol=self.xtol, rtol=self.rtol)
assert r.converged
assert_allclose(r.root, 1.0, atol=self.xtol, rtol=self.rtol)
assert r.method == method.__name__
@pytest.mark.parametrize('method', bracket_methods)
@pytest.mark.parametrize('function', tstutils_functions)
def test_basic_individual(self, method, function):
# Tests individual bracketing root finders on a small set of simple
# problems, each of which has a root at `x=1`. Checks for converged
# status and that the root was found.
a, b = .5, sqrt(3)
root, r = method(function, a, b, xtol=self.xtol, rtol=self.rtol,
full_output=True)
assert r.converged
assert_allclose(root, 1.0, atol=self.xtol, rtol=self.rtol)
@pytest.mark.parametrize('method', bracket_methods)
@pytest.mark.parametrize('function', tstutils_functions)
def test_bracket_is_array(self, method, function):
# Test bracketing root finders called via `root_scalar` on a small set
# of simple problems, each of which has a root at `x=1`. Check that
# passing `bracket` as a `ndarray` is accepted and leads to finding the
# correct root.
a, b = .5, sqrt(3)
r = root_scalar(function, method=method.__name__,
bracket=np.array([a, b]), x0=a, xtol=self.xtol,
rtol=self.rtol)
assert r.converged
assert_allclose(r.root, 1.0, atol=self.xtol, rtol=self.rtol)
assert r.method == method.__name__
@pytest.mark.parametrize('method', bracket_methods)
def test_aps_collection(self, method):
self.run_collection('aps', method, method.__name__, smoothness=1)
@pytest.mark.parametrize('method', [zeros.bisect, zeros.ridder,
zeros.toms748])
def test_chandrupatla_collection(self, method):
known_fail = {'fun7.4'} if method == zeros.ridder else {}
self.run_collection('chandrupatla', method, method.__name__,
known_fail=known_fail)
@pytest.mark.parametrize('method', bracket_methods)
def test_lru_cached_individual(self, method):
# check that https://github.com/scipy/scipy/issues/10846 is fixed
# (`root_scalar` failed when passed a function that was `@lru_cache`d)
a, b = -1, 1
root, r = method(f_lrucached, a, b, full_output=True)
assert r.converged
assert_allclose(root, 0)
def test_gh_22934(self):
with pytest.raises(ValueError, match="maxiter must be >= 0"):
zeros.brentq(lambda x: x**2 - 1, -2, 0, maxiter=-1)
| TestBracketMethods |
python | encode__django-rest-framework | tests/test_relations_pk.py | {
"start": 7924,
"end": 15826
} | class ____(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
new_target = ForeignKeyTarget(name='target-2')
new_target.save()
for idx in range(1, 4):
source = ForeignKeySource(name='source-%d' % idx, target=target)
source.save()
def test_foreign_key_retrieve(self):
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': 1}
]
with self.assertNumQueries(1):
assert serializer.data == expected
def test_reverse_foreign_key_retrieve(self):
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]},
{'id': 2, 'name': 'target-2', 'sources': []},
]
with self.assertNumQueries(3):
assert serializer.data == expected
def test_reverse_foreign_key_retrieve_prefetch_related(self):
queryset = ForeignKeyTarget.objects.all().prefetch_related('sources')
serializer = ForeignKeyTargetSerializer(queryset, many=True)
with self.assertNumQueries(2):
serializer.data
def test_foreign_key_update(self):
data = {'id': 1, 'name': 'source-1', 'target': 2}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data)
assert serializer.is_valid()
serializer.save()
assert serializer.data == data
# Ensure source 1 is updated, and everything else is as expected
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 2},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': 1}
]
assert serializer.data == expected
def test_foreign_key_update_incorrect_type(self):
data = {'id': 1, 'name': 'source-1', 'target': 'foo'}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data)
assert not serializer.is_valid()
assert serializer.errors == {'target': ['Incorrect type. Expected pk value, received str.']}
def test_reverse_foreign_key_update(self):
data = {'id': 2, 'name': 'target-2', 'sources': [1, 3]}
instance = ForeignKeyTarget.objects.get(pk=2)
serializer = ForeignKeyTargetSerializer(instance, data=data)
assert serializer.is_valid()
# We shouldn't have saved anything to the db yet since save
# hasn't been called.
queryset = ForeignKeyTarget.objects.all()
new_serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]},
{'id': 2, 'name': 'target-2', 'sources': []},
]
assert new_serializer.data == expected
serializer.save()
assert serializer.data == data
# Ensure target 2 is update, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [2]},
{'id': 2, 'name': 'target-2', 'sources': [1, 3]},
]
assert serializer.data == expected
def test_foreign_key_create(self):
data = {'id': 4, 'name': 'source-4', 'target': 2}
serializer = ForeignKeySourceSerializer(data=data)
assert serializer.is_valid()
obj = serializer.save()
assert serializer.data == data
assert obj.name == 'source-4'
# Ensure source 4 is added, and everything else is as expected
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': 1},
{'id': 4, 'name': 'source-4', 'target': 2},
]
assert serializer.data == expected
def test_reverse_foreign_key_create(self):
data = {'id': 3, 'name': 'target-3', 'sources': [1, 3]}
serializer = ForeignKeyTargetSerializer(data=data)
assert serializer.is_valid()
obj = serializer.save()
assert serializer.data == data
assert obj.name == 'target-3'
# Ensure target 3 is added, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [2]},
{'id': 2, 'name': 'target-2', 'sources': []},
{'id': 3, 'name': 'target-3', 'sources': [1, 3]},
]
assert serializer.data == expected
def test_foreign_key_update_with_invalid_null(self):
data = {'id': 1, 'name': 'source-1', 'target': None}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data)
assert not serializer.is_valid()
assert serializer.errors == {'target': ['This field may not be null.']}
def test_foreign_key_with_unsaved(self):
source = ForeignKeySource(name='source-unsaved')
expected = {'id': None, 'name': 'source-unsaved', 'target': None}
serializer = ForeignKeySourceSerializer(source)
# no query if source hasn't been created yet
with self.assertNumQueries(0):
assert serializer.data == expected
def test_foreign_key_with_empty(self):
"""
Regression test for #1072
https://github.com/encode/django-rest-framework/issues/1072
"""
serializer = NullableForeignKeySourceSerializer()
assert serializer.data['target'] is None
def test_foreign_key_not_required(self):
"""
Let's say we wanted to fill the non-nullable model field inside
Model.save(), we would make it empty and not required.
"""
class ModelSerializer(ForeignKeySourceSerializer):
class Meta(ForeignKeySourceSerializer.Meta):
extra_kwargs = {'target': {'required': False}}
serializer = ModelSerializer(data={'name': 'test'})
serializer.is_valid(raise_exception=True)
assert 'target' not in serializer.validated_data
def test_queryset_size_without_limited_choices(self):
limited_target = ForeignKeyTarget(name="limited-target")
limited_target.save()
queryset = ForeignKeySourceSerializer().fields["target"].get_queryset()
assert len(queryset) == 3
def test_queryset_size_with_limited_choices(self):
limited_target = ForeignKeyTarget(name="limited-target")
limited_target.save()
queryset = ForeignKeySourceWithLimitedChoicesSerializer().fields["target"].get_queryset()
assert len(queryset) == 1
def test_queryset_size_with_Q_limited_choices(self):
limited_target = ForeignKeyTarget(name="limited-target")
limited_target.save()
class QLimitedChoicesSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySourceWithQLimitedChoices
fields = ("id", "target")
queryset = QLimitedChoicesSerializer().fields["target"].get_queryset()
assert len(queryset) == 1
| PKForeignKeyTests |
python | keras-team__keras | keras/src/layers/preprocessing/data_layer_test.py | {
"start": 255,
"end": 1223
} | class ____(DataLayer):
def __init__(self, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = SeedGenerator(seed)
def call(self, inputs):
images_shape = self.backend.shape(inputs)
batch_size = 1 if len(images_shape) == 3 else images_shape[0]
seed = self._get_seed_generator(self.backend._backend)
probability = self.backend.random.uniform(
shape=(batch_size,),
minval=0.0,
maxval=1.0,
seed=seed,
)
hsv_images = self.backend.image.rgb_to_hsv(
inputs, data_format=self.data_format
)
return self.backend.numpy.where(
probability[:, None, None, None] > 0.5, hsv_images, inputs
)
def compute_output_shape(self, input_shape):
return input_shape
| RandomRGBToHSVLayer |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/telemetry/__init__.py | {
"start": 4881,
"end": 11335
} | class ____(
NamedTuple(
"_TelemetryEntry",
[
("action", str),
("client_time", str),
("event_id", str),
("elapsed_time", str),
("instance_id", str),
("metadata", Mapping[str, str]),
("python_version", str),
("dagster_version", str),
("os_desc", str),
("os_platform", str),
("run_storage_id", str),
("is_known_ci_env", bool),
],
)
):
"""Schema for telemetry logs.
Currently, log entries are coerced to the same schema to enable storing all entries in one DB
table with unified schema.
action - Name of function called i.e. `execute_job_started` (see: fn telemetry_wrapper)
client_time - Client time
elapsed_time - Time elapsed between start of function and end of function call
event_id - Unique id for the event
instance_id - Unique id for dagster instance
python_version - Python version
metadata - More information i.e. pipeline success (boolean)
version - Schema version
dagster_version - Version of the project being used.
os_desc - String describing OS in use
os_platform - Terse string describing OS platform - linux, windows, darwin, etc.
run_storage_id - Unique identifier of run storage database
If $DAGSTER_HOME is set, then use $DAGSTER_HOME/logs/
Otherwise, use ~/.dagster/logs/
"""
def __new__(
cls,
action: str,
client_time: str,
event_id: str,
instance_id: str,
metadata: Optional[Mapping[str, str]] = None,
elapsed_time: Optional[str] = None,
run_storage_id: Optional[str] = None,
):
OS_DESC = platform.platform()
OS_PLATFORM = platform.system()
return super().__new__(
cls,
action=action,
client_time=client_time,
elapsed_time=elapsed_time or "",
event_id=event_id,
instance_id=instance_id,
python_version=get_python_version(),
metadata=metadata or {},
dagster_version=__version__ or "None",
os_desc=OS_DESC,
os_platform=OS_PLATFORM,
run_storage_id=run_storage_id or "",
is_known_ci_env=get_is_known_ci_env(),
)
def log_telemetry_action(
get_telemetry_settings: Callable[[], TelemetrySettings],
action: str,
client_time: Optional[datetime.datetime] = None,
elapsed_time: Optional[datetime.timedelta] = None,
metadata: Optional[Mapping[str, str]] = None,
) -> None:
if client_time is None:
client_time = datetime.datetime.now()
(dagster_telemetry_enabled, instance_id, run_storage_id) = get_telemetry_settings()
if dagster_telemetry_enabled:
if not instance_id:
raise Exception("Instance ID must be set when telemetry is enabled")
# Log general statistics
write_telemetry_log_line(
TelemetryEntry(
action=action,
client_time=str(client_time),
elapsed_time=str(elapsed_time),
event_id=str(uuid.uuid4()),
instance_id=instance_id,
metadata=metadata,
run_storage_id=run_storage_id,
)._asdict()
)
def get_telemetry_enabled_from_dagster_yaml() -> bool:
"""Lightweight check to see if telemetry is enabled by checking $DAGSTER_HOME/dagster.yaml,
without needing to load the entire Dagster instance.
"""
import yaml
dagster_home_path = dagster_home_if_set()
if dagster_home_path is None:
return True
dagster_yaml_path = os.path.join(dagster_home_path, "dagster.yaml")
if not os.path.exists(dagster_yaml_path):
return True
with open(dagster_yaml_path, encoding="utf8") as dagster_yaml_file:
dagster_yaml_data = yaml.safe_load(dagster_yaml_file)
if (
dagster_yaml_data
and "telemetry" in dagster_yaml_data
and "enabled" in dagster_yaml_data["telemetry"]
):
return dagster_yaml_data["telemetry"]["enabled"]
return True
def get_or_set_instance_id() -> str:
instance_id = _get_telemetry_instance_id()
if instance_id is None:
instance_id = _set_telemetry_instance_id()
return instance_id
# Gets the instance_id at $DAGSTER_HOME/.telemetry/id.yaml
def _get_telemetry_instance_id() -> Optional[str]:
import yaml
telemetry_id_path = os.path.join(get_or_create_dir_from_dagster_home(TELEMETRY_STR), "id.yaml")
if not os.path.exists(telemetry_id_path):
return
with open(telemetry_id_path, encoding="utf8") as telemetry_id_file:
telemetry_id_yaml = yaml.safe_load(telemetry_id_file)
if (
telemetry_id_yaml
and INSTANCE_ID_STR in telemetry_id_yaml
and isinstance(telemetry_id_yaml[INSTANCE_ID_STR], str)
):
return telemetry_id_yaml[INSTANCE_ID_STR]
return None
# Sets the instance_id at $DAGSTER_HOME/.telemetry/id.yaml
def _set_telemetry_instance_id() -> str:
import yaml
click.secho(TELEMETRY_TEXT, err=True)
click.secho(SLACK_PROMPT, err=True)
telemetry_id_path = os.path.join(get_or_create_dir_from_dagster_home(TELEMETRY_STR), "id.yaml")
instance_id = str(uuid.uuid4())
try: # In case we encounter an error while writing to user's file system
with open(telemetry_id_path, "w", encoding="utf8") as telemetry_id_file:
yaml.dump({INSTANCE_ID_STR: instance_id}, telemetry_id_file, default_flow_style=False)
return instance_id
except Exception:
return "<<unable_to_write_instance_id>>"
TELEMETRY_TEXT = """
{telemetry}
As an open-source project, we collect usage statistics to inform development priorities. For more
information, read https://docs.dagster.io/about/telemetry.
We will not see or store any data that is processed by your code.
To opt-out, add the following to $DAGSTER_HOME/dagster.yaml, creating that file if necessary:
telemetry:
enabled: false
""".format(telemetry=click.style("Telemetry:", fg="blue", bold=True))
SLACK_PROMPT = """
{welcome}
If you have any questions or would like to engage with the Dagster team, please join us on Slack
(https://bit.ly/39dvSsF).
""".format(welcome=click.style("Welcome to Dagster!", bold=True))
| TelemetryEntry |
python | apache__airflow | airflow-core/src/airflow/triggers/base.py | {
"start": 1550,
"end": 4566
} | class ____(abc.ABC, LoggingMixin):
"""
Base class for all triggers.
A trigger has two contexts it can exist in:
- Inside an Operator, when it's passed to TaskDeferred
- Actively running in a trigger worker
We use the same class for both situations, and rely on all Trigger classes
to be able to return the arguments (possible to encode with Airflow-JSON) that will
let them be re-instantiated elsewhere.
"""
def __init__(self, **kwargs):
# these values are set by triggerer when preparing to run the instance
# when run, they are injected into logger record.
self.task_instance = None
self.trigger_id = None
def _set_context(self, context):
"""Part of LoggingMixin and used mainly for configuration of task logging; not used for triggers."""
raise NotImplementedError
@abc.abstractmethod
def serialize(self) -> tuple[str, dict[str, Any]]:
"""
Return the information needed to reconstruct this Trigger.
:return: Tuple of (class path, keyword arguments needed to re-instantiate).
"""
raise NotImplementedError("Triggers must implement serialize()")
@abc.abstractmethod
async def run(self) -> AsyncIterator[TriggerEvent]:
"""
Run the trigger in an asynchronous context.
The trigger should yield an Event whenever it wants to fire off
an event, and return None if it is finished. Single-event triggers
should thus yield and then immediately return.
If it yields, it is likely that it will be resumed very quickly,
but it may not be (e.g. if the workload is being moved to another
triggerer process, or a multi-event trigger was being used for a
single-event task defer).
In either case, Trigger classes should assume they will be persisted,
and then rely on cleanup() being called when they are no longer needed.
"""
raise NotImplementedError("Triggers must implement run()")
yield # To convince Mypy this is an async iterator.
async def cleanup(self) -> None:
"""
Cleanup the trigger.
Called when the trigger is no longer needed, and it's being removed
from the active triggerer process.
This method follows the async/await pattern to allow to run the cleanup
in triggerer main event loop. Exceptions raised by the cleanup method
are ignored, so if you would like to be able to debug them and be notified
that cleanup method failed, you should wrap your code with try/except block
and handle it appropriately (in async-compatible way).
"""
@staticmethod
def repr(classpath: str, kwargs: dict[str, Any]):
kwargs_str = ", ".join(f"{k}={v}" for k, v in kwargs.items())
return f"<{classpath} {kwargs_str}>"
def __repr__(self) -> str:
classpath, kwargs = self.serialize()
return self.repr(classpath, kwargs)
| BaseTrigger |
python | huggingface__transformers | src/transformers/models/granite/modeling_granite.py | {
"start": 14380,
"end": 17399
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: GraniteConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[GraniteConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
| GraniteRotaryEmbedding |
python | kubernetes-client__python | kubernetes/client/models/v1_replication_controller_spec.py | {
"start": 383,
"end": 7744
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'min_ready_seconds': 'int',
'replicas': 'int',
'selector': 'dict(str, str)',
'template': 'V1PodTemplateSpec'
}
attribute_map = {
'min_ready_seconds': 'minReadySeconds',
'replicas': 'replicas',
'selector': 'selector',
'template': 'template'
}
def __init__(self, min_ready_seconds=None, replicas=None, selector=None, template=None, local_vars_configuration=None): # noqa: E501
"""V1ReplicationControllerSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._min_ready_seconds = None
self._replicas = None
self._selector = None
self._template = None
self.discriminator = None
if min_ready_seconds is not None:
self.min_ready_seconds = min_ready_seconds
if replicas is not None:
self.replicas = replicas
if selector is not None:
self.selector = selector
if template is not None:
self.template = template
@property
def min_ready_seconds(self):
"""Gets the min_ready_seconds of this V1ReplicationControllerSpec. # noqa: E501
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
:return: The min_ready_seconds of this V1ReplicationControllerSpec. # noqa: E501
:rtype: int
"""
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, min_ready_seconds):
"""Sets the min_ready_seconds of this V1ReplicationControllerSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
:param min_ready_seconds: The min_ready_seconds of this V1ReplicationControllerSpec. # noqa: E501
:type: int
"""
self._min_ready_seconds = min_ready_seconds
@property
def replicas(self):
"""Gets the replicas of this V1ReplicationControllerSpec. # noqa: E501
Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller # noqa: E501
:return: The replicas of this V1ReplicationControllerSpec. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1ReplicationControllerSpec.
Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller # noqa: E501
:param replicas: The replicas of this V1ReplicationControllerSpec. # noqa: E501
:type: int
"""
self._replicas = replicas
@property
def selector(self):
"""Gets the selector of this V1ReplicationControllerSpec. # noqa: E501
Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors # noqa: E501
:return: The selector of this V1ReplicationControllerSpec. # noqa: E501
:rtype: dict(str, str)
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1ReplicationControllerSpec.
Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors # noqa: E501
:param selector: The selector of this V1ReplicationControllerSpec. # noqa: E501
:type: dict(str, str)
"""
self._selector = selector
@property
def template(self):
"""Gets the template of this V1ReplicationControllerSpec. # noqa: E501
:return: The template of this V1ReplicationControllerSpec. # noqa: E501
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this V1ReplicationControllerSpec.
:param template: The template of this V1ReplicationControllerSpec. # noqa: E501
:type: V1PodTemplateSpec
"""
self._template = template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ReplicationControllerSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ReplicationControllerSpec):
return True
return self.to_dict() != other.to_dict()
| V1ReplicationControllerSpec |
python | has2k1__plotnine | plotnine/scales/scale_color.py | {
"start": 14298,
"end": 14362
} | class ____(scale_fill_cmap):
pass
@alias
| scale_fill_continuous |
python | rapidsai__cudf | python/cudf/cudf/core/join/join.py | {
"start": 812,
"end": 25504
} | class ____:
@staticmethod
@acquire_spill_lock()
def _joiner(
lhs: list[ColumnBase],
rhs: list[ColumnBase],
how: str,
) -> tuple[ColumnBase, ColumnBase]:
if how == "outer":
how = "full"
if (join_func := getattr(plc.join, f"{how}_join", None)) is None:
raise ValueError(f"Invalid join type {how}")
left_rows, right_rows = join_func(
plc.Table([col.to_pylibcudf(mode="read") for col in lhs]),
plc.Table([col.to_pylibcudf(mode="read") for col in rhs]),
plc.types.NullEquality.EQUAL,
)
return (
ColumnBase.from_pylibcudf(left_rows),
ColumnBase.from_pylibcudf(right_rows),
)
def __init__(
self,
lhs,
rhs,
*,
on,
left_on,
right_on,
left_index,
right_index,
how,
sort,
indicator,
suffixes,
):
"""
Manage the merging of two Frames.
Parameters
----------
lhs : DataFrame
The left operand of the merge
rhs : DataFrame
The right operand of the merge
on : string or list like
A set of key columns in the left and right operands
elements must be common to both frames
left_on : string or list like
A set of key columns in the left operand. Must be
specified with right_on or right_index concurrently
right_on : string or list like
A set of key columns in the right operand. Must be
specified with left_on or left_index concurrently
left_index : bool
Boolean flag indicating the left index column or columns
are to be used as join keys in order.
right_index : bool
Boolean flag indicating the right index column or columns
are to be used as join keys in order.
how : string
The type of join. Possible values are
'inner', 'outer', 'left', 'leftsemi' and 'leftanti'
sort : bool
Boolean flag indicating if the output Frame is to be
sorted on the output's join keys, in left to right order.
suffixes : list like
Left and right suffixes specified together, unpacked into lsuffix
and rsuffix.
"""
self._validate_merge_params(
lhs,
rhs,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
how=how,
suffixes=suffixes,
)
self.lhs = lhs.copy(deep=False)
self.rhs = rhs.copy(deep=False)
self.how = how
# If the user requests that the result is sorted or we're in
# pandas-compatible mode we have various obligations on the
# output order:
#
# compat-> | False | True
# sort | |
# ---------+--------------------------+-------------------------------
# False| no obligation | ordering as per pandas docs(*)
# True | sorted lexicographically | sorted lexicographically(*)
#
# (*) If two keys are equal, tiebreak is to use input table order.
#
# In pandas-compat mode, we have obligations on the order to
# match pandas (even if sort=False), see
# pandas.pydata.org/docs/reference/api/pandas.DataFrame.merge.html.
# The ordering requirements differ depending on which join
# type is specified:
#
# - left: preserve key order (only keeping left keys)
# - right: preserve key order (only keeping right keys)
# - inner: preserve key order (of left keys)
# - outer: sort keys lexicographically
# - cross (not supported): preserve key order (of left keys)
#
# Moreover, in all cases, whenever there is a tiebreak
# situation (for sorting or otherwise), the deciding order is
# "input table order"
self.sort = sort or (
get_option("mode.pandas_compatible") and how == "outer"
)
self.preserve_key_order = get_option(
"mode.pandas_compatible"
) and how in {
"inner",
"outer",
"left",
"right",
}
self.lsuffix, self.rsuffix = suffixes
# At this point validation guarantees that if on is not None we
# don't have any other args, so we can apply it directly to left_on and
# right_on.
self._using_left_index = bool(left_index)
left_on = (
lhs.index._column_names
if left_index
else left_on
if left_on
else on
)
self._using_right_index = bool(right_index)
right_on = (
rhs.index._column_names
if right_index
else right_on
if right_on
else on
)
if left_on or right_on:
self._left_keys = [
_ColumnIndexer(name=on)
if not self._using_left_index and on in lhs._data
else _IndexIndexer(name=on)
for on in (_coerce_to_tuple(left_on) if left_on else [])
]
self._right_keys = [
_ColumnIndexer(name=on)
if not self._using_right_index and on in rhs._data
else _IndexIndexer(name=on)
for on in (_coerce_to_tuple(right_on) if right_on else [])
]
if len(self._left_keys) != len(self._right_keys):
raise ValueError(
"Merge operands must have same number of join key columns"
)
self._using_left_index = any(
isinstance(idx, _IndexIndexer) for idx in self._left_keys
)
self._using_right_index = any(
isinstance(idx, _IndexIndexer) for idx in self._right_keys
)
# For left/right merges, joining on an index and column should result in a RangeIndex
# if sort is False.
self._return_rangeindex = (
not self.sort
and self.how in {"left", "right"}
and not (
all(
isinstance(idx, _IndexIndexer)
for idx in itertools.chain(
self._left_keys, self._right_keys
)
)
or all(
isinstance(idx, _ColumnIndexer)
for idx in itertools.chain(
self._left_keys, self._right_keys
)
)
)
)
else:
# if `on` is not provided and we're not merging
# index with column or on both indexes, then use
# the intersection of columns in both frames
on_names = set(lhs._data) & set(rhs._data)
self._left_keys = [_ColumnIndexer(name=on) for on in on_names]
self._right_keys = [_ColumnIndexer(name=on) for on in on_names]
self._using_left_index = False
self._using_right_index = False
self._return_rangeindex = False
self._key_columns_with_same_name = (
set(_coerce_to_tuple(on))
if on
else {
lkey.name
for lkey, rkey in zip(
self._left_keys, self._right_keys, strict=True
)
if lkey.name == rkey.name
and not (
isinstance(lkey, _IndexIndexer)
or isinstance(rkey, _IndexIndexer)
)
}
)
def _gather_maps(self, left_cols, right_cols):
# Produce gather maps for the join, optionally reordering to
# match pandas-order in compat mode.
maps = self._joiner(
left_cols,
right_cols,
how=self.how,
)
if not self.preserve_key_order:
return maps
# We should only get here if we're in a join on which
# pandas-compat places some ordering obligation (which
# precludes a semi-join)
# We must perform this reordering even if sort=True since the
# obligation to ensure tiebreaks appear in input table order
# means that the gather maps must be permuted into an original
# order.
assert self.how in {"inner", "outer", "left", "right"}
# And hence both maps returned from the libcudf join should be
# non-None.
assert all(m is not None for m in maps)
lengths = [len(left_cols[0]), len(right_cols[0])]
# Only nullify those maps that need it.
nullify = [
self.how not in {"inner", "left"},
self.how not in {"inner", "right"},
]
# To reorder maps so that they are in order of the input
# tables, we gather from iota on both right and left, and then
# sort the gather maps with those two columns as key.
key_order = [
as_column(range(n), dtype=SIZE_TYPE_DTYPE).take(
map_, nullify=null, check_bounds=False
)
for map_, n, null in zip(maps, lengths, nullify, strict=True)
]
if self.how == "right":
# If how is right, right map is primary sort key.
key_order = reversed(key_order)
return [
ColumnBase.from_pylibcudf(col)
for col in sorting.sort_by_key(
maps,
key_order,
itertools.repeat(True, times=len(key_order)),
itertools.repeat("last", times=len(key_order)),
stable=True,
)
]
def perform_merge(self) -> DataFrame:
left_join_cols = []
right_join_cols = []
for left_key, right_key in zip(
self._left_keys, self._right_keys, strict=True
):
lcol = left_key.get(self.lhs)
rcol = right_key.get(self.rhs)
lcol_casted, rcol_casted = _match_join_keys(lcol, rcol, self.how)
left_join_cols.append(lcol_casted)
right_join_cols.append(rcol_casted)
# Categorical dtypes must be cast back from the underlying codes
# type that was returned by _match_join_keys.
if (
self.how == "inner"
and isinstance(lcol.dtype, CategoricalDtype)
and isinstance(rcol.dtype, CategoricalDtype)
):
lcol_casted = lcol_casted.astype(lcol.dtype)
rcol_casted = rcol_casted.astype(rcol.dtype)
left_key.set(self.lhs, lcol_casted)
right_key.set(self.rhs, rcol_casted)
from cudf.core.dataframe import DataFrame
if self.how == "cross":
lib_table = plc.join.cross_join(
plc.Table(
[
col.to_pylibcudf(mode="read")
for col in self.lhs._columns
]
),
plc.Table(
[
col.to_pylibcudf(mode="read")
for col in self.rhs._columns
]
),
)
columns = lib_table.columns()
left_names, right_names = (
self.lhs._column_names,
self.rhs._column_names,
)
left_result = DataFrame._from_data(
{
col: ColumnBase.from_pylibcudf(lib_col)
for col, lib_col in zip(
left_names, columns[: len(left_names)], strict=True
)
}
)
right_result = DataFrame._from_data(
{
col: ColumnBase.from_pylibcudf(lib_col)
for col, lib_col in zip(
right_names, columns[len(left_names) :], strict=True
)
}
)
del columns, lib_table
else:
left_rows, right_rows = self._gather_maps(
left_join_cols, right_join_cols
)
gather_kwargs = {
"keep_index": self._using_left_index
or self._using_right_index,
}
left_result = (
self.lhs._gather(
GatherMap.from_column_unchecked(
left_rows, len(self.lhs), nullify=True
),
**gather_kwargs,
)
if left_rows is not None
else DataFrame._from_data({})
)
del left_rows
right_result = (
self.rhs._gather(
GatherMap.from_column_unchecked(
right_rows, len(self.rhs), nullify=True
),
**gather_kwargs,
)
if right_rows is not None
else DataFrame._from_data({})
)
del right_rows
result = DataFrame._from_data(
*self._merge_results(left_result, right_result)
)
if self.sort:
result = self._sort_result(result)
if self._return_rangeindex:
result = result.reset_index(drop=True)
return result
def _merge_results(self, left_result: DataFrame, right_result: DataFrame):
# Merge the DataFrames `left_result` and `right_result` into a single
# `DataFrame`, suffixing column names if necessary.
# If two key columns have the same name, a single output column appears
# in the result. For all non-outer join types, the key column from the
# rhs is simply dropped. For outer joins, the two key columns are
# combined by filling nulls in the left key column with corresponding
# values from the right key column:
if self.how == "outer":
for lkey, rkey in zip(
self._left_keys, self._right_keys, strict=True
):
if lkey.name == rkey.name:
# fill nulls in lhs from values in the rhs
lkey.set(
left_result,
lkey.get(left_result).fillna(rkey.get(right_result)),
)
# All columns from the left table make it into the output. Non-key
# columns that share a name with a column in the right table are
# suffixed with the provided suffix.
common_names = set(left_result._column_names) & set(
right_result._column_names
)
cols_to_suffix = (
common_names
if self.how == "cross"
else common_names - self._key_columns_with_same_name
)
data = {
(f"{name}{self.lsuffix}" if name in cols_to_suffix else name): col
for name, col in left_result._column_labels_and_values
}
# The right table follows the same rule as the left table except that
# key columns from the right table are removed.
for name, col in right_result._column_labels_and_values:
if name in common_names:
if (
self.how == "cross"
or name not in self._key_columns_with_same_name
):
r_label = f"{name}{self.rsuffix}"
if r_label in data:
raise NotImplementedError(
f"suffixes={(self.lsuffix, self.rsuffix)} would introduce a "
f"duplicate column label, '{r_label}', which is "
"not supported."
)
data[r_label] = col
else:
data[name] = col
# determine if the result has multiindex columns. The result
# of a join has a MultiIndex as its columns if:
# - both the `lhs` and `rhs` have a MultiIndex columns
# OR
# - either one of `lhs` or `rhs` have a MultiIndex columns,
# and the other is empty (i.e., no columns)
if self.lhs._data and self.rhs._data:
multiindex_columns = (
self.lhs._data.multiindex and self.rhs._data.multiindex
)
rangeindex_columns = (
self.lhs._data.rangeindex and self.rhs._data.rangeindex
)
elif self.lhs._data:
multiindex_columns = self.lhs._data.multiindex
rangeindex_columns = self.lhs._data.rangeindex
elif self.rhs._data:
multiindex_columns = self.rhs._data.multiindex
rangeindex_columns = self.rhs._data.rangeindex
else:
multiindex_columns = False
rangeindex_columns = (
self.lhs._data.rangeindex and self.rhs._data.rangeindex
)
index: Index | None
if self._using_right_index:
# right_index and left_on
index = left_result.index
elif self._using_left_index:
# left_index and right_on
index = right_result.index
else:
index = None
# Construct result from data and index:
return (
left_result._data.__class__(
data=data,
multiindex=multiindex_columns,
rangeindex=rangeindex_columns,
),
index,
)
def _sort_result(self, result: DataFrame) -> DataFrame:
# Pandas sorts on the key columns in the
# same order as given in 'on'. If the indices are used as
# keys, the index will be sorted. If one index is specified,
# the key columns on the other side will be used to sort.
# In pandas-compatible mode, tie-breaking for multiple equal
# sort keys is to produce output in input dataframe order.
# This is taken care of by using a stable sort here, and (in
# pandas-compat mode) reordering the gather maps before
# producing the input result.
by: list[Any] = []
if self._using_left_index and self._using_right_index:
by.extend(result.index._columns)
if not self._using_left_index:
by.extend([result._data[col.name] for col in self._left_keys])
if not self._using_right_index:
by.extend([result._data[col.name] for col in self._right_keys])
if by:
keep_index = self._using_left_index or self._using_right_index
if keep_index:
to_sort: Iterable[ColumnBase] = itertools.chain(
result.index._columns, result._columns
)
index_names = result.index.names
else:
to_sort = result._columns
index_names = None
result_columns = sorting.sort_by_key(
to_sort,
by,
itertools.repeat(True, times=len(by)),
itertools.repeat("last", times=len(by)),
stable=True,
)
result = result._from_columns_like_self(
[ColumnBase.from_pylibcudf(col) for col in result_columns],
result._column_names,
index_names,
)
return result
@staticmethod
def _validate_merge_params(
lhs,
rhs,
on,
left_on,
right_on,
left_index,
right_index,
how,
suffixes,
):
# Error for various invalid combinations of merge input parameters
from cudf.core.dataframe import DataFrame
from cudf.core.series import Series
if not isinstance(lhs, (Series, DataFrame)):
raise TypeError("left must be a Series or DataFrame")
if not isinstance(rhs, (Series, DataFrame)):
raise TypeError("right must be a Series or DataFrame")
# We must actually support the requested merge type
if how not in {
"left",
"inner",
"outer",
"leftanti",
"leftsemi",
"cross",
}:
raise NotImplementedError(f"{how} merge not supported yet")
if on:
if left_on or right_on:
# Passing 'on' with 'left_on' or 'right_on' is ambiguous
raise ValueError(
'Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.'
)
elif left_index or right_index:
# Passing 'on' with 'left_index' or 'right_index' is ambiguous
raise ValueError(
'Can only pass argument "on" OR "left_index" '
'and "right_index", not a combination of both.'
)
else:
# the validity of 'on' being checked by _Indexer
return
elif left_on and left_index:
raise ValueError(
'Can only pass argument "left_on" OR "left_index" not both.'
)
elif right_on and right_index:
raise ValueError(
'Can only pass argument "right_on" OR "right_index" not both.'
)
# Can't merge on a column name that is present in both a frame and its
# indexes.
if on:
for key in on:
if (key in lhs._data and key in lhs.index._data) or (
key in rhs._data and key in rhs.index._data
):
raise ValueError(
f"{key} is both an index level and a "
"column label, which is ambiguous."
)
if left_on:
for key in left_on:
if key in lhs._data and key in lhs.index._data:
raise ValueError(
f"{key} is both an index level and a "
"column label, which is ambiguous."
)
if right_on:
for key in right_on:
if key in rhs._data and key in rhs.index._data:
raise ValueError(
f"{key} is both an index level and a "
"column label, which is ambiguous."
)
# Can't merge on unnamed Series
if (isinstance(lhs, Series) and not lhs.name) or (
isinstance(rhs, Series) and not rhs.name
):
raise ValueError("Cannot merge on unnamed Series")
# If nothing specified, must have common cols to use implicitly
same_named_columns = set(lhs._data) & set(rhs._data)
if how != "cross" and (
not (left_index or right_index)
and not (left_on or right_on)
and len(same_named_columns) == 0
):
raise ValueError("No common columns to perform merge on")
lsuffix, rsuffix = suffixes
for name in same_named_columns:
if name == left_on == right_on:
continue
elif left_on and right_on:
if (name in left_on and name in right_on) and (
left_on.index(name) == right_on.index(name)
):
continue
else:
if not (lsuffix or rsuffix):
raise ValueError(
"there are overlapping columns but "
"lsuffix and rsuffix are not defined"
)
if (
isinstance(lhs, DataFrame)
and isinstance(rhs, DataFrame)
# An empty column is considered to have 1 level by pandas (can be
# seen by using lhs.columns.nlevels, but we don't want to use
# columns internally because it's expensive).
# TODO: Investigate whether ColumnAccessor.nlevels should be
# modified in the size 0 case.
and max(lhs._data.nlevels, 1) != max(rhs._data.nlevels, 1)
):
raise ValueError(
"Not allowed to merge between different levels. "
f"({lhs._data.nlevels} levels on "
f"the left, {rhs._data.nlevels} on the right)"
)
| Merge |
python | dask__dask | dask/_expr.py | {
"start": 989,
"end": 26020
} | class ____:
_parameters: list[str] = []
_defaults: dict[str, Any] = {}
_pickle_functools_cache: bool = True
operands: list
_determ_token: str | None
def __new__(cls, *args, _determ_token=None, **kwargs):
operands = list(args)
for parameter in cls._parameters[len(operands) :]:
try:
operands.append(kwargs.pop(parameter))
except KeyError:
operands.append(cls._defaults[parameter])
assert not kwargs, kwargs
inst = object.__new__(cls)
inst._determ_token = _determ_token
inst.operands = [_unpack_collections(o) for o in operands]
# This is typically cached. Make sure the cache is populated by calling
# it once
inst._name
return inst
def _tune_down(self):
return None
def _tune_up(self, parent):
return None
def finalize_compute(self):
return self
def _operands_for_repr(self):
return [f"{param}={op!r}" for param, op in zip(self._parameters, self.operands)]
def __str__(self):
s = ", ".join(self._operands_for_repr())
return f"{type(self).__name__}({s})"
def __repr__(self):
return str(self)
def _tree_repr_argument_construction(self, i, op, header):
try:
param = self._parameters[i]
default = self._defaults[param]
except (IndexError, KeyError):
param = self._parameters[i] if i < len(self._parameters) else ""
default = "--no-default--"
if repr(op) != repr(default):
if param:
header += f" {param}={op!r}"
else:
header += repr(op)
return header
def _tree_repr_lines(self, indent=0, recursive=True):
return " " * indent + repr(self)
def tree_repr(self):
return os.linesep.join(self._tree_repr_lines())
def analyze(self, filename: str | None = None, format: str | None = None) -> None:
from dask.dataframe.dask_expr._expr import Expr as DFExpr
from dask.dataframe.dask_expr.diagnostics import analyze
if not isinstance(self, DFExpr):
raise TypeError(
"analyze is only supported for dask.dataframe.Expr objects."
)
return analyze(self, filename=filename, format=format)
def explain(
self, stage: OptimizerStage = "fused", format: str | None = None
) -> None:
from dask.dataframe.dask_expr.diagnostics import explain
return explain(self, stage, format)
def pprint(self):
for line in self._tree_repr_lines():
print(line)
def __hash__(self):
return hash(self._name)
def __dask_tokenize__(self):
if not self._determ_token:
# If the subclass does not implement a __dask_tokenize__ we'll want
# to tokenize all operands.
# Note how this differs to the implementation of
# Expr.deterministic_token
self._determ_token = _tokenize_deterministic(type(self), *self.operands)
return self._determ_token
def __dask_keys__(self):
"""The keys for this expression
This is used to determine the keys of the output collection
when this expression is computed.
Returns
-------
keys: list
The keys for this expression
"""
return [(self._name, i) for i in range(self.npartitions)]
@staticmethod
def _reconstruct(*args):
typ, *operands, token, cache = args
inst = typ(*operands, _determ_token=token)
for k, v in cache.items():
inst.__dict__[k] = v
return inst
def __reduce__(self):
if dask.config.get("dask-expr-no-serialize", False):
raise RuntimeError(f"Serializing a {type(self)} object")
cache = {}
if type(self)._pickle_functools_cache:
for k, v in type(self).__dict__.items():
if isinstance(v, functools.cached_property) and k in self.__dict__:
cache[k] = getattr(self, k)
return Expr._reconstruct, (
type(self),
*self.operands,
self.deterministic_token,
cache,
)
def _depth(self, cache=None):
"""Depth of the expression tree
Returns
-------
depth: int
"""
if cache is None:
cache = {}
if not self.dependencies():
return 1
else:
result = []
for expr in self.dependencies():
if expr._name in cache:
result.append(cache[expr._name])
else:
result.append(expr._depth(cache) + 1)
cache[expr._name] = result[-1]
return max(result)
def __setattr__(self, name: str, value: Any) -> None:
if name in ["operands", "_determ_token"]:
object.__setattr__(self, name, value)
return
try:
params = type(self)._parameters
operands = object.__getattribute__(self, "operands")
operands[params.index(name)] = value
except ValueError:
raise AttributeError(
f"{type(self).__name__} object has no attribute {name}"
)
def operand(self, key):
# Access an operand unambiguously
# (e.g. if the key is reserved by a method/property)
return self.operands[type(self)._parameters.index(key)]
def dependencies(self):
# Dependencies are `Expr` operands only
return [operand for operand in self.operands if isinstance(operand, Expr)]
def _task(self, key: Key, index: int) -> Task:
"""The task for the i'th partition
Parameters
----------
index:
The index of the partition of this dataframe
Examples
--------
>>> class Add(Expr):
... def _task(self, i):
... return Task(
... self.__dask_keys__()[i],
... operator.add,
... TaskRef((self.left._name, i)),
... TaskRef((self.right._name, i))
... )
Returns
-------
task:
The Dask task to compute this partition
See Also
--------
Expr._layer
"""
raise NotImplementedError(
"Expressions should define either _layer (full dictionary) or _task"
f" (single task). This expression {type(self)} defines neither"
)
    def _layer(self) -> dict:
        """The graph layer added by this expression.

        Simple expressions that apply one task per partition can choose to only
        implement `Expr._task` instead.

        Examples
        --------
        >>> class Add(Expr):
        ...     def _layer(self):
        ...         return {
        ...             name: Task(
        ...                 name,
        ...                 operator.add,
        ...                 TaskRef((self.left._name, i)),
        ...                 TaskRef((self.right._name, i))
        ...             )
        ...             for i, name in enumerate(self.__dask_keys__())
        ...         }

        Returns
        -------
        layer: dict
            The Dask task graph added by this expression

        See Also
        --------
        Expr._task
        Expr.__dask_graph__
        """
        # Default implementation: one task per partition, delegated to _task.
        return {
            (self._name, i): self._task((self._name, i), i)
            for i in range(self.npartitions)
        }
    def rewrite(self, kind: str, rewritten):
        """Rewrite an expression

        This leverages the ``._{kind}_down`` and ``._{kind}_up``
        methods defined on each class

        Parameters
        ----------
        kind:
            Name of the rewrite pass; selects the ``_{kind}_down`` and
            ``_{kind}_up`` hook methods on each node.
        rewritten:
            Cache mapping expression names to already-rewritten results,
            shared across the recursion.

        Returns
        -------
        expr:
            output expression
        """
        if self._name in rewritten:
            return rewritten[self._name]
        expr = self
        down_name = f"_{kind}_down"
        up_name = f"_{kind}_up"
        # Iterate to a fixed point: restart the pass whenever any rewrite
        # changes the current node (detected by a change of `_name`).
        while True:
            _continue = False
            # Rewrite this node
            out = getattr(expr, down_name)()
            if out is None:
                # None from a hook means "no change".
                out = expr
            if not isinstance(out, Expr):
                # A hook may collapse the expression to a plain value.
                return out
            if out._name != expr._name:
                expr = out
                continue
            # Allow children to rewrite their parents
            for child in expr.dependencies():
                out = getattr(child, up_name)(expr)
                if out is None:
                    out = expr
                if not isinstance(out, Expr):
                    return out
                if out is not expr and out._name != expr._name:
                    expr = out
                    _continue = True
                    break
            if _continue:
                continue
            # Rewrite all of the children
            new_operands = []
            changed = False
            for operand in expr.operands:
                if isinstance(operand, Expr):
                    new = operand.rewrite(kind=kind, rewritten=rewritten)
                    rewritten[operand._name] = new
                    if new._name != operand._name:
                        changed = True
                else:
                    new = operand
                new_operands.append(new)
            if changed:
                # Rebuild this node around the rewritten children and loop.
                expr = type(expr)(*new_operands)
                continue
            else:
                break
        return expr
    def simplify_once(self, dependents: defaultdict, simplified: dict):
        """Simplify an expression

        This leverages the ``._simplify_down`` and ``._simplify_up``
        methods defined on each class

        Parameters
        ----------
        dependents: defaultdict[list]
            The dependents for every node.
        simplified: dict
            Cache of simplified expressions for these dependents.

        Returns
        -------
        expr:
            output expression
        """
        # Check if we've already simplified for these dependents
        if self._name in simplified:
            return simplified[self._name]
        expr = self
        # Single pass: unlike `rewrite`, the loop always exits via the
        # trailing `break`; `while True` only provides early-return points.
        while True:
            out = expr._simplify_down()
            if out is None:
                # None from a hook means "no change".
                out = expr
            if not isinstance(out, Expr):
                # Hooks may collapse the expression to a plain value.
                return out
            if out._name != expr._name:
                expr = out
            # Allow children to simplify their parents
            for child in expr.dependencies():
                out = child._simplify_up(expr, dependents)
                if out is None:
                    out = expr
                if not isinstance(out, Expr):
                    return out
                if out is not expr and out._name != expr._name:
                    expr = out
                    break
            # Rewrite all of the children
            new_operands = []
            changed = False
            for operand in expr.operands:
                if isinstance(operand, Expr):
                    # Bandaid for now, waiting for Singleton
                    dependents[operand._name].append(weakref.ref(expr))
                    new = operand.simplify_once(
                        dependents=dependents, simplified=simplified
                    )
                    simplified[operand._name] = new
                    if new._name != operand._name:
                        changed = True
                else:
                    new = operand
                new_operands.append(new)
            if changed:
                # Rebuild around the simplified children.
                expr = type(expr)(*new_operands)
            break
        return expr
def optimize(self, fuse: bool = False) -> Expr:
stage: OptimizerStage = "fused" if fuse else "simplified-physical"
return optimize_until(self, stage)
    def fuse(self) -> Expr:
        # Default is a no-op; subclasses that support task fusion override this.
        return self
    def simplify(self) -> Expr:
        """Simplify the expression to a fixed point.

        Repeatedly applies :meth:`simplify_once` until the expression name
        stops changing.  Raises ``RuntimeError`` if a previously-seen
        expression reappears, i.e. the optimizer oscillates instead of
        converging.
        """
        expr = self
        seen = set()
        while True:
            # Dependents must be recomputed each round: the graph changes.
            dependents = collect_dependents(expr)
            new = expr.simplify_once(dependents=dependents, simplified={})
            if new._name == expr._name:
                break
            if new._name in seen:
                raise RuntimeError(
                    f"Optimizer does not converge. {expr!r} simplified to {new!r} which was already seen. "
                    "Please report this issue on the dask issue tracker with a minimal reproducer."
                )
            seen.add(new._name)
            expr = new
        return expr
    def _simplify_down(self):
        # Hook: return a simpler equivalent of this node, or None for no change.
        return
    def _simplify_up(self, parent, dependents):
        # Hook: let this node rewrite its ``parent``; return None for no change.
        return
    def lower_once(self, lowered: dict):
        """Lower this expression (and its children) by a single step.

        Parameters
        ----------
        lowered:
            Cache mapping expression names to already-lowered results,
            shared across the recursion.

        Returns
        -------
        The lowered expression, or ``self`` if nothing changed.
        """
        # Check for a cached result
        try:
            return lowered[self._name]
        except KeyError:
            pass
        expr = self
        # Lower this node
        out = expr._lower()
        if out is None:
            # None from the hook means "no change".
            out = expr
        if not isinstance(out, Expr):
            # The hook may collapse the expression to a plain value.
            return out
        # Lower all children
        new_operands = []
        changed = False
        for operand in out.operands:
            if isinstance(operand, Expr):
                new = operand.lower_once(lowered)
                if new._name != operand._name:
                    changed = True
            else:
                new = operand
            new_operands.append(new)
        if changed:
            # Rebuild around the lowered children.
            out = type(out)(*new_operands)
        # Cache the result and return
        return lowered.setdefault(self._name, out)
def lower_completely(self) -> Expr:
"""Lower an expression completely
This calls the ``lower_once`` method in a loop
until nothing changes. This function does not
apply any other optimizations (like ``simplify``).
Returns
-------
expr:
output expression
See Also
--------
Expr.lower_once
Expr._lower
"""
# Lower until nothing changes
expr = self
lowered: dict = {}
while True:
new = expr.lower_once(lowered)
if new._name == expr._name:
break
expr = new
return expr
    def _lower(self):
        # Hook: return a lowered (more physical) equivalent of this node,
        # or None for no change.
        return
    @functools.cached_property
    def _funcname(self) -> str:
        # Lowercased class name; used as the prefix of `_name`.
        return funcname(type(self)).lower()
    @property
    def deterministic_token(self):
        """Token identifying this expression, computed lazily on first access.

        Stored in ``_determ_token`` (one of the two real instance
        attributes; see ``__setattr__``) so pickling can carry it along.
        """
        if not self._determ_token:
            # Just tokenize self to fall back on __dask_tokenize__
            # Note how this differs to the implementation of __dask_tokenize__
            self._determ_token = self.__dask_tokenize__()
        return self._determ_token
    @functools.cached_property
    def _name(self) -> str:
        # Deterministic graph name: "<funcname>-<token>".  Two expressions
        # with equal names are treated as equivalent throughout this module.
        return f"{self._funcname}-{self.deterministic_token}"
    @property
    def _meta(self):
        # Abstract: metadata describing this expression's result.
        # Subclasses must implement.
        raise NotImplementedError()
    @classmethod
    def _annotations_tombstone(cls) -> _AnnotationsTombstone:
        # Sentinel object marking annotations as removed.
        return _AnnotationsTombstone()
    def __dask_annotations__(self):
        # Expressions carry no annotations by default.
        return {}
def __dask_graph__(self):
"""Traverse expression tree, collect layers
Subclasses generally do not want to override this method unless custom
logic is required to treat (e.g. ignore) specific operands during graph
generation.
See also
--------
Expr._layer
Expr._task
"""
stack = [self]
seen = set()
layers = []
while stack:
expr = stack.pop()
if expr._name in seen:
continue
seen.add(expr._name)
layers.append(expr._layer())
for operand in expr.dependencies():
stack.append(operand)
return toolz.merge(layers)
    @property
    def dask(self):
        # Convenience alias for the materialized task graph.
        return self.__dask_graph__()
    def substitute(self, old, new) -> Expr:
        """Substitute a specific term within the expression

        Note that replacing non-`Expr` terms may produce
        unexpected results, and is not recommended.
        Substituting boolean values is not allowed.

        Parameters
        ----------
        old:
            Old term to find and replace.
        new:
            New term to replace instances of `old` with.

        Returns
        -------
        A new expression with the substitution applied, or ``self`` when
        nothing matched.

        Examples
        --------
        >>> (df + 10).substitute(10, 20)  # doctest: +SKIP
        df + 20
        """
        # Delegate to the recursive worker with a fresh memoization set.
        return self._substitute(old, new, _seen=set())
    def _substitute(self, old, new, _seen):
        """Recursive worker for :meth:`substitute`.

        ``_seen`` accumulates names of subtrees already known to be
        unchanged, so shared subexpressions are not re-visited.
        """
        if self._name in _seen:
            return self
        # Check if we are replacing a literal
        if isinstance(old, Expr):
            substitute_literal = False
            if self._name == old._name:
                return new
        else:
            substitute_literal = True
            if isinstance(old, bool):
                # True == 1 and False == 0, so literal substitution of
                # booleans would silently match integers as well.
                raise TypeError("Arguments to `substitute` cannot be bool.")
        new_exprs = []
        update = False
        for operand in self.operands:
            if isinstance(operand, Expr):
                val = operand._substitute(old, new, _seen)
                if operand._name != val._name:
                    update = True
                new_exprs.append(val)
            elif (
                "Fused" in type(self).__name__
                and isinstance(operand, list)
                and all(isinstance(op, Expr) for op in operand)
            ):
                # Special handling for `Fused`.
                # We make no promise to dive through a
                # list operand in general, but NEED to
                # do so for the `Fused.exprs` operand.
                val = []
                for op in operand:
                    val.append(op._substitute(old, new, _seen))
                    if val[-1]._name != op._name:
                        update = True
                new_exprs.append(val)
            elif (
                substitute_literal
                and not isinstance(operand, bool)
                and isinstance(operand, type(old))
                and operand == old
            ):
                # Literal replacement: type must match exactly to avoid
                # surprising cross-type equality (e.g. 1 == 1.0).
                new_exprs.append(new)
                update = True
            else:
                new_exprs.append(operand)
        if update:  # Only recreate if something changed
            return type(self)(*new_exprs)
        else:
            # Remember that this subtree is unchanged for these arguments.
            _seen.add(self._name)
        return self
def substitute_parameters(self, substitutions: dict) -> Expr:
"""Substitute specific `Expr` parameters
Parameters
----------
substitutions:
Mapping of parameter keys to new values. Keys that
are not found in ``self._parameters`` will be ignored.
"""
if not substitutions:
return self
changed = False
new_operands = []
for i, operand in enumerate(self.operands):
if i < len(self._parameters) and self._parameters[i] in substitutions:
new_operands.append(substitutions[self._parameters[i]])
changed = True
else:
new_operands.append(operand)
if changed:
return type(self)(*new_operands)
return self
    def _node_label_args(self):
        """Operands to include in the node label by `visualize`"""
        # Default: label nodes with their Expr dependencies only.
        return self.dependencies()
def _to_graphviz(
self,
rankdir="BT",
graph_attr=None,
node_attr=None,
edge_attr=None,
**kwargs,
):
from dask.dot import label, name
graphviz = import_required(
"graphviz",
"Drawing dask graphs with the graphviz visualization engine requires the `graphviz` "
"python library and the `graphviz` system library.\n\n"
"Please either conda or pip install as follows:\n\n"
" conda install python-graphviz # either conda install\n"
" python -m pip install graphviz # or pip install and follow installation instructions",
)
graph_attr = graph_attr or {}
node_attr = node_attr or {}
edge_attr = edge_attr or {}
graph_attr["rankdir"] = rankdir
node_attr["shape"] = "box"
node_attr["fontname"] = "helvetica"
graph_attr.update(kwargs)
g = graphviz.Digraph(
graph_attr=graph_attr,
node_attr=node_attr,
edge_attr=edge_attr,
)
stack = [self]
seen = set()
dependencies = {}
while stack:
expr = stack.pop()
if expr._name in seen:
continue
seen.add(expr._name)
dependencies[expr] = set(expr.dependencies())
for dep in expr.dependencies():
stack.append(dep)
cache = {}
for expr in dependencies:
expr_name = name(expr)
attrs = {}
# Make node label
deps = [
funcname(type(dep)) if isinstance(dep, Expr) else str(dep)
for dep in expr._node_label_args()
]
_label = funcname(type(expr))
if deps:
_label = f"{_label}({', '.join(deps)})" if deps else _label
node_label = label(_label, cache=cache)
attrs.setdefault("label", str(node_label))
attrs.setdefault("fontsize", "20")
g.node(expr_name, **attrs)
for expr, deps in dependencies.items():
expr_name = name(expr)
for dep in deps:
dep_name = name(dep)
g.edge(dep_name, expr_name)
return g
    def visualize(self, filename="dask-expr.svg", format=None, **kwargs):
        """
        Visualize the expression graph.
        Requires ``graphviz`` to be installed.

        Parameters
        ----------
        filename : str or None, optional
            The name of the file to write to disk. If the provided `filename`
            doesn't include an extension, '.png' will be used by default.
            If `filename` is None, no file will be written, and the graph is
            rendered in the Jupyter notebook only.
        format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
            Format in which to write output file. Default is 'svg'.
        **kwargs
            Additional keyword arguments to forward to ``to_graphviz``.

        Returns
        -------
        The ``graphviz.Digraph`` built by ``_to_graphviz``.
        """
        from dask.dot import graphviz_to_file

        g = self._to_graphviz(**kwargs)
        graphviz_to_file(g, filename, format)
        return g
def walk(self) -> Generator[Expr]:
"""Iterate through all expressions in the tree
Returns
-------
nodes
Generator of Expr instances in the graph.
Ordering is a depth-first search of the expression tree
"""
stack = [self]
seen = set()
while stack:
node = stack.pop()
if node._name in seen:
continue
seen.add(node._name)
for dep in node.dependencies():
stack.append(dep)
yield node
def find_operations(self, operation: type | tuple[type]) -> Generator[Expr]:
"""Search the expression graph for a specific operation type
Parameters
----------
operation
The operation type to search for.
Returns
-------
nodes
Generator of `operation` instances. Ordering corresponds
to a depth-first search of the expression graph.
"""
assert (
isinstance(operation, tuple)
and all(issubclass(e, Expr) for e in operation)
or issubclass(operation, Expr) # type: ignore[arg-type]
), "`operation` must be`Expr` subclass)"
return (expr for expr in self.walk() if isinstance(expr, operation))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError as err:
if key.startswith("_meta"):
# Avoid a recursive loop if/when `self._meta*`
# produces an `AttributeError`
raise RuntimeError(
f"Failed to generate metadata for {self}. "
"This operation may not be supported by the current backend."
)
# Allow operands to be accessed as attributes
# as long as the keys are not already reserved
# by existing methods/properties
_parameters = type(self)._parameters
if key in _parameters:
idx = _parameters.index(key)
return self.operands[idx]
raise AttributeError(
f"{err}\n\n"
"This often means that you are attempting to use an unsupported "
f"API function.."
)
| Expr |
python | getsentry__sentry | src/sentry/models/apikey.py | {
"start": 1156,
"end": 4099
} | class ____(ReplicatedControlModel, HasApiScopes):
__relocation_scope__ = RelocationScope.Global
category = OutboxCategory.API_KEY_UPDATE
replication_version = 3
organization_id = HybridCloudForeignKey("sentry.Organization", on_delete="CASCADE")
label = models.CharField(max_length=64, blank=True, default="Default")
key = models.CharField(max_length=32, unique=True)
status = BoundedPositiveIntegerField(
default=0,
choices=((ApiKeyStatus.ACTIVE, _("Active")), (ApiKeyStatus.INACTIVE, _("Inactive"))),
db_index=True,
)
date_added = models.DateTimeField(default=timezone.now)
allowed_origins = models.TextField(blank=True, null=True)
objects: ClassVar[BaseManager[Self]] = BaseManager(cache_fields=("key",))
class Meta:
app_label = "sentry"
db_table = "sentry_apikey"
__repr__ = sane_repr("organization_id", "key")
def handle_async_replication(self, region_name: str, shard_identifier: int) -> None:
from sentry.auth.services.auth.serial import serialize_api_key
region_replica_service.upsert_replicated_api_key(
api_key=serialize_api_key(self), region_name=region_name
)
def __str__(self) -> str:
return f"api_key_id={self.id}, status={self.status}"
@classmethod
def generate_api_key(cls):
return secrets.token_hex(nbytes=16)
@property
def is_active(self):
return self.status == ApiKeyStatus.ACTIVE
def save(self, *args, **kwargs):
if not self.key:
self.key = ApiKey.generate_api_key()
super().save(*args, **kwargs)
def get_allowed_origins(self) -> list[str]:
if not self.allowed_origins:
return []
return list(filter(bool, self.allowed_origins.split("\n")))
def get_audit_log_data(self):
return {
"label": self.label,
"key": self.key,
"scopes": self.get_scopes(),
"status": self.status,
}
@classmethod
def sanitize_relocation_json(
cls, json: Any, sanitizer: Sanitizer, model_name: NormalizedModelName | None = None
) -> None:
model_name = get_model_name(cls) if model_name is None else model_name
super().sanitize_relocation_json(json, sanitizer, model_name)
sanitizer.set_string(json, SanitizableField(model_name, "allowed_origins"), lambda _: "")
sanitizer.set_string(json, SanitizableField(model_name, "key"))
sanitizer.set_name(json, SanitizableField(model_name, "label"))
def is_api_key_auth(auth: object) -> TypeGuard[AuthenticatedToken | ApiKey | ApiKeyReplica]:
""":returns True when an API Key is hitting the API."""
from sentry.hybridcloud.models.apikeyreplica import ApiKeyReplica
if isinstance(auth, AuthenticatedToken):
return auth.kind == "api_key"
return isinstance(auth, ApiKey) or isinstance(auth, ApiKeyReplica)
| ApiKey |
python | PyCQA__pylint | pylint/exceptions.py | {
"start": 527,
"end": 872
} | class ____(UnknownMessageError):
"""Raised when a message id or symbol that was deleted from pylint is
encountered.
"""
def __init__(self, msgid_or_symbol: str, removal_explanation: str):
super().__init__(
f"'{msgid_or_symbol}' was removed from pylint, see {removal_explanation}."
)
| DeletedMessageError |
python | ansible__ansible | lib/ansible/errors/__init__.py | {
"start": 13207,
"end": 13562
} | class ____(AnsibleAction):
"""
An action runtime failure.
This exception provides a result dictionary via the ContributesToTaskResult mixin.
"""
@property
def result_contribution(self) -> _c.Mapping[str, object]:
return self._result | dict(
failed=True,
msg=self.message,
)
| AnsibleActionFail |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 95486,
"end": 96174
} | class ____(ScaledMMConfigMixin, MTIAConfigHeuristic):
"""Scaled MM template heuristic for MTIA (non-TMA)"""
def __init__(self) -> None:
super().__init__()
# Override mm_configs to use scaled_mm_configs
self.mm_configs = self.scaled_mm_configs
# NOTE: overriding exhaustive configs here to be the same as mm_configs
# as we haven't validated exhaustive support here yet
# TODO(coconutruben): remove this once we have validated exhaustive support
# for scaled_mm
self.exhaustive_configs = self.scaled_mm_configs
@register_template_heuristic(mm_template.uid, "mtia", op_name="int_mm")
| MTIAScaledMMTemplateConfigHeuristic |
python | getsentry__sentry | src/sentry/tasks/statistical_detectors.py | {
"start": 8610,
"end": 37018
} | class ____(RegressionDetector):
source = "profile"
kind = "function"
regression_type = RegressionType.FUNCTION
min_change = 100_000_000 # 100ms in ns
buffer_period = timedelta(days=1)
resolution_rel_threshold = 0.1
escalation_rel_threshold = 0.75
@classmethod
def min_throughput_threshold(cls) -> int:
return options.get("statistical_detectors.throughput.threshold.functions")
@classmethod
def detector_algorithm_factory(cls) -> DetectorAlgorithm:
return MovingAverageRelativeChangeDetector(
source=cls.source,
kind=cls.kind,
min_data_points=18,
moving_avg_short_factory=lambda: ExponentialMovingAverage(2 / 21),
moving_avg_long_factory=lambda: ExponentialMovingAverage(2 / 41),
threshold=0.15,
)
@classmethod
def detector_store_factory(cls) -> DetectorStore:
return RedisDetectorStore(regression_type=RegressionType.FUNCTION)
@classmethod
def query_payloads(
cls,
projects: list[Project],
start: datetime,
) -> list[DetectorPayload]:
return query_functions(projects, start)
@classmethod
def query_timeseries(
cls,
objects: list[tuple[Project, int | str]],
start: datetime,
function: str,
) -> Iterable[tuple[int, int | str, SnubaTSResult]]:
return query_functions_timeseries(objects, start, function)
@instrumented_task(
name="sentry.tasks.statistical_detectors.detect_transaction_trends",
namespace=performance_tasks,
processing_deadline_duration=30,
)
def detect_transaction_trends(
_org_ids: list[int], project_ids: list[int], start: str, *args, **kwargs
) -> None:
if not options.get("statistical_detectors.enable"):
return
start_time = datetime.fromisoformat(start)
EndpointRegressionDetector.configure_tags()
projects = get_detector_enabled_projects(
project_ids,
project_option=InternalProjectOptions.TRANSACTION_DURATION_REGRESSION,
)
trends = EndpointRegressionDetector.detect_trends(projects, start_time)
trends = EndpointRegressionDetector.get_regression_groups(trends)
trends = EndpointRegressionDetector.redirect_resolutions(trends, start_time)
trends = EndpointRegressionDetector.redirect_escalations(trends, start_time)
trends = EndpointRegressionDetector.limit_regressions_by_project(trends)
delay = 12 # hours
delayed_start = start_time + timedelta(hours=delay)
for regression_chunk in chunked(trends, TRANSACTIONS_PER_BATCH):
detect_transaction_change_points.apply_async(
args=[
[(bundle.payload.project_id, bundle.payload.group) for bundle in regression_chunk],
delayed_start.isoformat(),
],
# delay the check by delay hours because we want to make sure there
# will be enough data after the potential change point to be confident
# that a change has occurred
countdown=delay * 60 * 60,
)
@instrumented_task(
name="sentry.tasks.statistical_detectors.detect_transaction_change_points",
namespace=performance_tasks,
)
def detect_transaction_change_points(
transactions: list[tuple[int, str | int]], start: str, *args, **kwargs
) -> None:
start_time = datetime.fromisoformat(start)
_detect_transaction_change_points(transactions, start_time, *args, **kwargs)
def _detect_transaction_change_points(
transactions: list[tuple[int, str | int]], start: datetime, *args, **kwargs
) -> None:
if not options.get("statistical_detectors.enable"):
return
EndpointRegressionDetector.configure_tags()
projects_by_id = {
project.id: project
for project in get_detector_enabled_projects(
[project_id for project_id, _ in transactions],
)
}
transaction_pairs: list[tuple[Project, int | str]] = [
(projects_by_id[item[0]], item[1]) for item in transactions if item[0] in projects_by_id
]
regressions = EndpointRegressionDetector.detect_regressions(
transaction_pairs, start, "p95(transaction.duration)", TIMESERIES_PER_BATCH
)
regressions = EndpointRegressionDetector.save_regressions_with_versions(regressions)
breakpoint_count = 0
for regression in regressions:
breakpoint_count += 1
send_regression_to_platform(regression)
metrics.incr(
"statistical_detectors.breakpoint.emitted",
amount=breakpoint_count,
tags={"source": "transaction", "kind": "endpoint"},
sample_rate=1.0,
)
@instrumented_task(
name="sentry.tasks.statistical_detectors.detect_function_trends",
namespace=profiling_tasks,
processing_deadline_duration=30,
)
def detect_function_trends(project_ids: list[int], start: str, *args, **kwargs) -> None:
if not options.get("statistical_detectors.enable"):
return
start_time = datetime.fromisoformat(start)
FunctionRegressionDetector.configure_tags()
projects = get_detector_enabled_projects(
project_ids,
project_option=InternalProjectOptions.FUNCTION_DURATION_REGRESSION,
)
trends = FunctionRegressionDetector.detect_trends(projects, start_time)
trends = FunctionRegressionDetector.get_regression_groups(trends)
trends = FunctionRegressionDetector.redirect_resolutions(trends, start_time)
trends = FunctionRegressionDetector.redirect_escalations(trends, start_time)
trends = FunctionRegressionDetector.limit_regressions_by_project(trends)
delay = 12 # hours
delayed_start = start_time + timedelta(hours=delay)
for regression_chunk in chunked(trends, FUNCTIONS_PER_BATCH):
detect_function_change_points.apply_async(
args=[
[(bundle.payload.project_id, bundle.payload.group) for bundle in regression_chunk],
delayed_start.isoformat(),
],
# delay the check by delay hours because we want to make sure there
# will be enough data after the potential change point to be confident
# that a change has occurred
countdown=delay * 60 * 60,
)
@instrumented_task(
name="sentry.tasks.statistical_detectors.detect_function_change_points",
namespace=profiling_tasks,
)
def detect_function_change_points(
functions_list: list[tuple[int, int]], start: str, *args, **kwargs
) -> None:
start_time = datetime.fromisoformat(start)
_detect_function_change_points(functions_list, start_time, *args, **kwargs)
def _detect_function_change_points(
functions_list: list[tuple[int, int]], start: datetime, *args, **kwargs
) -> None:
if not options.get("statistical_detectors.enable"):
return
FunctionRegressionDetector.configure_tags()
projects_by_id = {
project.id: project
for project in get_detector_enabled_projects(
[project_id for project_id, _ in functions_list],
)
}
function_pairs: list[tuple[Project, int | str]] = [
(projects_by_id[item[0]], item[1]) for item in functions_list if item[0] in projects_by_id
]
regressions = FunctionRegressionDetector.detect_regressions(
function_pairs, start, "p95()", TIMESERIES_PER_BATCH
)
regressions = FunctionRegressionDetector.save_regressions_with_versions(regressions)
breakpoint_count = 0
emitted_count = 0
for regression_chunk in chunked(regressions, 100):
breakpoint_count += len(regression_chunk)
emitted_count += emit_function_regression_issue(projects_by_id, regression_chunk, start)
metrics.incr(
"statistical_detectors.breakpoint.detected",
amount=breakpoint_count,
tags={"source": "profile", "kind": "function"},
sample_rate=1.0,
)
metrics.incr(
"statistical_detectors.breakpoint.emitted",
amount=emitted_count,
tags={"source": "profile", "kind": "function"},
sample_rate=1.0,
)
def emit_function_regression_issue(
projects_by_id: dict[int, Project],
regressions: list[BreakpointData],
start: datetime,
) -> int:
start = start - timedelta(hours=1)
start = start.replace(minute=0, second=0, microsecond=0)
project_ids = [int(regression["project"]) for regression in regressions]
projects = [projects_by_id[project_id] for project_id in project_ids]
params = SnubaParams(
start=start,
end=start + timedelta(minutes=1),
projects=projects,
)
conditions = [
And(
[
Condition(Column("project_id"), Op.EQ, int(regression["project"])),
Condition(Column("fingerprint"), Op.EQ, int(regression["transaction"])),
]
)
for regression in regressions
]
result = functions.query(
selected_columns=["project.id", "fingerprint", "all_examples()"],
query="is_application:1",
snuba_params=params,
orderby=["project.id"],
limit=len(regressions),
referrer=Referrer.API_PROFILING_FUNCTIONS_STATISTICAL_DETECTOR_EXAMPLE.value,
auto_aggregations=True,
use_aggregate_conditions=True,
transform_alias_to_input_format=True,
conditions=conditions if len(conditions) <= 1 else [Or(conditions)],
)
transaction_examples = {}
raw_continuous_examples = {}
for row in result["data"]:
# Split this into 2 loops here to bias towards transaction based profiles
key = (row["project.id"], row["fingerprint"])
for example in row["all_examples()"]:
if "profile_id" in example:
transaction_examples[key] = example
if key in transaction_examples:
continue
for example in row["all_examples()"]:
if "profiler_id" in example:
raw_continuous_examples[key] = example
continuous_examples = fetch_continuous_examples(raw_continuous_examples)
payloads = []
for regression in regressions:
project_id = int(regression["project"])
fingerprint = int(regression["transaction"])
project = projects_by_id.get(project_id)
if project is None:
continue
key = (project_id, fingerprint)
example = transaction_examples.get(key) or continuous_examples.get(key)
if example is None:
continue
payloads.append(
{
"organization_id": project.organization_id,
"project_id": project_id,
"example": example,
"fingerprint": fingerprint,
"absolute_percentage_change": regression["absolute_percentage_change"],
"aggregate_range_1": regression["aggregate_range_1"],
"aggregate_range_2": regression["aggregate_range_2"],
"breakpoint": int(regression["breakpoint"]),
"trend_difference": regression["trend_difference"],
"trend_percentage": regression["trend_percentage"],
"unweighted_p_value": regression["unweighted_p_value"],
"unweighted_t_value": regression["unweighted_t_value"],
}
)
if not payloads:
return 0
response = get_from_profiling_service(method="POST", path="/regressed", json_data=payloads)
if response.status != 200:
return 0
data = json.loads(response.data)
return data.get("occurrences")
def fetch_continuous_examples(raw_examples):
if not raw_examples:
return raw_examples
project_condition = Condition(
Column("project_id"),
Op.IN,
list({project_id for project_id, _ in raw_examples.keys()}),
)
conditions = [project_condition]
example_conditions: list[BooleanCondition | Condition] = []
for (project_id, _), example in raw_examples.items():
example_conditions.append(
And(
[
Condition(Column("project_id"), Op.EQ, project_id),
Condition(Column("profiler_id"), Op.EQ, example["profiler_id"]),
Condition(
Column("start_timestamp"), Op.LTE, resolve_datetime64(example["end"])
),
Condition(
Column("end_timestamp"), Op.GTE, resolve_datetime64(example["start"])
),
]
)
)
if len(example_conditions) >= 2:
conditions.append(Or(example_conditions))
else:
conditions.extend(example_conditions)
query = Query(
match=Storage(StorageKey.ProfileChunks.value),
select=[
Column("project_id"),
Column("profiler_id"),
Column("chunk_id"),
Column("start_timestamp"),
Column("end_timestamp"),
],
where=conditions,
limit=Limit(len(raw_examples)),
)
request = Request(
dataset=Dataset.Profiles.value,
app_id="default",
query=query,
tenant_ids={
"referrer": Referrer.API_PROFILING_FUNCTIONS_STATISTICAL_DETECTOR_CHUNKS.value,
"cross_org_query": 1,
},
)
data = raw_snql_query(
request,
referrer=Referrer.API_PROFILING_FUNCTIONS_STATISTICAL_DETECTOR_CHUNKS.value,
)["data"]
for row in data:
row["start"] = (
datetime.fromisoformat(row["start_timestamp"]).replace(tzinfo=UTC).timestamp()
)
row["end"] = datetime.fromisoformat(row["end_timestamp"]).replace(tzinfo=UTC).timestamp()
examples = {}
for key, example in raw_examples.items():
for row in data:
if example["profiler_id"] != row["profiler_id"]:
continue
if example["start"] > row["end"]:
continue
if example["end"] < row["start"]:
continue
examples[key] = {
"profiler_id": row["profiler_id"],
"chunk_id": row["chunk_id"],
"thread_id": example["thread_id"],
"start": row["start"],
"end": row["end"],
}
return examples
BACKEND_TRANSACTION_OPS = [
# Common
"function.aws",
"function.aws.lambda",
"http.server",
"serverless.function",
# Python
"asgi.server",
# Ruby
"rails.request",
]
def query_transactions(
projects: list[Project],
start: datetime,
transactions_per_project: int = TRANSACTIONS_PER_PROJECT,
) -> list[DetectorPayload]:
start = start - timedelta(hours=1)
start = start.replace(minute=0, second=0, microsecond=0)
end = start + timedelta(hours=1)
org_ids = list({p.organization_id for p in projects})
project_ids = list({p.id for p in projects})
use_case_id = UseCaseID.TRANSACTIONS
# both the metric and tag that we are using are hardcoded values in sentry_metrics.indexer.strings
# so the org_id that we are using does not actually matter here, we only need to pass in an org_id
#
# Because we filter on more than just `transaction`, we have to use DURATION here instead of
# DURATION_LIGHT.
duration_metric_id = indexer.resolve(
use_case_id, org_ids[0], str(TransactionMRI.DURATION.value)
)
transaction_name_metric_id = indexer.resolve(
use_case_id,
org_ids[0],
"transaction",
)
transaction_op_metric_id = indexer.resolve(
use_case_id,
org_ids[0],
"transaction.op",
)
# if our time range is more than an hour, use the hourly granularity
granularity = 3600 if int(end.timestamp()) - int(start.timestamp()) >= 3600 else 60
# This query returns the top `transactions_per_project` transaction names by count in the specified
# [start, end) time period along with the p95 of each transaction in that time period
# this is written in raw SnQL because the metrics layer does not support the limitby clause which is necessary for this operation to work
query = Query(
match=Entity(EntityKey.GenericMetricsDistributions.value),
select=[
Column("project_id"),
Function(
"arrayElement",
(
CurriedFunction(
"quantilesIf",
[0.95],
(
Column("value"),
Function("equals", (Column("metric_id"), duration_metric_id)),
),
),
1,
),
"p95",
),
Function(
"countIf",
(Column("value"), Function("equals", (Column("metric_id"), duration_metric_id))),
"count",
),
Function(
"transform",
(
Column(f"tags_raw[{transaction_name_metric_id}]"),
Function("array", ("",)),
Function("array", ("<< unparameterized >>",)),
),
"transaction_name",
),
],
groupby=[
Column("project_id"),
Column("transaction_name"),
],
where=[
Condition(Column("org_id"), Op.IN, list(org_ids)),
Condition(Column("project_id"), Op.IN, project_ids),
Condition(Column("timestamp"), Op.GTE, start),
Condition(Column("timestamp"), Op.LT, end),
Condition(Column("metric_id"), Op.EQ, duration_metric_id),
Condition(
Column(f"tags_raw[{transaction_op_metric_id}]"),
Op.IN,
list(BACKEND_TRANSACTION_OPS),
),
],
limitby=LimitBy([Column("project_id")], transactions_per_project),
orderby=[
OrderBy(Column("project_id"), Direction.DESC),
OrderBy(Column("count"), Direction.DESC),
],
granularity=Granularity(granularity),
limit=Limit(len(project_ids) * transactions_per_project),
)
request = Request(
dataset=Dataset.PerformanceMetrics.value,
app_id="statistical_detectors",
query=query,
tenant_ids={
"referrer": Referrer.STATISTICAL_DETECTORS_FETCH_TOP_TRANSACTION_NAMES.value,
"cross_org_query": 1,
"use_case_id": use_case_id.value,
},
)
data = raw_snql_query(
request, referrer=Referrer.STATISTICAL_DETECTORS_FETCH_TOP_TRANSACTION_NAMES.value
)["data"]
return [
DetectorPayload(
project_id=row["project_id"],
group=row["transaction_name"],
fingerprint=fingerprint_regression(row["transaction_name"]),
count=row["count"],
value=row["p95"],
timestamp=start,
)
for row in data
]
def query_transactions_timeseries(
    transactions: list[tuple[Project, int | str]],
    start: datetime,
    agg_function: str,
) -> Generator[tuple[int, int | str, SnubaTSResult]]:
    """Fetch hourly p95 duration timeseries for the given transactions.

    For every ``(project, transaction_name)`` pair in ``transactions``, query
    the generic metrics distributions over the last
    ``statistical_detectors.query.transactions.timeseries_days`` days and
    yield ``(project_id, transaction_name, timeseries)`` tuples. Each
    timeseries is zerofilled so every hourly bucket is present.

    NOTE(review): ``agg_function`` is accepted for interface parity with
    ``query_functions_timeseries`` but is unused here — the aggregate (p95)
    is hard-coded in the SnQL query below.
    """
    end = start.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)
    days_to_query = options.get("statistical_detectors.query.transactions.timeseries_days")
    start = end - timedelta(days=days_to_query)
    use_case_id = UseCaseID.TRANSACTIONS
    interval = 3600  # 1 hour

    project_objects = {p for p, _ in transactions}
    project_ids = [project.id for project in project_objects]
    org_ids = list({project.organization_id for project in project_objects})
    # The only tag available on DURATION_LIGHT is `transaction`: as long as
    # we don't filter on any other tags, DURATION_LIGHT's lower cardinality
    # will be faster to query.
    duration_metric_id = indexer.resolve(
        use_case_id, org_ids[0], str(TransactionMRI.DURATION_LIGHT.value)
    )
    transaction_name_metric_id = indexer.resolve(
        use_case_id,
        org_ids[0],
        "transaction",
    )

    # Restrict the query to exactly the requested (project, transaction)
    # pairs: a plain AND for a single pair, otherwise an OR of per-pair ANDs.
    transactions_condition = None
    if len(transactions) == 1:
        project, transaction_name = transactions[0]
        transactions_condition = BooleanCondition(
            BooleanOp.AND,
            [
                Condition(Column("project_id"), Op.EQ, project.id),
                Condition(Column("transaction"), Op.EQ, transaction_name),
            ],
        )
    else:
        transactions_condition = BooleanCondition(
            BooleanOp.OR,
            [
                BooleanCondition(
                    BooleanOp.AND,
                    [
                        Condition(Column("project_id"), Op.EQ, project.id),
                        Condition(Column("transaction"), Op.EQ, transaction_name),
                    ],
                )
                for project, transaction_name in transactions
            ],
        )

    query = Query(
        match=Entity(EntityKey.GenericMetricsDistributions.value),
        select=[
            Column("project_id"),
            Function(
                "arrayElement",
                (
                    CurriedFunction(
                        "quantilesIf",
                        [0.95],
                        (
                            Column("value"),
                            Function("equals", (Column("metric_id"), duration_metric_id)),
                        ),
                    ),
                    1,
                ),
                "p95_transaction_duration",
            ),
            Function(
                "transform",
                (
                    Column(f"tags_raw[{transaction_name_metric_id}]"),
                    Function("array", ("",)),
                    Function("array", ("<< unparameterized >>",)),
                ),
                "transaction",
            ),
        ],
        groupby=[
            Column("transaction"),
            Column("project_id"),
            Function(
                "toStartOfInterval",
                (Column("timestamp"), Function("toIntervalSecond", (3600,)), "Universal"),
                "time",
            ),
        ],
        where=[
            Condition(Column("org_id"), Op.IN, list(org_ids)),
            Condition(Column("project_id"), Op.IN, list(project_ids)),
            Condition(Column("timestamp"), Op.GTE, start),
            Condition(Column("timestamp"), Op.LT, end),
            Condition(Column("metric_id"), Op.EQ, duration_metric_id),
            transactions_condition,
        ],
        orderby=[
            OrderBy(Column("project_id"), Direction.ASC),
            OrderBy(Column("transaction"), Direction.ASC),
            OrderBy(
                Function(
                    "toStartOfInterval",
                    (Column("timestamp"), Function("toIntervalSecond", (3600,)), "Universal"),
                    "time",
                ),
                Direction.ASC,
            ),
        ],
        granularity=Granularity(interval),
        limit=Limit(10000),
    )
    request = Request(
        dataset=Dataset.PerformanceMetrics.value,
        app_id="statistical_detectors",
        query=query,
        tenant_ids={
            "referrer": Referrer.STATISTICAL_DETECTORS_FETCH_TRANSACTION_TIMESERIES.value,
            "cross_org_query": 1,
            "use_case_id": use_case_id.value,
        },
    )
    rows = raw_snql_query(
        request, referrer=Referrer.STATISTICAL_DETECTORS_FETCH_TRANSACTION_TIMESERIES.value
    )["data"]

    # Group the flat result rows into one series per (project_id, transaction).
    # Use a distinct name for the per-key buffer so we never rebind/shadow the
    # list being iterated.
    results = {}
    for datapoint in rows or []:
        key = (datapoint["project_id"], datapoint["transaction"])
        results.setdefault(key, {"data": []})["data"].append(datapoint)

    for (project_id, transaction_name), item in results.items():
        formatted_result = SnubaTSResult(
            {
                # Fill empty hourly buckets with zeros so consumers always see
                # a contiguous timeseries between start and end.
                "data": zerofill(
                    item["data"],
                    start,
                    end,
                    interval,
                    ["time"],
                ),
                "project": project_id,
            },
            start,
            end,
            interval,
        )
        yield project_id, transaction_name, formatted_result
def query_functions(projects: list[Project], start: datetime) -> list[DetectorPayload]:
    """Fetch per-function p95/count aggregates for the last full hour.

    Queries the profiling functions dataset for up to
    ``FUNCTIONS_PER_PROJECT`` application functions per project and returns
    one :class:`DetectorPayload` per result row.
    """
    # The functions dataset only supports 1 hour granularity, so we always
    # look back at the last full hour that just elapsed. Since timestamps are
    # truncated to the start of the hour, one minute of data is enough.
    window_start = (start - timedelta(hours=1)).replace(minute=0, second=0, microsecond=0)
    snuba_params = SnubaParams(
        start=window_start,
        end=window_start + timedelta(minutes=1),
        projects=projects,
    )

    # TODOs: handle any errors
    rows = functions.query(
        selected_columns=[
            "project.id",
            "timestamp",
            "fingerprint",
            "count()",
            "p95()",
        ],
        query="is_application:1",
        snuba_params=snuba_params,
        orderby=["project.id", "-count()"],
        limitby=("project.id", FUNCTIONS_PER_PROJECT),
        limit=FUNCTIONS_PER_PROJECT * len(projects),
        referrer=Referrer.API_PROFILING_FUNCTIONS_STATISTICAL_DETECTOR.value,
        auto_aggregations=True,
        use_aggregate_conditions=True,
        transform_alias_to_input_format=True,
    )["data"]

    payloads = []
    for row in rows:
        payloads.append(
            DetectorPayload(
                project_id=row["project.id"],
                group=row["fingerprint"],
                fingerprint=f"{row['fingerprint']:x}",
                count=row["count()"],
                value=row["p95()"],
                timestamp=datetime.fromisoformat(row["timestamp"]),
            )
        )
    return payloads
def query_functions_timeseries(
    functions_list: list[tuple[Project, int | str]],
    start: datetime,
    agg_function: str,
) -> Generator[tuple[int, int | str, SnubaTSResult]]:
    """Fetch ``agg_function`` timeseries for each (project, fingerprint) pair.

    Queries the profiling functions dataset over the last
    ``statistical_detectors.query.functions.timeseries_days`` days with an
    hourly rollup and yields ``(project_id, fingerprint, timeseries)`` for
    each entry in ``functions_list`` that returned data; entries with no
    timeseries are logged and skipped.
    """
    projects = [project for project, _ in functions_list]

    days_to_query = options.get("statistical_detectors.query.functions.timeseries_days")
    # Align the query window to the end of the current hour, matching the
    # hourly truncation used by query_functions.
    end = start.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)
    params = SnubaParams(
        start=end - timedelta(days=days_to_query),
        end=end,
        projects=projects,
    )
    interval = 3600  # 1 hour

    # "Top events" drive the timeseries query: one entry per requested
    # (project, fingerprint) pair.
    chunk: list[dict[str, Any]] = [
        {
            "project.id": project.id,
            "fingerprint": fingerprint,
        }
        for project, fingerprint in functions_list
    ]

    results = functions.top_events_timeseries(
        timeseries_columns=[agg_function],
        selected_columns=["project.id", "fingerprint"],
        user_query="is_application:1",
        snuba_params=params,
        orderby=None,  # unused because top events is specified
        rollup=interval,
        limit=len(chunk),
        organization=None,  # unused
        referrer=Referrer.API_PROFILING_FUNCTIONS_STATISTICAL_DETECTOR_STATS.value,
        top_events={"data": chunk},
        result_key_order=["project.id", "fingerprint"],
    )

    for project, fingerprint in functions_list:
        key = f"{project.id},{fingerprint}"
        if key not in results:
            # Use %s placeholders: stdlib logging applies printf-style
            # formatting to its args, so "{}" placeholders would be emitted
            # verbatim and the values dropped from the message.
            logger.warning(
                "Missing timeseries for project: %s function: %s",
                project.id,
                fingerprint,
            )
            continue
        yield project.id, fingerprint, results[key]
def get_detector_enabled_projects(
    project_ids: list[int],
    feature_name: str | None = None,
    project_option: InternalProjectOptions | None = None,
) -> list[Project]:
    """Return the projects among ``project_ids`` with the detector enabled.

    When ``feature_name`` is given, only projects whose organization has that
    feature are kept. When ``project_option`` is given, projects are further
    filtered by the corresponding performance-issue setting.
    """
    queryset = Project.objects.filter(id__in=project_ids)

    if feature_name is None:
        enabled = list(queryset)
    else:
        # Feature checks need the organization, so pull it in the same query.
        enabled = [
            candidate
            for candidate in queryset.select_related("organization")
            if features.has(feature_name, candidate.organization)
        ]

    if project_option is not None:
        option_settings = get_performance_issue_settings(enabled)
        enabled = [
            candidate for candidate in enabled if option_settings[candidate][project_option.value]
        ]

    return enabled
| FunctionRegressionDetector |
python | doocs__leetcode | solution/1300-1399/1307.Verbal Arithmetic Puzzle/Solution.py | {
"start": 0,
"end": 3272
} | class ____:
def isAnyMapping(
self, words, row, col, bal, letToDig, digToLet, totalRows, totalCols
):
# If traversed all columns.
if col == totalCols:
return bal == 0
# At the end of a particular column.
if row == totalRows:
return bal % 10 == 0 and self.isAnyMapping(
words, 0, col + 1, bal // 10, letToDig, digToLet, totalRows, totalCols
)
w = words[row]
# If the current string 'w' has no character in the ('col')th index.
if col >= len(w):
return self.isAnyMapping(
words, row + 1, col, bal, letToDig, digToLet, totalRows, totalCols
)
# Take the current character in the variable letter.
letter = w[len(w) - 1 - col]
# Create a variable 'sign' to check whether we have to add it or subtract it.
if row < totalRows - 1:
sign = 1
else:
sign = -1
# If we have a prior valid mapping, then use that mapping.
# The second condition is for the leading zeros.
if letter in letToDig and (
letToDig[letter] != 0
or (letToDig[letter] == 0 and len(w) == 1)
or col != len(w) - 1
):
return self.isAnyMapping(
words,
row + 1,
col,
bal + sign * letToDig[letter],
letToDig,
digToLet,
totalRows,
totalCols,
)
# Choose a new mapping.
else:
for i in range(10):
# If 'i'th mapping is valid then select it.
if digToLet[i] == "-" and (
i != 0 or (i == 0 and len(w) == 1) or col != len(w) - 1
):
digToLet[i] = letter
letToDig[letter] = i
# Call the function again with the new mapping.
if self.isAnyMapping(
words,
row + 1,
col,
bal + sign * letToDig[letter],
letToDig,
digToLet,
totalRows,
totalCols,
):
return True
# Unselect the mapping.
digToLet[i] = "-"
if letter in letToDig:
del letToDig[letter]
# If nothing is correct then just return false.
return False
def isSolvable(self, words, result):
# Add the string 'result' in the list 'words'.
words.append(result)
# Initialize 'totalRows' with the size of the list.
totalRows = len(words)
# Find the longest string in the list and set 'totalCols' with the size of that string.
totalCols = max(len(word) for word in words)
# Create a HashMap for the letter to digit mapping.
letToDig = {}
# Create a list for the digit to letter mapping.
digToLet = ["-"] * 10
return self.isAnyMapping(
words, 0, 0, 0, letToDig, digToLet, totalRows, totalCols
)
| Solution |
python | walkccc__LeetCode | solutions/179. Largest Number/179.py | {
"start": 0,
"end": 90
} | class ____(str):
def __lt__(x: str, y: str) -> bool:
return x + y > y + x
| LargerStrKey |
python | google__pytype | pytype/datatypes_test.py | {
"start": 1086,
"end": 1661
} | class ____(unittest.TestCase):
def test_merge(self):
uf = datatypes.UnionFind()
uf.merge("k1", "k2")
self.assertEqual(uf.find_by_name("k1"), uf.find_by_name("k2"))
self.assertNotEqual(uf.find_by_name("k1"), uf.find_by_name("k3"))
def test_merge_from(self):
uf1 = datatypes.UnionFind()
uf1.merge("k1", "k2")
uf2 = datatypes.UnionFind()
uf2.merge("k2", "k3")
uf1.merge_from(uf2)
for k_i in ["k1", "k2", "k3"]:
for k_j in ["k1", "k2", "k3"]:
self.assertEqual(uf1.find_by_name(k_i), uf1.find_by_name(k_j))
| UnionFindTest |
python | getsentry__sentry | src/sentry/testutils/helpers/alert_rule.py | {
"start": 225,
"end": 869
} | class ____:
_suspended_values: _FactoryRegistry
@classmethod
def suspend(cls) -> "TemporaryAlertRuleTriggerActionRegistry":
obj = cls(AlertRuleTriggerAction._factory_registrations)
AlertRuleTriggerAction._factory_registrations = _FactoryRegistry()
return obj
def restore(self) -> None:
AlertRuleTriggerAction._factory_registrations = self._suspended_values
@classmethod
@contextmanager
def registry_patched(cls) -> Generator[None]:
suspended = cls.suspend()
try:
yield
finally:
suspended.restore()
| TemporaryAlertRuleTriggerActionRegistry |
python | modin-project__modin | modin/config/envvars.py | {
"start": 33875,
"end": 35312
} | class ____(EnvironmentVariable, type=int):
"""
Minimum number of rows/columns in a single pandas partition split.
Once a partition for a pandas dataframe has more than this many elements,
Modin adds another partition.
"""
varname = "MODIN_MIN_PARTITION_SIZE"
default = 32
@classmethod
def put(cls, value: int) -> None:
"""
Set ``MinPartitionSize`` with extra checks.
Parameters
----------
value : int
Config value to set.
"""
if value <= 0:
raise ValueError(f"Min partition size should be > 0, passed value {value}")
super().put(value)
@classmethod
def get(cls) -> int:
"""
Get ``MinPartitionSize`` with extra checks.
Returns
-------
int
"""
from modin.error_message import ErrorMessage
ErrorMessage.single_warning(
"`MinPartitionSize` is deprecated and will be removed in a future version. "
+ "This config has no longer effect, "
+ "use `MinRowPartitionSize` and `MinColumnPartitionSize` instead.",
FutureWarning,
)
min_partition_size = super().get()
if min_partition_size <= 0:
raise ValueError(
f"`MinPartitionSize` should be > 0; current value: {min_partition_size}"
)
return min_partition_size
| MinPartitionSize |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/_redaction.py | {
"start": 455,
"end": 602
} | class ____(TypedDict):
"""Represents an individual match of sensitive data."""
type: str
value: str
start: int
end: int
| PIIMatch |
python | pypa__pip | src/pip/_vendor/urllib3/contrib/pyopenssl.py | {
"start": 9154,
"end": 13846
} | class ____(object):
"""API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
"""
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
self._closed = False
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
return b""
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return b""
else:
raise
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(self.socket, self.socket.gettimeout()):
raise timeout("The read operation timed out")
else:
return self.recv(*args, **kwargs)
# TLS 1.3 post-handshake authentication
except OpenSSL.SSL.Error as e:
raise ssl.SSLError("read error: %r" % e)
else:
return data
def recv_into(self, *args, **kwargs):
try:
return self.connection.recv_into(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
return 0
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return 0
else:
raise
except OpenSSL.SSL.WantReadError:
if not util.wait_for_read(self.socket, self.socket.gettimeout()):
raise timeout("The read operation timed out")
else:
return self.recv_into(*args, **kwargs)
# TLS 1.3 post-handshake authentication
except OpenSSL.SSL.Error as e:
raise ssl.SSLError("read error: %r" % e)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
if not util.wait_for_write(self.socket, self.socket.gettimeout()):
raise timeout()
continue
except OpenSSL.SSL.SysCallError as e:
raise SocketError(str(e))
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self._send_until_done(
data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]
)
total_sent += sent
def shutdown(self):
# FIXME rethrow compatible exceptions should we ever use this
self.connection.shutdown()
def close(self):
if self._makefile_refs < 1:
try:
self._closed = True
return self.connection.close()
except OpenSSL.SSL.Error:
return
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509)
return {
"subject": ((("commonName", x509.get_subject().CN),),),
"subjectAltName": get_subj_alt_name(x509),
}
def version(self):
return self.connection.get_protocol_version_name()
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
if _fileobject: # Platform-specific: Python 2
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
makefile = backport_makefile
WrappedSocket.makefile = makefile
| WrappedSocket |
python | sphinx-doc__sphinx | sphinx/search/nl.py | {
"start": 189,
"end": 582
} | class ____(SearchLanguage):
lang = 'nl'
language_name = 'Dutch'
js_stemmer_rawcode = 'dutch-stemmer.js'
stopwords = DUTCH_STOPWORDS
def __init__(self, options: dict[str, str]) -> None:
super().__init__(options)
self.stemmer = snowballstemmer.stemmer('dutch')
def stem(self, word: str) -> str:
return self.stemmer.stemWord(word.lower())
| SearchDutch |
python | ansible__ansible | lib/ansible/plugins/test/uri.py | {
"start": 746,
"end": 965
} | class ____(object):
""" Ansible URI jinja2 test """
def tests(self):
return {
# file testing
'uri': is_uri,
'url': is_url,
'urn': is_urn,
}
| TestModule |
python | scipy__scipy | scipy/sparse/tests/test_extract.py | {
"start": 200,
"end": 1685
} | class ____:
def setup_method(self):
self.cases = [
csr_array([[1,2]]),
csr_array([[1,0]]),
csr_array([[0,0]]),
csr_array([[1],[2]]),
csr_array([[1],[0]]),
csr_array([[0],[0]]),
csr_array([[1,2],[3,4]]),
csr_array([[0,1],[0,0]]),
csr_array([[0,0],[1,0]]),
csr_array([[0,0],[0,0]]),
csr_array([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]),
csr_array([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]).T,
]
def test_find(self):
for A in self.cases:
I,J,V = _extract.find(A)
B = csr_array((V,(I,J)), shape=A.shape)
assert_equal(A.toarray(), B.toarray())
def test_tril(self):
for A in self.cases:
B = A.toarray()
for k in [-3,-2,-1,0,1,2,3]:
assert_equal(_extract.tril(A,k=k).toarray(), np.tril(B,k=k))
def test_triu(self):
for A in self.cases:
B = A.toarray()
for k in [-3,-2,-1,0,1,2,3]:
assert_equal(_extract.triu(A,k=k).toarray(), np.triu(B,k=k))
def test_array_vs_matrix(self):
for A in self.cases:
assert isinstance(_extract.tril(A), sparray)
assert isinstance(_extract.triu(A), sparray)
M = csr_matrix(A)
assert not isinstance(_extract.tril(M), sparray)
assert not isinstance(_extract.triu(M), sparray)
| TestExtract |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1403305,
"end": 1406351
} | class ____(VegaLiteSchema):
"""
TimeUnitParams schema wrapper.
Time Unit Params for encoding predicate, which can specified if the data is already
"binned".
Parameters
----------
binned : bool
Whether the data has already been binned to this time unit. If true, Vega-Lite will
only format the data, marks, and guides, without applying the timeUnit transform to
re-bin the data again.
maxbins : float
If no ``unit`` is specified, maxbins is used to infer time units.
step : float
The number of steps between bins, in terms of the least significant unit provided.
unit : :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`SingleTimeUnit`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Defines how date-time values should be binned.
utc : bool
True to use UTC timezone. Equivalent to using a ``utc`` prefixed ``TimeUnit``.
"""
_schema = {"$ref": "#/definitions/TimeUnitParams"}
def __init__(
self,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
**kwds,
):
super().__init__(
binned=binned, maxbins=maxbins, step=step, unit=unit, utc=utc, **kwds
)
| TimeUnitParams |
python | getsentry__sentry | tests/sentry/snuba/test_entity_subscriptions.py | {
"start": 23825,
"end": 28339
} | class ____(TestCase):
def test(self) -> None:
cases = [
(EntityKey.Events, SnubaQuery.Type.ERROR, Dataset.Events, "count()", "", True, True),
(
EntityKey.Transactions,
SnubaQuery.Type.PERFORMANCE,
Dataset.Transactions,
"count()",
"",
True,
True,
),
(
EntityKey.GenericMetricsDistributions,
SnubaQuery.Type.PERFORMANCE,
Dataset.Metrics,
"count()",
"",
True,
True,
),
(
EntityKey.GenericMetricsSets,
SnubaQuery.Type.PERFORMANCE,
Dataset.Metrics,
"count_unique(user)",
"",
True,
True,
),
(
EntityKey.GenericMetricsDistributions,
SnubaQuery.Type.PERFORMANCE,
Dataset.PerformanceMetrics,
"count()",
"",
True,
True,
),
(
EntityKey.GenericMetricsSets,
SnubaQuery.Type.PERFORMANCE,
Dataset.PerformanceMetrics,
"count_unique(user)",
"",
True,
True,
),
(
EntityKey.GenericMetricsCounters,
SnubaQuery.Type.PERFORMANCE,
Dataset.PerformanceMetrics,
"sum(c:transactions/sentry.process_profile.track_outcome@second)",
"",
# Custom metrics are not supported when the metrics layer integration with mqb is disabled.
False,
True,
),
(
EntityKey.GenericMetricsDistributions,
SnubaQuery.Type.PERFORMANCE,
Dataset.PerformanceMetrics,
"max(d:transactions/sentry.process_profile.track_outcome@second)",
"",
# Custom metrics are not supported when the metrics layer integration with mqb is disabled.
False,
True,
),
(
EntityKey.GenericMetricsSets,
SnubaQuery.Type.PERFORMANCE,
Dataset.PerformanceMetrics,
"count_unique(s:transactions/sentry.process_profile.track_outcome@second)",
"",
# Custom metrics are not supported when the metrics layer integration with mqb is disabled.
False,
True,
),
(
EntityKey.GenericMetricsGauges,
SnubaQuery.Type.PERFORMANCE,
Dataset.PerformanceMetrics,
"last(g:transactions/sentry.process_profile.track_outcome@second)",
"",
# Custom metrics are not supported when the metrics layer integration with mqb is disabled.
False,
True,
),
(
EntityKey.MetricsCounters,
SnubaQuery.Type.CRASH_RATE,
Dataset.Metrics,
"percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate",
"",
True,
True,
),
(
EntityKey.MetricsSets,
SnubaQuery.Type.CRASH_RATE,
Dataset.Metrics,
"percentage(users_crashed, users) AS _crash_rate_alert_aggregate",
"",
True,
True,
),
]
for (
expected_entity_key,
query_type,
dataset,
aggregate,
query,
supported_with_no_metrics_layer,
supported_with_metrics_layer,
) in cases:
snuba_query = SnubaQuery.objects.create(
time_window=60,
type=query_type.value,
dataset=dataset.value,
aggregate=aggregate,
query=query,
resolution=5,
)
if supported_with_no_metrics_layer:
assert expected_entity_key == get_entity_key_from_snuba_query(
snuba_query, self.organization.id, self.project.id
)
| GetEntityKeyFromSnubaQueryTest |
python | apache__airflow | providers/common/compat/src/airflow/providers/common/compat/lineage/entities.py | {
"start": 967,
"end": 1181
} | class ____:
"""File entity. Refers to a file."""
template_fields: ClassVar[tuple[str, ...]] = ("url",)
url: str = attr.ib()
type_hint: str | None = None
@attr.s(auto_attribs=True, kw_only=True)
| File |
python | gevent__gevent | src/gevent/thread.py | {
"start": 3954,
"end": 7612
} | class ____:
# The constructor must accept and ignore all arguments
# to match the stdlib.
def __init__(self, *_args, **_kwargs):
"""Does nothing; ignores args"""
# Must keep a weak reference to the greenlet
# to avoid problems managing the _active list of
# threads, which can sometimes rely on garbage collection.
# Also, this breaks a cycle.
_greenlet_ref = None
# We also need to keep track of whether we were ever
# actually bound to a greenlet so that our
# behaviour in 'join' is correct.
_had_greenlet = False
def _set_greenlet(self, glet):
from weakref import ref
assert glet is not None
self._greenlet_ref = ref(glet)
self._had_greenlet = True
def _get_greenlet(self):
return (
self._greenlet_ref()
if self._greenlet_ref is not None
else None
)
def join(self, timeout=-1):
# TODO: This is what we patch Thread.join to do on all versions,
# so there's another implementation in gevent.monkey._patch_thread_common.
# UNIFY THEM.
# Python 3.14 makes timeout optional, defaulting to -1;
# we need that to be None
timeout = None if timeout == -1 else timeout
if not self._had_greenlet:
raise RuntimeError('thread not started')
glet = self._get_greenlet()
if glet is not None:
if glet is getcurrent():
raise RuntimeError('Cannot join current thread')
if hasattr(glet, 'join'):
return glet.join(timeout)
# working with a raw greenlet. That
# means it's probably the MainThread, because the main
# greenlet is always raw. But it could also be a dummy
from time import time
end = None
if timeout:
end = time() + timeout
while not self.is_done():
if end is not None and time() > end:
return
sleep(0.001)
return None
@property
def ident(self):
glet = self._get_greenlet()
if glet is not None:
return get_ident(glet)
return None
def is_done(self):
glet = self._get_greenlet()
if glet is None:
return True
return glet.dead
def _set_done(self, enter_hub=True):
"""
Mark the thread as complete.
This releases our reference (if any) to our greenlet.
By default, this will bounce back to the hub so that waiters
in ``join`` can get notified. Set *enter_hub* to false not to
do this. This private API is tightly coupled to our ``threading``
implementation.
"""
if not self._had_greenlet:
raise RuntimeError('thread not started')
self._greenlet_ref = None
# Let the loop go around so that anyone waiting in
# join() gets to know about it. This is particularly
# important during threading/interpreter shutdown.
if enter_hub:
sleep(0.001)
def __repr__(self):
return '<%s.%s at 0x%x greenlet=%r>' % (
self.__class__.__module__,
self.__class__.__name__,
id(self),
self._get_greenlet()
)
def _make_thread_handle(*_args):
"""
Called on 3.13 after forking in the child.
Takes ``(module, ident)``, returns a handle object
with that ident.
"""
# The argument _should_ be a thread identifier int
handle = _ThreadHandle()
handle._set_greenlet(getcurrent())
return handle
| _ThreadHandle |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/rpc_communicator.py | {
"start": 706,
"end": 1098
} | class ____(UnityToExternalProtoServicer):
def __init__(self):
self.parent_conn, self.child_conn = Pipe()
def Initialize(self, request, context):
self.child_conn.send(request)
return self.child_conn.recv()
def Exchange(self, request, context):
self.child_conn.send(request)
return self.child_conn.recv()
| UnityToExternalServicerImplementation |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/workspace/load_target.py | {
"start": 598,
"end": 803
} | class ____(ABC):
@abstractmethod
def create_origins(self) -> Sequence[CodeLocationOrigin]:
"""Reloads the CodeLocationOrigins for this workspace."""
@record(kw_only=False)
| WorkspaceLoadTarget |
python | scrapy__scrapy | tests/test_command_runspider.py | {
"start": 8541,
"end": 8994
} | class ____(scrapy.Spider):
name = 'myspider'
async def start(self):
return
yield
"""
args = ["-o", "example1.json", "-O", "example2.json"]
log = self.get_log(tmp_path, spider_code, args=args)
assert (
"error: Please use only one of -o/--output and -O/--overwrite-output" in log
)
def test_output_stdout(self, tmp_path: Path) -> None:
spider_code = """
import scrapy
| MySpider |
python | faif__python-patterns | patterns/creational/builder.py | {
"start": 2343,
"end": 3054
} | class ____(ComplexBuilding):
def build_floor(self) -> None:
self.floor = "One"
def build_size(self) -> None:
self.size = "Big and fancy"
def construct_building(cls) -> Building:
building = cls()
building.build_floor()
building.build_size()
return building
def main():
"""
>>> house = House()
>>> house
Floor: One | Size: Big
>>> flat = Flat()
>>> flat
Floor: More than One | Size: Small
# Using an external constructor function:
>>> complex_house = construct_building(ComplexHouse)
>>> complex_house
Floor: One | Size: Big and fancy
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| ComplexHouse |
python | sympy__sympy | sympy/tensor/functions.py | {
"start": 3749,
"end": 4166
} | class ____(Exception):
"""
Raised when ``shape()`` is called on non-array object.
This error can be imported from ``sympy.tensor.functions``.
Examples
========
>>> from sympy import shape
>>> from sympy.abc import x
>>> shape(x)
Traceback (most recent call last):
...
sympy.tensor.functions.NoShapeError: shape() called on non-array object: x
"""
pass
| NoShapeError |
python | tensorflow__tensorflow | tensorflow/python/ops/variables.py | {
"start": 54868,
"end": 76281
} | class ____:
"""A container for partitioned `Variable` objects.
@compatibility(eager) `tf.PartitionedVariable` is not compatible with
eager execution. Use `tf.Variable` instead which is compatible
with both eager execution and graph construction. See [the
TensorFlow Eager Execution
guide](https://www.tensorflow.org/guide/eager#variables_and_optimizers)
for details on how variables work in eager execution.
@end_compatibility
"""
def __init__(self, name, shape, dtype, variable_list, partitions):
"""Creates a new partitioned variable wrapper.
Variables passed via the variable_list must contain a save_slice_info
field. Concatenation and iteration is in lexicographic order according
to the var_offset property of the save_slice_info.
Args:
name: String. Overall name of the variables.
shape: List of integers. Overall shape of the variables.
dtype: Type of the variables.
variable_list: List of `Variable` that comprise this partitioned variable.
partitions: List of integers. Number of partitions for each dimension.
Raises:
TypeError: If `variable_list` is not a list of `Variable` objects, or
`partitions` is not a list.
ValueError: If `variable_list` is empty, or the `Variable` shape
information does not match `shape`, or `partitions` has invalid values.
"""
if not isinstance(variable_list, (list, tuple)):
raise TypeError("variable_list is not a list or tuple: %s" %
variable_list)
if not isinstance(partitions, (list, tuple)):
raise TypeError("partitions is not a list or tuple: %s" % partitions)
if not all(p >= 1 for p in partitions):
raise ValueError("partition values must be positive: %s" % partitions)
if not variable_list:
raise ValueError("variable_list may not be empty")
# pylint: disable=protected-access
for v in variable_list:
# Sort the variable_list lexicographically according to var offset value.
if not all(v._get_save_slice_info() is not None for v in variable_list):
raise ValueError(
"All variables must have a save_slice_info available: %s" %
[v.name for v in variable_list])
if len(shape) != len(partitions):
raise ValueError("len(shape) != len(partitions): %s vs. %s" %
(shape, partitions))
if v._get_save_slice_info().full_shape != shape:
raise ValueError("All variables' full shapes must match shape: %s; "
"but full shapes were: %s" %
(shape, str([v._get_save_slice_info().full_shape])))
self._variable_list = sorted(
variable_list, key=lambda v: v._get_save_slice_info().var_offset)
# pylint: enable=protected-access
self._name = name
self._shape = shape
self._dtype = dtype
self._partitions = partitions
self._as_tensor = None
def __iter__(self):
"""Return an iterable for accessing the underlying partition Variables."""
return iter(self._variable_list)
def __len__(self):
num_partition_axes = len(self._partition_axes())
if num_partition_axes > 1:
raise ValueError("Cannot get a length for %d > 1 partition axes" %
num_partition_axes)
return len(self._variable_list)
def _partition_axes(self):
if all(p == 1 for p in self._partitions):
return [0]
else:
return [i for i, p in enumerate(self._partitions) if p > 1]
def _concat(self):
"""Returns the overall concatenated value as a `Tensor`.
This is different from using the partitioned variable directly as a tensor
(through tensor conversion and `as_tensor`) in that it creates a new set of
operations that keeps the control dependencies from its scope.
Returns:
`Tensor` containing the concatenated value.
"""
if len(self._variable_list) == 1:
with ops.name_scope(None):
return array_ops.identity(self._variable_list[0], name=self._name)
partition_axes = self._partition_axes()
if len(partition_axes) > 1:
raise NotImplementedError(
"Cannot concatenate along more than one dimension: %s. "
"Multi-axis partition concat is not supported" % str(partition_axes))
partition_ix = partition_axes[0]
with ops.name_scope(self._name + "/ConcatPartitions/"):
concatenated = array_ops.concat(self._variable_list, partition_ix)
with ops.name_scope(None):
return array_ops.identity(concatenated, name=self._name)
def as_tensor(self):
"""Returns the overall concatenated value as a `Tensor`.
The returned tensor will not inherit the control dependencies from the scope
where the value is used, which is similar to getting the value of
`Variable`.
Returns:
`Tensor` containing the concatenated value.
"""
with ops.control_dependencies(None):
return self._concat()
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
# pylint: disable=invalid-name
_ = name
if dtype is not None and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
raise NotImplementedError(
"PartitionedVariable doesn't support being used as a reference.")
else:
return v.as_tensor()
@property
def name(self):
return self._name
@property
def dtype(self):
return self._dtype
@property
def shape(self):
return self.get_shape()
@property
def _distribute_strategy(self):
"""The `tf.distribute.Strategy` that this variable was created under."""
# NOTE(yuefengz): Today, no partitioned variables in a distribute strategy.
return None
def get_shape(self) -> tensor_shape.TensorShape:
return self._shape
def _get_variable_list(self):
return self._variable_list
def _get_partitions(self):
return self._partitions
def _apply_assign_fn(self, assign_fn, value):
partition_axes = self._partition_axes()
if len(partition_axes) > 1:
raise NotImplementedError(
"Cannot do assign action along more than one dimension: %s. "
"Multi-axis partition assign action is not supported " %
str(partition_axes))
if isinstance(value, list):
assert len(value) == len(self._variable_list)
value_list = value
elif isinstance(value, PartitionedVariable):
value_list = list(value)
else:
partition_ix = partition_axes[0]
size_splits_list = [
tensor_shape.dimension_value(var.shape[partition_ix])
for var in self._variable_list
]
value_list = array_ops.split(value, size_splits_list, axis=partition_ix)
op_list = [
assign_fn(var, value_list[idx])
for idx, var in enumerate(self._variable_list)
]
return op_list
def assign(self, value, use_locking=False, name=None, read_value=True):
assign_fn = lambda var, r_value: var.assign(
r_value, use_locking=use_locking, name=name, read_value=read_value)
assign_list = self._apply_assign_fn(assign_fn, value)
if read_value:
return assign_list
return [assign.op for assign in assign_list]
def assign_add(self, value, use_locking=False, name=None, read_value=True):
assign_fn = lambda var, r_value: var.assign_add(
r_value, use_locking=use_locking, name=name, read_value=read_value)
assign_list = self._apply_assign_fn(assign_fn, value)
if read_value:
return assign_list
return [assign.op for assign in assign_list]
def assign_sub(self, value, use_locking=False, name=None, read_value=True):
assign_fn = lambda var, r_value: var.assign_sub(
r_value, use_locking=use_locking, name=name, read_value=read_value)
assign_list = self._apply_assign_fn(assign_fn, value)
if read_value:
return assign_list
return [assign.op for assign in assign_list]
@tf_export(v1=["global_variables"])
def global_variables(scope=None):
"""Returns global variables.
Global variables are variables that are shared across machines in a
distributed environment. The `Variable()` constructor or `get_variable()`
automatically adds new variables to the graph collection
`GraphKeys.GLOBAL_VARIABLES`.
This convenience function returns the contents of that collection.
An alternative to global variables are local variables. See
`tf.compat.v1.local_variables`
@compatibility(TF2)
Not compatible with eager execution and `tf.function`. In particular, Graph
collections are deprecated in TF2. Instead please create a
[tf.Module](https://www.tensorflow.org/guide/intro_to_modules)
container for all your model state, including variables.
You can then list all the variables in your `tf.Module` through the
`variables` attribute.
@end_compatibility
Args:
scope: (Optional.) A string. If supplied, the resulting list is filtered to
include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a scope
is supplied. The choice of `re.match` means that a `scope` without special
tokens filters by prefix.
Returns:
A list of `Variable` objects.
"""
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)
@tf_export(v1=["all_variables"])
@deprecated("2017-03-02", "Please use tf.global_variables instead.")
def all_variables():
"""Use `tf.compat.v1.global_variables` instead."""
return global_variables()
def _all_saveable_objects(scope=None):
"""Returns all variables and `SaveableObject`s that must be checkpointed.
Args:
scope: (Optional.) A string. If supplied, the resulting list is filtered to
include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a scope
is supplied. The choice of `re.match` means that a `scope` without special
tokens filters by prefix.
Returns:
A list of `Variable` and `SaveableObject` to be checkpointed
"""
# TODO(andreasst): make this function public once things are settled.
return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope) +
ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope))
@tf_export(v1=["local_variables"])
def local_variables(scope=None):
"""Returns local variables.
Local variables - per process variables, usually not saved/restored to
checkpoint and used for temporary or intermediate values.
For example, they can be used as counters for metrics computation or
number of epochs this machine has read data.
The `tf.contrib.framework.local_variable()` function automatically adds the
new variable to `GraphKeys.LOCAL_VARIABLES`.
This convenience function returns the contents of that collection.
An alternative to local variables are global variables. See
`tf.compat.v1.global_variables`
Args:
scope: (Optional.) A string. If supplied, the resulting list is filtered to
include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a scope
is supplied. The choice of `re.match` means that a `scope` without special
tokens filters by prefix.
Returns:
A list of local `Variable` objects.
"""
return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)
@tf_export(v1=["model_variables"])
def model_variables(scope=None):
"""Returns all variables in the MODEL_VARIABLES collection.
Args:
scope: (Optional.) A string. If supplied, the resulting list is filtered to
include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a scope
is supplied. The choice of `re.match` means that a `scope` without special
tokens filters by prefix.
Returns:
A list of local Variable objects.
"""
return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)
@tf_export(v1=["trainable_variables"])
def trainable_variables(scope=None):
"""Returns all variables created with `trainable=True`.
When passed `trainable=True`, the `Variable()` constructor automatically
adds new variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the
contents of that collection.
@compatibility(TF2)
Not compatible with eager execution and `tf.function`. In particular, Graph
collections are deprecated in TF2. Instead please create a `tf.Module`
container for all your model state, including variables.
You can then list all the trainable variables in your `tf.Module` through the
`trainable_variables` attribute.
@end_compatibility
Args:
scope: (Optional.) A string. If supplied, the resulting list is filtered to
include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a scope
is supplied. The choice of `re.match` means that a `scope` without special
tokens filters by prefix.
Returns:
A list of Variable objects.
"""
return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)
@tf_export(v1=["moving_average_variables"])
def moving_average_variables(scope=None):
"""Returns all variables that maintain their moving averages.
If an `ExponentialMovingAverage` object is created and the `apply()`
method is called on a list of variables, these variables will
be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
This convenience function returns the contents of that collection.
Args:
scope: (Optional.) A string. If supplied, the resulting list is filtered to
include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a scope
is supplied. The choice of `re.match` means that a `scope` without special
tokens filters by prefix.
Returns:
A list of Variable objects.
"""
return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, scope)
@tf_export(v1=["initializers.variables", "variables_initializer"])
def variables_initializer(var_list, name="init"):
"""Returns an Op that initializes a list of variables.
After you launch the graph in a session, you can run the returned Op to
initialize all the variables in `var_list`. This Op runs all the
initializers of the variables in `var_list` in parallel.
Calling `initialize_variables()` is equivalent to passing the list of
initializers to `Group()`.
If `var_list` is empty, however, the function still returns an Op that can
be run. That Op just has no effect.
@compatibility(TF2)
In TF2, variables are initialized immediately when they are created. There is
no longer a need to run variable initializers before using them.
@end_compatibility
Args:
var_list: List of `Variable` objects to initialize.
name: Optional name for the returned operation.
Returns:
An Op that run the initializers of all the specified variables.
"""
if var_list and not context.executing_eagerly():
return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
return control_flow_ops.no_op(name=name)
@tf_export(v1=["initialize_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")
def initialize_variables(var_list, name="init"):
"""See `tf.compat.v1.variables_initializer`."""
return variables_initializer(var_list, name=name)
@tf_export(v1=["initializers.global_variables", "global_variables_initializer"])
def global_variables_initializer():
"""Returns an Op that initializes global variables.
This is just a shortcut for `variables_initializer(global_variables())`
@compatibility(TF2)
In TF2, variables are initialized immediately when they are created. There is
no longer a need to run variable initializers before using them.
@end_compatibility
Returns:
An Op that initializes global variables in the graph.
"""
if context.executing_eagerly():
return control_flow_ops.no_op(name="global_variables_initializer")
return variables_initializer(global_variables())
@tf_export(v1=["initialize_all_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
def initialize_all_variables():
"""See `tf.compat.v1.global_variables_initializer`."""
return global_variables_initializer()
@tf_export(v1=["initializers.local_variables", "local_variables_initializer"])
def local_variables_initializer():
"""Returns an Op that initializes all local variables.
This is just a shortcut for `variables_initializer(local_variables())`
@compatibility(TF2)
In TF2, variables are initialized immediately when they are created. There is
no longer a need to run variable initializers before using them.
@end_compatibility
Returns:
An Op that initializes all local variables in the graph.
"""
if context.executing_eagerly():
return control_flow_ops.no_op(name="local_variables_initializer")
return variables_initializer(local_variables())
@tf_export(v1=["initialize_local_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")
def initialize_local_variables():
"""See `tf.compat.v1.local_variables_initializer`."""
return local_variables_initializer()
@tf_export(v1=["assert_variables_initialized"])
@tf_should_use.should_use_result
def assert_variables_initialized(var_list=None):
"""Returns an Op to check if variables are initialized.
NOTE: This function is obsolete and will be removed in 6 months. Please
change your implementation to use `report_uninitialized_variables()`.
When run, the returned Op will raise the exception `FailedPreconditionError`
if any of the variables has not yet been initialized.
Note: This function is implemented by trying to fetch the values of the
variables. If one of the variables is not initialized a message may be
logged by the C++ runtime. This is expected.
Args:
var_list: List of `Variable` objects to check. Defaults to the value of
`global_variables().`
Returns:
An Op, or None if there are no variables.
"""
if var_list is None:
var_list = global_variables() + local_variables()
# Backwards compatibility for old-style variables. TODO(touts): remove.
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
if not var_list:
return None
else:
ranks = []
for var in var_list:
with ops.colocate_with(var.op):
ranks.append(array_ops.rank_internal(var, optimize=False))
if len(ranks) == 1:
return ranks[0]
else:
return array_ops_stack.stack(ranks)
@tf_export(v1=["report_uninitialized_variables"])
@tf_should_use.should_use_result
def report_uninitialized_variables(var_list=None,
name="report_uninitialized_variables"):
"""Adds ops to list the names of uninitialized variables.
When run, it returns a 1-D tensor containing the names of uninitialized
variables if there are any, or an empty array if there are none.
Args:
var_list: List of `Variable` objects to check. Defaults to the value of
`global_variables() + local_variables()`
name: Optional name of the `Operation`.
Returns:
A 1-D tensor containing names of the uninitialized variables, or an empty
1-D tensor if there are no variables or no uninitialized variables.
"""
if var_list is None:
var_list = global_variables() + local_variables()
# Backwards compatibility for old-style variables. TODO(touts): remove.
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
with ops.name_scope(name):
# Run all operations on CPU
if var_list:
init_vars = [state_ops.is_variable_initialized(v) for v in var_list]
local_device = os.environ.get(
"TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING", "/cpu:0")
with ops.device(local_device):
if not var_list:
# Return an empty tensor so we only need to check for returned tensor
# size being 0 as an indication of model ready.
return array_ops.constant([], dtype=dtypes.string)
else:
# Get a 1-D boolean tensor listing whether each variable is initialized.
variables_mask = math_ops.logical_not(array_ops_stack.stack(init_vars))
# Get a 1-D string tensor containing all the variable names.
variable_names_tensor = array_ops.constant(
[s.op.name for s in var_list])
# Return a 1-D tensor containing all the names of
# uninitialized variables.
return array_ops.boolean_mask(variable_names_tensor, variables_mask)
tensor_conversion_registry.register_tensor_conversion_function(
PartitionedVariable, PartitionedVariable._TensorConversionFunction) # pylint: disable=protected-access
| PartitionedVariable |
python | huggingface__transformers | src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | {
"start": 4224,
"end": 10183
} | class ____(nn.Module):
def __init__(self, config, is_cross_attention=False, layer_idx=None):
super().__init__()
self.config = config
self.mask_value = None
self.multi_query = config.multi_query
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.kv_heads = 1 if self.multi_query else self.num_heads
self.kv_dim = self.kv_heads * self.head_dim
self.num_key_value_groups = self.num_heads // self.kv_heads
self.split_size = self.embed_dim
self.is_causal = True
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale_attn_weights = config.scale_attn_weights
self.scaling = self.head_dim**-0.5 if config.scale_attn_weights else 1.0
self.is_cross_attention = is_cross_attention
self.layer_idx = layer_idx
self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
self.scale_attention_softmax_in_fp32 = (
config.scale_attention_softmax_in_fp32 and config.attention_softmax_in_fp32
)
self.attn_pdrop = config.attn_pdrop
if self.is_cross_attention:
if self.multi_query:
raise NotImplementedError("Multi-Query Attention not supported for cross_attention")
self.c_attn = nn.Linear(self.embed_dim, 2 * self.embed_dim)
self.q_attn = nn.Linear(self.embed_dim, self.embed_dim)
else:
self.c_attn = nn.Linear(self.embed_dim, self.embed_dim + 2 * self.kv_dim)
self.c_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.attn_dropout = config.attn_pdrop
self.resid_dropout = nn.Dropout(config.resid_pdrop)
def forward(
self,
hidden_states: torch.Tensor,
layer_past: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
**kwargs,
) -> Union[
tuple[torch.Tensor, Optional[torch.Tensor]],
tuple[torch.Tensor, Optional[torch.Tensor], tuple[torch.Tensor, ...]],
]:
input_shape = hidden_states.shape[:-1]
if layer_past is not None:
if isinstance(layer_past, EncoderDecoderCache):
is_updated = layer_past.is_updated.get(self.layer_idx)
if self.is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = layer_past.cross_attention_cache
else:
curr_past_key_values = layer_past.self_attention_cache
else:
curr_past_key_values = layer_past
if self.is_cross_attention:
if not hasattr(self, "q_attn") or not self.is_cross_attention:
raise ValueError(
"If class is used as cross attention, the weights `q_attn` have to be defined. "
"Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`."
)
if layer_past is not None and is_updated:
# reuse k,v, cross_attentions
key = curr_past_key_values.layers[self.layer_idx].keys
value = curr_past_key_values.layers[self.layer_idx].values
else:
query = self.q_attn(hidden_states).view(*input_shape, -1, self.head_dim).transpose(1, 2)
key, value = self.c_attn(encoder_hidden_states).split((self.head_dim, self.head_dim), dim=-1)
else:
if self.multi_query:
query, key, value = (
self.c_attn(hidden_states).unsqueeze(1).split((self.embed_dim, self.kv_dim, self.kv_dim), dim=3)
)
query = query.view(*input_shape, -1, self.head_dim).transpose(1, 2)
else:
query, key, value = (
self.c_attn(hidden_states)
.view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim)
.transpose(1, 2)
.split(3 * [self.head_dim], dim=3)
)
if layer_past is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not self.is_cross_attention else None
key, value = curr_past_key_values.update(key, value, self.layer_idx, {"cache_position": cache_position})
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if self.is_cross_attention:
layer_past.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask,
dropout=0.0 if not self.training else self.attn_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.c_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
return attn_output, attn_weights
| GPTBigCodeAttention |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 68797,
"end": 71866
} | class ____:
"""
State that helps to provide a reentrant gdb.execute() function.
"""
def __init__(self):
f = tempfile.NamedTemporaryFile('r+')
self.file = f
self.filename = f.name
self.fd = f.fileno()
_execute("set logging file %s" % self.filename)
self.file_position_stack = []
def __enter__(self):
if not self.file_position_stack:
_execute("set logging redirect on")
_execute("set logging on")
_execute("set pagination off")
self.file_position_stack.append(os.fstat(self.fd).st_size)
return self
def getoutput(self):
gdb.flush()
self.file.seek(self.file_position_stack[-1])
result = self.file.read()
return result
def __exit__(self, exc_type, exc_val, tb):
startpos = self.file_position_stack.pop()
self.file.seek(startpos)
self.file.truncate()
if not self.file_position_stack:
_execute("set logging off")
_execute("set logging redirect off")
_execute("set pagination on")
def execute(command, from_tty=False, to_string=False):
"""
Replace gdb.execute() with this function and have it accept a 'to_string'
argument (new in 7.2). Have it properly capture stderr also. Ensure
reentrancy.
"""
if to_string:
with _logging_state as state:
_execute(command, from_tty)
return state.getoutput()
else:
_execute(command, from_tty)
_execute = gdb.execute
gdb.execute = execute
_logging_state = _LoggingState()
def get_selected_inferior():
"""
Return the selected inferior in gdb.
"""
# Woooh, another bug in gdb! Is there an end in sight?
# http://sourceware.org/bugzilla/show_bug.cgi?id=12212
return gdb.inferiors()[0]
selected_thread = gdb.selected_thread()
for inferior in gdb.inferiors():
for thread in inferior.threads():
if thread == selected_thread:
return inferior
def source_gdb_script(script_contents, to_string=False):
"""
Source a gdb script with script_contents passed as a string. This is useful
to provide defines for py-step and py-next to make them repeatable (this is
not possible with gdb.execute()). See
http://sourceware.org/bugzilla/show_bug.cgi?id=12216
"""
fd, filename = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
f.write(script_contents)
f.close()
gdb.execute("source %s" % filename, to_string=to_string)
os.remove(filename)
def register_defines():
source_gdb_script(textwrap.dedent("""\
define py-step
-py-step
end
define py-next
-py-next
end
document py-step
%s
end
document py-next
%s
end
""") % (PyStep.__doc__, PyNext.__doc__))
def stackdepth(frame):
"Tells the stackdepth of a gdb frame."
depth = 0
while frame:
frame = frame.older()
depth += 1
return depth
| _LoggingState |
python | astropy__astropy | astropy/coordinates/builtin_frames/galactocentric.py | {
"start": 17997,
"end": 25250
} | class ____(BaseCoordinateFrame):
r"""
A coordinate or frame in the Galactocentric system.
This frame allows specifying the Sun-Galactic center distance, the height of
the Sun above the Galactic midplane, and the solar motion relative to the
Galactic center. However, as there is no modern standard definition of a
Galactocentric reference frame, it is important to pay attention to the
default values used in this class if precision is important in your code.
The default values of the parameters of this frame are taken from the
original definition of the frame in 2014. As such, the defaults are somewhat
out of date relative to recent measurements made possible by, e.g., Gaia.
The defaults can, however, be changed at runtime by setting the parameter
set name in `~astropy.coordinates.galactocentric_frame_defaults`.
The current default parameter set is ``"pre-v4.0"``, indicating that the
parameters were adopted before ``astropy`` version 4.0. A regularly-updated
parameter set can instead be used by setting
``galactocentric_frame_defaults.set ('latest')``, and other parameter set
names may be added in future versions. To find out the scientific papers
that the current default parameters are derived from, use
``galcen.frame_attribute_references`` (where ``galcen`` is an instance of
this frame), which will update even if the default parameter set is changed.
The position of the Sun is assumed to be on the x axis of the final,
right-handed system. That is, the x axis points from the position of
the Sun projected to the Galactic midplane to the Galactic center --
roughly towards :math:`(l,b) = (0^\circ,0^\circ)`. For the default
transformation (:math:`{\rm roll}=0^\circ`), the y axis points roughly
towards Galactic longitude :math:`l=90^\circ`, and the z axis points
roughly towards the North Galactic Pole (:math:`b=90^\circ`).
For a more detailed look at the math behind this transformation, see
the document :ref:`astropy:coordinates-galactocentric`.
The frame attributes are listed under **Other Parameters**.
"""
default_representation = r.CartesianRepresentation
default_differential = r.CartesianDifferential
# frame attributes
galcen_coord = CoordinateAttribute(
frame=ICRS, doc="The coordinates of the Galactic center"
)
galcen_distance = QuantityAttribute(
unit=u.kpc, doc="The distance from the Sun to the Galactic center"
)
galcen_v_sun = CartesianRepresentationAttribute(
unit=u.km / u.s,
doc="The velocity of the Sun in the Galactocentric frame",
)
z_sun = QuantityAttribute(
unit=u.pc, doc="The distance from the Sun to the Galactic midplane"
)
roll = QuantityAttribute(
unit=u.deg, doc="The rotation angle relative to the orientation for Galactic"
)
def __init__(self, *args, **kwargs):
# Set default frame attribute values based on the ScienceState instance
# for the solar parameters defined above
default_params = galactocentric_frame_defaults.get()
self.frame_attribute_references = (
galactocentric_frame_defaults.references.copy()
)
for k in default_params:
if k in kwargs:
# If a frame attribute is set by the user, remove its reference
self.frame_attribute_references.pop(k, None)
# Keep the frame attribute if it is set by the user, otherwise use
# the default value
kwargs[k] = kwargs.get(k, default_params[k])
super().__init__(*args, **kwargs)
@classmethod
def get_roll0(cls):
"""The additional roll angle (about the final x axis) necessary to align the
final z axis to match the Galactic yz-plane. Setting the ``roll``
frame attribute to -this method's return value removes this rotation,
allowing the use of the `~astropy.coordinates.Galactocentric` frame
in more general contexts.
"""
# note that the actual value is defined at the module level. We make at
# a property here because this module isn't actually part of the public
# API, so it's better for it to be accessible from Galactocentric
return _ROLL0
# ICRS to/from Galactocentric ----------------------->
def get_matrix_vectors(galactocentric_frame, inverse=False):
"""
Use the ``inverse`` argument to get the inverse transformation, matrix and
offsets to go from Galactocentric to ICRS.
"""
# shorthand
gcf = galactocentric_frame
# rotation matrix to align x(ICRS) with the vector to the Galactic center
mat1 = rotation_matrix(-gcf.galcen_coord.dec, "y")
mat2 = rotation_matrix(gcf.galcen_coord.ra, "z")
# extra roll away from the Galactic x-z plane
mat0 = rotation_matrix(gcf.get_roll0() - gcf.roll, "x")
# construct transformation matrix and use it
R = mat0 @ mat1 @ mat2
# Now need to translate by Sun-Galactic center distance around x' and
# rotate about y' to account for tilt due to Sun's height above the plane
translation = r.CartesianRepresentation(gcf.galcen_distance * [1.0, 0.0, 0.0])
z_d = gcf.z_sun / gcf.galcen_distance
H = rotation_matrix(-np.arcsin(z_d), "y")
# compute total matrices
A = H @ R
# Now we re-align the translation vector to account for the Sun's height
# above the midplane
offset = -translation.transform(H)
offset_v = gcf.galcen_v_sun
if inverse:
# the inverse of a rotation matrix is a transpose, which is much faster
# and more stable to compute
A = matrix_transpose(A)
offset = (-offset).transform(A)
offset_v = (-gcf.galcen_v_sun).transform(A)
offset = offset.with_differentials(r.CartesianDifferential.from_cartesian(offset_v))
return A, offset
def _check_coord_repr_diff_types(c):
if isinstance(c.data, r.UnitSphericalRepresentation):
raise ConvertError(
"Transforming to/from a Galactocentric frame requires a 3D coordinate, e.g."
" (angle, angle, distance) or (x, y, z)."
)
if "s" in c.data.differentials and isinstance(
c.data.differentials["s"],
(
r.UnitSphericalDifferential,
r.UnitSphericalCosLatDifferential,
r.RadialDifferential,
),
):
raise ConvertError(
"Transforming to/from a Galactocentric frame requires a 3D velocity, e.g.,"
" proper motion components and radial velocity."
)
@frame_transform_graph.transform(AffineTransform, ICRS, Galactocentric)
def icrs_to_galactocentric(icrs_coord, galactocentric_frame):
_check_coord_repr_diff_types(icrs_coord)
return get_matrix_vectors(galactocentric_frame)
@frame_transform_graph.transform(AffineTransform, Galactocentric, ICRS)
def galactocentric_to_icrs(galactocentric_coord, icrs_frame):
_check_coord_repr_diff_types(galactocentric_coord)
return get_matrix_vectors(galactocentric_coord, inverse=True)
# Create loopback transformation
frame_transform_graph._add_merged_transform(Galactocentric, ICRS, Galactocentric)
| Galactocentric |
python | ray-project__ray | rllib/models/torch/recurrent_net.py | {
"start": 980,
"end": 5223
} | class ____(TorchModelV2):
"""Helper class to simplify implementing RNN models with TorchModelV2.
Instead of implementing forward(), you can implement forward_rnn() which
takes batches with the time dimension added already.
Here is an example implementation for a subclass
``MyRNNClass(RecurrentNetwork, nn.Module)``::
def __init__(self, obs_space, num_outputs):
nn.Module.__init__(self)
super().__init__(obs_space, action_space, num_outputs,
model_config, name)
self.obs_size = _get_size(obs_space)
self.rnn_hidden_dim = model_config["lstm_cell_size"]
self.fc1 = nn.Linear(self.obs_size, self.rnn_hidden_dim)
self.rnn = nn.GRUCell(self.rnn_hidden_dim, self.rnn_hidden_dim)
self.fc2 = nn.Linear(self.rnn_hidden_dim, num_outputs)
self.value_branch = nn.Linear(self.rnn_hidden_dim, 1)
self._cur_value = None
@override(ModelV2)
def get_initial_state(self):
# Place hidden states on same device as model.
h = [self.fc1.weight.new(
1, self.rnn_hidden_dim).zero_().squeeze(0)]
return h
@override(ModelV2)
def value_function(self):
assert self._cur_value is not None, "must call forward() first"
return self._cur_value
@override(RecurrentNetwork)
def forward_rnn(self, input_dict, state, seq_lens):
x = nn.functional.relu(self.fc1(input_dict["obs_flat"].float()))
h_in = state[0].reshape(-1, self.rnn_hidden_dim)
h = self.rnn(x, h_in)
q = self.fc2(h)
self._cur_value = self.value_branch(h).squeeze(1)
return q, [h]
"""
@override(ModelV2)
def forward(
self,
input_dict: Dict[str, TensorType],
state: List[TensorType],
seq_lens: TensorType,
) -> Tuple[TensorType, List[TensorType]]:
"""Adds time dimension to batch before sending inputs to forward_rnn().
You should implement forward_rnn() in your subclass."""
# Creating a __init__ function that acts as a passthrough and adding the warning
# there led to errors probably due to the multiple inheritance. We encountered
# the same error if we add the Deprecated decorator. We therefore add the
# deprecation warning here.
if log_once("recurrent_network_tf"):
deprecation_warning(
old="ray.rllib.models.torch.recurrent_net.RecurrentNetwork"
)
flat_inputs = input_dict["obs_flat"].float()
# Note that max_seq_len != input_dict.max_seq_len != seq_lens.max()
# as input_dict may have extra zero-padding beyond seq_lens.max().
# Use add_time_dimension to handle this
self.time_major = self.model_config.get("_time_major", False)
inputs = add_time_dimension(
flat_inputs,
seq_lens=seq_lens,
framework="torch",
time_major=self.time_major,
)
output, new_state = self.forward_rnn(inputs, state, seq_lens)
output = torch.reshape(output, [-1, self.num_outputs])
return output, new_state
def forward_rnn(
self, inputs: TensorType, state: List[TensorType], seq_lens: TensorType
) -> Tuple[TensorType, List[TensorType]]:
"""Call the model with the given input tensors and state.
Args:
inputs: Observation tensor with shape [B, T, obs_size].
state: List of state tensors, each with shape [B, size].
seq_lens: 1D tensor holding input sequence lengths.
Note: len(seq_lens) == B.
Returns:
(outputs, new_state): The model output tensor of shape
[B, T, num_outputs] and the list of new state tensors each with
shape [B, size].
Examples:
def forward_rnn(self, inputs, state, seq_lens):
model_out, h, c = self.rnn_model([inputs, seq_lens] + state)
return model_out, [h, c]
"""
raise NotImplementedError("You must implement this for an RNN model")
@OldAPIStack
| RecurrentNetwork |
python | pytorch__pytorch | tools/linter/adapters/test_has_main_linter.py | {
"start": 1953,
"end": 3811
} | class ____(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def check_file(filename: str) -> list[LintMessage]:
lint_messages = []
with open(filename) as f:
file = f.read()
v = HasMainVisiter()
cst.parse_module(file).visit(v)
if not v.found:
message = (
"Test files need to have a main block which either calls run_tests "
+ "(to ensure that the tests are run during OSS CI) or raises an exception "
+ "and added to the blocklist in test/run_test.py"
)
lint_messages.append(
LintMessage(
path=filename,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="[no-main]",
original=None,
replacement=None,
description=message,
)
)
return lint_messages
def main() -> None:
parser = argparse.ArgumentParser(
description="test files should have main block linter",
fromfile_prefix_chars="@",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
pool = mp.Pool(8)
lint_messages = pool.map(check_file, args.filenames)
pool.close()
pool.join()
flat_lint_messages = []
for sublist in lint_messages:
flat_lint_messages.extend(sublist)
for lint_message in flat_lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
if __name__ == "__main__":
main()
| LintMessage |
python | readthedocs__readthedocs.org | readthedocs/storage/s3_storage.py | {
"start": 2380,
"end": 2911
} | class ____:
bucket_name = getattr(settings, "S3_STATIC_STORAGE_BUCKET", None)
override_hostname = getattr(settings, "S3_STATIC_STORAGE_OVERRIDE_HOSTNAME", None)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.bucket_name:
raise ImproperlyConfigured(
"AWS S3 not configured correctly. Ensure S3_STATIC_STORAGE_BUCKET is defined.",
)
self.querystring_auth = False
# pylint: disable=too-many-ancestors
| S3StaticStorageMixin |
python | kamyu104__LeetCode-Solutions | Python/minimum-sum-of-values-by-dividing-array.py | {
"start": 114,
"end": 2219
} | class ____(object):
def minimumValueSum(self, nums, andValues):
"""
:type nums: List[int]
:type andValues: List[int]
:rtype: int
"""
INF = float("inf")
L = max(nums).bit_length()
def update(cnt, x, d):
for i in xrange(L):
if x&(1<<i):
cnt[i] += d
def mask(cnt, l):
return reduce(lambda accu, i: accu|(1<<i), (i for i in xrange(L) if cnt[i] == l), 0)
dp = [INF]*(len(nums)+1)
dp[0] = 0
for j in xrange(len(andValues)):
new_dp = [INF]*(len(nums)+1)
cnt = [0]*L
l = [0]*len(dp)
dq = collections.deque()
left = idx = j
for right in xrange(j, len(nums)):
update(cnt, nums[right], +1)
if mask(cnt, right-left+1) <= andValues[j]:
while left <= right:
if mask(cnt, right-left+1) > andValues[j]:
break
update(cnt, nums[left], -1)
left += 1
left -= 1
update(cnt, nums[left], +1) # try to move to the last left s.t. mask(cnt, right-left+1) == andValues[j]
if (andValues[j]&nums[right]) == andValues[j]:
l[right + 1] = l[right]+1
if mask(cnt, right-left+1) != andValues[j]:
continue
# new_dp[right+1] = min(dp[left-l[left]], dp[left-l[left]+1], ..., dp[left])+nums[right]
while idx <= left:
while dq and dp[dq[-1]] >= dp[idx]:
dq.pop()
dq.append(idx)
idx += 1
while dq and dq[0] < left-l[left]:
dq.popleft()
if dq:
new_dp[right+1] = dp[dq[0]]+nums[right]
dp = new_dp
return dp[-1] if dp[-1] != INF else -1
# Time: O(n * m * (logn + logr)), r = max(nums)
# Space: O(n + logr)
# dp, sparse table
| Solution |
python | tensorflow__tensorflow | tensorflow/python/distribute/one_device_strategy.py | {
"start": 9887,
"end": 18367
} | class ____(distribute_lib.StrategyExtendedV1):
"""Implementation of OneDeviceStrategy."""
def __init__(self, container_strategy, device):
super(OneDeviceExtended, self).__init__(container_strategy)
self._device = device_util.resolve(device)
self._input_device = device_util.get_host_for_device(self._device)
def _input_workers_with_options(self, options=None):
if not options or options.experimental_fetch_to_device:
return input_lib.InputWorkers([(self._input_device, (self._device,))])
else:
return input_lib.InputWorkers([(self._input_device,
(self._input_device,))])
@property
def _input_workers(self):
return self._input_workers_with_options()
def _create_variable(self, next_creator, **kwargs):
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
with ops.device(self._device):
return next_creator(**kwargs)
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(**kwargs)
else:
with ops.colocate_with(colocate_with):
return next_creator(**kwargs)
def _validate_colocate_with_variable(self, colocate_with_variable):
distribute_utils.validate_colocate(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
"""Make iterator from dataset without splitting the batch."""
# Note that split_batch_by argument is not passed because it is always 1 in
# this strategy, and adding it adds unnecessary overhead to the dataset.
return input_lib_v1.DatasetIterator(dataset, self._input_workers,
self._container_strategy())
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
return input_lib_v1.InputFunctionIterator(input_fn, self._input_workers,
[distribute_lib.InputContext()],
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, numpy_dataset.SingleDevice(self._input_device), session)
def _broadcast_to(self, tensor, destinations):
del destinations
return tensor
def _experimental_distribute_dataset(self, dataset, options):
# Note that split_batch_by argument is not passed because it is always 1 in
# this strategy, and adding it adds unnecessary overhead to the dataset.
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function`."
)
return input_util.get_distributed_dataset(
dataset,
self._input_workers_with_options(options),
self._container_strategy(),
options=options)
def _distribute_datasets_from_function(self, dataset_fn, options):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function` "
"of tf.distribute.MirroredStrategy")
return input_util.get_distributed_datasets_from_function(
dataset_fn,
self._input_workers_with_options(options),
[distribute_lib.InputContext()],
self._container_strategy(),
options=options)
def _experimental_distribute_values_from_function(self, value_fn):
# TODO(b/137795644): This should return a PerReplica value but other
# methods like run in OneDeviceStrategy need to be modified
# to do the same.
return value_fn(distribute_lib.ValueContext())
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
initial_loop_values=None):
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = input_lib.MultiStepContext()
def body(i, *args):
"""A wrapper around `fn` to create the while loop body."""
del args
fn_result = fn(ctx, iterator.get_next())
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
with ops.control_dependencies([fn_result]):
return [i + 1] + flat_last_step_outputs
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop. This is useful in cases where we might need to exit
# these contexts and get back to the outer context to do some things, for
# e.g. create an op which should be evaluated only once at the end of the
# loop on the host. One such usage is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
# TODO(priyag): Use max_iterations instead of an explicit counter.
cond = lambda i, *args: i < iterations
i = constant_op.constant(0)
loop_result = while_loop.while_loop(
cond,
body, [i] + initial_loop_values,
name="",
parallel_iterations=1,
back_prop=False,
swap_memory=False,
return_same_structure=True)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(loop_result)
# Convert the last_step_outputs from a list to the original dict structure
# of last_step_outputs.
last_step_tensor_outputs = loop_result[1:]
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _call_for_each_replica(self, fn, args, kwargs):
strategy = self._container_strategy()
with ops.device(self._device), _OneDeviceReplicaContext(strategy):
return fn(*args, **kwargs)
def _reduce_to(self, reduce_op, value, destinations, options):
del reduce_op, destinations, options
return value
def _gather_to_implementation(self, value, destinations, axis, options):
del destinations, axis, options
return value
def _update(self, var, fn, args, kwargs, group):
# The implementations of _update() and _update_non_slot() are identical
# except _update() passes `var` as the first argument to `fn()`.
return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
with ops.device(self._device), distribute_lib.UpdateContext(self._device):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def read_var(self, replica_local_var):
"""Read the aggregate value of a replica-local variable."""
return array_ops.identity(replica_local_var)
def _local_results(self, value):
return (value,)
def value_container(self, value):
return value
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return False
@property
def _num_replicas_in_sync(self):
return 1
@property
def worker_devices(self):
return (self._device,)
@property
def parameter_devices(self):
return (self._device,)
def non_slot_devices(self, var_list):
del var_list
return (self._device,)
@property
def experimental_should_init(self):
return True
@property
def experimental_between_graph(self):
return False
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""Global and per-replica batching are equivalent for OneDeviceStrategy."""
return True
@property
def _support_per_replica_values(self):
return False
def _get_local_replica_id(self, replica_id_in_sync_group):
return replica_id_in_sync_group
| OneDeviceExtended |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_dot.py | {
"start": 3193,
"end": 5116
} | class ____(DotSharedTests):
@pytest.fixture
def obj(self):
return DataFrame(
np.random.default_rng(2).standard_normal((3, 4)),
index=["a", "b", "c"],
columns=["p", "q", "r", "s"],
)
@pytest.fixture
def other(self):
return DataFrame(
np.random.default_rng(2).standard_normal((4, 2)),
index=["p", "q", "r", "s"],
columns=["1", "2"],
)
@pytest.fixture
def expected(self, obj, other):
return DataFrame(
np.dot(obj.values, other.values), index=obj.index, columns=other.columns
)
@classmethod
def reduced_dim_assert(cls, result, expected):
"""
Assertion about results with 1 fewer dimension that self.obj
"""
tm.assert_series_equal(result, expected, check_names=False)
assert result.name is None
@pytest.mark.parametrize(
"dtype,exp_dtype",
[("Float32", "Float64"), ("Int16", "Int32"), ("float[pyarrow]", "double[pyarrow]")],
)
def test_arrow_dtype(dtype, exp_dtype):
pytest.importorskip("pyarrow")
cols = ["a", "b"]
df_a = DataFrame([[1, 2], [3, 4], [5, 6]], columns=cols, dtype="int32")
df_b = DataFrame([[1, 0], [0, 1]], index=cols, dtype=dtype)
result = df_a.dot(df_b)
expected = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=exp_dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype,exp_dtype",
[("Float32", "Float64"), ("Int16", "Int32"), ("float[pyarrow]", "double[pyarrow]")],
)
def test_arrow_dtype_series(dtype, exp_dtype):
pytest.importorskip("pyarrow")
cols = ["a", "b"]
series_a = Series([1, 2], index=cols, dtype="int32")
df_b = DataFrame([[1, 0], [0, 1]], index=cols, dtype=dtype)
result = series_a.dot(df_b)
expected = Series([1, 2], dtype=exp_dtype)
tm.assert_series_equal(result, expected)
| TestDataFrameDot |
python | tensorflow__tensorflow | tensorflow/compiler/tests/add_n_test.py | {
"start": 1029,
"end": 3321
} | class ____(xla_test.XLATestCase):
def testAddTensorLists(self):
with self.session(), self.test_scope():
l1 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=3)
l2 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=3)
l1 = list_ops.tensor_list_set_item(l1, 0, 5.)
l2 = list_ops.tensor_list_set_item(l2, 2, 10.)
l = math_ops.add_n([l1, l2])
self.assertAllEqual(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),
[5.0, 0.0, 10.0])
def testAddTensorListsFailsIfLeadingDimsMismatch(self):
with self.session(), self.test_scope():
l1 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=2)
l2 = list_ops.tensor_list_reserve(
element_shape=[], element_dtype=dtypes.float32, num_elements=3)
l = math_ops.add_n([l1, l2])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"TensorList arguments to AddN must all have the same shape"):
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32).eval()
def testAddTensorListsFailsIfElementShapesMismatch(self):
with self.session() as session, self.test_scope():
# Use placeholders instead of constant values for shapes to prevent TF's
# shape inference from catching this early.
l1_element_shape = array_ops.placeholder(dtype=dtypes.int32)
l2_element_shape = array_ops.placeholder(dtype=dtypes.int32)
l1 = list_ops.tensor_list_reserve(
element_shape=l1_element_shape,
element_dtype=dtypes.float32,
num_elements=3)
l2 = list_ops.tensor_list_reserve(
element_shape=l2_element_shape,
element_dtype=dtypes.float32,
num_elements=3)
l = math_ops.add_n([l1, l2])
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"TensorList arguments to AddN must all have the same shape"):
session.run(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32), {
l1_element_shape: [],
l2_element_shape: [2]
})
if __name__ == "__main__":
test.main()
| XlaAddNTest |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/parser_test.py | {
"start": 1036,
"end": 11696
} | class ____(test.TestCase):
def assertAstMatches(self, actual_node, expected_node_src, expr=True):
if expr:
# Ensure multi-line expressions parse.
expected_node = gast.parse('({})'.format(expected_node_src)).body[0]
expected_node = expected_node.value
else:
expected_node = gast.parse(expected_node_src).body[0]
msg = 'AST did not match expected:\n{}\nActual:\n{}'.format(
pretty_printer.fmt(expected_node),
pretty_printer.fmt(actual_node))
self.assertTrue(ast_util.matches(actual_node, expected_node), msg)
def test_parse_entity(self):
def f(x):
return x + 1
node, _ = parser.parse_entity(f, future_features=())
self.assertEqual('f', node.name)
def test_parse_lambda(self):
l = lambda x: x + 1
expected_node_src = 'lambda x: (x + 1)'
node, source = parser.parse_entity(l, future_features=())
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
def test_parse_lambda_prefix_cleanup(self):
lambda_lam = lambda x: x + 1
expected_node_src = 'lambda x: (x + 1)'
node, source = parser.parse_entity(lambda_lam, future_features=())
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
def test_parse_lambda_resolution_by_location(self):
_ = lambda x: x + 1
l = lambda x: x + 1
_ = lambda x: x + 1
expected_node_src = 'lambda x: (x + 1)'
node, source = parser.parse_entity(l, future_features=())
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
self.assertEqual(source, 'lambda x: x + 1')
def test_parse_lambda_resolution_by_signature(self):
l = lambda x: lambda x, y: x + y
node, source = parser.parse_entity(l, future_features=())
expected_node_src = 'lambda x: (lambda x, y: (x + y))'
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
self.assertEqual(source, 'lambda x: lambda x, y: x + y')
node, source = parser.parse_entity(l(0), future_features=())
expected_node_src = 'lambda x, y: (x + y)'
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
self.assertEqual(source, 'lambda x, y: x + y')
def test_parse_lambda_resolution_ambiguous(self):
l = lambda x: lambda x: 2 * x
expected_exception_text = re.compile(r'found multiple definitions'
r'.+'
r'\(?lambda x: \(?lambda x'
r'.+'
r'\(?lambda x: \(?2', re.DOTALL)
with self.assertRaisesRegex(
errors.UnsupportedLanguageElementError,
expected_exception_text):
parser.parse_entity(l, future_features=())
with self.assertRaisesRegex(
errors.UnsupportedLanguageElementError,
expected_exception_text):
parser.parse_entity(l(0), future_features=())
def assertMatchesWithPotentialGarbage(self, source, expected, garbage):
# In runtimes which don't track end_col_number, the source contains the
# entire line, which in turn may have garbage from the surrounding context.
self.assertIn(source, (expected, expected + garbage))
def test_parse_lambda_multiline(self):
l = (
lambda x: lambda y: x + y # pylint:disable=g-long-lambda
- 1)
node, source = parser.parse_entity(l, future_features=())
expected_node_src = 'lambda x: (lambda y: ((x + y) - 1))'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(
source, ('lambda x: lambda y: x + y # pylint:disable=g-long-lambda\n'
' - 1'), ')')
node, source = parser.parse_entity(l(0), future_features=())
expected_node_src = 'lambda y: ((x + y) - 1)'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(
source, ('lambda y: x + y # pylint:disable=g-long-lambda\n'
' - 1'), ')')
def test_parse_lambda_in_expression(self):
l = (
lambda x: lambda y: x + y + 1,
lambda x: lambda y: x + y + 2,
)
node, source = parser.parse_entity(l[0], future_features=())
expected_node_src = 'lambda x: (lambda y: ((x + y) + 1))'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(
source, 'lambda x: lambda y: x + y + 1', ',')
node, source = parser.parse_entity(l[0](0), future_features=())
expected_node_src = 'lambda y: ((x + y) + 1)'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(
source, 'lambda y: x + y + 1', ',')
node, source = parser.parse_entity(l[1], future_features=())
expected_node_src = 'lambda x: (lambda y: ((x + y) + 2))'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(source,
'lambda x: lambda y: x + y + 2', ',')
node, source = parser.parse_entity(l[1](0), future_features=())
expected_node_src = 'lambda y: ((x + y) + 2)'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(source, 'lambda y: x + y + 2', ',')
def test_parse_lambda_complex_body(self):
l = lambda x: ( # pylint:disable=g-long-lambda
x.y(
[],
x.z,
(),
x[0:2],
),
x.u,
'abc',
1,
)
node, source = parser.parse_entity(l, future_features=())
expected_node_src = "lambda x: (x.y([], x.z, (), x[0:2]), x.u, 'abc', 1)"
self.assertAstMatches(node, expected_node_src)
base_source = ('lambda x: ( # pylint:disable=g-long-lambda\n'
' x.y(\n'
' [],\n'
' x.z,\n'
' (),\n'
' x[0:2],\n'
' ),\n'
' x.u,\n'
' \'abc\',\n'
' 1,')
# The complete source includes the trailing parenthesis. But that is only
# detected in runtimes which correctly track end_lineno for ASTs.
self.assertMatchesWithPotentialGarbage(source, base_source, '\n )')
def test_parse_lambda_function_call_definition(self):
def do_parse_and_test(lam, **unused_kwargs):
node, source = parser.parse_entity(lam, future_features=())
expected_node_src = 'lambda x: x'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(
source, 'lambda x: x', ', named_arg=1)')
do_parse_and_test( # Intentional line break
lambda x: x, named_arg=1)
def test_parse_entity_print_function(self):
def f(x):
print(x)
node, _ = parser.parse_entity(f, future_features=('print_function',))
self.assertEqual('f', node.name)
def test_parse_comments(self):
def f():
# unindented comment
pass
node, _ = parser.parse_entity(f, future_features=())
self.assertEqual('f', node.name)
def test_parse_multiline_strings(self):
def f():
print("""
multiline
string""")
node, _ = parser.parse_entity(f, future_features=())
self.assertEqual('f', node.name)
def _eval_code(self, code, name):
globs = {}
exec(code, globs) # pylint:disable=exec-used
return globs[name]
def test_dedent_block_basic(self):
code = """
def f(x):
if x > 0:
return -x
return x
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f(1), -1)
self.assertEqual(f(-1), -1)
def test_dedent_block_comments_out_of_line(self):
code = """
###
def f(x):
###
if x > 0:
###
return -x
###
###
return x
###
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f(1), -1)
self.assertEqual(f(-1), -1)
def test_dedent_block_multiline_string(self):
code = """
def f():
'''
Docstring.
'''
return '''
1
2
3'''
"""
f = self._eval_code(parser.dedent_block(code), 'f')
if sys.version_info >= (3, 13):
self.assertEqual(f.__doc__.strip(), 'Docstring.')
else:
self.assertEqual(f.__doc__, '\n Docstring.\n ')
self.assertEqual(f(), '\n 1\n 2\n 3')
def test_dedent_block_multiline_expression(self):
code = """
def f():
return (1,
2,
3)
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f(), (1, 2, 3))
def test_dedent_block_continuation(self):
code = r"""
def f():
a = \
1
return a
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f(), 1)
def test_dedent_block_continuation_in_string(self):
code = r"""
def f():
a = "a \
b"
return a
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f(), 'a b')
def test_parse_expression(self):
node = parser.parse_expression('a.b')
self.assertEqual('a', node.value.id)
self.assertEqual('b', node.attr)
def test_unparse(self):
node = gast.If(
test=gast.Constant(1, kind=None),
body=[
gast.Assign(
targets=[
gast.Name(
'a',
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=gast.Name(
'b', ctx=gast.Load(), annotation=None, type_comment=None))
],
orelse=[
gast.Assign(
targets=[
gast.Name(
'a',
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=gast.Constant('c', kind=None))
])
source = parser.unparse(node, indentation=' ')
self.assertEqual(
textwrap.dedent("""
# coding=utf-8
if 1:
a = b
else:
a = 'c'
""").strip(), source.strip())
def test_ext_slice_roundtrip(self):
def ext_slice(n):
return n[:, :], n[0, :], n[:, 0]
node, _ = parser.parse_entity(ext_slice, future_features=())
source = parser.unparse(node)
self.assertAstMatches(node, source, expr=False)
if __name__ == '__main__':
test.main()
| ParserTest |
python | tensorflow__tensorflow | tensorflow/python/ops/gradients_test.py | {
"start": 41324,
"end": 55688
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
def testCustomGradientTrivial(self):
@custom_gradient.custom_gradient
def MyIdentity(x):
def Grad(dy):
return [3 * dy]
return x, Grad
with ops.Graph().as_default():
x = constant(3.)
y = MyIdentity(MyIdentity(x))
dy = gradients.gradients(y, x)[0]
with session.Session():
self.assertEqual(9., self.evaluate(dy))
def testCustomGradient(self):
@custom_gradient.custom_gradient
def MyMultiply(x1, x2):
result = x1 * x2
def Grad(dy):
# Switched the ordering here.
return [dy * x1, dy * x2]
return result, Grad
with ops.Graph().as_default():
x1 = constant(3.)
x2 = constant(5.)
y = MyMultiply(x1, x2)
dy = gradients.gradients(y, [x1, x2])
self.assertAllEqual([3., 5.], self.evaluate(dy))
def testCustomGradientClass(self):
class Model:
@custom_gradient.custom_gradient
def Multiply(self, x1, x2):
result = x1 * x2
grad = lambda dy: (dy * x1, dy * x2)
return result, grad
with ops.Graph().as_default():
x1 = constant(3.)
x2 = constant(5.)
m = Model()
y = m.Multiply(x1, x2)
dy = gradients.gradients(y, [x1, x2])
self.assertAllEqual([3., 5.], self.evaluate(dy))
def testCustomGradientErrors(self):
@custom_gradient.custom_gradient
def F(x):
def Grad(_):
raise RuntimeError("x")
return x, Grad
with ops.Graph().as_default():
x = constant(1.0)
y = F(x)
with self.assertRaises(RuntimeError):
gradients.gradients(y, x)
def testCustomGradientWithVariables(self):
@custom_gradient.custom_gradient
def F(x):
out = core_layers.dense(x, 3, use_bias=False)
def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name
self.assertEqual(1, len(variables)) # pylint: disable=g-generic-assert
grads = gradients.gradients(out, [x, variables[0]], grad_ys=out_grad)
return grads[0], [array_ops.ones((4, 3))]
return out, Grad
with ops.Graph().as_default():
x = array_ops.ones((2, 4))
with variable_scope.variable_scope("f", use_resource=True) as vs:
y = F(x)
all_vars = vs.global_variables()
assert len(all_vars) == 1
grads = gradients.gradients(y, [x, all_vars[0]])
for g in grads:
self.assertIsNotNone(g)
self.evaluate(variables.global_variables_initializer())
dw = self.evaluate(math_ops.reduce_sum(grads[1]))
self.assertEqual(12., dw)
@parameterized.named_parameters([
dict(
testcase_name="Eager",
decorator=lambda f: f),
dict(
testcase_name="Function",
decorator=def_function.function),
])
def testCustomGradientRaggedTensor(self, decorator):
with context.eager_mode():
@custom_gradient.custom_gradient
def F(x):
out = x * x
def Grad(*grad):
return 3 * grad[0]
return out, Grad
rt = ragged_factory_ops.constant([[1., 2.], [3.]])
with backprop.GradientTape() as tape:
tape.watch(rt.values)
out = decorator(F)(rt)
result = tape.gradient(out, rt)
self.assertIsInstance(out, ragged_tensor.RaggedTensor)
self.assertAllEqual(out, [[1., 4.], [9.]])
self.assertIsInstance(result, ragged_tensor.RaggedTensor)
self.assertAllEqual(result, [[3., 3.], [3.]])
@parameterized.named_parameters([
dict(
testcase_name="Eager",
decorator=lambda f: f),
dict(
testcase_name="Function",
decorator=def_function.function),
])
def testCustomGradientMultipleRaggedTensors(self, decorator):
with context.eager_mode():
@custom_gradient.custom_gradient
def F(x, y):
out = (x * x, 2 * y)
def Grad(*grad):
return (3 * grad[0], 4 * grad[1])
return out, Grad
rt1 = ragged_factory_ops.constant([[1., 2.], [3.]])
rt2 = ragged_factory_ops.constant([[4.], [5., 6.]])
with backprop.GradientTape() as tape:
tape.watch((rt1, rt2))
out1, out2 = decorator(F)(rt1, rt2)
grad1, grad2 = tape.gradient((out1, out2), (rt1, rt2))
self.assertIsInstance(out1, ragged_tensor.RaggedTensor)
self.assertAllEqual(out1, [[1., 4.], [9.]])
self.assertIsInstance(out2, ragged_tensor.RaggedTensor)
self.assertAllEqual(out2, [[8.], [10., 12.]])
self.assertIsInstance(grad1, ragged_tensor.RaggedTensor)
self.assertAllEqual(grad1, [[3., 3.], [3.]])
self.assertIsInstance(grad2, ragged_tensor.RaggedTensor)
self.assertAllEqual(grad2, [[4.], [4., 4.]])
@test_util.enable_quantized_dtypes_training
def testCustomGradientQuantizedDtypeTraining(self):
# TODO(b/309175067): Remove below skipTest() when fixed.
if sys.platform == "darwin":
self.skipTest("This test fails in TF MacOS nightly and continuous builds")
with context.eager_mode():
@custom_gradient.custom_gradient
def F(x):
out = x
def Grad(*grad):
return grad
return out, Grad
x = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.qint8)
with backprop.GradientTape() as tape:
tape.watch(x)
out = F(x)
result = tape.gradient(out, x)
self.assertAllEqual(out, [[1, 2], [3, 4]])
self.assertAllEqual(result, [[1, 1], [1, 1]])
self.assertEqual(result.dtype, dtypes.qint8)
def testCustomGradientWithCapture(self):
with ops.Graph().as_default():
x = constant(3.)
@framework_function.Defun(dtypes.float32)
def F(y):
@custom_gradient.custom_gradient
def MyMultiply(x1, x2):
result = x1 * x2
def Grad(dy):
# Switched the ordering here.
return [dy * x1, dy * x2]
return result, Grad
res = MyMultiply(x, y)
return gradients.gradients(res, [y])
y = constant(5.)
dy = F(y)
self.assertAllEqual(5., self.evaluate(dy))
def testCustomGradientWithVariablesNoFalsePositives(self):
@custom_gradient.custom_gradient
def F(x):
out = core_layers.dense(x, 3, use_bias=False)
def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name
self.assertEqual(1, len(variables)) # pylint: disable=g-generic-assert
grads = gradients.gradients(out, [x, variables[0]], grad_ys=out_grad)
return grads[0], [array_ops.ones((3, 3))]
return out, Grad
with ops.Graph().as_default():
with variable_scope.variable_scope("f", use_resource=True) as vs:
a = array_ops.ones((2, 4))
# Variables in these layers shouldn't be picked up by the decorator.
b = core_layers.dense(a, 3, use_bias=False)
c = core_layers.dense(b, 3, use_bias=False)
x = core_layers.dense(b, 3, use_bias=False) + c
# Only the variables used in F.
y = F(x)
all_vars = vs.global_variables()
assert len(all_vars) == 4
grads = gradients.gradients(y, [x] + all_vars)
_, var_grads = grads[0], grads[1:]
for g in grads:
self.assertIsNotNone(g)
self.evaluate(variables.global_variables_initializer())
dw = self.evaluate(math_ops.reduce_sum(var_grads[-1]))
self.assertEqual(9., dw)
def testCustomGradientWithVariablesEager(self):
with context.eager_mode():
layer = core_layers.Dense(4, use_bias=False)
@custom_gradient.custom_gradient
def F(x):
out = layer(x)
def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name
del out_grad
self.assertEqual(1, len(variables)) # pylint: disable=g-generic-assert
return (array_ops.ones((3, 2)),
[array_ops.ones((2, 4))])
return out, Grad
x = array_ops.ones((3, 2)) + 2.
with backprop.GradientTape() as tape:
tape.watch(x)
y = F(x)
w, = layer.variables
dx, dw = tape.gradient(y, [x, w])
self.assertEqual(6., math_ops.reduce_sum(dx).numpy())
self.assertEqual(8., math_ops.reduce_sum(dw).numpy())
@test_util.run_v1_only("b/120545219")
def testCustomGradientErrorsWithNonResourceVariables(self):
def F(x, use_resource=False):
with variable_scope.variable_scope("f", use_resource=use_resource):
out = core_layers.dense(x, 4, use_bias=False)
def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name
del out_grad
self.assertEqual(1, len(variables)) # pylint: disable=g-generic-assert
return (array_ops.ones((3, 2)), [array_ops.ones((2, 4))])
return out, Grad
@custom_gradient.custom_gradient
def FResource(x):
return F(x, use_resource=True)
@custom_gradient.custom_gradient
def FNonResource(x):
return F(x, use_resource=False)
x = array_ops.ones((3, 2)) + 2.
# Wrapping scope has use_resource=True but inner scope sets to False. Fails.
with variable_scope.variable_scope("vs1", use_resource=True):
with self.assertRaisesWithPredicateMatch(TypeError,
"must be `ResourceVariable`s"):
FNonResource(x)
# Wrapping scope has use_resource=False but inner scope sets to True.
# Passes.
with variable_scope.variable_scope("vs2", use_resource=False):
FResource(x)
@parameterized.parameters(True, False)
def testCustomGradientVariablesKwonlyArgs(self, anonymous_varargs):
with context.eager_mode():
x_captured = variables.Variable(3.) # Used by FuncMult
@custom_gradient.custom_gradient
def FuncMult(x):
def ActualGrad(dy, variables): # pylint: disable=redefined-outer-name
self.assertLen(variables, 1)
self.assertIs(variables[0], x_captured)
x_captured_grad = 5. * x * dy
return (4. * x_captured * dy, [x_captured_grad])
# Define the returned GradMult, using varargs; "variables" is kwonlyarg
if anonymous_varargs:
def GradMult(dy, *, variables=None): # pylint: disable=redefined-outer-name
return ActualGrad(dy, variables)
else:
def GradMult(*dys, variables=None): # pylint: disable=redefined-outer-name
return ActualGrad(dys[0], variables)
return x * x_captured, GradMult
x = variables.Variable(6.)
with backprop.GradientTape(persistent=True) as g:
y = FuncMult(x)
self.assertAllEqual(g.gradient(y, x), 4. * 3.)
def testWithNumpyInputs(self):
with context.eager_mode():
@custom_gradient.custom_gradient
def F(x):
out = x
def Grad(_):
return (None, None)
return out, Grad
x = np.ones((3, 2), dtype=np.float32)
# Smoke test to ensure numpy inputs are accepted
F(x)
@test_util.run_v1_only("b/120545219")
def testRVGradientsDynamicCond(self):
with self.cached_session():
alpha = resource_variable_ops.ResourceVariable(
np.random.random((1,)),
dtype="float32")
conditional = array_ops.placeholder_with_default(True, shape=())
output = cond.cond(
conditional, lambda: alpha * 2, lambda: alpha * 3)
g, = gradients_impl.gradients(output, alpha)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(g, [2.0])
self.assertAllEqual(g.eval(feed_dict={conditional: False}), [3.0])
def testRecursiveCustomGradient(self):
@custom_gradient.custom_gradient
def F(x):
out = core_layers.dense(x, 3, use_bias=False)
def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name
self.assertEqual(1, len(variables)) # pylint: disable=g-generic-assert
grads = gradients.gradients(out, [x, variables[0]], grad_ys=out_grad)
return grads[0], [array_ops.ones((4, 3))]
return out, Grad
@custom_gradient.custom_gradient
def DoubleF(x):
out = F(x)
def Grad(out_grad, variables=None): # pylint: disable=redefined-outer-name
self.assertEqual(1, len(variables)) # pylint: disable=g-generic-assert
grads = gradients.gradients(out, [x, variables[0]], grad_ys=out_grad)
return grads[0], [array_ops.ones((4, 3))]
return out, Grad
with ops.Graph().as_default():
x = array_ops.ones((2, 4))
with variable_scope.variable_scope("f", use_resource=True) as vs:
y = DoubleF(x)
all_vars = vs.global_variables()
assert len(all_vars) == 1
grads = gradients.gradients(y, [x, all_vars[0]])
for g in grads:
self.assertIsNotNone(g)
self.evaluate(variables.global_variables_initializer())
dw = self.evaluate(math_ops.reduce_sum(grads[1]))
self.assertEqual(12., dw)
@parameterized.named_parameters(
[(("_%s_%s" % (x_struct, y_struct)).replace(" ", "").replace("None", ""), # pylint: disable=g-complex-comprehension
x_struct, y_struct)
for y_struct in [[None, ()], (None, (), [], (None, ((), None)))]
for x_struct in [(None, ()), (((), ()), [None, None], [], (None, ()))]
])
@test_util.run_in_graph_and_eager_modes
def testCustomGradientStructuralInputOutput(self, x_struct, y_struct):
"""Tests that custom_gradient can handle structured inputs/outputs."""
def Zeros(x):
return nest.map_structure(lambda _: array_ops.zeros([], "float32"), x)
def GetStruct(x):
return nest.map_structure(lambda _: None, x)
def MakeVjp(f, *x):
with backprop.GradientTape(persistent=True) as tape:
tape.watch(nest.flatten(x))
y = f(*x)
def Vjp(dy):
return tape.gradient(y, x, output_gradients=dy)
return y, Vjp
@custom_gradient.custom_gradient
def F(*x):
self.assertEqual(x_struct, GetStruct(x))
def Vjp(*dy):
self.assertEqual(len(nest.flatten(y_struct)),
len(nest.flatten(dy)))
return nest.flatten(Zeros(x_struct))
return Zeros(y_struct), Vjp
x, dy = Zeros([x_struct, y_struct])
y, vjp = MakeVjp(F, *x)
dx = vjp(dy)
self.assertEqual(x_struct, GetStruct(dx))
self.assertEqual(y_struct, GetStruct(y))
| CustomGradientTest |
python | rapidsai__cudf | python/cudf/cudf/pandas/fast_slow_proxy.py | {
"start": 2273,
"end": 2400
} | class ____(IntEnum):
"""Simple enum to track the type of wrapped object of a final proxy"""
SLOW = 0
FAST = 1
| _State |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 36932,
"end": 55589
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testInvolutionLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testLeftRightWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [1, 2, 3]], [[1, 2, 3], [1, 2, 3]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[3, 2, 1], [3, 2, 1]], [[3, 2, 1], [3, 2, 1]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipLeftRightStateful(self):
# Test random flip with single seed (stateful).
with ops.Graph().as_default():
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_left_right"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertEqual(count_flipped, 45)
self.assertEqual(count_unflipped, 55)
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
@parameterized.named_parameters(
("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down),
)
def testRandomFlipStateless(self, func):
with test_util.use_gpu():
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([2, 3, 1])
if "RandomFlipUpDown" in self.id():
y_np = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
iterations = 2
flip_counts = [None for _ in range(iterations)]
flip_sequences = ["" for _ in range(iterations)]
test_seed = (1, 2)
split_seeds = stateless_random_ops.split(test_seed, 10)
seeds_list = self.evaluate(split_seeds)
for i in range(iterations):
count_flipped = 0
count_unflipped = 0
flip_seq = ""
for seed in seeds_list:
y_tf = func(x_tf, seed=seed)
y_tf_eval = self.evaluate(y_tf)
if y_tf_eval[0][0] == 1:
self.assertAllEqual(y_tf_eval, x_np)
count_unflipped += 1
flip_seq += "U"
else:
self.assertAllEqual(y_tf_eval, y_np)
count_flipped += 1
flip_seq += "F"
flip_counts[i] = (count_flipped, count_unflipped)
flip_sequences[i] = flip_seq
# Verify that results are deterministic.
for i in range(1, iterations):
self.assertAllEqual(flip_counts[0], flip_counts[i])
self.assertAllEqual(flip_sequences[0], flip_sequences[i])
# TODO(b/162345082): stateless random op generates different random number
# with xla_gpu. Update tests such that there is a single ground truth result
# to test against.
@parameterized.named_parameters(
("_RandomFlipLeftRight", image_ops.stateless_random_flip_left_right),
("_RandomFlipUpDown", image_ops.stateless_random_flip_up_down)
)
def testRandomFlipStatelessWithBatch(self, func):
with test_util.use_gpu():
batch_size = 16
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [6, 5, 4]], dtype=np.uint8).reshape([1, 2, 3, 1])
if "RandomFlipUpDown" in self.id():
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
x_tf = constant_op.constant(x_np, shape=x_np.shape)
iterations = 2
flip_counts = [None for _ in range(iterations)]
flip_sequences = ["" for _ in range(iterations)]
test_seed = (1, 2)
split_seeds = stateless_random_ops.split(test_seed, 10)
seeds_list = self.evaluate(split_seeds)
for i in range(iterations):
count_flipped = 0
count_unflipped = 0
flip_seq = ""
for seed in seeds_list:
y_tf = func(x_tf, seed=seed)
y_tf_eval = self.evaluate(y_tf)
for j in range(batch_size):
if y_tf_eval[j][0][0] == 1:
self.assertAllEqual(y_tf_eval[j], x_np[j])
count_unflipped += 1
flip_seq += "U"
else:
self.assertAllEqual(y_tf_eval[j], y_np[j])
count_flipped += 1
flip_seq += "F"
flip_counts[i] = (count_flipped, count_unflipped)
flip_sequences[i] = flip_seq
for i in range(1, iterations):
self.assertAllEqual(flip_counts[0], flip_counts[i])
self.assertAllEqual(flip_sequences[0], flip_sequences[i])
def testRandomFlipLeftRightWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[3, 2, 1], [3, 2, 1]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_left_right(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
def testInvolutionUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testUpDownWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[4, 5, 6], [1, 2, 3]], [[10, 11, 12], [7, 8, 9]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testRandomFlipUpDownStateful(self):
# Test random flip with single seed (stateful).
with ops.Graph().as_default():
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
seed = 42
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf, seed=seed)
self.assertTrue(y.op.name.startswith("random_flip_up_down"))
count_flipped = 0
count_unflipped = 0
for _ in range(100):
y_tf = self.evaluate(y)
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
# 100 trials
# Mean: 50
# Std Dev: ~5
# Six Sigma: 50 - (5 * 6) = 20
self.assertGreaterEqual(count_flipped, 20)
self.assertGreaterEqual(count_unflipped, 20)
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertEqual(count_flipped, 45)
self.assertEqual(count_unflipped, 55)
def testRandomFlipUpDownWithBatch(self):
batch_size = 16
seed = 42
# create single item of test data
x_np_raw = np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.uint8
).reshape([1, 2, 3, 1])
y_np_raw = np.array(
[[4, 5, 6], [1, 2, 3]], dtype=np.uint8
).reshape([1, 2, 3, 1])
# create batched test data
x_np = np.vstack([x_np_raw for _ in range(batch_size)])
y_np = np.vstack([y_np_raw for _ in range(batch_size)])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
count_flipped = 0
count_unflipped = 0
for seed in range(100):
y_tf = self.evaluate(image_ops.random_flip_up_down(x_tf, seed=seed))
# check every element of the batch
for i in range(batch_size):
if y_tf[i][0][0] == 1:
self.assertAllEqual(y_tf[i], x_np[i])
count_unflipped += 1
else:
self.assertAllEqual(y_tf[i], y_np[i])
count_flipped += 1
self.assertEqual(count_flipped, 772)
self.assertEqual(count_unflipped, 828)
def testInvolutionTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testInvolutionTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(image_ops.transpose(x_tf))
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testTransposeWithBatch(self):
x_np = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.uint8).reshape([2, 2, 3, 1])
y_np = np.array(
[[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]],
dtype=np.uint8).reshape([2, 3, 2, 1])
with self.cached_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose(x_tf)
y_tf = self.evaluate(y)
self.assertAllEqual(y_tf, y_np)
def testPartialShapes(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims_3 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_dims_4 = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_unknown_batch = array_ops.placeholder(
dtypes.uint8, shape=[None, 64, 64, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
#Ops that support 3D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertIsNone(transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims_3 = op(p_unknown_dims_3)
self.assertEqual(3, transformed_unknown_dims_3.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegex(ValueError, "must be > 0"):
op(p_zero_dim)
#Ops that support 4D input
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose, image_ops.rot90
]:
transformed_unknown_dims_4 = op(p_unknown_dims_4)
self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
transformed_unknown_batch = op(p_unknown_batch)
self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
with self.assertRaisesRegex(ValueError,
"must be at least three-dimensional"):
op(p_wrong_rank)
def testRot90GroupOrder(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session():
rotated = image
for _ in range(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90GroupOrderWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session():
rotated = image
for _ in range(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, self.evaluate(rotated))
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.cached_session():
for k in range(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testRot90NumpyEquivalenceWithBatch(self):
image = np.arange(48, dtype=np.uint8).reshape([2, 2, 4, 3])
with self.cached_session():
for k in range(4):
y_np = np.rot90(image, k=k, axes=(1, 2))
self.assertAllEqual(
y_np, self.evaluate(image_ops.rot90(image, k)))
def testFlipImageUnknownShape(self):
expected_output = constant_op.constant([[[[3, 4, 5], [0, 1, 2]],
[[9, 10, 11], [6, 7, 8]]]])
def generator():
image_input = np.array(
[[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], np.int32)
yield image_input
dataset = dataset_ops.Dataset.from_generator(
generator,
output_types=dtypes.int32,
output_shapes=tensor_shape.TensorShape([1, 2, 2, 3]))
dataset = dataset.map(image_ops.flip_left_right)
image_flipped_via_dataset_map = get_single_element.get_single_element(
dataset.take(1))
self.assertAllEqual(image_flipped_via_dataset_map, expected_output)
| FlipTransposeRotateTest |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/no_unused_fragments.py | {
"start": 69,
"end": 1489
} | class ____(ValidationRule):
__slots__ = 'fragment_definitions', 'operation_definitions', 'fragment_adjacencies', 'spread_names'
def __init__(self, context):
super(NoUnusedFragments, self).__init__(context)
self.operation_definitions = []
self.fragment_definitions = []
def enter_OperationDefinition(self, node, key, parent, path, ancestors):
self.operation_definitions.append(node)
return False
def enter_FragmentDefinition(self, node, key, parent, path, ancestors):
self.fragment_definitions.append(node)
return False
def leave_Document(self, node, key, parent, path, ancestors):
fragment_names_used = set()
for operation in self.operation_definitions:
fragments = self.context.get_recursively_referenced_fragments(operation)
for fragment in fragments:
fragment_names_used.add(fragment.name.value)
for fragment_definition in self.fragment_definitions:
if fragment_definition.name.value not in fragment_names_used:
self.context.report_error(GraphQLError(
self.unused_fragment_message(fragment_definition.name.value),
[fragment_definition]
))
@staticmethod
def unused_fragment_message(fragment_name):
return 'Fragment "{}" is never used.'.format(fragment_name)
| NoUnusedFragments |
python | h5py__h5py | h5py/tests/test_vds/test_highlevel_vds.py | {
"start": 13337,
"end": 15398
} | class ____(ut.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.f1 = osp.join(self.tmpdir, 'testfile1.h5')
self.f2 = osp.join(self.tmpdir, 'testfile2.h5')
self.data1 = np.arange(10)
self.data2 = np.arange(10) * -1
with h5.File(self.f1, 'w') as f:
# dataset
ds = f.create_dataset('data', (10,), 'f4')
ds[:] = self.data1
with h5.File(self.f2, 'w') as f:
# dataset
ds = f.create_dataset('data', (10,), 'f4')
ds[:] = self.data2
self.make_vds(f)
def make_vds(self, f):
# virtual dataset
layout = h5.VirtualLayout((2, 10), 'f4')
vsource1 = h5.VirtualSource(self.f1, 'data', shape=(10,))
vsource2 = h5.VirtualSource(self.f2, 'data', shape=(10,))
layout[0] = vsource1
layout[1] = vsource2
f.create_virtual_dataset('virtual', layout)
@pytest.mark.thread_unsafe(reason="Renames shared files")
def test_relative_vds(self):
with h5.File(self.f2) as f:
data = f['virtual'][:]
np.testing.assert_array_equal(data[0], self.data1)
np.testing.assert_array_equal(data[1], self.data2)
# move f2 -> f3
f3 = osp.join(self.tmpdir, 'testfile3.h5')
os.rename(self.f2, f3)
with h5.File(f3) as f:
data = f['virtual'][:]
assert data.dtype == 'f4'
np.testing.assert_array_equal(data[0], self.data1)
np.testing.assert_array_equal(data[1], self.data2)
# moving other file
f4 = osp.join(self.tmpdir, 'testfile4.h5')
os.rename(self.f1, f4)
with h5.File(f3) as f:
data = f['virtual'][:]
assert data.dtype == 'f4'
# unavailable data is silently converted to default value
np.testing.assert_array_equal(data[0], 0)
np.testing.assert_array_equal(data[1], self.data2)
def tearDown(self):
shutil.rmtree(self.tmpdir)
| RelativeLinkTestCase |
python | huggingface__transformers | src/transformers/models/albert/modeling_albert.py | {
"start": 16772,
"end": 20747
} | class ____(AlbertPreTrainedModel):
_tied_weights_keys = {
"predictions.decoder.weight": "albert.embeddings.word_embeddings.weight",
"predictions.decoder.bias": "predictions.bias",
}
def __init__(self, config: AlbertConfig):
super().__init__(config)
self.albert = AlbertModel(config)
self.predictions = AlbertMLMHead(config)
self.sop_classifier = AlbertSOPHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self) -> nn.Linear:
return self.predictions.decoder
def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
self.predictions.decoder = new_embeddings
def get_input_embeddings(self) -> nn.Embedding:
return self.albert.embeddings.word_embeddings
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
sentence_order_label: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[AlbertForPreTrainingOutput, tuple]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
sentence_order_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring) Indices should be in `[0, 1]`. `0` indicates original order (sequence A, then
sequence B), `1` indicates switched order (sequence B, then sequence A).
Example:
```python
>>> from transformers import AutoTokenizer, AlbertForPreTraining
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
>>> model = AlbertForPreTraining.from_pretrained("albert/albert-base-v2")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
>>> # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_logits = outputs.prediction_logits
>>> sop_logits = outputs.sop_logits
```"""
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores = self.predictions(sequence_output)
sop_scores = self.sop_classifier(pooled_output)
total_loss = None
if labels is not None and sentence_order_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))
total_loss = masked_lm_loss + sentence_order_loss
return AlbertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
sop_logits=sop_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| AlbertForPreTraining |
python | faif__python-patterns | patterns/behavioral/chain_of_responsibility.py | {
"start": 760,
"end": 1381
} | class ____(ABC):
def __init__(self, successor: Optional["Handler"] = None):
self.successor = successor
def handle(self, request: int) -> None:
"""
Handle request and stop.
If can't - call next handler in chain.
As an alternative you might even in case of success
call the next handler.
"""
res = self.check_range(request)
if not res and self.successor:
self.successor.handle(request)
@abstractmethod
def check_range(self, request: int) -> Optional[bool]:
"""Compare passed value to predefined interval"""
| Handler |
python | getsentry__sentry | tests/sentry/integrations/msteams/test_integration.py | {
"start": 4740,
"end": 7291
} | class ____(TestCase):
def setUp(self) -> None:
self.integration = self.create_provider_integration(
provider="msteams",
name="MS Teams",
external_id=team_id,
metadata={
"access_token": "test-access-token",
"service_url": "https://smba.trafficmanager.net/amer/",
"installation_type": "team",
"tenant_id": tenant_id,
},
)
self.installation = MsTeamsIntegration(self.integration, self.organization.id)
self.target = IntegrationNotificationTarget(
provider_key=NotificationProviderKey.MSTEAMS,
resource_type=NotificationTargetResourceType.CHANNEL,
resource_id="conversation123",
integration_id=self.integration.id,
organization_id=self.organization.id,
)
@patch("sentry.integrations.msteams.client.MsTeamsClient.send_card")
def test_send_notification_success(self, mock_send_card: MagicMock) -> None:
from sentry.integrations.msteams.card_builder.block import AdaptiveCard
payload: AdaptiveCard = {
"type": "AdaptiveCard",
"$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
"version": "1.2",
"body": [],
}
self.installation.send_notification(target=self.target, payload=payload)
mock_send_card.assert_called_once_with(conversation_id="conversation123", card=payload)
@patch("sentry.integrations.msteams.client.MsTeamsClient.send_card")
def test_send_notification_api_error(self, mock_send_card: MagicMock) -> None:
from sentry.integrations.msteams.card_builder.block import AdaptiveCard
error_payload = json.dumps(
{
"error": {
"code": "ConversationBlockedByUser",
"message": "User blocked the conversation with the bot.",
},
}
)
mock_send_card.side_effect = ApiError(
text=error_payload,
code=400,
)
payload: AdaptiveCard = {
"type": "AdaptiveCard",
"$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
"version": "1.2",
"body": [],
}
with pytest.raises(IntegrationConfigurationError) as e:
self.installation.send_notification(target=self.target, payload=payload)
assert str(e.value) == error_payload
| MsTeamsIntegrationSendNotificationTest |
python | charliermarsh__ruff | crates/ruff_benchmark/resources/numpy/ctypeslib.py | {
"start": 5452,
"end": 6332
} | class ____(_ndptr_base):
@classmethod
def from_param(cls, obj):
if not isinstance(obj, ndarray):
raise TypeError("argument must be an ndarray")
if cls._dtype_ is not None \
and obj.dtype != cls._dtype_:
raise TypeError("array must have data type %s" % cls._dtype_)
if cls._ndim_ is not None \
and obj.ndim != cls._ndim_:
raise TypeError("array must have %d dimension(s)" % cls._ndim_)
if cls._shape_ is not None \
and obj.shape != cls._shape_:
raise TypeError("array must have shape %s" % str(cls._shape_))
if cls._flags_ is not None \
and ((obj.flags.num & cls._flags_) != cls._flags_):
raise TypeError("array must have flags %s" %
_flags_fromnum(cls._flags_))
return obj.ctypes
| _ndptr |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/events.py | {
"start": 2929,
"end": 6350
} | class ____(Generic[T], EventWithMetadata):
"""Event corresponding to one of an op's outputs.
Op compute functions must explicitly yield events of this type when they have more than
one output, or when they also yield events of other types, or when defining a op using the
:py:class:`OpDefinition` API directly.
Outputs are values produced by ops that will be consumed by downstream ops in a job.
They are type-checked at op boundaries when their corresponding :py:class:`Out`
or the downstream :py:class:`In` is typed.
Args:
value (Any): The value returned by the compute function.
output_name (str): Name of the corresponding Out. (default: "result")
metadata (Optional[Dict[str, Union[str, float, int, MetadataValue]]]):
Arbitrary metadata about the output. Keys are displayed string labels, and values are
one of the following: string, float, int, JSON-serializable dict, JSON-serializable
list, and one of the data classes returned by a MetadataValue static method.
data_version (Optional[DataVersion]): (Beta) A data version to manually set
for the asset.
tags (Optional[Mapping[str, str]]): Tags that will be attached to the asset
materialization event corresponding to this output, if there is one.
"""
def __init__(
self,
value: T,
output_name: str = DEFAULT_OUTPUT,
metadata: Optional[Mapping[str, RawMetadataValue]] = None,
data_version: Optional[DataVersion] = None,
*,
tags: Optional[Mapping[str, str]] = None,
):
self._value = value
self._output_name = check.str_param(output_name, "output_name")
self._data_version = check.opt_inst_param(data_version, "data_version", DataVersion)
self._metadata = normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str),
)
self._tags = validate_asset_event_tags(tags)
@property
def metadata(self) -> MetadataMapping:
return self._metadata
@property
def tags(self) -> Optional[Mapping[str, str]]:
return self._tags
@public
@property
def value(self) -> Any:
"""Any: The value returned by the compute function."""
return self._value
@public
@property
def output_name(self) -> str:
"""str: Name of the corresponding :py:class:`Out`."""
return self._output_name
@public
@property
def data_version(self) -> Optional[DataVersion]:
"""Optional[DataVersion]: A data version that was manually set on the `Output`."""
return self._data_version
def __eq__(self, other: object) -> bool:
return (
isinstance(other, Output)
and self.value == other.value
and self.output_name == other.output_name
and self.metadata == other.metadata
and self.tags == other.tags
)
def with_metadata(self, metadata: Optional[Mapping[str, RawMetadataValue]]) -> "Output":
"""Returns a new Output with the same value and output_name,
but with the provided metadata.
"""
return self.__class__(
value=self.value,
output_name=self.output_name,
metadata=metadata,
data_version=self.data_version,
tags=self.tags,
)
| Output |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 51507,
"end": 62726
} | class ____(InspectTool):
''' *toolbar icon*: |hover_icon|
The hover tool is a passive inspector tool. It is generally on at all
times, but can be configured in the inspector's menu associated with the
*toolbar icon* shown above.
By default, the hover tool displays informational tooltips whenever the
cursor is directly over a glyph. The data to show comes from the glyph's
data source, and what to display is configurable with the ``tooltips``
property that maps display names to columns in the data source, or to
special known variables.
Here is an example of how to configure and use the hover tool::
# Add tooltip (name, field) pairs to the tool. See below for a
# description of possible field values.
hover.tooltips = [
("index", "$index"),
("(x,y)", "($x, $y)"),
("radius", "@radius"),
("fill color", "$color[hex, swatch]:fill_color"),
("fill color", "$color[hex]:fill_color"),
("fill color", "$color:fill_color"),
("fill color", "$swatch:fill_color"),
("foo", "@foo"),
("bar", "@bar"),
("baz", "@baz{safe}"),
("total", "@total{$0,0.00}"),
]
You can also supply a ``Callback`` to the ``HoverTool``, to build custom
interactions on hover. In this case you may want to turn the tooltips
off by setting ``tooltips=None``.
.. warning::
When supplying a callback or custom template, the explicit intent
of this Bokeh Model is to embed *raw HTML and JavaScript code* for
a browser to execute. If any part of the code is derived from untrusted
user inputs, then you must take appropriate care to sanitize the user
input prior to passing to Bokeh.
Hover tool does not currently work with the following glyphs:
.. hlist::
:columns: 3
* annulus
* arc
* bezier
* image_url
* oval
* patch
* quadratic
* ray
* step
* text
.. |hover_icon| image:: /_images/icons/hover.svg
:height: 24px
:alt: Icon of a popup tooltip with abstract lines of text representing the hover tool in the toolbar.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
renderers = Either(Auto, List(Instance(DataRenderer)), default="auto", help="""
A list of renderers to hit test against. If unset, defaults to
all renderers on a plot.
""")
callback = Nullable(Instance(Callback), help="""
A callback to run in the browser whenever the input's value changes. The
``cb_data`` parameter that is available to the Callback code will contain two
``HoverTool`` specific fields:
:index: object containing the indices of the hovered points in the data source
:geometry: object containing the coordinates of the hover cursor
""")
tooltips = Either(Null, Instance(DOMElement), String, List(Tuple(String, String)),
default=[
("index","$index"),
("data (x, y)","($x, $y)"),
("screen (x, y)","($sx, $sy)"),
], help="""
The (name, field) pairs describing what the hover tool should
display when there is a hit.
Field names starting with "@" are interpreted as columns on the
data source. For instance, "@temp" would look up values to display
from the "temp" column of the data source.
Field names starting with "$" are special, known fields:
:$index: index of hovered point in the data source
:$name: value of the ``name`` property of the hovered glyph renderer
:$x: x-coordinate under the cursor in data space
:$y: y-coordinate under the cursor in data space
:$sx: x-coordinate under the cursor in screen (canvas) space
:$sy: y-coordinate under the cursor in screen (canvas) space
:$color: color data from data source, with the syntax:
``$color[options]:field_name``. The available options
are: ``hex`` (to display the color as a hex value), ``swatch``
(color data from data source displayed as a small color box)
:$swatch: color data from data source displayed as a small color box
Field names that begin with ``@`` are associated with columns in a
``ColumnDataSource``. For instance the field name ``"@price"`` will
display values from the ``"price"`` column whenever a hover is triggered.
If the hover is for the 17th glyph, then the hover tooltip will
correspondingly display the 17th price value.
Note that if a column name contains spaces, the it must be supplied by
surrounding it in curly braces, e.g. ``@{adjusted close}`` will display
values from a column named ``"adjusted close"``.
Sometimes (especially with stacked charts) it is desirable to allow the
name of the column be specified indirectly. The field name ``@$name`` is
distinguished in that it will look up the ``name`` field on the hovered
glyph renderer, and use that value as the column name. For instance, if
a user hovers with the name ``"US East"``, then ``@$name`` is equivalent to
``@{US East}``.
By default, values for fields (e.g. ``@foo``) are displayed in a basic
numeric format. However it is possible to control the formatting of values
more precisely. Fields can be modified by appending a format specified to
the end in curly braces. Some examples are below.
.. code-block:: python
"@foo{0,0.000}" # formats 10000.1234 as: 10,000.123
"@foo{(.00)}" # formats -10000.1234 as: (10000.123)
"@foo{($ 0.00 a)}" # formats 1230974 as: $ 1.23 m
Specifying a format ``{safe}`` after a field name will override automatic
escaping of the tooltip data source. Any HTML tags in the data tags will
be rendered as HTML in the resulting HoverTool output. See
:ref:`custom_hover_tooltip` for a more detailed example.
``None`` is also a valid value for tooltips. This turns off the
rendering of tooltips. This is mostly useful when supplying other
actions on hover via the callback property.
.. note::
The tooltips attribute can also be configured with a mapping type,
e.g. ``dict`` or ``OrderedDict``.
""").accepts(Dict(String, String), lambda d: list(d.items()))
formatters = Dict(String, Either(Enum(TooltipFieldFormatter), Instance(CustomJSHover)), default=lambda: dict(), help="""
Specify the formatting scheme for data source columns, e.g.
.. code-block:: python
tool.formatters = {"@date": "datetime"}
will cause format specifications for the "date" column to be interpreted
according to the "datetime" formatting scheme. The following schemes are
available:
:"numeral":
Provides a wide variety of formats for numbers, currency, bytes, times,
and percentages. The full set of formats can be found in the
|NumeralTickFormatter| reference documentation.
:"datetime":
Provides formats for date and time values. The full set of formats is
listed in the |DatetimeTickFormatter| reference documentation.
:"printf":
Provides formats similar to C-style "printf" type specifiers. See the
|PrintfTickFormatter| reference documentation for complete details.
If no formatter is specified for a column name, the default ``"numeral"``
formatter is assumed.
.. |NumeralTickFormatter| replace:: :class:`~bokeh.models.formatters.NumeralTickFormatter`
.. |DatetimeTickFormatter| replace:: :class:`~bokeh.models.formatters.DatetimeTickFormatter`
.. |PrintfTickFormatter| replace:: :class:`~bokeh.models.formatters.PrintfTickFormatter`
""")
filters = Dict(String, Either(Instance(CustomJS), List(Instance(CustomJS))), default={}, help="""
Allows filtering hover results using a ``CustomJS`` callback.
An example of a simple filter function:
.. code::
filter = '''
export default (args, tool, {value: x, row, index, field, data_source, vars}) => {
return x >= 0
}
'''
HoverTool(filters={"@x": CustomJS(args={}, code=filter)})
""")
sort_by = Nullable(
Either(
String,
List(
Either(String, Tuple(String, Either(Enum(SortDirection), Enum(1, -1)))),
),
),
)(default=None, help="""
Allows sorting hover results by a field or a sequence of fields.
Additionally sort direction can be provided when using the sequence form, even if
providing a single field. The default sort order is based on data index and/or
proximity to the hit point.
""")
limit = Nullable(Positive(Int), default=None, help="""
Limit the number the number of data points for which tooltips will be showed.
By default ``HoverTool`` will show tooltips for all hit data points.
""")
mode = Enum("mouse", "hline", "vline", help="""
Whether to consider hover pointer as a point (x/y values), or a
span on h or v directions.
""")
muted_policy = Enum("show", "ignore",
default="show", help="""
Whether to avoid showing tooltips on muted glyphs.
""")
point_policy = Enum("snap_to_data", "follow_mouse", "none", help="""
Whether the tooltip position should snap to the "center" (or other anchor)
position of the associated glyph, or always follow the current mouse cursor
position.
""")
line_policy = Enum("prev", "next", "nearest", "interp", "none",
default="nearest", help="""
Specifies where the tooltip will be positioned when hovering over line
glyphs:
:"prev": between the nearest two adjacent line points, positions the
tooltip at the point with the lower ("previous") index
:"next": between the nearest two adjacent line points, positions the
tooltip at the point with the higher ("next") index
:"nearest": between the nearest two adjacent line points, positions the
tooltip on the point that is nearest to the mouse cursor location
:"interp": positions the tooltip at an interpolated point on the segment
joining the two nearest adjacent line points.
:"none": positions the tooltip directly under the mouse cursor location
""")
anchor = Enum(Anchor, default="center", help="""
If point policy is set to `"snap_to_data"`, `anchor` defines the attachment
point of a tooltip. The default is to attach to the center of a glyph.
""")
attachment = Enum(TooltipAttachment, help="""
Whether the tooltip should be displayed to the left or right of the cursor
position or above or below it, or if it should be automatically placed
in the horizontal or vertical dimension.
""")
show_arrow = Bool(default=True, help="""
Whether tooltip's arrow should be shown.
""")
DEFAULT_HELP_TIP = "Click the question mark to learn more about Bokeh plot tools."
DEFAULT_HELP_URL = "https://docs.bokeh.org/en/latest/docs/user_guide/interaction/tools.html"
| HoverTool |
python | wandb__wandb | wandb/vendor/pygments/util.py | {
"start": 9123,
"end": 11900
} | class ____(object):
"""Generic class to defer some work.
Handled specially in RegexLexerMeta, to support regex string construction at
first use.
"""
def get(self):
raise NotImplementedError
def guess_decode(text):
"""Decode *text* with guessed encoding.
First try UTF-8; this should fail for non-UTF-8 encodings.
Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
try:
text = text.decode('utf-8')
return text, 'utf-8'
except UnicodeDecodeError:
try:
import locale
prefencoding = locale.getpreferredencoding()
text = text.decode()
return text, prefencoding
except (UnicodeDecodeError, LookupError):
text = text.decode('latin1')
return text, 'latin1'
def guess_decode_from_terminal(text, term):
"""Decode *text* coming from terminal *term*.
First try the terminal encoding, if given.
Then try UTF-8. Then try the preferred locale encoding.
Fall back to latin-1, which always works.
"""
if getattr(term, 'encoding', None):
try:
text = text.decode(term.encoding)
except UnicodeDecodeError:
pass
else:
return text, term.encoding
return guess_decode(text)
def terminal_encoding(term):
"""Return our best guess of encoding for the given *term*."""
if getattr(term, 'encoding', None):
return term.encoding
import locale
return locale.getpreferredencoding()
# Python 2/3 compatibility
if sys.version_info < (3, 0):
unichr = unichr
xrange = xrange
string_types = (str, unicode)
text_type = unicode
u_prefix = 'u'
iteritems = dict.iteritems
itervalues = dict.itervalues
import StringIO
import cStringIO
# unfortunately, io.StringIO in Python 2 doesn't accept str at all
StringIO = StringIO.StringIO
BytesIO = cStringIO.StringIO
else:
unichr = chr
xrange = range
string_types = (str,)
text_type = str
u_prefix = ''
iteritems = dict.items
itervalues = dict.values
from io import StringIO, BytesIO, TextIOWrapper
class UnclosingTextIOWrapper(TextIOWrapper):
# Don't close underlying buffer on destruction.
def close(self):
self.flush()
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
for slots_var in orig_vars.get('__slots__', ()):
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
| Future |
python | doocs__leetcode | solution/3400-3499/3431.Minimum Unlocked Indices to Sort Nums/Solution.py | {
"start": 0,
"end": 591
} | class ____:
def minUnlockedIndices(self, nums: List[int], locked: List[int]) -> int:
n = len(nums)
first2 = first3 = n
last1 = last2 = -1
for i, x in enumerate(nums):
if x == 1:
last1 = i
elif x == 2:
first2 = min(first2, i)
last2 = i
else:
first3 = min(first3, i)
if first3 < last1:
return -1
return sum(
st and (first2 <= i < last1 or first3 <= i < last2)
for i, st in enumerate(locked)
)
| Solution |
python | astropy__astropy | astropy/io/ascii/latex.py | {
"start": 2339,
"end": 3614
} | class ____(core.BaseSplitter):
"""Split LaTeX table data. Default delimiter is `&`."""
delimiter = "&"
def __call__(self, lines: list[str]) -> Generator[list[str], None, None]:
last_line = RE_COMMENT.split(lines[-1])[0].strip()
if not last_line.endswith(r"\\"):
lines[-1] = last_line + r"\\"
return super().__call__(lines)
def process_line(self, line: str) -> str:
"""Remove whitespace at the beginning or end of line. Also remove
\\ at end of line.
"""
line = RE_COMMENT.split(line)[0].strip()
if not line.endswith(r"\\"):
raise core.InconsistentTableError(
r"Lines in LaTeX table have to end with \\"
)
return line.removesuffix(r"\\")
def process_val(self, val: str) -> str:
"""Remove whitespace and {} at the beginning or end of value."""
val = val.strip()
if val and (val[0] == "{") and (val[-1] == "}"):
val = val[1:-1]
return val
def join(self, vals: list[str]) -> str:
"""Join values together and add a few extra spaces for readability."""
delimiter = " " + self.delimiter + " "
return delimiter.join(x.strip() for x in vals) + r" \\"
| LatexSplitter |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 507518,
"end": 513591
} | class ____(BinopNode):
# Binary operation taking numeric arguments.
infix = True
overflow_check = False
overflow_bit_node = None
def analyse_c_operation(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.type = self.compute_c_result_type(type1, type2)
if not self.type:
self.type_error()
return
if self.type.is_complex:
self.infix = False
if (self.type.is_int
and env.directives['overflowcheck']
and self.operator in self.overflow_op_names):
if (self.operator in ('+', '*')
and self.operand1.has_constant_result()
and not self.operand2.has_constant_result()):
self.operand1, self.operand2 = self.operand2, self.operand1
self.overflow_check = True
self.overflow_fold = env.directives['overflowcheck.fold']
self.func = self.type.overflow_check_binop(
self.overflow_op_names[self.operator],
env,
const_rhs = self.operand2.has_constant_result())
self.is_temp = True
if not self.infix or (type1.is_numeric and type2.is_numeric):
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
def compute_c_result_type(self, type1, type2):
if self.c_types_okay(type1, type2):
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
if widest_type is PyrexTypes.c_bint_type:
if self.operator not in '|^&':
# False + False == 0 # not False!
widest_type = PyrexTypes.c_int_type
else:
widest_type = PyrexTypes.widest_numeric_type(
widest_type, PyrexTypes.c_int_type)
return widest_type
else:
return None
def infer_builtin_types_operation(self, type1, type2):
if type1.is_builtin_type:
return PyrexTypes.result_type_of_builtin_operation(type1, type2)
else:
return PyrexTypes.result_type_of_builtin_operation(type2, type1)
def may_be_none(self):
if self.type and self.type.is_builtin_type:
# if we know the result type, we know the operation, so it can't be None
return False
type1 = self.operand1.type
type2 = self.operand2.type
if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
# XXX: I can't think of any case where a binary operation
# on builtin types evaluates to None - add a special case
# here if there is one.
return False
return super().may_be_none()
def get_constant_c_result_code(self):
value1 = self.operand1.get_constant_c_result_code()
value2 = self.operand2.get_constant_c_result_code()
if value1 and value2:
return "(%s %s %s)" % (value1, self.operator, value2)
else:
return None
def c_types_okay(self, type1, type2):
#print "NumBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_numeric or type1.is_enum) \
and (type2.is_numeric or type2.is_enum)
def generate_evaluation_code(self, code):
if self.overflow_check:
self.overflow_bit_node = self
self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = 0;" % self.overflow_bit)
super().generate_evaluation_code(code)
if self.overflow_check:
code.putln("if (unlikely(%s)) {" % self.overflow_bit)
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
code.putln(code.error_goto(self.pos))
code.putln("}")
code.funcstate.release_temp(self.overflow_bit)
def calculate_result_code(self):
if self.overflow_bit_node is not None:
return "%s(%s, %s, &%s)" % (
self.func,
self.operand1.result(),
self.operand2.result(),
self.overflow_bit_node.overflow_bit)
elif self.type.is_cpp_class or self.infix:
if is_pythran_expr(self.type):
result1, result2 = self.operand1.pythran_result(), self.operand2.pythran_result()
else:
result1, result2 = self.operand1.result(), self.operand2.result()
return "(%s %s %s)" % (result1, self.operator, result2)
else:
func = self.type.binary_op(self.operator)
if func is None:
error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
return "%s(%s, %s)" % (
func,
self.operand1.result(),
self.operand2.result())
def is_py_operation_types(self, type1, type2):
return (type1.is_unicode_char or
type2.is_unicode_char or
BinopNode.is_py_operation_types(self, type1, type2))
def py_operation_function(self, code):
function_name = self.py_functions[self.operator]
if self.inplace:
function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
return function_name
py_functions = {
"|": "PyNumber_Or",
"^": "PyNumber_Xor",
"&": "PyNumber_And",
"<<": "PyNumber_Lshift",
">>": "PyNumber_Rshift",
"+": "PyNumber_Add",
"-": "PyNumber_Subtract",
"*": "PyNumber_Multiply",
"@": "__Pyx_PyNumber_MatrixMultiply",
"/": "__Pyx_PyNumber_Divide",
"//": "PyNumber_FloorDivide",
"%": "PyNumber_Remainder",
"**": "PyNumber_Power",
}
overflow_op_names = {
"+": "add",
"-": "sub",
"*": "mul",
"<<": "lshift",
}
| NumBinopNode |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/colliding_class_names.py | {
"start": 272,
"end": 361
} | class ____:
def also_tainted_but_missing_from_analysis():
return _test_source()
| C |
python | getsentry__sentry | src/sentry/preprod/pull_request/comment_types.py | {
"start": 1507,
"end": 2002
} | class ____(BaseModel):
"""
Represents a GitHub issue comment (general PR comment).
These are comments in the main PR conversation thread.
"""
id: int
node_id: str
url: str
html_url: str
body: str
user: CommentUser | None
created_at: datetime
updated_at: datetime
issue_url: str
author_association: AuthorAssociation
body_text: str | None = None
body_html: str | None = None
reactions: CommentReactions | None = None
| IssueComment |
python | pypa__setuptools | setuptools/_vendor/wheel/vendored/packaging/_musllinux.py | {
"start": 301,
"end": 2674
} | class ____(NamedTuple):
major: int
minor: int
def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
if len(lines) < 2 or lines[0][:4] != "musl":
return None
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
if not m:
return None
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
@functools.lru_cache
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
"""Detect currently-running musl runtime version.
This is done by checking the specified executable's dynamic linking
information, and invoking the loader to parse its output for a version
string. If the loader is musl, the output would be something like::
musl libc (x86_64)
Version 1.2.2
Dynamic Program Loader
"""
try:
with open(executable, "rb") as f:
ld = ELFFile(f).interpreter
except (OSError, TypeError, ValueError):
return None
if ld is None or "musl" not in ld:
return None
proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True)
return _parse_musl_version(proc.stderr)
def platform_tags(archs: Sequence[str]) -> Iterator[str]:
"""Generate musllinux tags compatible to the current platform.
:param archs: Sequence of compatible architectures.
The first one shall be the closest to the actual architecture and be the part of
platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
The ``linux_`` prefix is assumed as a prerequisite for the current platform to
be musllinux-compatible.
:returns: An iterator of compatible musllinux tags.
"""
sys_musl = _get_musl_version(sys.executable)
if sys_musl is None: # Python not dynamically linked against musl.
return
for arch in archs:
for minor in range(sys_musl.minor, -1, -1):
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
if __name__ == "__main__": # pragma: no cover
import sysconfig
plat = sysconfig.get_platform()
assert plat.startswith("linux-"), "not linux"
print("plat:", plat)
print("musl:", _get_musl_version(sys.executable))
print("tags:", end=" ")
for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
print(t, end="\n ")
| _MuslVersion |
python | pytest-dev__pytest | testing/test_assertrewrite.py | {
"start": 72560,
"end": 74347
} | class ____:
"""
Check that verbosity also controls the string length threshold to shorten it using
ellipsis.
"""
@pytest.mark.parametrize(
"verbose, expected_size",
[
(0, DEFAULT_REPR_MAX_SIZE),
(1, DEFAULT_REPR_MAX_SIZE * 10),
(2, None),
(3, None),
],
)
def test_get_maxsize_for_saferepr(self, verbose: int, expected_size) -> None:
class FakeConfig:
def get_verbosity(self, verbosity_type: str | None = None) -> int:
return verbose
config = FakeConfig()
assert _get_maxsize_for_saferepr(cast(Config, config)) == expected_size
def test_get_maxsize_for_saferepr_no_config(self) -> None:
assert _get_maxsize_for_saferepr(None) == DEFAULT_REPR_MAX_SIZE
def create_test_file(self, pytester: Pytester, size: int) -> None:
pytester.makepyfile(
f"""
def test_very_long_string():
text = "x" * {size}
assert "hello world" in text
"""
)
def test_default_verbosity(self, pytester: Pytester) -> None:
self.create_test_file(pytester, DEFAULT_REPR_MAX_SIZE)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*xxx...xxx*"])
def test_increased_verbosity(self, pytester: Pytester) -> None:
self.create_test_file(pytester, DEFAULT_REPR_MAX_SIZE)
result = pytester.runpytest("-v")
result.stdout.no_fnmatch_line("*xxx...xxx*")
def test_max_increased_verbosity(self, pytester: Pytester) -> None:
self.create_test_file(pytester, DEFAULT_REPR_MAX_SIZE * 10)
result = pytester.runpytest("-vv")
result.stdout.no_fnmatch_line("*xxx...xxx*")
| TestReprSizeVerbosity |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/multitenancy/memcache.py | {
"start": 861,
"end": 1928
} | class ____(webapp2.RequestHandler):
"""Increments counters in the global namespace as well as in whichever
namespace is specified by the request, which is arbitrarily named 'default'
if not specified."""
def get(self, namespace="default"):
global_count = memcache.incr("counter", initial_value=0)
# Save the current namespace.
previous_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(namespace)
namespace_count = memcache.incr("counter", initial_value=0)
finally:
# Restore the saved namespace.
namespace_manager.set_namespace(previous_namespace)
self.response.write(
"Global: {}, Namespace {}: {}".format(
global_count, namespace, namespace_count
)
)
app = webapp2.WSGIApplication(
[
(r"/memcache", MemcacheCounterHandler),
(r"/memcache/(.*)", MemcacheCounterHandler),
],
debug=True,
)
# [END gae_multitenancy_memcache]
| MemcacheCounterHandler |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 193562,
"end": 195457
} | class ____(Binding):
"""
BindInput schema wrapper.
Parameters
----------
autocomplete : str
A hint for form autofill. See the `HTML autocomplete attribute
<https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/autocomplete>`__ for
additional information.
debounce : float
If defined, delays event handling until the specified milliseconds have elapsed
since the last event was fired.
element : str, :class:`Element`
An optional CSS selector string indicating the parent element to which the input
element should be added. By default, all input elements are added within the parent
container of the Vega view.
input : str
The type of input element to use. The valid values are ``"checkbox"``, ``"radio"``,
``"range"``, ``"select"``, and any other legal `HTML form input type
<https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input>`__.
name : str
By default, the signal name is used to label input elements. This ``name`` property
can be used instead to specify a custom label for the bound signal.
placeholder : str
Text that appears in the form control when it has no value set.
"""
_schema = {"$ref": "#/definitions/BindInput"}
def __init__(
self,
autocomplete: Optional[str] = Undefined,
debounce: Optional[float] = Undefined,
element: Optional[str | SchemaBase] = Undefined,
input: Optional[str] = Undefined,
name: Optional[str] = Undefined,
placeholder: Optional[str] = Undefined,
**kwds,
):
super().__init__(
autocomplete=autocomplete,
debounce=debounce,
element=element,
input=input,
name=name,
placeholder=placeholder,
**kwds,
)
| BindInput |
python | simplejson__simplejson | simplejson/tests/test_pass1.py | {
"start": 1539,
"end": 1746
} | class ____(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEqual(res, json.loads(out))
| TestPass1 |
python | django__django | tests/forms_tests/widget_tests/test_choicewidget.py | {
"start": 91,
"end": 3150
} | class ____(WidgetTest):
widget = ChoiceWidget
@property
def nested_widgets(self):
nested_widget = self.widget(
choices=(
("outer1", "Outer 1"),
('Group "1"', (("inner1", "Inner 1"), ("inner2", "Inner 2"))),
),
)
nested_widget_dict = self.widget(
choices={
"outer1": "Outer 1",
'Group "1"': {"inner1": "Inner 1", "inner2": "Inner 2"},
},
)
nested_widget_dict_tuple = self.widget(
choices={
"outer1": "Outer 1",
'Group "1"': (("inner1", "Inner 1"), ("inner2", "Inner 2")),
},
)
return (nested_widget, nested_widget_dict, nested_widget_dict_tuple)
def test_deepcopy(self):
"""
__deepcopy__() should copy all attributes properly.
"""
widget = self.widget()
obj = copy.deepcopy(widget)
self.assertIsNot(widget, obj)
self.assertEqual(widget.choices, obj.choices)
self.assertIsNot(widget.choices, obj.choices)
self.assertEqual(widget.attrs, obj.attrs)
self.assertIsNot(widget.attrs, obj.attrs)
def test_options(self):
options = list(
self.widget(choices=self.beatles).options(
"name",
["J"],
attrs={"class": "super"},
)
)
self.assertEqual(len(options), 4)
self.assertEqual(options[0]["name"], "name")
self.assertEqual(options[0]["value"], "J")
self.assertEqual(options[0]["label"], "John")
self.assertEqual(options[0]["index"], "0")
self.assertIs(options[0]["selected"], True)
# Template-related attributes
self.assertEqual(options[1]["name"], "name")
self.assertEqual(options[1]["value"], "P")
self.assertEqual(options[1]["label"], "Paul")
self.assertEqual(options[1]["index"], "1")
self.assertIs(options[1]["selected"], False)
def test_optgroups_integer_choices(self):
"""The option 'value' is the same type as what's in `choices`."""
groups = list(
self.widget(choices=[[0, "choice text"]]).optgroups("name", ["vhs"])
)
label, options, index = groups[0]
self.assertEqual(options[0]["value"], 0)
def test_renders_required_when_possible_to_select_empty_field_none(self):
widget = self.widget(choices=[(None, "select please"), ("P", "Paul")])
self.assertIs(widget.use_required_attribute(initial=None), True)
def test_renders_required_when_possible_to_select_empty_field_list(self):
widget = self.widget(choices=[["", "select please"], ["P", "Paul"]])
self.assertIs(widget.use_required_attribute(initial=None), True)
def test_renders_required_when_possible_to_select_empty_field_str(self):
widget = self.widget(choices=[("", "select please"), ("P", "Paul")])
self.assertIs(widget.use_required_attribute(initial=None), True)
| ChoiceWidgetTest |
python | pytorch__pytorch | torch/_dynamo/variables/misc.py | {
"start": 24118,
"end": 24202
} | class ____(VariableTracker):
"""
It could be anything!
"""
| UnknownVariable |
python | numba__numba | numba/cuda/cudadecl.py | {
"start": 2041,
"end": 2232
} | class ____(CallableTemplate):
key = cuda.const.array_like
def generic(self):
def typer(ndarray):
return ndarray
return typer
@register
| Cuda_const_array_like |
python | getsentry__sentry | src/sentry/issues/endpoints/organization_issue_metrics.py | {
"start": 7323,
"end": 7392
} | class ____(TypedDict):
timestamp: float
value: float
| TimeSeries |
python | tornadoweb__tornado | tornado/test/concurrent_test.py | {
"start": 2984,
"end": 3364
} | class ____:
def __init__(self, port):
self.port = port
def process_response(self, data):
m = re.match("(.*)\t(.*)\n", to_unicode(data))
if m is None:
raise Exception("did not match")
status, message = m.groups()
if status == "ok":
return message
else:
raise CapError(message)
| BaseCapClient |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_origin.py | {
"start": 14745,
"end": 16372
} | class ____(LegacyNamedTupleMixin):
"""Serializable representation of an ExternalRepository that can be used to
uniquely it or reload it in across process boundaries.
"""
code_location_origin: CodeLocationOrigin
repository_name: str
def get_id(self) -> str:
return create_snapshot_id(self)
def get_selector_id(self) -> str:
return create_snapshot_id(self.get_selector())
@cached_method
def get_selector(self) -> "RepositorySelector":
from dagster._core.definitions.selector import RepositorySelector
return RepositorySelector(
location_name=self.code_location_origin.location_name,
repository_name=self.repository_name,
)
def get_label(self) -> str:
return f"{self.repository_name}@{self.code_location_origin.location_name}"
def get_job_origin(self, job_name: str) -> "RemoteJobOrigin":
return RemoteJobOrigin(repository_origin=self, job_name=job_name)
def get_instigator_origin(self, instigator_name: str) -> "RemoteInstigatorOrigin":
return RemoteInstigatorOrigin(repository_origin=self, instigator_name=instigator_name)
def get_partition_set_origin(self, partition_set_name: str) -> "RemotePartitionSetOrigin":
return RemotePartitionSetOrigin(
repository_origin=self, partition_set_name=partition_set_name
)
@whitelist_for_serdes(
storage_name="ExternalPipelineOrigin",
storage_field_names={
"repository_origin": "external_repository_origin",
"job_name": "pipeline_name",
},
)
@record(kw_only=False)
| RemoteRepositoryOrigin |
python | facebook__pyre-check | tools/upgrade/repository.py | {
"start": 329,
"end": 1437
} | class ____:
MIGRATION_SUMMARY: str = (
"Migrating buck integration to use configurations.\n "
"For more information about this migration, please see: "
"https://fb.workplace.com/groups/295311271085134/permalink/552700215346237/"
)
def commit_message(
self,
title: str,
summary_override: Optional[str] = None,
reviewers: Optional[List[str]] = None,
) -> str:
return ""
def add_paths(self, paths: List[Path]) -> None:
pass
def remove_paths(self, paths: List[Path]) -> None:
pass
def commit_changes(
self,
commit: bool,
title: Optional[str] = None,
summary: Optional[str] = None,
reviewers: Optional[List[str]] = None,
ignore_failures: bool = False,
set_dependencies: bool = True,
) -> None:
pass
def revert_all(self, remove_untracked: bool) -> None:
pass
def format(self) -> bool:
return False
def force_format(self, paths: List[str]) -> None:
subprocess.check_call(["pyfmt", *paths])
| Repository |
python | plotly__plotly.py | plotly/graph_objs/layout/ternary/aaxis/_title.py | {
"start": 235,
"end": 2875
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.ternary.aaxis"
_path_str = "layout.ternary.aaxis.title"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this axis' title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.ternary.aaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.ternary.aaxis.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of this axis.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this axis' title font.
text
Sets the title of this axis.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.ternary.aaxis.Title`
font
Sets this axis' title font.
text
Sets the title of this axis.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.ternary.aaxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.ternary.aaxis.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | getsentry__sentry | src/sentry/integrations/utils/codecov.py | {
"start": 4703,
"end": 4872
} | class ____(TypedDict):
repository: Repository
# Config is a serialized RepositoryProjectPathConfig
config: Any
outcome: RepositoryLinkOutcome
| CodecovConfig |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_dialect.py | {
"start": 5280,
"end": 8633
} | class ____(fixtures.TestBase):
__backend__ = True
__only_on__ = "oracle"
@testing.combinations(
(
"db is not connected",
None,
True,
),
(
"ORA-1234 fake error",
1234,
False,
),
(
"ORA-03114: not connected to ORACLE",
3114,
True,
),
(
"DPI-1010: not connected",
None,
True,
),
(
"DPI-1010: make sure we read the code",
None,
True,
),
(
"DPI-1080: connection was closed by ORA-3113",
None,
True,
),
(
"DPI-1234: some other DPI error",
None,
False,
),
)
@testing.only_on(["oracle+cx_oracle", "oracle+oracledb"])
def test_is_disconnect(self, message, code, expected):
dialect = testing.db.dialect
exception_obj = dialect.dbapi.InterfaceError()
exception_obj.args = (Exception(message),)
exception_obj.args[0].code = code
eq_(dialect.is_disconnect(exception_obj, None, None), expected)
def test_hypothetical_not_implemented_isolation_level(self):
engine = engines.testing_engine()
def get_isolation_level(connection):
raise NotImplementedError
with mock.patch.object(
engine.dialect, "get_isolation_level", get_isolation_level
):
conn = engine.connect()
# for NotImplementedError we get back None. But the
# cx_Oracle dialect does not raise this.
eq_(conn.dialect.default_isolation_level, None)
dbapi_conn = conn.connection.dbapi_connection
eq_(
testing.db.dialect.get_isolation_level(dbapi_conn),
"READ COMMITTED",
)
conn.close()
def test_graceful_failure_isolation_level_not_available(self):
engine = engines.testing_engine()
def get_isolation_level(connection):
raise exc.DBAPIError(
"get isolation level",
{},
engine.dialect.dbapi.Error("isolation level failed"),
)
with mock.patch.object(
engine.dialect, "get_isolation_level", get_isolation_level
):
conn = engine.connect()
eq_(conn.dialect.default_isolation_level, "READ COMMITTED")
# test that we can use isolation level setting and that it
# reverts for "real" back to READ COMMITTED even though we
# can't read it
dbapi_conn = conn.connection.dbapi_connection
conn = conn.execution_options(isolation_level="SERIALIZABLE")
eq_(
testing.db.dialect.get_isolation_level(dbapi_conn),
"SERIALIZABLE",
)
conn.close()
eq_(
testing.db.dialect.get_isolation_level(dbapi_conn),
"READ COMMITTED",
)
with engine.connect() as conn:
assert_raises_message(
exc.DBAPIError,
r".*isolation level failed.*",
conn.get_isolation_level,
)
| DialectWBackendTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.