language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/interleave_test.py | {
"start": 17134,
"end": 25288
} | class ____(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase
):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(
symbolic_checkpoint=[False, True],
cycle_length=2,
block_length=[1, 3],
num_parallel_calls=[None, 1, 2])))
def test(self, verify_fn, symbolic_checkpoint, cycle_length, block_length,
num_parallel_calls):
num_repeats = 2
input_values = np.array([2, 3], dtype=np.int64)
def _build_dataset():
dataset = dataset_ops.Dataset.from_tensor_slices(input_values)
dataset = dataset.repeat(num_repeats)
dataset = dataset.interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x), cycle_length,
block_length, num_parallel_calls)
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
dataset = dataset.with_options(options)
return dataset
num_outputs = np.sum(input_values) * num_repeats
verify_fn(self, _build_dataset, num_outputs)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(
skip=[0, 1, 2, 3],
),
)
)
def testWithSkip(self, verify_fn, skip):
def _build_dataset():
dataset = dataset_ops.Dataset.range(4)
dataset = dataset.interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(3),
cycle_length=2,
block_length=1,
num_parallel_calls=None,
)
dataset = dataset.skip(skip)
return dataset
num_outputs = 4 * 3 - skip
verify_fn(self, _build_dataset, num_outputs)
@combinations.generate(test_base.v2_eager_only_combinations())
def testDelayedPurgeCheckpointAtTheSameCycleIdx(self):
"""Tests delayed checkpoint purging at the same cycle index works correctly.
This would crash if we were to use`cycle_index_` as part
of the prefix:
[0] [1]
1(prefix: ::Interleave[0] 2(prefix: ::Interleave[1])
EOF(delete ::Interleave[0]) EOF(delete ::Interleave[1])
3(prefix ::Interleave[2]) 4
^
(should be 2 instead of 0)
EOF EOF
If we checkpoint at the point right after 3 is generated and
restore it, restore would crash because the sub iterator
for generating 3 is incorrectly deleted due to delayed checkpoint purging.
"""
options = options_lib.Options()
options.experimental_symbolic_checkpoint = True
options.experimental_optimization.inject_prefetch = False
options.experimental_optimization.apply_default_optimizations = False
def _build_dataset():
dataset = dataset_ops.Dataset.range(4)
dataset = dataset.interleave(
lambda x: dataset_ops.Dataset.from_tensor_slices([x]),
cycle_length=2,
block_length=1,
num_parallel_calls=None,
)
dataset = dataset.with_options(options)
return dataset
dataset = _build_dataset().with_options(options)
it = dataset.as_numpy_iterator()
for _ in range(3):
next(it)
checkpoint = it.save().numpy()
expected = next(it)
restored_it = dataset.as_numpy_iterator()
restored_it.restore(checkpoint)
actual = next(restored_it)
self.assertEqual(expected, actual)
@combinations.generate(test_base.v2_eager_only_combinations())
def testWithInputThatPurgeCheckpoint(self):
"""Tests underlying `expired_prefixes` are handled correctly.
Explanation:
The input for `interleave` looks like (created by `.repeat`):
[0, |1, |2]
^ ^
| |
| expired_prefixes=["FiniteRepeat[1]"]
expired_prefixes=["FiniteRepeat[0]"]
[0] [1]
0 1 <--- expired_prefixes=["...FiniteRepeat[0]"]
EOF EOF
2 <----- Tests the previous checkpoint stored at this index
should not have an effect on the new checkpoint.
EOF
"""
options = options_lib.Options()
options.experimental_symbolic_checkpoint = True
options.experimental_optimization.inject_prefetch = False
options.experimental_optimization.apply_default_optimizations = False
def carefully_designed_map(x):
if x == 0:
return dataset_ops.Dataset.from_tensor_slices([0])
elif x == 1:
return dataset_ops.Dataset.from_tensor_slices([1])
else:
return dataset_ops.Dataset.from_tensor_slices([2])
def _build_dataset():
dataset = dataset_ops.Dataset.from_tensor_slices(["does not matter"])
# Create [0, 1, 2] using repeat+enumerate+map
dataset = dataset.repeat(3)
dataset = dataset.enumerate()
dataset = dataset.map(lambda idx, x: idx)
dataset = dataset.interleave(
carefully_designed_map,
cycle_length=2,
block_length=1,
num_parallel_calls=None,
)
dataset = dataset.with_options(options)
return dataset
dataset = _build_dataset().with_options(options)
it = dataset.as_numpy_iterator()
try:
for _ in range(4):
next(it)
except StopIteration:
pass
# should not crash
it.save().numpy()
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 2]),
)
)
def testNested(self, verify_fn, num_parallel_calls):
def build_ds():
inner_ds = dataset_ops.Dataset.from_tensor_slices(range(10))
ds = dataset_ops.Dataset.from_tensors(inner_ds).repeat(10)
return ds.interleave(
lambda x: x, cycle_length=5, num_parallel_calls=num_parallel_calls)
verify_fn(self, build_ds, num_outputs=100)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def testSparse(self, verify_fn):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
def _build_dataset():
return dataset_ops.Dataset.range(10).map(_map_fn).interleave(
_interleave_fn, cycle_length=1)
verify_fn(self, _build_dataset, num_outputs=20)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 2]),
)
)
def testSymbolicUnimplemented(self, verify_fn, num_parallel_calls):
if sys.platform == "darwin":
self.skipTest(
"MacOS does not support symbolic checkpointing."
) # b/284304023
def fn(x):
del x
dataset = dataset_ops.Dataset.range(7)
dataset = dataset.window(3, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda x: x)
return dataset
def build_ds():
dataset = dataset_ops.Dataset.range(2)
dataset = dataset.interleave(
fn,
cycle_length=3,
num_parallel_calls=num_parallel_calls,
)
options = options_lib.Options()
options.experimental_symbolic_checkpoint = True
dataset = dataset.with_options(options)
return dataset
with self.assertRaisesRegex(
errors.UnimplementedError,
"WindowOp does not support symbolic checkpointing.",
):
verify_fn(self, build_ds, num_outputs=30)
if __name__ == "__main__":
test.main()
| InterleaveCheckpointTest |
python | pypa__setuptools | setuptools/_distutils/command/clean.py | {
"start": 285,
"end": 2644
} | class ____(Command):
description = "clean up temporary files from 'build' command"
user_options = [
('build-base=', 'b', "base build directory [default: 'build.build-base']"),
(
'build-lib=',
None,
"build directory for all modules [default: 'build.build-lib']",
),
('build-temp=', 't', "temporary build directory [default: 'build.build-temp']"),
(
'build-scripts=',
None,
"build directory for scripts [default: 'build.build-scripts']",
),
('bdist-base=', None, "temporary directory for built distributions"),
('all', 'a', "remove all build output, not just temporary by-products"),
]
boolean_options: ClassVar[list[str]] = ['all']
def initialize_options(self):
self.build_base = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.bdist_base = None
self.all = None
def finalize_options(self):
self.set_undefined_options(
'build',
('build_base', 'build_base'),
('build_lib', 'build_lib'),
('build_scripts', 'build_scripts'),
('build_temp', 'build_temp'),
)
self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
def run(self):
# remove the build/temp.<plat> directory (unless it's already
# gone)
if os.path.exists(self.build_temp):
remove_tree(self.build_temp, dry_run=self.dry_run)
else:
log.debug("'%s' does not exist -- can't clean it", self.build_temp)
if self.all:
# remove build directories
for directory in (self.build_lib, self.bdist_base, self.build_scripts):
if os.path.exists(directory):
remove_tree(directory, dry_run=self.dry_run)
else:
log.warning("'%s' does not exist -- can't clean it", directory)
# just for the heck of it, try to remove the base build directory:
# we might have emptied it right now, but if not we don't care
if not self.dry_run:
try:
os.rmdir(self.build_base)
log.info("removing '%s'", self.build_base)
except OSError:
pass
| clean |
python | huggingface__transformers | src/transformers/models/deepseek_v2/modeling_deepseek_v2.py | {
"start": 2111,
"end": 4020
} | class ____(nn.Module):
"""Collection of expert weights stored as 3D tensors."""
def __init__(self, config):
super().__init__()
self.num_experts = config.n_routed_experts
self.hidden_dim = config.hidden_size
self.intermediate_dim = config.moe_intermediate_size
self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
self.act_fn = ACT2FN[config.hidden_act]
def forward(
self,
hidden_states: torch.Tensor,
top_k_index: torch.Tensor,
top_k_weights: torch.Tensor,
) -> torch.Tensor:
final_hidden_states = torch.zeros_like(hidden_states)
num_experts = top_k_weights.shape[1]
with torch.no_grad():
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=num_experts + 1)
expert_mask = expert_mask.permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_idx = expert_idx[0]
if expert_idx == num_experts:
continue
_, token_idx = torch.where(expert_mask[expert_idx])
current_state = hidden_states[token_idx]
gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
current_hidden_states = self.act_fn(gate) * up
current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
current_hidden_states = current_hidden_states * top_k_weights[token_idx, expert_idx, None]
final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
return final_hidden_states
| DeepseekV2Experts |
python | ray-project__ray | python/ray/serve/tests/unit/test_application_state.py | {
"start": 49760,
"end": 58087
} | class ____:
@pytest.fixture
def info(self):
return DeploymentInfo(
route_prefix="/",
version="123",
deployment_config=DeploymentConfig(num_replicas=1),
replica_config=ReplicaConfig.create(lambda x: x),
start_time_ms=0,
deployer_job_id="",
)
def test_override_deployment_config(self, info):
config = ServeApplicationSchema(
name="default",
import_path="test.import.path",
deployments=[
DeploymentSchema(
name="A",
num_replicas=3,
max_ongoing_requests=200,
user_config={"price": "4"},
graceful_shutdown_wait_loop_s=4,
graceful_shutdown_timeout_s=40,
health_check_period_s=20,
health_check_timeout_s=60,
)
],
)
updated_infos = override_deployment_info({"A": info}, config)
updated_info = updated_infos["A"]
assert updated_info.route_prefix == "/"
assert updated_info.version == "123"
assert updated_info.deployment_config.max_ongoing_requests == 200
assert updated_info.deployment_config.user_config == {"price": "4"}
assert updated_info.deployment_config.graceful_shutdown_wait_loop_s == 4
assert updated_info.deployment_config.graceful_shutdown_timeout_s == 40
assert updated_info.deployment_config.health_check_period_s == 20
assert updated_info.deployment_config.health_check_timeout_s == 60
def test_override_autoscaling_config(self, info):
config = ServeApplicationSchema(
name="default",
import_path="test.import.path",
deployments=[
DeploymentSchema(
name="A",
autoscaling_config={
"min_replicas": 1,
"initial_replicas": 12,
"max_replicas": 79,
},
)
],
)
updated_infos = override_deployment_info({"A": info}, config)
updated_info = updated_infos["A"]
assert updated_info.route_prefix == "/"
assert updated_info.version == "123"
assert updated_info.deployment_config.autoscaling_config.min_replicas == 1
assert updated_info.deployment_config.autoscaling_config.initial_replicas == 12
assert updated_info.deployment_config.autoscaling_config.max_replicas == 79
def test_override_route_prefix(self, info):
config = ServeApplicationSchema(
name="default",
import_path="test.import.path",
route_prefix="/bob",
deployments=[
DeploymentSchema(
name="A",
)
],
)
updated_infos = override_deployment_info({"A": info}, config)
updated_info = updated_infos["A"]
assert updated_info.route_prefix == "/bob"
assert updated_info.version == "123"
def test_override_ray_actor_options_1(self, info):
"""Test runtime env specified in config at deployment level."""
config = ServeApplicationSchema(
name="default",
import_path="test.import.path",
deployments=[
DeploymentSchema(
name="A",
ray_actor_options={"runtime_env": {"working_dir": "s3://B"}},
)
],
)
updated_infos = override_deployment_info({"A": info}, config)
updated_info = updated_infos["A"]
assert updated_info.route_prefix == "/"
assert updated_info.version == "123"
assert (
updated_info.replica_config.ray_actor_options["runtime_env"]["working_dir"]
== "s3://B"
)
def test_override_ray_actor_options_2(self, info):
"""Test application runtime env is propagated to deployments."""
config = ServeApplicationSchema(
name="default",
import_path="test.import.path",
runtime_env={"working_dir": "s3://C"},
deployments=[
DeploymentSchema(
name="A",
)
],
)
updated_infos = override_deployment_info({"A": info}, config)
updated_info = updated_infos["A"]
assert updated_info.route_prefix == "/"
assert updated_info.version == "123"
assert (
updated_info.replica_config.ray_actor_options["runtime_env"]["working_dir"]
== "s3://C"
)
def test_override_ray_actor_options_3(self, info):
"""If runtime env is specified in the config at the deployment level, it should
override the application-level runtime env.
"""
config = ServeApplicationSchema(
name="default",
import_path="test.import.path",
runtime_env={"working_dir": "s3://C"},
deployments=[
DeploymentSchema(
name="A",
ray_actor_options={"runtime_env": {"working_dir": "s3://B"}},
)
],
)
updated_infos = override_deployment_info({"A": info}, config)
updated_info = updated_infos["A"]
assert updated_info.route_prefix == "/"
assert updated_info.version == "123"
assert (
updated_info.replica_config.ray_actor_options["runtime_env"]["working_dir"]
== "s3://B"
)
def test_override_ray_actor_options_4(self):
"""If runtime env is specified for the deployment in code, it should override
the application-level runtime env.
"""
info = DeploymentInfo(
route_prefix="/",
version="123",
deployment_config=DeploymentConfig(num_replicas=1),
replica_config=ReplicaConfig.create(
lambda x: x,
ray_actor_options={"runtime_env": {"working_dir": "s3://A"}},
),
start_time_ms=0,
deployer_job_id="",
)
config = ServeApplicationSchema(
name="default",
import_path="test.import.path",
runtime_env={"working_dir": "s3://C"},
deployments=[
DeploymentSchema(
name="A",
)
],
)
updated_infos = override_deployment_info({"A": info}, config)
updated_info = updated_infos["A"]
assert updated_info.route_prefix == "/"
assert updated_info.version == "123"
assert (
updated_info.replica_config.ray_actor_options["runtime_env"]["working_dir"]
== "s3://A"
)
def test_override_ray_actor_options_5(self):
"""If runtime env is specified in all three places:
- In code
- In the config at the deployment level
- In the config at the application level
The one specified in the config at the deployment level should take precedence.
"""
info = DeploymentInfo(
route_prefix="/",
version="123",
deployment_config=DeploymentConfig(num_replicas=1),
replica_config=ReplicaConfig.create(
lambda x: x,
ray_actor_options={"runtime_env": {"working_dir": "s3://A"}},
),
start_time_ms=0,
deployer_job_id="",
)
config = ServeApplicationSchema(
name="default",
import_path="test.import.path",
runtime_env={"working_dir": "s3://C"},
deployments=[
DeploymentSchema(
name="A",
ray_actor_options={"runtime_env": {"working_dir": "s3://B"}},
)
],
)
updated_infos = override_deployment_info({"A": info}, config)
updated_info = updated_infos["A"]
assert updated_info.route_prefix == "/"
assert updated_info.version == "123"
assert (
updated_info.replica_config.ray_actor_options["runtime_env"]["working_dir"]
== "s3://B"
)
| TestOverrideDeploymentInfo |
python | pytorch__pytorch | torch/_dynamo/variables/misc.py | {
"start": 65990,
"end": 66710
} | class ____(VariableTracker):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
def __repr__(self) -> str:
return "NullVariable"
def reconstruct(self, codegen: "PyCodegen"):
if sys.version_info < (3, 11):
unimplemented(
gb_type="cannot reconstruct NullVariable in Python < 3.11",
context="",
explanation="Attempted to generate PUSH_NULL instruction in Python < 3.11; "
"where this instruction does not exist.",
hints=[
*graph_break_hints.DYNAMO_BUG,
],
)
codegen.append_output(create_instruction("PUSH_NULL"))
| NullVariable |
python | RaRe-Technologies__gensim | gensim/similarities/docsim.py | {
"start": 40411,
"end": 43854
} | class ____(interfaces.SimilarityABC):
"""Compute negative WMD similarity against a corpus of documents.
Check out `the Gallery <https://radimrehurek.com/gensim/auto_examples/tutorials/run_wmd.html>`__
for more examples.
When using this code, please consider citing the following papers:
* `Rémi Flamary et al. "POT: Python Optimal Transport"
<https://jmlr.org/papers/v22/20-451.html>`_
* `Matt Kusner et al. "From Word Embeddings To Document Distances"
<http://proceedings.mlr.press/v37/kusnerb15.pdf>`_
Example
-------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts
>>> from gensim.models import Word2Vec
>>> from gensim.similarities import WmdSimilarity
>>>
>>> model = Word2Vec(common_texts, vector_size=20, min_count=1) # train word-vectors
>>>
>>> index = WmdSimilarity(common_texts, model.wv)
>>> # Make query.
>>> query = ['trees']
>>> sims = index[query]
"""
def __init__(self, corpus, kv_model, num_best=None, chunksize=256):
"""
Parameters
----------
corpus: iterable of list of str
A list of documents, each of which is a list of tokens.
kv_model: :class:`~gensim.models.keyedvectors.KeyedVectors`
A set of KeyedVectors
num_best: int, optional
Number of results to retrieve.
chunksize : int, optional
Size of chunk.
"""
self.corpus = corpus
self.wv = kv_model
self.num_best = num_best
self.chunksize = chunksize
# Normalization of features is not possible, as corpus is a list (of lists) of strings.
self.normalize = False
# index is simply an array from 0 to size of corpus.
self.index = numpy.arange(len(corpus))
def __len__(self):
"""Get size of corpus."""
return len(self.corpus)
def get_similarities(self, query):
"""Get similarity between `query` and this index.
Warnings
--------
Do not use this function directly; use the `self[query]` syntax instead.
Parameters
----------
query : {list of str, iterable of list of str}
Document or collection of documents.
Return
------
:class:`numpy.ndarray`
Similarity matrix.
"""
if isinstance(query, numpy.ndarray):
# Convert document indexes to actual documents.
query = [self.corpus[i] for i in query]
if not query or not isinstance(query[0], list):
query = [query]
n_queries = len(query)
result = []
for qidx in range(n_queries):
# Compute similarity for each query.
qresult = [self.wv.wmdistance(document, query[qidx]) for document in self.corpus]
qresult = numpy.array(qresult)
qresult = 1. / (1. + qresult) # Similarity is the negative of the distance.
# Append single query result to list of all results.
result.append(qresult)
if len(result) == 1:
# Only one query.
result = result[0]
else:
result = numpy.array(result)
return result
def __str__(self):
return "%s<%i docs, %i features>" % (self.__class__.__name__, len(self), self.wv.vectors.shape[1])
| WmdSimilarity |
python | Lightning-AI__lightning | tests/tests_pytorch/overrides/test_distributed.py | {
"start": 988,
"end": 3650
} | class ____(LightningModule):
def setup(self, stage: str) -> None:
self.layer = torch.nn.Linear(1, 1)
weights = self.layer.weight.item(), self.layer.bias.item()
self.rank_0_weights = self.trainer.strategy.broadcast(weights)
def test_step(self, batch, batch_idx):
current = self.layer.weight.item(), self.layer.bias.item()
assert self.rank_0_weights == current
gathered = self.all_gather(current)
# the weights have been synced
assert all(torch.all(t == t[0]) for t in gathered), gathered
@RunIf(standalone=True)
def test_params_synced_during_nonfit():
model = MyModel()
trainer = Trainer(
barebones=True,
devices=2,
accelerator="cpu",
strategy="ddp",
)
trainer.test(model, [0])
@pytest.mark.parametrize("shuffle", [False, True])
def test_unrepeated_distributed_sampler(shuffle):
"""Test each rank will receive a different number of elements."""
seed_everything(42)
world_size = 4
samplers = []
dataset = range(103)
for rank in range(world_size):
samplers.append(UnrepeatedDistributedSampler(dataset, rank=rank, num_replicas=world_size, shuffle=shuffle))
indices = [list(s) for s in samplers]
assert len(indices[0]) == 26
assert len(indices[1]) == 26
assert len(indices[2]) == 26
assert len(indices[3]) == 25
assert indices[0][-1] == 18 if shuffle else 100
assert indices[1][-1] == 30 if shuffle else 101
assert indices[2][-1] == 29 if shuffle else 102
assert indices[3][-1] == 35 if shuffle else 99
def test_index_batch_sampler():
"""Test `IndexBatchSampler` properly extracts indices."""
dataset = range(15)
sampler = SequentialSampler(dataset)
batch_sampler = BatchSampler(sampler, 3, False)
index_batch_sampler = _IndexBatchSamplerWrapper(batch_sampler)
assert batch_sampler.batch_size == index_batch_sampler.batch_size
assert batch_sampler.drop_last == index_batch_sampler.drop_last
assert batch_sampler.sampler is sampler
assert index_batch_sampler.sampler is sampler
assert list(index_batch_sampler) == index_batch_sampler.seen_batch_indices
assert list(index_batch_sampler) == list(batch_sampler)
assert isinstance(index_batch_sampler, Iterable)
assert has_len(index_batch_sampler)
iterator = iter(index_batch_sampler)
assert index_batch_sampler.seen_batch_indices == []
b0 = next(iterator)
assert b0 == [0, 1, 2]
assert index_batch_sampler.seen_batch_indices == [b0]
b1 = next(iterator)
assert b1 == [3, 4, 5]
assert index_batch_sampler.seen_batch_indices == [b0, b1]
| MyModel |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ROI.py | {
"start": 1196,
"end": 58617
} | class ____(GraphicsObject):
"""
Generic region-of-interest widget.
Can be used for implementing many types of selection box with
rotate/translate/scale handles.
ROIs can be customized to have a variety of shapes (by subclassing or using
any of the built-in subclasses) and any combination of draggable handles
that allow the user to manipulate the ROI.
Default mouse interaction:
* Left drag moves the ROI
* Left drag + Ctrl moves the ROI with position snapping
* Left drag + Alt rotates the ROI
* Left drag + Alt + Ctrl rotates the ROI with angle snapping
* Left drag + Shift scales the ROI
* Left drag + Shift + Ctrl scales the ROI with size snapping
In addition to the above interaction modes, it is possible to attach any
number of handles to the ROI that can be dragged to change the ROI in
various ways (see the ROI.add____Handle methods).
================ ===========================================================
**Arguments**
pos (length-2 sequence) Indicates the position of the ROI's
origin. For most ROIs, this is the lower-left corner of
its bounding rectangle.
size (length-2 sequence) Indicates the width and height of the
ROI.
angle (float) The rotation of the ROI in degrees. Default is 0.
invertible (bool) If True, the user may resize the ROI to have
negative width or height (assuming the ROI has scale
handles). Default is False.
maxBounds (QRect, QRectF, or None) Specifies boundaries that the ROI
cannot be dragged outside of by the user. Default is None.
snapSize (float) The spacing of snap positions used when *scaleSnap*
or *translateSnap* are enabled. Default is 1.0.
scaleSnap (bool) If True, the width and height of the ROI are forced
to be integer multiples of *snapSize* when being resized
by the user. Default is False.
translateSnap (bool) If True, the x and y positions of the ROI are forced
to be integer multiples of *snapSize* when being resized
by the user. Default is False.
rotateSnap (bool) If True, the ROI angle is forced to a multiple of
the ROI's snap angle (default is 15 degrees) when rotated
by the user. Default is False.
parent (QGraphicsItem) The graphics item parent of this ROI. It
is generally not necessary to specify the parent.
pen (QPen or argument to pg.mkPen) The pen to use when drawing
the shape of the ROI.
hoverPen (QPen or argument to mkPen) The pen to use while the
mouse is hovering over the ROI shape.
handlePen (QPen or argument to mkPen) The pen to use when drawing
the ROI handles.
handleHoverPen (QPen or argument to mkPen) The pen to use while the mouse
is hovering over an ROI handle.
movable (bool) If True, the ROI can be moved by dragging anywhere
inside the ROI. Default is True.
rotatable (bool) If True, the ROI can be rotated by mouse drag + ALT
resizable (bool) If True, the ROI can be resized by mouse drag +
SHIFT
removable (bool) If True, the ROI will be given a context menu with
an option to remove the ROI. The ROI emits
sigRemoveRequested when this menu action is selected.
Default is False.
antialias (bool) If True, the ROI will render using AntiAliasing,
this is what is desired in almost all cases, the option is
added for testing purposes.
Default is True
================ ===========================================================
======================= ====================================================
**Signals**
sigRegionChangeFinished Emitted when the user stops dragging the ROI (or
one of its handles) or if the ROI is changed
programatically.
sigRegionChangeStarted Emitted when the user starts dragging the ROI (or
one of its handles).
sigRegionChanged Emitted any time the position of the ROI changes,
including while it is being dragged by the user.
sigHoverEvent Emitted when the mouse hovers over the ROI.
sigClicked Emitted when the user clicks on the ROI.
Note that clicking is disabled by default to prevent
stealing clicks from objects behind the ROI. To
enable clicking, call
roi.setAcceptedMouseButtons(QtCore.Qt.MouseButton.LeftButton).
See QtWidgets.QGraphicsItem documentation for more
details.
sigRemoveRequested Emitted when the user selects 'remove' from the
ROI's context menu (if available).
======================= ====================================================
"""
sigRegionChangeFinished = QtCore.Signal(object)
sigRegionChangeStarted = QtCore.Signal(object)
sigRegionChanged = QtCore.Signal(object)
sigHoverEvent = QtCore.Signal(object)
sigClicked = QtCore.Signal(object, object)
sigRemoveRequested = QtCore.Signal(object)
def __init__(self, pos, size=Point(1, 1), angle=0.0, invertible=False,
maxBounds=None, snapSize=1.0, scaleSnap=False,
translateSnap=False, rotateSnap=False, parent=None, pen=None,
hoverPen=None, handlePen=None, handleHoverPen=None,
movable=True, rotatable=True, resizable=True, removable=False,
aspectLocked=False, antialias=True):
GraphicsObject.__init__(self, parent)
self.setAcceptedMouseButtons(QtCore.Qt.MouseButton.NoButton)
pos = Point(pos)
size = Point(size)
self.aspectLocked = aspectLocked
self.translatable = movable
self.rotatable = rotatable
self.resizable = resizable
self.removable = removable
self.menu = None
self._antialias = antialias
self.freeHandleMoved = False ## keep track of whether free handles have moved since last change signal was emitted.
self.mouseHovering = False
if pen is None:
pen = (255, 255, 255)
self.setPen(pen)
if hoverPen is None:
hoverPen = (255, 255, 0)
self.hoverPen = fn.mkPen(hoverPen)
if handlePen is None:
handlePen = (150, 255, 255)
self.handlePen = fn.mkPen(handlePen)
if handleHoverPen is None:
handleHoverPen = (255, 255, 0)
self.handleHoverPen = handleHoverPen
self.handles = []
self.state = {'pos': Point(0,0), 'size': Point(1,1), 'angle': 0} ## angle is in degrees for ease of Qt integration
self.lastState = None
self.setPos(pos)
self.setAngle(angle)
self.setSize(size)
self.setZValue(10)
self.isMoving = False
self.handleSize = 5
self.invertible = invertible
self.maxBounds = maxBounds
self.snapSize = snapSize
self.translateSnap = translateSnap
self.rotateSnap = rotateSnap
self.rotateSnapAngle = 15.0
self.scaleSnap = scaleSnap
self.scaleSnapSize = snapSize
# Implement mouse handling in a separate class to allow easier customization
self.mouseDragHandler = MouseDragHandler(self)
def getState(self):
return self.stateCopy()
def stateCopy(self):
sc = {}
sc['pos'] = Point(self.state['pos'])
sc['size'] = Point(self.state['size'])
sc['angle'] = self.state['angle']
return sc
def saveState(self):
"""Return the state of the widget in a format suitable for storing to
disk. (Points are converted to tuple)
Combined with setState(), this allows ROIs to be easily saved and
restored."""
state = {}
state['pos'] = tuple(self.state['pos'])
state['size'] = tuple(self.state['size'])
state['angle'] = self.state['angle']
return state
def setState(self, state, update=True):
"""
Set the state of the ROI from a structure generated by saveState() or
getState().
"""
self.setPos(state['pos'], update=False)
self.setSize(state['size'], update=False)
self.setAngle(state['angle'], update=update)
def setZValue(self, z):
QtWidgets.QGraphicsItem.setZValue(self, z)
for h in self.handles:
h['item'].setZValue(z+1)
def parentBounds(self):
"""
Return the bounding rectangle of this ROI in the coordinate system
of its parent.
"""
return self.mapToParent(self.boundingRect()).boundingRect()
def setPen(self, *args, **kwargs):
"""
Set the pen to use when drawing the ROI shape.
For arguments, see :func:`mkPen <pyqtgraph.mkPen>`.
"""
self.pen = fn.mkPen(*args, **kwargs)
self.currentPen = self.pen
self.update()
def size(self):
"""Return the size (w,h) of the ROI."""
return self.getState()['size']
def pos(self):
"""Return the position (x,y) of the ROI's origin.
For most ROIs, this will be the lower-left corner."""
return self.getState()['pos']
def angle(self):
"""Return the angle of the ROI in degrees."""
return self.getState()['angle']
def setPos(self, pos, y=None, update=True, finish=True):
"""Set the position of the ROI (in the parent's coordinate system).
Accepts either separate (x, y) arguments or a single :class:`Point` or
``QPointF`` argument.
By default, this method causes both ``sigRegionChanged`` and
``sigRegionChangeFinished`` to be emitted. If *finish* is False, then
``sigRegionChangeFinished`` will not be emitted. You can then use
stateChangeFinished() to cause the signal to be emitted after a series
of state changes.
If *update* is False, the state change will be remembered but not processed and no signals
will be emitted. You can then use stateChanged() to complete the state change. This allows
multiple change functions to be called sequentially while minimizing processing overhead
and repeated signals. Setting ``update=False`` also forces ``finish=False``.
"""
if update not in (True, False):
raise TypeError("update argument must be bool")
if y is None:
pos = Point(pos)
else:
# avoid ambiguity where update is provided as a positional argument
if isinstance(y, bool):
raise TypeError("Positional arguments to setPos() must be numerical.")
pos = Point(pos, y)
self.state['pos'] = pos
QtWidgets.QGraphicsItem.setPos(self, pos)
if update:
self.stateChanged(finish=finish)
    def setSize(self, size, center=None, centerLocal=None, snap=False, update=True, finish=True):
        """
        Set the ROI's size.

        =============== ==========================================================================
        **Arguments**
        size            (Point | QPointF | sequence) The final size of the ROI
        center          (None | Point) Optional center point around which the ROI is scaled,
                        expressed as [0-1, 0-1] over the size of the ROI.
        centerLocal     (None | Point) Same as *center*, but the position is expressed in the
                        local coordinate system of the ROI
        snap            (bool) If True, the final size is snapped to the nearest increment (see
                        ROI.scaleSnapSize)
        update          (bool) See setPos()
        finish          (bool) See setPos()
        =============== ==========================================================================
        """
        if update not in (True, False):
            raise TypeError("update argument must be bool")
        size = Point(size)
        if snap:
            size[0] = round(size[0] / self.scaleSnapSize) * self.scaleSnapSize
            size[1] = round(size[1] / self.scaleSnapSize) * self.scaleSnapSize

        if centerLocal is not None:
            oldSize = Point(self.state['size'])
            # Avoid division by zero for degenerate (zero-size) ROIs.
            oldSize[0] = 1 if oldSize[0] == 0 else oldSize[0]
            oldSize[1] = 1 if oldSize[1] == 0 else oldSize[1]
            # Convert the local-coordinate center into fractional [0-1] units.
            center = Point(centerLocal) / oldSize

        if center is not None:
            center = Point(center)
            # Reposition the ROI (before resizing) so that the chosen center
            # point keeps its parent-space location after the resize.
            c = self.mapToParent(Point(center) * self.state['size'])
            c1 = self.mapToParent(Point(center) * size)
            newPos = self.state['pos'] + c - c1
            self.setPos(newPos, update=False, finish=False)

        self.prepareGeometryChange()
        self.state['size'] = size
        if update:
            self.stateChanged(finish=finish)
    def setAngle(self, angle, center=None, centerLocal=None, snap=False, update=True, finish=True):
        """
        Set the ROI's rotation angle.

        =============== ==========================================================================
        **Arguments**
        angle           (float) The final ROI angle in degrees
        center          (None | Point) Optional center point around which the ROI is rotated,
                        expressed as [0-1, 0-1] over the size of the ROI.
        centerLocal     (None | Point) Same as *center*, but the position is expressed in the
                        local coordinate system of the ROI
        snap            (bool) If True, the final ROI angle is snapped to the nearest increment
                        (default is 15 degrees; see ROI.rotateSnapAngle)
        update          (bool) See setPos()
        finish          (bool) See setPos()
        =============== ==========================================================================
        """
        if update not in (True, False):
            raise TypeError("update argument must be bool")
        if snap is True:
            angle = round(angle / self.rotateSnapAngle) * self.rotateSnapAngle
        self.state['angle'] = angle
        tr = QtGui.QTransform() # note: only rotation is contained in the transform
        tr.rotate(angle)
        if center is not None:
            # Convert the fractional center into local ROI coordinates.
            centerLocal = Point(center) * self.state['size']
        if centerLocal is not None:
            centerLocal = Point(centerLocal)
            # rotate to new angle, keeping a specific point anchored as the center of rotation
            cc = self.mapToParent(centerLocal) - (tr.map(centerLocal) + self.state['pos'])
            self.translate(cc, update=False)
        self.setTransform(tr)
        if update:
            self.stateChanged(finish=finish)
def scale(self, s, center=None, centerLocal=None, snap=False, update=True, finish=True):
"""
Resize the ROI by scaling relative to *center*.
See setPos() for an explanation of the *update* and *finish* arguments.
"""
newSize = self.state['size'] * s
self.setSize(newSize, center=center, centerLocal=centerLocal, snap=snap, update=update, finish=finish)
    def translate(self, *args, **kargs):
        """
        Move the ROI to a new position.
        Accepts either (x, y, snap) or ([x,y], snap) as arguments
        If the ROI is bounded and the move would exceed boundaries, then the ROI
        is moved to the nearest acceptable position instead.

        *snap* can be:

        =============== ==========================================================================
        None (default)  use self.translateSnap and self.snapSize to determine whether/how to snap
        False           do not snap
        Point(w,h)      snap to rectangular grid with spacing (w,h)
        True            snap using self.snapSize (and ignoring self.translateSnap)
        =============== ==========================================================================

        Also accepts *update* and *finish* arguments (see setPos() for a description of these).
        """
        if len(args) == 1:
            pt = args[0]
        else:
            pt = args

        newState = self.stateCopy()
        newState['pos'] = newState['pos'] + pt

        snap = kargs.get('snap', None)
        if snap is None:
            snap = self.translateSnap
        if snap is not False:
            newState['pos'] = self.getSnapPosition(newState['pos'], snap=snap)

        if self.maxBounds is not None:
            # Clamp: compute the smallest per-axis offset that pushes the
            # proposed rect back inside maxBounds, then apply it.
            r = self.stateRect(newState)
            d = Point(0,0)
            if self.maxBounds.left() > r.left():
                d[0] = self.maxBounds.left() - r.left()
            elif self.maxBounds.right() < r.right():
                d[0] = self.maxBounds.right() - r.right()
            if self.maxBounds.top() > r.top():
                d[1] = self.maxBounds.top() - r.top()
            elif self.maxBounds.bottom() < r.bottom():
                d[1] = self.maxBounds.bottom() - r.bottom()
            newState['pos'] += d

        update = kargs.get('update', True)
        finish = kargs.get('finish', True)
        self.setPos(newState['pos'], update=update, finish=finish)
def rotate(self, angle, center=None, snap=False, update=True, finish=True):
"""
Rotate the ROI by *angle* degrees.
=============== ==========================================================================
**Arguments**
angle (float) The angle in degrees to rotate
center (None | Point) Optional center point around which the ROI is rotated, in
the local coordinate system of the ROI
snap (bool) If True, the final ROI angle is snapped to the nearest increment
(default is 15 degrees; see ROI.rotateSnapAngle)
update (bool) See setPos()
finish (bool) See setPos()
=============== ==========================================================================
"""
self.setAngle(self.angle()+angle, center=center, snap=snap, update=update, finish=finish)
    def handleMoveStarted(self):
        # Called when a handle drag begins: remember the pre-move state so the
        # drag can be cancelled, then notify listeners that a change started.
        self.preMoveState = self.getState()
        self.sigRegionChangeStarted.emit(self)
def addTranslateHandle(self, pos, axes=None, item=None, name=None, index=None):
"""
Add a new translation handle to the ROI. Dragging the handle will move
the entire ROI without changing its angle or shape.
Note that, by default, ROIs may be moved by dragging anywhere inside the
ROI. However, for larger ROIs it may be desirable to disable this and
instead provide one or more translation handles.
=================== ====================================================
**Arguments**
pos (length-2 sequence) The position of the handle
relative to the shape of the ROI. A value of (0,0)
indicates the origin, whereas (1, 1) indicates the
upper-right corner, regardless of the ROI's size.
item The Handle instance to add. If None, a new handle
will be created.
name The name of this handle (optional). Handles are
identified by name when calling
getLocalHandlePositions and getSceneHandlePositions.
=================== ====================================================
"""
pos = Point(pos)
return self.addHandle({'name': name, 'type': 't', 'pos': pos, 'item': item}, index=index)
def addFreeHandle(self, pos=None, axes=None, item=None, name=None, index=None):
"""
Add a new free handle to the ROI. Dragging free handles has no effect
on the position or shape of the ROI.
=================== ====================================================
**Arguments**
pos (length-2 sequence) The position of the handle
relative to the shape of the ROI. A value of (0,0)
indicates the origin, whereas (1, 1) indicates the
upper-right corner, regardless of the ROI's size.
item The Handle instance to add. If None, a new handle
will be created.
name The name of this handle (optional). Handles are
identified by name when calling
getLocalHandlePositions and getSceneHandlePositions.
=================== ====================================================
"""
if pos is not None:
pos = Point(pos)
return self.addHandle({'name': name, 'type': 'f', 'pos': pos, 'item': item}, index=index)
def addScaleHandle(self, pos, center, axes=None, item=None, name=None, lockAspect=False, index=None):
"""
Add a new scale handle to the ROI. Dragging a scale handle allows the
user to change the height and/or width of the ROI.
=================== ====================================================
**Arguments**
pos (length-2 sequence) The position of the handle
relative to the shape of the ROI. A value of (0,0)
indicates the origin, whereas (1, 1) indicates the
upper-right corner, regardless of the ROI's size.
center (length-2 sequence) The center point around which
scaling takes place. If the center point has the
same x or y value as the handle position, then
scaling will be disabled for that axis.
item The Handle instance to add. If None, a new handle
will be created.
name The name of this handle (optional). Handles are
identified by name when calling
getLocalHandlePositions and getSceneHandlePositions.
=================== ====================================================
"""
pos = Point(pos)
center = Point(center)
info = {'name': name, 'type': 's', 'center': center, 'pos': pos, 'item': item, 'lockAspect': lockAspect}
if pos.x() == center.x():
info['xoff'] = True
if pos.y() == center.y():
info['yoff'] = True
return self.addHandle(info, index=index)
def addRotateHandle(self, pos, center, item=None, name=None, index=None):
"""
Add a new rotation handle to the ROI. Dragging a rotation handle allows
the user to change the angle of the ROI.
=================== ====================================================
**Arguments**
pos (length-2 sequence) The position of the handle
relative to the shape of the ROI. A value of (0,0)
indicates the origin, whereas (1, 1) indicates the
upper-right corner, regardless of the ROI's size.
center (length-2 sequence) The center point around which
rotation takes place.
item The Handle instance to add. If None, a new handle
will be created.
name The name of this handle (optional). Handles are
identified by name when calling
getLocalHandlePositions and getSceneHandlePositions.
=================== ====================================================
"""
pos = Point(pos)
center = Point(center)
return self.addHandle({'name': name, 'type': 'r', 'center': center, 'pos': pos, 'item': item}, index=index)
def addScaleRotateHandle(self, pos, center, item=None, name=None, index=None):
"""
Add a new scale+rotation handle to the ROI. When dragging a handle of
this type, the user can simultaneously rotate the ROI around an
arbitrary center point as well as scale the ROI by dragging the handle
toward or away from the center point.
=================== ====================================================
**Arguments**
pos (length-2 sequence) The position of the handle
relative to the shape of the ROI. A value of (0,0)
indicates the origin, whereas (1, 1) indicates the
upper-right corner, regardless of the ROI's size.
center (length-2 sequence) The center point around which
scaling and rotation take place.
item The Handle instance to add. If None, a new handle
will be created.
name The name of this handle (optional). Handles are
identified by name when calling
getLocalHandlePositions and getSceneHandlePositions.
=================== ====================================================
"""
pos = Point(pos)
center = Point(center)
if pos[0] == center[0] and pos[1] == center[1]:
raise Exception("Scale/rotate handles cannot be at their center point.")
return self.addHandle({'name': name, 'type': 'sr', 'center': center, 'pos': pos, 'item': item}, index=index)
def addRotateFreeHandle(self, pos, center, axes=None, item=None, name=None, index=None):
"""
Add a new rotation+free handle to the ROI. When dragging a handle of
this type, the user can rotate the ROI around an
arbitrary center point, while moving toward or away from the center
point has no effect on the shape of the ROI.
=================== ====================================================
**Arguments**
pos (length-2 sequence) The position of the handle
relative to the shape of the ROI. A value of (0,0)
indicates the origin, whereas (1, 1) indicates the
upper-right corner, regardless of the ROI's size.
center (length-2 sequence) The center point around which
rotation takes place.
item The Handle instance to add. If None, a new handle
will be created.
name The name of this handle (optional). Handles are
identified by name when calling
getLocalHandlePositions and getSceneHandlePositions.
=================== ====================================================
"""
pos = Point(pos)
center = Point(center)
return self.addHandle({'name': name, 'type': 'rf', 'center': center, 'pos': pos, 'item': item}, index=index)
    def addHandle(self, info, index=None):
        """Register a handle described by *info* (a dict with 'name', 'type',
        'pos', 'item' and, for some types, 'center' keys), creating the
        Handle item if one was not supplied. Returns the Handle."""
        ## If a Handle was not supplied, create it now
        if 'item' not in info or info['item'] is None:
            h = Handle(self.handleSize, typ=info['type'], pen=self.handlePen,
                       hoverPen=self.handleHoverPen, parent=self, antialias=self._antialias)
            info['item'] = h
        else:
            h = info['item']
            if info['pos'] is None:
                info['pos'] = h.pos()
        # Fractional position is scaled by the current ROI size to place the
        # handle in local coordinates.
        h.setPos(info['pos'] * self.state['size'])

        ## connect the handle to this ROI
        h.connectROI(self)
        if index is None:
            self.handles.append(info)
        else:
            self.handles.insert(index, info)

        # Keep the handle drawn above the ROI itself.
        h.setZValue(self.zValue()+1)
        self.stateChanged()
        return h
def indexOfHandle(self, handle):
"""
Return the index of *handle* in the list of this ROI's handles.
"""
if isinstance(handle, Handle):
index = [i for i, info in enumerate(self.handles) if info['item'] is handle]
if len(index) == 0:
raise Exception("Cannot return handle index; not attached to this ROI")
return index[0]
else:
return handle
    def removeHandle(self, handle):
        """Remove a handle from this ROI. Argument may be either a Handle
        instance or the integer index of the handle."""
        index = self.indexOfHandle(handle)

        handle = self.handles[index]['item']
        self.handles.pop(index)
        handle.disconnectROI(self)
        # Only remove the item from the scene if no other ROI still uses it
        # (handles may be shared between ROIs; see replaceHandle).
        if len(handle.rois) == 0 and self.scene() is not None:
            self.scene().removeItem(handle)
        self.stateChanged()
    def replaceHandle(self, oldHandle, newHandle):
        """Replace one handle in the ROI for another. This is useful when
        connecting multiple ROIs together.

        *oldHandle* may be a Handle instance or the index of a handle to be
        replaced."""
        index = self.indexOfHandle(oldHandle)
        info = self.handles[index]
        # Remove first so the replacement is inserted at the same index.
        self.removeHandle(index)
        info['item'] = newHandle
        info['pos'] = newHandle.pos()
        self.addHandle(info, index=index)
def checkRemoveHandle(self, handle):
## This is used when displaying a Handle's context menu to determine
## whether removing is allowed.
## Subclasses may wish to override this to disable the menu entry.
## Note: by default, handles are not user-removable even if this method returns True.
return True
def getLocalHandlePositions(self, index=None):
"""Returns the position of handles in the ROI's coordinate system.
The format returned is a list of (name, pos) tuples.
"""
if index is None:
positions = []
for h in self.handles:
positions.append((h['name'], h['pos']))
return positions
else:
return (self.handles[index]['name'], self.handles[index]['pos'])
def getSceneHandlePositions(self, index=None):
"""Returns the position of handles in the scene coordinate system.
The format returned is a list of (name, pos) tuples.
"""
if index is None:
positions = []
for h in self.handles:
positions.append((h['name'], h['item'].scenePos()))
return positions
else:
return (self.handles[index]['name'], self.handles[index]['item'].scenePos())
def getHandles(self):
"""
Return a list of this ROI's Handles.
"""
return [h['item'] for h in self.handles]
def mapSceneToParent(self, pt):
return self.mapToParent(self.mapFromScene(pt))
def setSelected(self, s):
QtWidgets.QGraphicsItem.setSelected(self, s)
#print "select", self, s
if s:
for h in self.handles:
h['item'].show()
else:
for h in self.handles:
h['item'].hide()
    def hoverEvent(self, ev):
        # Decide whether the ROI should show hover highlighting, and declare
        # which mouse interactions (drags/clicks) to accept for this hover.
        hover = False
        if not ev.isExit():
            if self.translatable and ev.acceptDrags(QtCore.Qt.MouseButton.LeftButton):
                hover=True

            for btn in [QtCore.Qt.MouseButton.LeftButton, QtCore.Qt.MouseButton.RightButton, QtCore.Qt.MouseButton.MiddleButton]:
                if (self.acceptedMouseButtons() & btn) and ev.acceptClicks(btn):
                    hover=True
            # Right-clicks must be accepted here for the context menu to work.
            if self.contextMenuEnabled():
                ev.acceptClicks(QtCore.Qt.MouseButton.RightButton)

        if hover:
            self.setMouseHover(True)
            ev.acceptClicks(QtCore.Qt.MouseButton.LeftButton)  ## If the ROI is hilighted, we should accept all clicks to avoid confusion.
            ev.acceptClicks(QtCore.Qt.MouseButton.RightButton)
            ev.acceptClicks(QtCore.Qt.MouseButton.MiddleButton)
            self.sigHoverEvent.emit(self)
        else:
            self.setMouseHover(False)
def setMouseHover(self, hover):
## Inform the ROI that the mouse is(not) hovering over it
if self.mouseHovering == hover:
return
self.mouseHovering = hover
self._updateHoverColor()
def _updateHoverColor(self):
pen = self._makePen()
if self.currentPen != pen:
self.currentPen = pen
self.update()
def _makePen(self):
# Generate the pen color for this ROI based on its current state.
if self.mouseHovering:
return self.hoverPen
else:
return self.pen
def contextMenuEnabled(self):
return self.removable or self.menu and len(self.menu.children()) > 1
def raiseContextMenu(self, ev):
if not self.contextMenuEnabled():
return
menu = self.getMenu()
menu = self.scene().addParentContextMenus(self, menu, ev)
pos = ev.screenPos()
menu.popup(QtCore.QPoint(int(pos.x()), int(pos.y())))
def getMenu(self):
if self.menu is None:
self.menu = QtWidgets.QMenu()
self.menu.setTitle(translate("ROI", "ROI"))
if self.removable:
remAct = QtGui.QAction(translate("ROI", "Remove ROI"), self.menu)
remAct.triggered.connect(self.removeClicked)
self.menu.addAction(remAct)
self.menu.remAct = remAct
return self.menu
    def removeClicked(self):
        ## Send remove event only after we have exited the menu event handler;
        ## emitting immediately could delete objects still in use by the menu.
        QtCore.QTimer.singleShot(0, self._emitRemoveRequest)
    def _emitRemoveRequest(self):
        # Deferred from removeClicked(); notifies listeners that this ROI
        # requests removal (listeners perform the actual removal).
        self.sigRemoveRequested.emit(self)
    def mouseDragEvent(self, ev):
        # Delegate all drag handling to the configured self.mouseDragHandler.
        self.mouseDragHandler.mouseDragEvent(ev)
    def mouseClickEvent(self, ev):
        # A right-click during an active move cancels the move. Note this is
        # deliberately not an elif: the same right-click may then also open
        # the context menu below.
        if ev.button() == QtCore.Qt.MouseButton.RightButton and self.isMoving:
            ev.accept()
            self.cancelMove()
        if ev.button() == QtCore.Qt.MouseButton.RightButton and self.contextMenuEnabled():
            self.raiseContextMenu(ev)
            ev.accept()
        elif self.acceptedMouseButtons() & ev.button():
            # Any other accepted button: report the click to listeners.
            ev.accept()
            self.sigClicked.emit(self, ev)
        else:
            ev.ignore()
    def _moveStarted(self):
        # Begin an interactive move: flag it, snapshot the state so the move
        # can be cancelled (see cancelMove), and notify listeners.
        self.isMoving = True
        self.preMoveState = self.getState()
        self.sigRegionChangeStarted.emit(self)
def _moveFinished(self):
if self.isMoving:
self.stateChangeFinished()
self.isMoving = False
    def cancelMove(self):
        """Abort an in-progress mouse drag, restoring the pre-move state."""
        self.isMoving = False
        self.setState(self.preMoveState)
def checkPointMove(self, handle, pos, modifiers):
"""When handles move, they must ask the ROI if the move is acceptable.
By default, this always returns True. Subclasses may wish override.
"""
return True
    def movePoint(self, handle, pos, modifiers=None, finish=True, coords='parent'):
        """Move *handle* to *pos* and update the ROI accordingly.

        Called by Handles when they are dragged. *pos* is the requested new
        handle position, expressed in 'parent' or 'scene' *coords*. The
        effect on the ROI (translate / free / scale / rotate / scale+rotate)
        is selected by the handle's 'type'. If *finish* is True,
        sigRegionChangeFinished is emitted after the change.
        """
        if modifiers is None:
            modifiers = QtCore.Qt.KeyboardModifier.NoModifier
        newState = self.stateCopy()
        index = self.indexOfHandle(handle)
        h = self.handles[index]
        # p0/p1: the handle's current and requested positions in parent coords.
        p0 = self.mapToParent(h['pos'] * self.state['size'])
        p1 = Point(pos)

        if coords == 'parent':
            pass
        elif coords == 'scene':
            p1 = self.mapSceneToParent(p1)
        else:
            raise Exception("New point location must be given in either 'parent' or 'scene' coordinates.")

        ## Handles with a 'center' need to know their local position relative to the center point (lp0, lp1)
        if 'center' in h:
            c = h['center']
            cs = c * self.state['size']
            lp0 = self.mapFromParent(p0) - cs
            lp1 = self.mapFromParent(p1) - cs

        if h['type'] == 't':
            # Translation handle: move the whole ROI by the drag delta.
            snap = True if (modifiers & QtCore.Qt.KeyboardModifier.ControlModifier) else None
            self.translate(p1-p0, snap=snap, update=False)

        elif h['type'] == 'f':
            # Free handle: moves independently; ROI geometry is unchanged.
            newPos = self.mapFromParent(p1)
            h['item'].setPos(newPos)
            h['pos'] = newPos
            self.freeHandleMoved = True

        elif h['type'] == 's':
            ## If a handle and its center have the same x or y value, we can't scale across that axis.
            if h['center'][0] == h['pos'][0]:
                lp1[0] = 0
            if h['center'][1] == h['pos'][1]:
                lp1[1] = 0

            ## snap
            if self.scaleSnap or (modifiers & QtCore.Qt.KeyboardModifier.ControlModifier):
                lp1[0] = round(lp1[0] / self.scaleSnapSize) * self.scaleSnapSize
                lp1[1] = round(lp1[1] / self.scaleSnapSize) * self.scaleSnapSize

            ## preserve aspect ratio (this can override snapping)
            if h['lockAspect'] or (modifiers & QtCore.Qt.KeyboardModifier.AltModifier):
                lp1 = lp1.proj(lp0)

            ## determine scale factors and new size of ROI
            hs = h['pos'] - c
            if hs[0] == 0:
                hs[0] = 1
            if hs[1] == 0:
                hs[1] = 1
            newSize = lp1 / hs

            ## Perform some corrections and limit checks
            if newSize[0] == 0:
                newSize[0] = newState['size'][0]
            if newSize[1] == 0:
                newSize[1] = newState['size'][1]
            if not self.invertible:
                if newSize[0] < 0:
                    newSize[0] = newState['size'][0]
                if newSize[1] < 0:
                    newSize[1] = newState['size'][1]
            if self.aspectLocked:
                newSize[0] = newSize[1]

            ## Move ROI so the center point occupies the same scene location after the scale
            s0 = c * self.state['size']
            s1 = c * newSize
            cc = self.mapToParent(s0 - s1) - self.mapToParent(Point(0, 0))

            ## update state, do more boundary checks
            newState['size'] = newSize
            newState['pos'] = newState['pos'] + cc
            if self.maxBounds is not None:
                r = self.stateRect(newState)
                if not self.maxBounds.contains(r):
                    return

            self.setPos(newState['pos'], update=False)
            self.setSize(newState['size'], update=False)

        elif h['type'] in ['r', 'rf']:
            if h['type'] == 'rf':
                self.freeHandleMoved = True

            if not self.rotatable:
                return
            ## If the handle is directly over its center point, we can't compute an angle.
            try:
                if lp1.length() == 0 or lp0.length() == 0:
                    return
            except OverflowError:
                return

            ## determine new rotation angle, constrained if necessary
            ang = newState['angle'] - lp0.angle(lp1)
            if ang is None:  ## this should never happen..
                return
            if self.rotateSnap or (modifiers & QtCore.Qt.KeyboardModifier.ControlModifier):
                ang = round(ang / self.rotateSnapAngle) * self.rotateSnapAngle

            ## create rotation transform
            tr = QtGui.QTransform()
            tr.rotate(ang)

            ## move ROI so that center point remains stationary after rotate
            cc = self.mapToParent(cs) - (tr.map(cs) + self.state['pos'])
            newState['angle'] = ang
            newState['pos'] = newState['pos'] + cc

            ## check boundaries, update
            if self.maxBounds is not None:
                r = self.stateRect(newState)
                if not self.maxBounds.contains(r):
                    return
            self.setPos(newState['pos'], update=False)
            self.setAngle(ang, update=False)

            ## If this is a free-rotate handle, its distance from the center may change.
            if h['type'] == 'rf':
                h['item'].setPos(self.mapFromScene(p1))  ## changes ROI coordinates of handle
                h['pos'] = self.mapFromParent(p1)

        elif h['type'] == 'sr':
            # Scale+rotate handle: combines the rotation logic above with
            # radial scaling from the handle's distance to the center.
            try:
                if lp1.length() == 0 or lp0.length() == 0:
                    return
            except OverflowError:
                return

            ang = newState['angle'] - lp0.angle(lp1)
            if ang is None:
                return
            if self.rotateSnap or (modifiers & QtCore.Qt.KeyboardModifier.ControlModifier):
                ang = round(ang / self.rotateSnapAngle) * self.rotateSnapAngle

            if self.aspectLocked or h['center'][0] != h['pos'][0]:
                newState['size'][0] = self.state['size'][0] * lp1.length() / lp0.length()
                if self.scaleSnap:  # use CTRL only for angular snap here.
                    newState['size'][0] = round(newState['size'][0] / self.snapSize) * self.snapSize

            if self.aspectLocked or h['center'][1] != h['pos'][1]:
                newState['size'][1] = self.state['size'][1] * lp1.length() / lp0.length()
                if self.scaleSnap:  # use CTRL only for angular snap here.
                    newState['size'][1] = round(newState['size'][1] / self.snapSize) * self.snapSize

            ## Guard against zero-size ROI after scaling.
            if newState['size'][0] == 0:
                newState['size'][0] = 1
            if newState['size'][1] == 0:
                newState['size'][1] = 1

            c1 = c * newState['size']
            tr = QtGui.QTransform()
            tr.rotate(ang)

            ## move ROI so the center point remains stationary after the change
            cc = self.mapToParent(cs) - (tr.map(c1) + self.state['pos'])
            newState['angle'] = ang
            newState['pos'] = newState['pos'] + cc
            if self.maxBounds is not None:
                r = self.stateRect(newState)
                if not self.maxBounds.contains(r):
                    return

            self.setState(newState, update=False)

        self.stateChanged(finish=finish)
    def stateChanged(self, finish=True):
        """Process changes to the state of the ROI.

        If there are any changes, then the positions of handles are updated accordingly
        and sigRegionChanged is emitted. If finish is True, then
        sigRegionChangeFinished will also be emitted."""
        changed = False
        if self.lastState is None:
            changed = True
        else:
            state = self.getState()
            for k in list(state.keys()):
                if state[k] != self.lastState[k]:
                    changed = True

        self.prepareGeometryChange()
        if changed:
            ## Move all handles to match the current configuration of the ROI
            for h in self.handles:
                if h['item'] in self.childItems():
                    h['item'].setPos(h['pos'] * self.state['size'])
            self.update()
            self.sigRegionChanged.emit(self)
        elif self.freeHandleMoved:
            # No geometry change, but a free handle moved: still notify.
            self.sigRegionChanged.emit(self)

        self.freeHandleMoved = False
        self.lastState = self.getState()

        if finish:
            self.stateChangeFinished()
            self.informViewBoundsChanged()
    def stateChangeFinished(self):
        # Emit the "change finished" signal; called at the end of a state
        # change or a series of deferred changes (see setPos()).
        self.sigRegionChangeFinished.emit(self)
def stateRect(self, state):
r = QtCore.QRectF(0, 0, state['size'][0], state['size'][1])
tr = QtGui.QTransform()
tr.rotate(-state['angle'])
r = tr.mapRect(r)
return r.adjusted(state['pos'][0], state['pos'][1], state['pos'][0], state['pos'][1])
def getSnapPosition(self, pos, snap=None):
## Given that pos has been requested, return the nearest snap-to position
## optionally, snap may be passed in to specify a rectangular snap grid.
## override this function for more interesting snap functionality..
if snap is None or snap is True:
if self.snapSize is None:
return pos
snap = Point(self.snapSize, self.snapSize)
return Point(
round(pos[0] / snap[0]) * snap[0],
round(pos[1] / snap[1]) * snap[1]
)
def boundingRect(self):
return QtCore.QRectF(0, 0, self.state['size'][0], self.state['size'][1]).normalized()
def paint(self, p, opt, widget):
# Note: don't use self.boundingRect here, because subclasses may need to redefine it.
r = QtCore.QRectF(0, 0, self.state['size'][0], self.state['size'][1]).normalized()
p.setRenderHint(
QtGui.QPainter.RenderHint.Antialiasing,
self._antialias
)
p.setPen(self.currentPen)
p.translate(r.left(), r.top())
p.scale(r.width(), r.height())
p.drawRect(0, 0, 1, 1)
def getArraySlice(self, data, img, axes=(0,1), returnSlice=True):
"""Return a tuple of slice objects that can be used to slice the region
from *data* that is covered by the bounding rectangle of this ROI.
Also returns the transform that maps the ROI into data coordinates.
If returnSlice is set to False, the function returns a pair of tuples with the values that would have
been used to generate the slice objects. ((ax0Start, ax0Stop), (ax1Start, ax1Stop))
If the slice cannot be computed (usually because the scene/transforms are not properly
constructed yet), then the method returns None.
"""
## Determine shape of array along ROI axes
dShape = (data.shape[axes[0]], data.shape[axes[1]])
## Determine transform that maps ROI bounding box to image coordinates
try:
tr = self.sceneTransform() * fn.invertQTransform(img.sceneTransform())
except np.linalg.linalg.LinAlgError:
return None
## Modify transform to scale from image coords to data coords
axisOrder = img.axisOrder
if axisOrder == 'row-major':
tr.scale(float(dShape[1]) / img.width(), float(dShape[0]) / img.height())
else:
tr.scale(float(dShape[0]) / img.width(), float(dShape[1]) / img.height())
## Transform ROI bounds into data bounds
dataBounds = tr.mapRect(self.boundingRect())
## Intersect transformed ROI bounds with data bounds
if axisOrder == 'row-major':
intBounds = dataBounds.intersected(QtCore.QRectF(0, 0, dShape[1], dShape[0]))
else:
intBounds = dataBounds.intersected(QtCore.QRectF(0, 0, dShape[0], dShape[1]))
## Determine index values to use when referencing the array.
bounds = (
(int(min(intBounds.left(), intBounds.right())), int(1+max(intBounds.left(), intBounds.right()))),
(int(min(intBounds.bottom(), intBounds.top())), int(1+max(intBounds.bottom(), intBounds.top())))
)
if axisOrder == 'row-major':
bounds = bounds[::-1]
if returnSlice:
## Create slice objects
sl = [slice(None)] * data.ndim
sl[axes[0]] = slice(*bounds[0])
sl[axes[1]] = slice(*bounds[1])
return tuple(sl), tr
else:
return bounds, tr
    def getArrayRegion(self, data, img, axes=(0,1), returnMappedCoords=False, **kwds):
        r"""Use the position and orientation of this ROI relative to an imageItem
        to pull a slice from an array.

        =================== ====================================================
        **Arguments**
        data                The array to slice from. Note that this array does
                            *not* have to be the same data that is represented
                            in *img*.
        img                 (ImageItem or other suitable QGraphicsItem)
                            Used to determine the relationship between the
                            ROI and the boundaries of *data*.
        axes                (length-2 tuple) Specifies the axes in *data* that
                            correspond to the (x, y) axes of *img*. If the
                            image's axis order is set to
                            'row-major', then the axes are instead specified in
                            (y, x) order.
        returnMappedCoords  (bool) If True, the array slice is returned along
                            with a corresponding array of coordinates that were
                            used to extract data from the original array.
        \**kwds             All keyword arguments are passed to
                            :func:`affineSlice <pyqtgraph.affineSlice>`.
        =================== ====================================================

        This method uses :func:`affineSlice <pyqtgraph.affineSlice>` to generate
        the slice from *data* and uses :func:`getAffineSliceParams <pyqtgraph.ROI.getAffineSliceParams>`
        to determine the parameters to pass to :func:`affineSlice <pyqtgraph.affineSlice>`.

        If *returnMappedCoords* is True, then the method returns a tuple (result, coords)
        such that coords is the set of coordinates used to interpolate values from the original
        data, mapped into the parent coordinate system of the image. This is useful, when slicing
        data from images that have been transformed, for determining the location of each value
        in the sliced data.

        All extra keyword arguments are passed to :func:`affineSlice <pyqtgraph.affineSlice>`.
        """
        # this is a hidden argument for internal use
        fromBR = kwds.pop('fromBoundingRect', False)

        # Automatically compute missing parameters from the ROI geometry
        _shape, _vectors, _origin = self.getAffineSliceParams(data, img, axes, fromBoundingRect=fromBR)

        # Replace them with user defined parameters if defined
        shape = kwds.pop('shape', _shape)
        vectors = kwds.pop('vectors', _vectors)
        origin = kwds.pop('origin', _origin)

        if not returnMappedCoords:
            rgn = fn.affineSlice(data, shape=shape, vectors=vectors, origin=origin, axes=axes, **kwds)
            return rgn
        else:
            kwds['returnCoords'] = True
            result, coords = fn.affineSlice(data, shape=shape, vectors=vectors, origin=origin, axes=axes, **kwds)

            ### map coordinates into the image's parent coordinate system and return
            mapped = fn.transformCoordinates(img.transform(), coords)
            return result, mapped
    def _getArrayRegionForArbitraryShape(self, data, img, axes=(0,1), returnMappedCoords=False, **kwds):
        """
        Return the result of :meth:`~pyqtgraph.ROI.getArrayRegion`, masked by
        the shape of the ROI. Values outside the ROI shape are set to 0.

        See :meth:`~pyqtgraph.ROI.getArrayRegion` for a description of the
        arguments.
        """
        if returnMappedCoords:
            sliced, mappedCoords = ROI.getArrayRegion(
                self, data, img, axes, returnMappedCoords, fromBoundingRect=True, **kwds)
        else:
            sliced = ROI.getArrayRegion(
                self, data, img, axes, returnMappedCoords, fromBoundingRect=True, **kwds)

        # Render the ROI's shape into a float mask matching the sliced region;
        # transpose for row-major images so axes line up with the data.
        if img.axisOrder == 'col-major':
            mask = self.renderShapeMask(sliced.shape[axes[0]], sliced.shape[axes[1]])
        else:
            mask = self.renderShapeMask(sliced.shape[axes[1]], sliced.shape[axes[0]])
            mask = mask.T

        # reshape mask to ensure it is applied to the correct data axes
        shape = [1] * data.ndim
        shape[axes[0]] = sliced.shape[axes[0]]
        shape[axes[1]] = sliced.shape[axes[1]]
        mask = mask.reshape(shape)

        if returnMappedCoords:
            return sliced * mask, mappedCoords
        else:
            return sliced * mask
def getAffineSliceParams(self, data, img, axes=(0,1), fromBoundingRect=False):
"""
Returns the parameters needed to use :func:`affineSlice <pyqtgraph.affineSlice>`
(shape, vectors, origin) to extract a subset of *data* using this ROI
and *img* to specify the subset.
If *fromBoundingRect* is True, then the ROI's bounding rectangle is used
rather than the shape of the ROI.
See :func:`getArrayRegion <pyqtgraph.ROI.getArrayRegion>` for more information.
"""
if self.scene() is not img.scene():
raise Exception("ROI and target item must be members of the same scene.")
origin = img.mapToData(self.mapToItem(img, QtCore.QPointF(0, 0)))
## vx and vy point in the directions of the slice axes, but must be scaled properly
vx = img.mapToData(self.mapToItem(img, QtCore.QPointF(1, 0))) - origin
vy = img.mapToData(self.mapToItem(img, QtCore.QPointF(0, 1))) - origin
lvx = hypot(vx.x(), vx.y()) # length
lvy = hypot(vy.x(), vy.y()) # length
##img.width is number of pixels, not width of item.
##need pxWidth and pxHeight instead of pxLen ?
sx = 1.0 / lvx
sy = 1.0 / lvy
vectors = ((vx.x()*sx, vx.y()*sx), (vy.x()*sy, vy.y()*sy))
if fromBoundingRect is True:
shape = self.boundingRect().width(), self.boundingRect().height()
origin = img.mapToData(self.mapToItem(img, self.boundingRect().topLeft()))
origin = (origin.x(), origin.y())
else:
shape = self.state['size']
origin = (origin.x(), origin.y())
shape = [abs(shape[0]/sx), abs(shape[1]/sy)]
if img.axisOrder == 'row-major':
# transpose output
vectors = vectors[::-1]
shape = shape[::-1]
return shape, vectors, origin
def renderShapeMask(self, width, height):
"""Return an array of 0.0-1.0 into which the shape of the item has been drawn.
This can be used to mask array selections.
"""
if width == 0 or height == 0:
return np.empty((width, height), dtype=float)
im = QtGui.QImage(width, height, QtGui.QImage.Format.Format_ARGB32)
im.fill(QtCore.Qt.GlobalColor.transparent)
p = QtGui.QPainter(im)
p.setPen(fn.mkPen(None))
p.setBrush(fn.mkBrush('w'))
shape = self.shape()
bounds = shape.boundingRect()
p.scale(im.width() / bounds.width(), im.height() / bounds.height())
p.translate(-bounds.topLeft())
p.drawPath(shape)
p.end()
cidx = 0 if sys.byteorder == 'little' else 3
mask = fn.ndarray_from_qimage(im)[...,cidx].T
return mask.astype(float) / 255
def getGlobalTransform(self, relativeTo=None):
"""Return global transformation (rotation angle+translation) required to move
from relative state to current state. If relative state isn't specified,
then we use the state of the ROI when mouse is pressed."""
if relativeTo is None:
relativeTo = self.preMoveState
st = self.getState()
## this is only allowed because we will be comparing the two
relativeTo['scale'] = relativeTo['size']
st['scale'] = st['size']
t1 = SRTTransform(relativeTo)
t2 = SRTTransform(st)
return t2/t1
def applyGlobalTransform(self, tr):
st = self.getState()
st['scale'] = st['size']
st = SRTTransform(st)
st = (st * tr).saveState()
st['size'] = st['scale']
self.setState(st)
| ROI |
python | tensorflow__tensorflow | tensorflow/core/function/polymorphism/function_type_test.py | {
"start": 29611,
"end": 31994
} | class ____(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
{
"signature": ((1, 2, 3), {}),
"expected_types": (
trace_type.from_value(1),
trace_type.from_value(2),
trace_type.from_value(3),
),
},
{
"signature": (([1, 2, 3],), {}),
"expected_types": (
trace_type.from_value([1, 2, 3]),
),
},
{
"signature": ((), {}),
"expected_types": (),
},
)
def testArgs(self, signature, expected_types):
generated_type = function_type.from_structured_signature(signature)
self.assertEqual(generated_type.output, trace_type.from_value(None))
for i, p in enumerate(generated_type.parameters.values()):
self.assertEqual(p.kind, function_type.Parameter.POSITIONAL_ONLY)
self.assertEqual(p.type_constraint, expected_types[i])
@parameterized.parameters(
{
"signature": ((), {"a": 1, "b": 2, "c": 3}),
"expected_types": {
"a": trace_type.from_value(1),
"b": trace_type.from_value(2),
"c": trace_type.from_value(3),
},
},
{
"signature": ((), {"a": [1, 2, 3]}),
"expected_types": {
"a": trace_type.from_value([1, 2, 3]),
},
},
{
"signature": ((), {}),
"expected_types": {},
},
)
def testKwargs(self, signature, expected_types):
generated_type = function_type.from_structured_signature(signature)
self.assertEqual(generated_type.output, trace_type.from_value(None))
for p in generated_type.parameters.values():
self.assertEqual(p.kind, function_type.Parameter.KEYWORD_ONLY)
self.assertEqual(p.type_constraint, expected_types[p.name])
@parameterized.parameters(
{"output_signature": 1},
{"output_signature": [1, 2, 3]},
{"output_signature": ()},
)
def testOutput(self, output_signature):
generated_type = function_type.from_structured_signature(
((), {}), output_signature
)
self.assertEqual(
generated_type.output,
trace_type.from_value(
output_signature,
trace_type.InternalTracingContext(is_legacy_signature=True),
)
)
if __name__ == "__main__":
test.main()
| FromStructuredSignatureTest |
python | django__django | tests/cache/liberal_backend.py | {
"start": 60,
"end": 141
} | class ____:
def validate_key(self, key):
pass
| LiberalKeyValidationMixin |
python | pytorch__pytorch | test/distributed/test_c10d_nccl.py | {
"start": 246753,
"end": 249140
} | class ____(NCCLTraceTestBase):
def _wait_process(self, rank, timeout):
try:
self.processes[rank].join(timeout)
return self.processes[rank].exitcode
except TimeoutError:
return None
@check_if_test_is_skipped
def _check_return_codes(self, elapsed_time):
# the base test infra assumes processes exit with matching return codes,
# but we want rank0 to abort with exception and rank1 to exit with exit 1
self.assertEqual(self.processes[0].exitcode, -6)
self.assertEqual(self.processes[1].exitcode, 1)
@requires_nccl()
@requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
@skip_if_lt_x_gpu(2)
@skip_if_rocm_multiprocess
def test_nccl_errors_dump(self):
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1"
os.environ["TORCH_NCCL_TRACE_BUFFER_SIZE"] = "1000"
os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "1"
# need rank0 to dump before abort
os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "5"
if self.rank == self.MAIN_PROCESS_RANK:
# wait for both rank0 and 1 to crash before looking for dump
self.assertEqual(self._wait_process(0, timeout=90), -6)
self.assertEqual(self._wait_process(1, timeout=90), 1)
# verify that the trace file exists for rank0
self.assertTrue(os.path.exists(self._trace_name(rank=0)))
return
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=10),
)
process_group.allreduce(torch.rand(10).cuda(self.rank))
if self.rank == 0:
work = process_group.allreduce(torch.rand(10).cuda(self.rank))
# expect an error to be raised
with self.assertRaisesRegex(dist.DistBackendError, ""):
# Block the current stream on the NCCL stream
work.wait()
# Run some GPU operations
torch.rand(10).cuda(self.rank)
elif self.rank == 1:
# Clean up structures (ex: files for FileStore before going down)
del process_group
sys.exit(1)
# tests that needs to be run with a larger world size
| NcclErrorDumpTest |
python | numba__llvmlite | llvmlite/binding/typeref.py | {
"start": 997,
"end": 5272
} | class ____(ffi.ObjectRef):
"""A weak reference to a LLVM type
"""
@property
def name(self):
"""
Get type name
"""
return ffi.ret_string(ffi.lib.LLVMPY_GetTypeName(self))
@property
def is_struct(self):
"""
Returns true if the type is a struct type.
"""
return ffi.lib.LLVMPY_TypeIsStruct(self)
@property
def is_pointer(self):
"""
Returns true if the type is a pointer type.
"""
return ffi.lib.LLVMPY_TypeIsPointer(self)
@property
def is_array(self):
"""
Returns true if the type is an array type.
"""
return ffi.lib.LLVMPY_TypeIsArray(self)
@property
def is_vector(self):
"""
Returns true if the type is a vector type.
"""
return ffi.lib.LLVMPY_TypeIsVector(self)
@property
def is_function(self):
"""
Returns true if the type is a function type.
"""
return ffi.lib.LLVMPY_TypeIsFunction(self)
@property
def is_function_vararg(self):
"""
Returns true if a function type accepts a variable number of arguments.
When the type is not a function, raises exception.
"""
if self.type_kind != TypeKind.function:
raise ValueError("Type {} is not a function".format(self))
return ffi.lib.LLVMPY_IsFunctionVararg(self)
@property
def elements(self):
"""
Returns iterator over enclosing types
"""
if self.is_pointer:
raise ValueError("Type {} doesn't contain elements.".format(self))
return _TypeListIterator(ffi.lib.LLVMPY_ElementIter(self))
@property
def element_count(self):
"""
Returns the number of elements in an array or a vector. For scalable
vectors, returns minimum number of elements. When the type is neither
an array nor a vector, raises exception.
"""
if not self.is_array and not self.is_vector:
raise ValueError("Type {} is not an array nor vector".format(self))
return ffi.lib.LLVMPY_GetTypeElementCount(self)
@property
def type_width(self):
"""
Return the basic size of this type if it is a primitive type. These are
fixed by LLVM and are not target-dependent.
This will return zero if the type does not have a size or is not a
primitive type.
If this is a scalable vector type, the scalable property will be set and
the runtime size will be a positive integer multiple of the base size.
Note that this may not reflect the size of memory allocated for an
instance of the type or the number of bytes that are written when an
instance of the type is stored to memory.
"""
return ffi.lib.LLVMPY_GetTypeBitWidth(self)
@property
def type_kind(self):
"""
Returns the LLVMTypeKind enumeration of this type.
"""
return TypeKind(ffi.lib.LLVMPY_GetTypeKind(self))
@property
def is_packed_struct(self):
return ffi.lib.LLVMPY_IsPackedStruct(self)
@property
def is_literal_struct(self):
return ffi.lib.LLVMPY_IsLiteralStruct(self)
@property
def is_opaque_struct(self):
return ffi.lib.LLVMPY_IsOpaqueStruct(self)
def get_function_parameters(self) -> tuple["TypeRef"]:
nparams = ffi.lib.LLVMPY_CountParamTypes(self)
if nparams > 0:
out_buffer = (ffi.LLVMTypeRef * nparams)(None)
ffi.lib.LLVMPY_GetParamTypes(self, out_buffer)
return tuple(map(TypeRef, out_buffer))
else:
return ()
def get_function_return(self) -> "TypeRef":
return TypeRef(ffi.lib.LLVMPY_GetReturnType(self))
def as_ir(self, ir_ctx: ir.Context) -> ir.Type:
"""Convert into a ``llvmlite.ir.Type``.
"""
try:
cls = _TypeKindToIRType[self.type_kind]
except KeyError:
msg = f"as_ir() unsupported for TypeRef of {self.type_kind}"
raise TypeError(msg)
else:
return cls.from_llvm(self, ir_ctx)
def __str__(self):
return ffi.ret_string(ffi.lib.LLVMPY_PrintType(self))
| TypeRef |
python | google__pytype | pytype/imports/typeshed.py | {
"start": 1719,
"end": 2923
} | class ____(TypeshedStore):
"""Filesystem-based typeshed store."""
def __init__(self, *, missing_file=None, open_function=open):
self._root = self.get_root()
self._open_function = open_function
self._missing_file = missing_file
@abc.abstractmethod
def get_root(self):
raise NotImplementedError
def filepath(self, relpath):
return path_utils.join(self._root, relpath)
def load_file(self, relpath) -> tuple[str, str]:
filename = self.filepath(relpath)
with self._open_function(filename) as f:
return relpath, f.read()
def _readlines(self, unix_relpath):
relpath = path_utils.join(*unix_relpath.split("/"))
_, data = self.load_file(relpath)
return data.splitlines()
def load_missing(self) -> list[str]:
"""List of modules that are known to be missing in typeshed."""
if not self._missing_file:
return []
return self._readlines(self._missing_file)
def load_pytype_blocklist(self) -> list[str]:
"""List of modules that we maintain our own versions of."""
return self._readlines("tests/pytype_exclude_list.txt")
def load_stdlib_versions(self) -> list[str]:
return self._readlines("stdlib/VERSIONS")
| TypeshedFs |
python | qdrant__qdrant-client | qdrant_client/local/sparse_distances.py | {
"start": 1864,
"end": 2449
} | class ____:
def __init__(self, target: SparseVector, context: list[SparseContextPair]):
validate_sparse_vector(target)
self.target: SparseVector = sort_sparse_vector(target)
self.context = context
def transform_sparse(
self, foo: Callable[["SparseVector"], "SparseVector"]
) -> "SparseDiscoveryQuery":
return SparseDiscoveryQuery(
target=foo(self.target),
context=[
SparseContextPair(foo(pair.positive), foo(pair.negative)) for pair in self.context
],
)
| SparseDiscoveryQuery |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/taskinstance.py | {
"start": 11802,
"end": 11938
} | class ____(BaseModel):
"""Response for task states with run_id, task and state."""
task_states: dict[str, Any]
| TaskStatesResponse |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 42851,
"end": 43547
} | class ____(fixtures.TestBase):
@testing.combinations(
(["one", "two", "three"], True),
(("one", "two", "three"), True),
((), True),
("four", False),
(252, False),
(Decimal("252"), False),
(b"four", False),
(iter("four"), True),
(b"", False),
("", False),
(None, False),
({"dict": "value"}, True),
({}, True),
({"set", "two"}, True),
(set(), True),
(util.immutabledict(), True),
(util.immutabledict({"key": "value"}), True),
)
def test_non_string_iterable_check(self, fixture, expected):
is_(is_non_string_iterable(fixture), expected)
| MiscTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 99184,
"end": 101728
} | class ____(ColumnElement[_T]):
"""base for expressions that contain an operator and operands
.. versionadded:: 2.0
"""
operator: OperatorType
type: TypeEngine[_T]
group: bool = True
@property
def is_comparison(self):
return operators.is_comparison(self.operator)
def self_group(
self, against: Optional[OperatorType] = None
) -> Union[Self, Grouping[_T]]:
if (
self.group
and operators.is_precedent(self.operator, against)
or (
# a negate against a non-boolean operator
# doesn't make too much sense but we should
# group for that
against is operators.inv
and not operators.is_boolean(self.operator)
)
):
return Grouping(self)
else:
return self
@property
def _flattened_operator_clauses(
self,
) -> typing_Tuple[ColumnElement[Any], ...]:
raise NotImplementedError()
@classmethod
def _construct_for_op(
cls,
left: ColumnElement[Any],
right: ColumnElement[Any],
op: OperatorType,
*,
type_: TypeEngine[_T],
negate: Optional[OperatorType] = None,
modifiers: Optional[Mapping[str, Any]] = None,
) -> OperatorExpression[_T]:
if operators.is_associative(op):
assert (
negate is None
), f"negate not supported for associative operator {op}"
multi = False
if getattr(
left, "operator", None
) is op and type_._compare_type_affinity(left.type):
multi = True
left_flattened = left._flattened_operator_clauses
else:
left_flattened = (left,)
if getattr(
right, "operator", None
) is op and type_._compare_type_affinity(right.type):
multi = True
right_flattened = right._flattened_operator_clauses
else:
right_flattened = (right,)
if multi:
return ExpressionClauseList._construct_for_list(
op,
type_,
*(left_flattened + right_flattened),
)
if right._is_collection_aggregate:
negate = None
return BinaryExpression(
left, right, op, type_=type_, negate=negate, modifiers=modifiers
)
| OperatorExpression |
python | sympy__sympy | sympy/simplify/cse_main.py | {
"start": 11143,
"end": 31310
} | class ____:
def __init__(self, func, args):
self.func = func
self.args = args
def __str__(self):
return "Uneval<{}>({})".format(
self.func, ", ".join(str(a) for a in self.args))
def as_unevaluated_basic(self):
return self.func(*self.args, evaluate=False)
@property
def free_symbols(self):
return set().union(*[a.free_symbols for a in self.args])
__repr__ = __str__
def match_common_args(func_class, funcs, opt_subs):
"""
Recognize and extract common subexpressions of function arguments within a
set of function calls. For instance, for the following function calls::
x + z + y
sin(x + y)
this will extract a common subexpression of `x + y`::
w = x + y
w + z
sin(w)
The function we work with is assumed to be associative and commutative.
Parameters
==========
func_class: class
The function class (e.g. Add, Mul)
funcs: list of functions
A list of function calls.
opt_subs: dict
A dictionary of substitutions which this function may update.
"""
# Sort to ensure that whole-function subexpressions come before the items
# that use them.
funcs = sorted(funcs, key=lambda f: len(f.args))
arg_tracker = FuncArgTracker(funcs)
changed = OrderedSet()
for i in range(len(funcs)):
common_arg_candidates_counts = arg_tracker.get_common_arg_candidates(
arg_tracker.func_to_argset[i], min_func_i=i + 1)
# Sort the candidates in order of match size.
# This makes us try combining smaller matches first.
common_arg_candidates = OrderedSet(sorted(
common_arg_candidates_counts.keys(),
key=lambda k: (common_arg_candidates_counts[k], k)))
while common_arg_candidates:
j = common_arg_candidates.pop(last=False)
com_args = arg_tracker.func_to_argset[i].intersection(
arg_tracker.func_to_argset[j])
if len(com_args) <= 1:
# This may happen if a set of common arguments was already
# combined in a previous iteration.
continue
# For all sets, replace the common symbols by the function
# over them, to allow recursive matches.
diff_i = arg_tracker.func_to_argset[i].difference(com_args)
if diff_i:
# com_func needs to be unevaluated to allow for recursive matches.
com_func = Unevaluated(
func_class, arg_tracker.get_args_in_value_order(com_args))
com_func_number = arg_tracker.get_or_add_value_number(com_func)
arg_tracker.update_func_argset(i, diff_i | OrderedSet([com_func_number]))
changed.add(i)
else:
# Treat the whole expression as a CSE.
#
# The reason this needs to be done is somewhat subtle. Within
# tree_cse(), to_eliminate only contains expressions that are
# seen more than once. The problem is unevaluated expressions
# do not compare equal to the evaluated equivalent. So
# tree_cse() won't mark funcs[i] as a CSE if we use an
# unevaluated version.
com_func_number = arg_tracker.get_or_add_value_number(funcs[i])
diff_j = arg_tracker.func_to_argset[j].difference(com_args)
arg_tracker.update_func_argset(j, diff_j | OrderedSet([com_func_number]))
changed.add(j)
for k in arg_tracker.get_subset_candidates(
com_args, common_arg_candidates):
diff_k = arg_tracker.func_to_argset[k].difference(com_args)
arg_tracker.update_func_argset(k, diff_k | OrderedSet([com_func_number]))
changed.add(k)
if i in changed:
opt_subs[funcs[i]] = Unevaluated(func_class,
arg_tracker.get_args_in_value_order(arg_tracker.func_to_argset[i]))
arg_tracker.stop_arg_tracking(i)
def opt_cse(exprs, order='canonical'):
"""Find optimization opportunities in Adds, Muls, Pows and negative
coefficient Muls.
Parameters
==========
exprs : list of SymPy expressions
The expressions to optimize.
order : string, 'none' or 'canonical'
The order by which Mul and Add arguments are processed. For large
expressions where speed is a concern, use the setting order='none'.
Returns
=======
opt_subs : dictionary of expression substitutions
The expression substitutions which can be useful to optimize CSE.
Examples
========
>>> from sympy.simplify.cse_main import opt_cse
>>> from sympy.abc import x
>>> opt_subs = opt_cse([x**-2])
>>> k, v = list(opt_subs.keys())[0], list(opt_subs.values())[0]
>>> print((k, v.as_unevaluated_basic()))
(x**(-2), 1/(x**2))
"""
opt_subs = {}
adds = OrderedSet()
muls = OrderedSet()
seen_subexp = set()
collapsible_subexp = set()
def _find_opts(expr):
if not isinstance(expr, (Basic, Unevaluated)):
return
if expr.is_Atom or expr.is_Order:
return
if iterable(expr):
list(map(_find_opts, expr))
return
if expr in seen_subexp:
return expr
seen_subexp.add(expr)
list(map(_find_opts, expr.args))
if not isinstance(expr, MatrixExpr) and expr.could_extract_minus_sign():
# XXX -expr does not always work rigorously for some expressions
# containing UnevaluatedExpr.
# https://github.com/sympy/sympy/issues/24818
if isinstance(expr, Add):
neg_expr = Add(*(-i for i in expr.args))
else:
neg_expr = -expr
if not neg_expr.is_Atom:
opt_subs[expr] = Unevaluated(Mul, (S.NegativeOne, neg_expr))
seen_subexp.add(neg_expr)
expr = neg_expr
if isinstance(expr, (Mul, MatMul)):
if len(expr.args) == 1:
collapsible_subexp.add(expr)
else:
muls.add(expr)
elif isinstance(expr, (Add, MatAdd)):
if len(expr.args) == 1:
collapsible_subexp.add(expr)
else:
adds.add(expr)
elif isinstance(expr, Inverse):
# Do not want to treat `Inverse` as a `MatPow`
pass
elif isinstance(expr, (Pow, MatPow)):
base, exp = expr.base, expr.exp
if exp.could_extract_minus_sign():
opt_subs[expr] = Unevaluated(Pow, (Pow(base, -exp), -1))
for e in exprs:
if isinstance(e, (Basic, Unevaluated)):
_find_opts(e)
# Handle collapsing of multinary operations with single arguments
edges = [(s, s.args[0]) for s in collapsible_subexp
if s.args[0] in collapsible_subexp]
for e in reversed(topological_sort((collapsible_subexp, edges))):
opt_subs[e] = opt_subs.get(e.args[0], e.args[0])
# split muls into commutative
commutative_muls = OrderedSet()
for m in muls:
c, nc = m.args_cnc(cset=False)
if c:
c_mul = m.func(*c)
if nc:
if c_mul == 1:
new_obj = m.func(*nc)
else:
if isinstance(m, MatMul):
new_obj = m.func(c_mul, *nc, evaluate=False)
else:
new_obj = m.func(c_mul, m.func(*nc), evaluate=False)
opt_subs[m] = new_obj
if len(c) > 1:
commutative_muls.add(c_mul)
match_common_args(Add, adds, opt_subs)
match_common_args(Mul, commutative_muls, opt_subs)
return opt_subs
def tree_cse(exprs, symbols, opt_subs=None, order='canonical', ignore=()):
"""Perform raw CSE on expression tree, taking opt_subs into account.
Parameters
==========
exprs : list of SymPy expressions
The expressions to reduce.
symbols : infinite iterator yielding unique Symbols
The symbols used to label the common subexpressions which are pulled
out.
opt_subs : dictionary of expression substitutions
The expressions to be substituted before any CSE action is performed.
order : string, 'none' or 'canonical'
The order by which Mul and Add arguments are processed. For large
expressions where speed is a concern, use the setting order='none'.
ignore : iterable of Symbols
Substitutions containing any Symbol from ``ignore`` will be ignored.
"""
if opt_subs is None:
opt_subs = {}
## Find repeated sub-expressions
to_eliminate = set()
seen_subexp = set()
excluded_symbols = set()
def _find_repeated(expr):
if not isinstance(expr, (Basic, Unevaluated)):
return
if isinstance(expr, RootOf):
return
if isinstance(expr, Basic) and (
expr.is_Atom or
expr.is_Order or
isinstance(expr, (MatrixSymbol, MatrixElement))):
if expr.is_Symbol:
excluded_symbols.add(expr.name)
return
if iterable(expr):
args = expr
else:
if expr in seen_subexp:
for ign in ignore:
if ign in expr.free_symbols:
break
else:
to_eliminate.add(expr)
return
seen_subexp.add(expr)
if expr in opt_subs:
expr = opt_subs[expr]
args = expr.args
list(map(_find_repeated, args))
for e in exprs:
if isinstance(e, Basic):
_find_repeated(e)
## Rebuild tree
# Remove symbols from the generator that conflict with names in the expressions.
symbols = (_ for _ in symbols if _.name not in excluded_symbols)
replacements = []
subs = {}
def _rebuild(expr):
if not isinstance(expr, (Basic, Unevaluated)):
return expr
if not expr.args:
return expr
if iterable(expr):
new_args = [_rebuild(arg) for arg in expr.args]
return expr.func(*new_args)
if expr in subs:
return subs[expr]
orig_expr = expr
if expr in opt_subs:
expr = opt_subs[expr]
# If enabled, parse Muls and Adds arguments by order to ensure
# replacement order independent from hashes
if order != 'none':
if isinstance(expr, (Mul, MatMul)):
c, nc = expr.args_cnc()
if c == [1]:
args = nc
else:
args = list(ordered(c)) + nc
elif isinstance(expr, (Add, MatAdd)):
args = list(ordered(expr.args))
else:
args = expr.args
else:
args = expr.args
new_args = list(map(_rebuild, args))
if isinstance(expr, Unevaluated) or new_args != args:
new_expr = expr.func(*new_args)
else:
new_expr = expr
if orig_expr in to_eliminate:
try:
sym = next(symbols)
except StopIteration:
raise ValueError("Symbols iterator ran out of symbols.")
if isinstance(orig_expr, MatrixExpr):
sym = MatrixSymbol(sym.name, orig_expr.rows,
orig_expr.cols)
subs[orig_expr] = sym
replacements.append((sym, new_expr))
return sym
else:
return new_expr
reduced_exprs = []
for e in exprs:
if isinstance(e, Basic):
reduced_e = _rebuild(e)
else:
reduced_e = e
reduced_exprs.append(reduced_e)
return replacements, reduced_exprs
def cse(exprs, symbols=None, optimizations=None, postprocess=None,
order='canonical', ignore=(), list=True):
""" Perform common subexpression elimination on an expression.
Parameters
==========
exprs : list of SymPy expressions, or a single SymPy expression
The expressions to reduce.
symbols : infinite iterator yielding unique Symbols
The symbols used to label the common subexpressions which are pulled
out. The ``numbered_symbols`` generator is useful. The default is a
stream of symbols of the form "x0", "x1", etc. This must be an
infinite iterator.
optimizations : list of (callable, callable) pairs
The (preprocessor, postprocessor) pairs of external optimization
functions. Optionally 'basic' can be passed for a set of predefined
basic optimizations. Such 'basic' optimizations were used by default
in old implementation, however they can be really slow on larger
expressions. Now, no pre or post optimizations are made by default.
postprocess : a function which accepts the two return values of cse and
returns the desired form of output from cse, e.g. if you want the
replacements reversed the function might be the following lambda:
lambda r, e: return reversed(r), e
order : string, 'none' or 'canonical'
The order by which Mul and Add arguments are processed. If set to
'canonical', arguments will be canonically ordered. If set to 'none',
ordering will be faster but dependent on expressions hashes, thus
machine dependent and variable. For large expressions where speed is a
concern, use the setting order='none'.
ignore : iterable of Symbols
Substitutions containing any Symbol from ``ignore`` will be ignored.
list : bool, (default True)
Returns expression in list or else with same type as input (when False).
Returns
=======
replacements : list of (Symbol, expression) pairs
All of the common subexpressions that were replaced. Subexpressions
earlier in this list might show up in subexpressions later in this
list.
reduced_exprs : list of SymPy expressions
The reduced expressions with all of the replacements above.
Examples
========
>>> from sympy import cse, SparseMatrix
>>> from sympy.abc import x, y, z, w
>>> cse(((w + x + y + z)*(w + y + z))/(w + x)**3)
([(x0, y + z), (x1, w + x)], [(w + x0)*(x0 + x1)/x1**3])
List of expressions with recursive substitutions:
>>> m = SparseMatrix([x + y, x + y + z])
>>> cse([(x+y)**2, x + y + z, y + z, x + z + y, m])
([(x0, x + y), (x1, x0 + z)], [x0**2, x1, y + z, x1, Matrix([
[x0],
[x1]])])
Note: the type and mutability of input matrices is retained.
>>> isinstance(_[1][-1], SparseMatrix)
True
The user may disallow substitutions containing certain symbols:
>>> cse([y**2*(x + 1), 3*y**2*(x + 1)], ignore=(y,))
([(x0, x + 1)], [x0*y**2, 3*x0*y**2])
The default return value for the reduced expression(s) is a list, even if there is only
one expression. The `list` flag preserves the type of the input in the output:
>>> cse(x)
([], [x])
>>> cse(x, list=False)
([], x)
"""
if not list:
return _cse_homogeneous(exprs,
symbols=symbols, optimizations=optimizations,
postprocess=postprocess, order=order, ignore=ignore)
if isinstance(exprs, (int, float)):
exprs = sympify(exprs)
# Handle the case if just one expression was passed.
if isinstance(exprs, (Basic, MatrixBase)):
exprs = [exprs]
copy = exprs
temp = []
for e in exprs:
if isinstance(e, (Matrix, ImmutableMatrix)):
temp.append(Tuple(*e.flat()))
elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)):
temp.append(Tuple(*e.todok().items()))
else:
temp.append(e)
exprs = temp
del temp
if optimizations is None:
optimizations = []
elif optimizations == 'basic':
optimizations = basic_optimizations
# Preprocess the expressions to give us better optimization opportunities.
reduced_exprs = [preprocess_for_cse(e, optimizations) for e in exprs]
if symbols is None:
symbols = numbered_symbols(cls=Symbol)
else:
# In case we get passed an iterable with an __iter__ method instead of
# an actual iterator.
symbols = iter(symbols)
# Find other optimization opportunities.
opt_subs = opt_cse(reduced_exprs, order)
# Main CSE algorithm.
replacements, reduced_exprs = tree_cse(reduced_exprs, symbols, opt_subs,
order, ignore)
# Postprocess the expressions to return the expressions to canonical form.
exprs = copy
replacements = [(sym, postprocess_for_cse(subtree, optimizations))
for sym, subtree in replacements]
reduced_exprs = [postprocess_for_cse(e, optimizations)
for e in reduced_exprs]
# Get the matrices back
for i, e in enumerate(exprs):
if isinstance(e, (Matrix, ImmutableMatrix)):
reduced_exprs[i] = Matrix(e.rows, e.cols, reduced_exprs[i])
if isinstance(e, ImmutableMatrix):
reduced_exprs[i] = reduced_exprs[i].as_immutable()
elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)):
m = SparseMatrix(e.rows, e.cols, {})
for k, v in reduced_exprs[i]:
m[k] = v
if isinstance(e, ImmutableSparseMatrix):
m = m.as_immutable()
reduced_exprs[i] = m
if postprocess is None:
return replacements, reduced_exprs
return postprocess(replacements, reduced_exprs)
def _cse_homogeneous(exprs, **kwargs):
"""
Same as ``cse`` but the ``reduced_exprs`` are returned
with the same type as ``exprs`` or a sympified version of the same.
Parameters
==========
exprs : an Expr, iterable of Expr or dictionary with Expr values
the expressions in which repeated subexpressions will be identified
kwargs : additional arguments for the ``cse`` function
Returns
=======
replacements : list of (Symbol, expression) pairs
All of the common subexpressions that were replaced. Subexpressions
earlier in this list might show up in subexpressions later in this
list.
reduced_exprs : list of SymPy expressions
The reduced expressions with all of the replacements above.
Examples
========
>>> from sympy.simplify.cse_main import cse
>>> from sympy import cos, Tuple, Matrix
>>> from sympy.abc import x
>>> output = lambda x: type(cse(x, list=False)[1])
>>> output(1)
<class 'sympy.core.numbers.One'>
>>> output('cos(x)')
<class 'str'>
>>> output(cos(x))
cos
>>> output(Tuple(1, x))
<class 'sympy.core.containers.Tuple'>
>>> output(Matrix([[1,0], [0,1]]))
<class 'sympy.matrices.dense.MutableDenseMatrix'>
>>> output([1, x])
<class 'list'>
>>> output((1, x))
<class 'tuple'>
>>> output({1, x})
<class 'set'>
"""
if isinstance(exprs, str):
replacements, reduced_exprs = _cse_homogeneous(
sympify(exprs), **kwargs)
return replacements, repr(reduced_exprs)
if isinstance(exprs, (list, tuple, set)):
replacements, reduced_exprs = cse(exprs, **kwargs)
return replacements, type(exprs)(reduced_exprs)
if isinstance(exprs, dict):
keys = list(exprs.keys()) # In order to guarantee the order of the elements.
replacements, values = cse([exprs[k] for k in keys], **kwargs)
reduced_exprs = dict(zip(keys, values))
return replacements, reduced_exprs
try:
replacements, (reduced_exprs,) = cse(exprs, **kwargs)
except TypeError: # For example 'mpf' objects
return [], exprs
else:
return replacements, reduced_exprs
| Unevaluated |
python | django__django | tests/queries/models.py | {
"start": 13168,
"end": 13374
} | class ____(models.Model):
a = models.ForeignKey(FK1, models.SET_NULL, null=True)
b = models.ForeignKey(FK2, models.SET_NULL, null=True)
c = models.ForeignKey(FK3, models.SET_NULL, null=True)
| BaseA |
python | sphinx-doc__sphinx | sphinx/transforms/post_transforms/__init__.py | {
"start": 11256,
"end": 11743
} | class ____(SphinxPostTransform):
default_priority = 50
def run(self, **kwargs: Any) -> None:
# A comment on the comment() nodes being inserted: replacing by [] would
# result in a "Losing ids" exception if there is a target node before
# the only node, so we make sure docutils can transfer the id to
# something, even if it's just a comment and will lose the id anyway...
process_only_nodes(self.document, self.env._tags)
| OnlyNodeTransform |
python | kamyu104__LeetCode-Solutions | Python/count-anagrams.py | {
"start": 66,
"end": 1316
} | class ____(object):
def countAnagrams(self, s):
"""
:type s: str
:rtype: int
"""
MOD = 10**9+7
fact, inv, inv_fact = [[1]*2 for _ in xrange(3)]
def lazy_init(n):
while len(inv) <= n: # lazy initialization
fact.append(fact[-1]*len(inv) % MOD)
inv.append(inv[MOD%len(inv)]*(MOD-MOD//len(inv)) % MOD) # https://cp-algorithms.com/algebra/module-inverse.html
inv_fact.append(inv_fact[-1]*inv[-1] % MOD)
def factorial(n):
lazy_init(n)
return fact[n]
def inv_factorial(n):
lazy_init(n)
return inv_fact[n]
def count(j, i):
result = 1
cnt = collections.Counter()
for k in xrange(j, i+1):
cnt[s[k]] += 1
result = factorial(sum(cnt.itervalues()))
for c in cnt.itervalues():
result = (result*inv_factorial(c))%MOD
return result
result = 1
j = 0
for i in xrange(len(s)):
if i+1 != len(s) and s[i+1] != ' ':
continue
result = (result*count(j, i))%MOD
j = i+2
return result
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-instagram/components.py | {
"start": 14251,
"end": 15264
} | class ____(TypeTransformer):
"""
The Instagram API returns dates in the format 2025-01-01T07:00:00+0000, but the existing implementation
normalized dates to the RFC 3339 format 2025-01-01T07:00:00+00:00.
"""
def __init__(self, *args, **kwargs):
config = TransformConfig.CustomSchemaNormalization
super().__init__(config)
self.registerCustomTransform(self.get_transform_function())
def get_transform_function(self):
def transform_function(original_value: str, field_schema: Dict[str, Any]) -> Any:
target_format = field_schema.get("format")
target_airbyte_type = field_schema.get("airbyte_type")
if original_value and target_format == "date-time" and target_airbyte_type == "timestamp_with_timezone":
ab_datetime = ab_datetime_parse(original_value)
return ab_datetime_format(ab_datetime)
return original_value
return transform_function
| RFC3339DatetimeSchemaNormalization |
python | huggingface__transformers | src/transformers/models/esm/modeling_esmfold.py | {
"start": 10665,
"end": 15822
} | class ____(nn.Module):
"""
Standard multi-head attention using AlphaFold's default layer initialization. Allows multiple bias vectors.
"""
def __init__(
self,
c_q: int,
c_k: int,
c_v: int,
c_hidden: int,
no_heads: int,
gating: bool = True,
):
"""
Args:
c_q:
Input dimension of query data
c_k:
Input dimension of key data
c_v:
Input dimension of value data
c_hidden:
Per-head hidden dimension
no_heads:
Number of attention heads
gating:
Whether the output should be gated using query data
"""
super().__init__()
self.c_q = c_q
self.c_k = c_k
self.c_v = c_v
self.c_hidden = c_hidden
self.no_heads = no_heads
self.gating = gating
# DISCREPANCY: c_hidden is not the per-head channel dimension, as
# stated in the supplement, but the overall channel dimension.
self.linear_q = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, bias=False, init="glorot")
self.linear_k = EsmFoldLinear(self.c_k, self.c_hidden * self.no_heads, bias=False, init="glorot")
self.linear_v = EsmFoldLinear(self.c_v, self.c_hidden * self.no_heads, bias=False, init="glorot")
self.linear_o = EsmFoldLinear(self.c_hidden * self.no_heads, self.c_q, init="final")
self.linear_g = None
if self.gating:
self.linear_g = EsmFoldLinear(self.c_q, self.c_hidden * self.no_heads, init="gating")
self.sigmoid = nn.Sigmoid()
def _prep_qkv(self, q_x: torch.Tensor, kv_x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
# [*, Q/K/V, H * C_hidden]
q = self.linear_q(q_x)
k = self.linear_k(kv_x)
v = self.linear_v(kv_x)
# [*, Q/K, H, C_hidden]
q = q.view(q.shape[:-1] + (self.no_heads, -1))
k = k.view(k.shape[:-1] + (self.no_heads, -1))
v = v.view(v.shape[:-1] + (self.no_heads, -1))
# [*, H, Q/K, C_hidden]
q = q.transpose(-2, -3)
k = k.transpose(-2, -3)
v = v.transpose(-2, -3)
q /= math.sqrt(self.c_hidden)
return q, k, v
def _wrap_up(self, o: torch.Tensor, q_x: torch.Tensor) -> torch.Tensor:
if self.linear_g is not None:
g = self.sigmoid(self.linear_g(q_x))
# [*, Q, H, C_hidden]
g = g.view(g.shape[:-1] + (self.no_heads, -1))
o = o * g
# [*, Q, H * C_hidden]
o = flatten_final_dims(o, 2)
# [*, Q, C_q]
o = self.linear_o(o)
return o
def forward(
self,
q_x: torch.Tensor,
kv_x: torch.Tensor,
biases: Optional[list[torch.Tensor]] = None,
use_memory_efficient_kernel: bool = False,
use_lma: bool = False,
lma_q_chunk_size: int = 1024,
lma_kv_chunk_size: int = 4096,
use_flash: bool = False,
flash_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Args:
q_x:
[*, Q, C_q] query data
kv_x:
[*, K, C_k] key data
biases:
List of biases that broadcast to [*, H, Q, K]
use_memory_efficient_kernel:
Whether to use a custom memory-efficient attention kernel. This should be the default choice for most.
If none of the "use_<...>" flags are True, a stock PyTorch implementation is used instead
use_lma:
Whether to use low-memory attention (Staats & Rabe 2021). If none of the "use_<...>" flags are True, a
stock PyTorch implementation is used instead
lma_q_chunk_size:
Query chunk size (for LMA)
lma_kv_chunk_size:
Key/Value chunk size (for LMA)
Returns
[*, Q, C_q] attention update
"""
if use_lma and (lma_q_chunk_size is None or lma_kv_chunk_size is None):
raise ValueError("If use_lma is specified, lma_q_chunk_size and lma_kv_chunk_size must be provided")
if use_flash and biases is not None:
raise ValueError("use_flash is incompatible with the bias option. For masking, use flash_mask instead")
attn_options = [use_memory_efficient_kernel, use_lma, use_flash]
if sum(attn_options) > 1:
raise ValueError("Choose at most one alternative attention algorithm")
if biases is None:
biases = []
# [*, H, Q/K, C_hidden]
query, key, value = self._prep_qkv(q_x, kv_x)
key = permute_final_dims(key, (1, 0))
# [*, H, Q, K]
output = torch.matmul(query, key)
for b in biases:
output += b
output = softmax_no_cast(output, -1)
# [*, H, Q, C_hidden]
output = torch.matmul(output, value)
output = output.transpose(-2, -3)
output = self._wrap_up(output, q_x)
return output
| EsmFoldAttention |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/fixtures/sql.py | {
"start": 14148,
"end": 16171
} | class ____(CacheKeyFixture):
@classmethod
def run_suite_tests(cls, fn):
def decorate(self):
self._run_cache_key_fixture(fn(self), compare_values=False)
self._run_compare_fixture(fn(self), compare_values=False)
decorate.__name__ = fn.__name__
return decorate
def insertmanyvalues_fixture(
connection, randomize_rows=False, warn_on_downgraded=False
):
dialect = connection.dialect
orig_dialect = dialect._deliver_insertmanyvalues_batches
orig_conn = connection._exec_insertmany_context
class RandomCursor:
__slots__ = ("cursor",)
def __init__(self, cursor):
self.cursor = cursor
# only this method is called by the deliver method.
# by not having the other methods we assert that those aren't being
# used
@property
def description(self):
return self.cursor.description
def fetchall(self):
rows = self.cursor.fetchall()
rows = list(rows)
random.shuffle(rows)
return rows
def _deliver_insertmanyvalues_batches(
connection,
cursor,
statement,
parameters,
generic_setinputsizes,
context,
):
if randomize_rows:
cursor = RandomCursor(cursor)
for batch in orig_dialect(
connection,
cursor,
statement,
parameters,
generic_setinputsizes,
context,
):
if warn_on_downgraded and batch.is_downgraded:
util.warn("Batches were downgraded for sorted INSERT")
yield batch
def _exec_insertmany_context(dialect, context):
with mock.patch.object(
dialect,
"_deliver_insertmanyvalues_batches",
new=_deliver_insertmanyvalues_batches,
):
return orig_conn(dialect, context)
connection._exec_insertmany_context = _exec_insertmany_context
| CacheKeySuite |
python | getsentry__sentry | tests/sentry/notifications/platform/slack/test_provider.py | {
"start": 2167,
"end": 3181
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.integration, self.org_integration = self.create_provider_integration_for(
provider=IntegrationProviderSlug.SLACK,
organization=self.organization,
user=self.user,
name="test-slack",
metadata={"domain_name": "test-workspace.slack.com"},
)
def test_basic_fields(self) -> None:
provider = SlackNotificationProvider()
assert provider.key == NotificationProviderKey.SLACK
assert provider.target_class == IntegrationNotificationTarget
assert provider.target_resource_types == [
NotificationTargetResourceType.CHANNEL,
NotificationTargetResourceType.DIRECT_MESSAGE,
]
def test_is_available(self) -> None:
assert SlackNotificationProvider.is_available() is False
assert SlackNotificationProvider.is_available(organization=self.organization) is False
| SlackNotificationProviderTest |
python | huggingface__transformers | src/transformers/models/sew_d/modeling_sew_d.py | {
"start": 38995,
"end": 43871
} | class ____(nn.Module):
"""Modified BertEncoder with relative position bias support"""
def __init__(self, config):
super().__init__()
self.layer = nn.ModuleList([SEWDLayer(config) for _ in range(config.num_hidden_layers)])
self.relative_attention = getattr(config, "relative_attention", False)
if self.relative_attention:
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.position_buckets = getattr(config, "position_buckets", -1)
pos_ebd_size = self.max_relative_positions * 2
if self.position_buckets > 0:
pos_ebd_size = self.position_buckets * 2
self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)
self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
if "layer_norm" in self.norm_rel_ebd:
self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)
self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None
self.gradient_checkpointing = False
def get_rel_embedding(self):
rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
rel_embeddings = self.LayerNorm(rel_embeddings)
return rel_embeddings
def get_attention_mask(self, attention_mask):
if attention_mask.dim() <= 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
elif attention_mask.dim() == 3:
attention_mask = attention_mask.unsqueeze(1)
return attention_mask
def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
if self.relative_attention and relative_pos is None:
q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
relative_pos = build_relative_position(
q,
hidden_states.size(-2),
bucket_size=self.position_buckets,
max_position=self.max_relative_positions,
device=hidden_states.device,
)
return relative_pos
def forward(
self,
hidden_states,
attention_mask,
output_hidden_states=True,
output_attentions=False,
query_states=None,
relative_pos=None,
return_dict=True,
):
if attention_mask.dim() <= 2:
input_mask = attention_mask
else:
input_mask = attention_mask.sum(-2) > 0
attention_mask = self.get_attention_mask(attention_mask)
relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if isinstance(hidden_states, Sequence):
next_kv = hidden_states[0]
else:
next_kv = hidden_states
rel_embeddings = self.get_rel_embedding()
output_states = next_kv
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (output_states,)
output_states = layer_module(
next_kv,
attention_mask,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
output_attentions=output_attentions,
)
if output_attentions:
output_states, att_m = output_states
if i == 0 and self.conv is not None:
output_states = self.conv(hidden_states, output_states, input_mask)
if query_states is not None:
query_states = output_states
if isinstance(hidden_states, Sequence):
next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
else:
next_kv = output_states
if output_attentions:
all_attentions = all_attentions + (att_m,)
if output_hidden_states:
all_hidden_states = all_hidden_states + (output_states,)
if not return_dict:
return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
)
| SEWDTransformerEncoder |
python | getsentry__sentry | src/sentry/auth/providers/saml2/provider.py | {
"start": 12425,
"end": 12781
} | class ____(TypedDict):
authnRequestsSigned: bool
logoutRequestSigned: bool
logoutResponseSigned: bool
signMetadata: bool
wantMessagesSigned: bool
wantAssertionsSigned: bool
wantAssertionsEncrypted: bool
signatureAlgorithm: bool
digestAlgorithm: bool
wantNameId: bool
requestedAuthnContext: bool
| _SamlConfigSecurity |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 47478,
"end": 48597
} | class ____(ASTBaseParenExprList):
def __init__(self, exprs: list[ASTExpression]) -> None:
self.exprs = exprs
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTParenExprList):
return NotImplemented
return self.exprs == other.exprs
def __hash__(self) -> int:
return hash(self.exprs)
def _stringify(self, transform: StringifyTransform) -> str:
exprs = [transform(e) for e in self.exprs]
return '(%s)' % ', '.join(exprs)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('(', '(')
first = True
for e in self.exprs:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
else:
first = False
e.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_punctuation(')', ')')
| ASTParenExprList |
python | dagster-io__dagster | python_modules/dagster/dagster/components/lib/shim_components/asset_check.py | {
"start": 458,
"end": 1427
} | class ____(ShimScaffolder[AssetCheckScaffoldParams]):
@classmethod
def get_scaffold_params(cls) -> type[AssetCheckScaffoldParams]:
return AssetCheckScaffoldParams
def get_text(self, request: ScaffoldRequest[AssetCheckScaffoldParams]) -> str:
"""Get the text for an asset check.
Args:
request: The scaffold request containing type name, target path, format, project root and optional params
Returns:
The text for the asset check
"""
assert request.params
asset_key = AssetKey.from_user_string(request.params.asset_key)
return textwrap.dedent(
f"""\
import dagster as dg
@dg.asset_check(asset=dg.AssetKey({asset_key.path!s}))
def {request.target_path.stem}(context: dg.AssetCheckExecutionContext) -> dg.AssetCheckResult: ...
"""
)
scaffold_with(AssetCheckScaffolder)(asset_check)
| AssetCheckScaffolder |
python | neetcode-gh__leetcode | python/0006-zigzag-conversion.py | {
"start": 0,
"end": 420
} | class ____:
def convert(self, s: str, numRows: int) -> str:
if numRows == 1 or numRows >= len(s):
return s
res = [""] * numRows
index = 0
step = 1
for c in s:
res[index] += c
if index == 0:
step = 1
elif index == numRows - 1:
step = -1
index += step
return "".join(res) | Solution |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-google/llama_index/tools/google/calendar/base.py | {
"start": 847,
"end": 7604
} | class ____(BaseToolSpec):
"""
Google Calendar tool spec.
Currently a simple wrapper around the data loader.
TODO: add more methods to the Google Calendar spec.
"""
spec_functions = ["load_data", "create_event", "get_date"]
def __init__(self, creds: Optional[Any] = None):
"""
Initialize the GoogleCalendarToolSpec.
Args:
creds (Optional[Any]): Pre-configured credentials to use for authentication.
If provided, these will be used instead of the OAuth flow.
"""
self.creds = creds
def load_data(
self,
number_of_results: Optional[int] = 100,
start_date: Optional[Union[str, datetime.date]] = None,
) -> List[Document]:
"""
Load data from user's calendar.
Args:
number_of_results (Optional[int]): the number of events to return. Defaults to 100.
start_date (Optional[Union[str, datetime.date]]): the start date to return events from in date isoformat. Defaults to today.
"""
from googleapiclient.discovery import build
credentials = self._get_credentials()
service = build("calendar", "v3", credentials=credentials)
if start_date is None:
start_date = datetime.date.today()
elif isinstance(start_date, str):
start_date = datetime.date.fromisoformat(start_date)
start_datetime = datetime.datetime.combine(start_date, datetime.time.min)
start_datetime_utc = start_datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
events_result = (
service.events()
.list(
calendarId="primary",
timeMin=start_datetime_utc,
maxResults=number_of_results,
singleEvents=True,
orderBy="startTime",
)
.execute()
)
events = events_result.get("items", [])
if not events:
return []
results = []
for event in events:
if "dateTime" in event["start"]:
start_time = event["start"]["dateTime"]
else:
start_time = event["start"]["date"]
if "dateTime" in event["end"]:
end_time = event["end"]["dateTime"]
else:
end_time = event["end"]["date"]
event_string = f"Status: {event['status']}, "
event_string += f"Summary: {event['summary']}, "
event_string += f"Start time: {start_time}, "
event_string += f"End time: {end_time}, "
organizer = event.get("organizer", {})
display_name = organizer.get("displayName", "N/A")
email = organizer.get("email", "N/A")
if display_name != "N/A":
event_string += f"Organizer: {display_name} ({email})"
else:
event_string += f"Organizer: {email}"
results.append(Document(text=event_string))
return results
def _get_credentials(self) -> Any:
"""
Get valid user credentials from storage.
The file token.json stores the user's access and refresh tokens, and is
created automatically when the authorization flow completes for the first
time.
Returns:
Credentials, the obtained credential.
"""
if self.creds is not None:
return self.creds
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
creds = None
if os.path.exists("token.json"):
creds = Credentials.from_authorized_user_file("token.json", SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
"credentials.json", SCOPES
)
creds = flow.run_local_server(port=8080)
# Save the credentials for the next run
with open("token.json", "w") as token:
token.write(creds.to_json())
return creds
def create_event(
self,
title: Optional[str] = None,
description: Optional[str] = None,
location: Optional[str] = None,
start_datetime: Optional[Union[str, datetime.datetime]] = None,
end_datetime: Optional[Union[str, datetime.datetime]] = None,
attendees: Optional[List[str]] = None,
) -> str:
"""
Create an event on the users calendar.
Args:
title (Optional[str]): The title for the event
description (Optional[str]): The description for the event
location (Optional[str]): The location for the event
start_datetime Optional[Union[str, datetime.datetime]]: The start datetime for the event
end_datetime Optional[Union[str, datetime.datetime]]: The end datetime for the event
attendees Optional[List[str]]: A list of email address to invite to the event
"""
from googleapiclient.discovery import build
credentials = self._get_credentials()
service = build("calendar", "v3", credentials=credentials)
attendees_list = (
[{"email": attendee} for attendee in attendees] if attendees else []
)
start_time = (
datetime.datetime.strptime(start_datetime, "%Y-%m-%dT%H:%M:%S%z")
.astimezone()
.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
)
end_time = (
datetime.datetime.strptime(end_datetime, "%Y-%m-%dT%H:%M:%S%z")
.astimezone()
.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
)
event = {
"summary": title,
"location": location,
"description": description,
"start": {
"dateTime": start_time,
},
"end": {
"dateTime": end_time,
},
"attendees": attendees_list,
}
event = service.events().insert(calendarId="primary", body=event).execute()
return (
"Your calendar event has been created successfully! You can move on to the"
" next step."
)
def get_date(self):
"""
A function to return todays date. Call this before any other functions if you are unaware of the date.
"""
return datetime.date.today()
| GoogleCalendarToolSpec |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 243807,
"end": 243930
} | class ____(Structure):
pass # opaque handle
c_nvmlGpmSample_t = POINTER(struct_c_nvmlGpmSample_t)
| struct_c_nvmlGpmSample_t |
python | django__django | tests/invalid_models_tests/test_models.py | {
"start": 23777,
"end": 32183
} | class ____(TestCase):
databases = {"default", "other"}
def test_ending_with_underscore(self):
class Model(models.Model):
field_ = models.CharField(max_length=10)
m2m_ = models.ManyToManyField("self")
self.assertEqual(
Model.check(),
[
Error(
"Field names must not end with an underscore.",
obj=Model._meta.get_field("field_"),
id="fields.E001",
),
Error(
"Field names must not end with an underscore.",
obj=Model._meta.get_field("m2m_"),
id="fields.E001",
),
],
)
max_column_name_length, column_limit_db_alias = get_max_column_name_length()
@unittest.skipIf(
max_column_name_length is None,
"The database doesn't have a column name length limit.",
)
def test_M2M_long_column_name(self):
"""
#13711 -- Model check for long M2M column names when database has
column name length limits.
"""
# A model with very long name which will be used to set relations to.
class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(
models.Model
):
title = models.CharField(max_length=11)
# Main model for which checks will be performed.
class ModelWithLongField(models.Model):
m2m_field = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name="rn1",
)
m2m_field2 = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name="rn2",
through="m2msimple",
)
m2m_field3 = models.ManyToManyField(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
related_name="rn3",
through="m2mcomplex",
)
fk = models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
related_name="rn4",
)
# Models used for setting `through` in M2M field.
class m2msimple(models.Model):
id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)
class m2mcomplex(models.Model):
id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)
long_field_name = "a" * (self.max_column_name_length + 1)
models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
).contribute_to_class(m2msimple, long_field_name)
models.ForeignKey(
VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
models.CASCADE,
db_column=long_field_name,
).contribute_to_class(m2mcomplex, long_field_name)
errors = ModelWithLongField.check(databases=("default", "other"))
# First error because of M2M field set on the model with long name.
m2m_long_name = (
"verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id"
)
if self.max_column_name_length > len(m2m_long_name):
# Some databases support names longer than the test name.
expected = []
else:
expected = [
Error(
'Autogenerated column name too long for M2M field "%s". '
'Maximum length is "%s" for database "%s".'
% (
m2m_long_name,
self.max_column_name_length,
self.column_limit_db_alias,
),
hint="Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'.",
obj=ModelWithLongField,
id="models.E019",
)
]
# Second error because the FK specified in the `through` model
# `m2msimple` has auto-generated name longer than allowed.
# There will be no check errors in the other M2M because it
# specifies db_column for the FK in `through` model even if the actual
# name is longer than the limits of the database.
expected.append(
Error(
'Autogenerated column name too long for M2M field "%s_id". '
'Maximum length is "%s" for database "%s".'
% (
long_field_name,
self.max_column_name_length,
self.column_limit_db_alias,
),
hint="Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'.",
obj=ModelWithLongField,
id="models.E019",
)
)
self.assertEqual(errors, expected)
# Check for long column names is called only for specified database
# aliases.
self.assertEqual(ModelWithLongField.check(databases=None), [])
@unittest.skipIf(
max_column_name_length is None,
"The database doesn't have a column name length limit.",
)
def test_local_field_long_column_name(self):
"""
#13711 -- Model check for long column names
when database does not support long names.
"""
class ModelWithLongField(models.Model):
title = models.CharField(max_length=11)
long_field_name = "a" * (self.max_column_name_length + 1)
long_field_name2 = "b" * (self.max_column_name_length + 1)
models.CharField(max_length=11).contribute_to_class(
ModelWithLongField, long_field_name
)
models.CharField(max_length=11, db_column="vlmn").contribute_to_class(
ModelWithLongField, long_field_name2
)
self.assertEqual(
ModelWithLongField.check(databases=("default", "other")),
[
Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (
long_field_name,
self.max_column_name_length,
self.column_limit_db_alias,
),
hint="Set the column name manually using 'db_column'.",
obj=ModelWithLongField,
id="models.E018",
)
],
)
# Check for long column names is called only for specified database
# aliases.
self.assertEqual(ModelWithLongField.check(databases=None), [])
def test_including_separator(self):
class Model(models.Model):
some__field = models.IntegerField()
self.assertEqual(
Model.check(),
[
Error(
'Field names must not contain "__".',
obj=Model._meta.get_field("some__field"),
id="fields.E002",
)
],
)
def test_pk(self):
class Model(models.Model):
pk = models.IntegerField()
self.assertEqual(
Model.check(),
[
Error(
"'pk' is a reserved word that cannot be used as a field name.",
obj=Model._meta.get_field("pk"),
id="fields.E003",
)
],
)
def test_db_column_clash(self):
class Model(models.Model):
foo = models.IntegerField()
bar = models.IntegerField(db_column="foo")
self.assertEqual(
Model.check(),
[
Error(
"Field 'bar' has column name 'foo' that is used by "
"another field.",
hint="Specify a 'db_column' for the field.",
obj=Model,
id="models.E007",
)
],
)
@isolate_apps("invalid_models_tests")
| FieldNamesTests |
python | keras-team__keras | keras/src/layers/preprocessing/text_vectorization.py | {
"start": 561,
"end": 28161
} | class ____(Layer):
"""A preprocessing layer which maps text features to integer sequences.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one example = one string) into either a list
of token indices (one example = 1D tensor of integer token indices) or a
dense representation (one example = 1D tensor of float values representing
data about the example's tokens). This layer is meant to handle natural
language inputs. To handle simple string inputs (categorical strings or
pre-tokenized strings) see `kers_core.layers.StringLookup`.
The vocabulary for the layer must be either supplied on construction or
learned via `adapt()`. When this layer is adapted, it will analyze the
dataset, determine the frequency of individual string values, and create a
vocabulary from them. This vocabulary can have unlimited size or be capped,
depending on the configuration options for this layer; if there are more
unique values in the input than the maximum vocabulary size, the most
frequent terms will be used to create the vocabulary.
The processing of each example contains the following steps:
1. Standardize each example (usually lowercasing + punctuation stripping)
2. Split each example into substrings (usually words)
3. Recombine substrings into tokens (usually ngrams)
4. Index tokens (associate a unique int value with each token)
5. Transform each example using this index, either into a vector of ints or
a dense float vector.
Some notes on passing callables to customize splitting and normalization for
this layer:
1. Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `keras.saving.register_keras_serializable`
for more details).
2. When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3. When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`.
The callable should return a `tf.Tensor` of dtype `string`
with the first dimension containing the split tokens -
in this example, we should see something like `[["string", "to",
"split"], ["another", "string", "to", "split"]]`.
**Note:** This layer uses TensorFlow internally. It cannot
be used as part of the compiled computation graph of a model with
any backend other than TensorFlow.
It can however be used with any backend when running eagerly.
It can also always be used as part of an input preprocessing pipeline
with any backend (outside the model itself), which is how we recommend
to use this layer.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
max_tokens: Maximum size of the vocabulary for this layer. This should
only be specified when adapting a vocabulary or when setting
`pad_to_max_tokens=True`. Note that this vocabulary
contains 1 OOV token, so the effective number of tokens is
`(max_tokens - 1 - (1 if output_mode == "int" else 0))`.
standardize: Optional specification for standardization to apply to the
input text. Values can be:
- `None`: No standardization.
- `"lower_and_strip_punctuation"`: Text will be lowercased and all
punctuation removed.
- `"lower"`: Text will be lowercased.
- `"strip_punctuation"`: All punctuation will be removed.
- Callable: Inputs will passed to the callable function,
which should be standardized and returned.
split: Optional specification for splitting the input text.
Values can be:
- `None`: No splitting.
- `"whitespace"`: Split on whitespace.
- `"character"`: Split on each unicode character.
- Callable: Standardized inputs will passed to the callable
function, which should be split and returned.
ngrams: Optional specification for ngrams to create from the
possibly-split input text. Values can be `None`, an integer
or tuple of integers; passing an integer will create ngrams
up to that integer, and passing a tuple of integers will
create ngrams for the specified values in the tuple.
Passing `None` means that no ngrams will be created.
output_mode: Optional specification for the output of the layer.
Values can be `"int"`, `"multi_hot"`, `"count"` or `"tf_idf"`,
configuring the layer as follows:
- `"int"`: Outputs integer indices, one integer index per split
string token. When `output_mode == "int"`,
0 is reserved for masked locations;
this reduces the vocab size to `max_tokens - 2`
instead of `max_tokens - 1`.
- `"multi_hot"`: Outputs a single int array per batch, of either
vocab_size or max_tokens size, containing 1s in all elements
where the token mapped to that index exists at least
once in the batch item.
- `"count"`: Like `"multi_hot"`, but the int array contains
a count of the number of times the token at that index
appeared in the batch item.
- `"tf_idf"`: Like `"multi_hot"`, but the TF-IDF algorithm
is applied to find the value in each token slot.
For `"int"` output, any shape of input and output is supported.
For all other output modes, currently only rank 1 inputs
(and rank 2 outputs after splitting) are supported.
output_sequence_length: Only valid in INT mode. If set, the output will
have its time dimension padded or truncated to exactly
`output_sequence_length` values, resulting in a tensor of shape
`(batch_size, output_sequence_length)` regardless of how many tokens
resulted from the splitting step. Defaults to `None`. If `ragged`
is `True` then `output_sequence_length` may still truncate the
output.
pad_to_max_tokens: Only valid in `"multi_hot"`, `"count"`,
and `"tf_idf"` modes. If `True`, the output will have
its feature axis padded to `max_tokens` even if the number
of unique tokens in the vocabulary is less than `max_tokens`,
resulting in a tensor of shape `(batch_size, max_tokens)`
regardless of vocabulary size. Defaults to `False`.
vocabulary: Optional. Either an array of strings or a string path to a
text file. If passing an array, can pass a tuple, list,
1D NumPy array, or 1D tensor containing the string vocabulary terms.
If passing a file path, the file should contain one line per term
in the vocabulary. If this argument is set,
there is no need to `adapt()` the layer.
idf_weights: Only valid when `output_mode` is `"tf_idf"`. A tuple, list,
1D NumPy array, or 1D tensor of the same length as the vocabulary,
containing the floating point inverse document frequency weights,
which will be multiplied by per sample term counts for
the final `tf_idf` weight. If the `vocabulary` argument is set,
and `output_mode` is `"tf_idf"`, this argument must be supplied.
ragged: Boolean. Only applicable to `"int"` output mode.
Only supported with TensorFlow backend.
If `True`, returns a `RaggedTensor` instead of a dense `Tensor`,
where each sequence may have a different length
after string splitting. Defaults to `False`.
sparse: Boolean. Only applicable to `"multi_hot"`, `"count"`, and
`"tf_idf"` output modes. Only supported with TensorFlow
backend. If `True`, returns a `SparseTensor`
instead of a dense `Tensor`. Defaults to `False`.
encoding: Optional. The text encoding to use to interpret the input
strings. Defaults to `"utf-8"`.
Examples:
This example instantiates a `TextVectorization` layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
>>> max_tokens = 5000 # Maximum vocab size.
>>> max_len = 4 # Sequence length to pad the outputs to.
>>> # Create the layer.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_tokens,
... output_mode='int',
... output_sequence_length=max_len)
>>> # Now that the vocab layer has been created, call `adapt` on the
>>> # list of strings to create the vocabulary.
>>> vectorize_layer.adapt(["foo bar", "bar baz", "baz bada boom"])
>>> # Now, the layer can map strings to integers -- you can use an
>>> # embedding layer to map these integers to learned embeddings.
>>> input_data = [["foo qux bar"], ["qux baz"]]
>>> vectorize_layer(input_data)
array([[4, 1, 3, 0],
[1, 2, 0, 0]])
This example instantiates a `TextVectorization` layer by passing a list
of vocabulary terms to the layer's `__init__()` method.
>>> vocab_data = ["earth", "wind", "and", "fire"]
>>> max_len = 4 # Sequence length to pad the outputs to.
>>> # Create the layer, passing the vocab directly. You can also pass the
>>> # vocabulary arg a path to a file containing one vocabulary word per
>>> # line.
>>> vectorize_layer = keras.layers.TextVectorization(
... max_tokens=max_tokens,
... output_mode='int',
... output_sequence_length=max_len,
... vocabulary=vocab_data)
>>> # Because we've passed the vocabulary directly, we don't need to adapt
>>> # the layer - the vocabulary is already set. The vocabulary contains the
>>> # padding token ('') and OOV token ('[UNK]')
>>> # as well as the passed tokens.
>>> vectorize_layer.get_vocabulary()
['', '[UNK]', 'earth', 'wind', 'and', 'fire']
"""
def __init__(
self,
max_tokens=None,
standardize="lower_and_strip_punctuation",
split="whitespace",
ngrams=None,
output_mode="int",
output_sequence_length=None,
pad_to_max_tokens=False,
vocabulary=None,
idf_weights=None,
sparse=False,
ragged=False,
encoding="utf-8",
name=None,
**kwargs,
):
if not tf.available:
raise ImportError(
"Layer TextVectorization requires TensorFlow. "
"Install it via `pip install tensorflow`."
)
if sparse and backend.backend() != "tensorflow":
raise ValueError(
"`sparse=True` can only be used with the TensorFlow backend."
)
if ragged and backend.backend() != "tensorflow":
raise ValueError(
"`ragged=True` can only be used with the TensorFlow backend."
)
# 'standardize' must be one of
# (None, "lower_and_strip_punctuation", "lower", "strip_punctuation",
# callable)
argument_validation.validate_string_arg(
standardize,
allowable_strings=(
"lower_and_strip_punctuation",
"lower",
"strip_punctuation",
),
caller_name=self.__class__.__name__,
arg_name="standardize",
allow_none=True,
allow_callables=True,
)
# 'split' must be one of (None, "whitespace", "character", callable)
argument_validation.validate_string_arg(
split,
allowable_strings=("whitespace", "character"),
caller_name=self.__class__.__name__,
arg_name="split",
allow_none=True,
allow_callables=True,
)
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = "multi_hot"
if output_mode == "tf-idf":
output_mode = "tf_idf"
argument_validation.validate_string_arg(
output_mode,
allowable_strings=(
"int",
"one_hot",
"multi_hot",
"count",
"tf_idf",
),
caller_name=self.__class__.__name__,
arg_name="output_mode",
)
# 'ngrams' must be one of (None, int, tuple(int))
if not (
ngrams is None
or isinstance(ngrams, int)
or isinstance(ngrams, tuple)
and all(isinstance(item, int) for item in ngrams)
):
raise ValueError(
"`ngrams` must be None, an integer, or a tuple of "
f"integers. Received: ngrams={ngrams}"
)
# 'output_sequence_length' must be one of (None, int) and is only
# set if output_mode is "int"".
if output_mode == "int" and not (
isinstance(output_sequence_length, int)
or (output_sequence_length is None)
):
raise ValueError(
"`output_sequence_length` must be either None or an "
"integer when `output_mode` is 'int'. Received: "
f"output_sequence_length={output_sequence_length}"
)
if output_mode != "int" and output_sequence_length is not None:
raise ValueError(
"`output_sequence_length` must not be set if `output_mode` is "
"not 'int'. "
f"Received output_sequence_length={output_sequence_length}."
)
if ragged and output_mode != "int":
raise ValueError(
"`ragged` must not be true if `output_mode` is "
f"`'int'`. Received: ragged={ragged} and "
f"output_mode={output_mode}"
)
self._max_tokens = max_tokens
self._standardize = standardize
self._split = split
self._ngrams_arg = ngrams
if isinstance(ngrams, int):
self._ngrams = tuple(range(1, ngrams + 1))
else:
self._ngrams = ngrams
self._ragged = ragged
self._output_mode = output_mode
self._output_sequence_length = output_sequence_length
self._encoding = encoding
# We save this hidden option to persist the fact
# that we have a non-adaptable layer with a
# manually set vocab.
self._has_input_vocabulary = kwargs.pop(
"has_input_vocabulary", (vocabulary is not None)
)
vocabulary_size = kwargs.pop("vocabulary_size", None)
super().__init__(name=name, **kwargs)
self._lookup_layer = StringLookup(
max_tokens=max_tokens,
vocabulary=vocabulary,
idf_weights=idf_weights,
pad_to_max_tokens=pad_to_max_tokens,
mask_token="",
output_mode=output_mode,
sparse=sparse,
has_input_vocabulary=self._has_input_vocabulary,
encoding=encoding,
vocabulary_size=vocabulary_size,
)
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.supports_jit = False
@property
def compute_dtype(self):
return "string"
@property
def variable_dtype(self):
return "string"
def build(self, input_shape=None):
pass
def compute_output_shape(self, input_shape):
if self._output_mode == "int":
return (input_shape[0], self._output_sequence_length)
if self._split is None:
if len(input_shape) <= 1:
input_shape = tuple(input_shape) + (1,)
else:
input_shape = tuple(input_shape) + (None,)
return self._lookup_layer.compute_output_shape(input_shape)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
if self._output_mode == "int":
output_dtype = "int64"
else:
output_dtype = backend.floatx()
return backend.KerasTensor(output_shape, dtype=output_dtype)
def adapt(self, data, batch_size=None, steps=None):
"""Computes a vocabulary of string terms from tokens in a dataset.
Calling `adapt()` on a `TextVectorization` layer is an alternative to
passing in a precomputed vocabulary on construction via the `vocabulary`
argument. A `TextVectorization` layer should always be either adapted
over a dataset or supplied with a vocabulary.
During `adapt()`, the layer will build a vocabulary of all string tokens
seen in the dataset, sorted by occurrence count, with ties broken by
sort order of the tokens (high to low). At the end of `adapt()`, if
`max_tokens` is set, the vocabulary will be truncated to `max_tokens`
size. For example, adapting a layer with `max_tokens=1000` will compute
the 1000 most frequent tokens occurring in the input dataset. If
`output_mode='tf-idf'`, `adapt()` will also learn the document
frequencies of each token in the input dataset.
Arguments:
data: The data to train on. It can be passed either as a
batched `tf.data.Dataset`, as a list of strings,
or as a NumPy array.
steps: Integer or `None`.
Total number of steps (batches of samples) to process.
If `data` is a `tf.data.Dataset`, and `steps` is `None`,
`adapt()` will run until the input dataset is exhausted.
When passing an infinitely
repeating dataset, you must specify the `steps` argument. This
argument is not supported with array inputs or list inputs.
"""
self.reset_state()
if isinstance(data, tf.data.Dataset):
if steps is not None:
data = data.take(steps)
for batch in data:
self.update_state(batch)
else:
data = tf_utils.ensure_tensor(data, dtype="string")
if data.shape.rank == 1:
# A plain list of strings
# is treated as as many documents
data = tf.expand_dims(data, -1)
self.update_state(data)
self.finalize_state()
def update_state(self, data):
self._lookup_layer.update_state(self._preprocess(data))
def finalize_state(self):
self._lookup_layer.finalize_state()
def reset_state(self):
self._lookup_layer.reset_state()
def get_vocabulary(self, include_special_tokens=True):
"""Returns the current vocabulary of the layer.
Args:
include_special_tokens: If `True`, the returned vocabulary
will include the padding and OOV tokens,
and a term's index in the vocabulary will equal
the term's index when calling the layer. If `False`, the
returned vocabulary will not include any padding
or OOV tokens.
"""
return self._lookup_layer.get_vocabulary(include_special_tokens)
def vocabulary_size(self):
"""Gets the current size of the layer's vocabulary.
Returns:
The integer size of the vocabulary, including optional
mask and OOV indices.
"""
return self._lookup_layer.vocabulary_size()
def get_config(self):
config = {
"max_tokens": self._lookup_layer.max_tokens,
"standardize": self._standardize,
"split": self._split,
"ngrams": self._ngrams_arg,
"output_mode": self._output_mode,
"output_sequence_length": self._output_sequence_length,
"pad_to_max_tokens": self._lookup_layer.pad_to_max_tokens,
"sparse": self._lookup_layer.sparse,
"ragged": self._ragged,
"vocabulary": listify_tensors(self._lookup_layer.input_vocabulary),
"idf_weights": listify_tensors(
self._lookup_layer.input_idf_weights
),
"encoding": self._encoding,
"vocabulary_size": self.vocabulary_size(),
}
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config):
if not isinstance(config["standardize"], str):
config["standardize"] = serialization_lib.deserialize_keras_object(
config["standardize"]
)
if not isinstance(config["split"], str):
config["split"] = serialization_lib.deserialize_keras_object(
config["split"]
)
if isinstance(config["ngrams"], list):
config["ngrams"] = tuple(config["ngrams"])
return cls(**config)
def set_vocabulary(self, vocabulary, idf_weights=None):
"""Sets vocabulary (and optionally document frequency) for this layer.
This method sets the vocabulary and IDF weights for this layer directly,
instead of analyzing a dataset through `adapt()`. It should be used
whenever the vocab (and optionally document frequency) information is
already known. If vocabulary data is already present in the layer, this
method will replace it.
Args:
vocabulary: Either an array or a string path to a text file.
If passing an array, can pass a tuple, list, 1D NumPy array,
or 1D tensor containing the vocabulary terms.
If passing a file path, the file should contain one line
per term in the vocabulary.
idf_weights: A tuple, list, 1D NumPy array, or 1D tensor of inverse
document frequency weights with equal length to vocabulary.
Must be set if `output_mode` is `"tf_idf"`.
Should not be set otherwise.
"""
self._lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights)
def _preprocess(self, inputs):
inputs = tf_utils.ensure_tensor(inputs, dtype=tf.string)
if self._standardize in ("lower", "lower_and_strip_punctuation"):
inputs = tf.strings.lower(inputs)
if self._standardize in (
"strip_punctuation",
"lower_and_strip_punctuation",
):
inputs = tf.strings.regex_replace(
inputs, r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']', ""
)
if callable(self._standardize):
inputs = self._standardize(inputs)
if self._split is not None:
# If we are splitting, we validate that the 1st axis is of dimension
# 1 and so can be squeezed out. We do this here instead of after
# splitting for performance reasons - it's more expensive to squeeze
# a ragged tensor.
if inputs.shape.rank > 1:
if inputs.shape[-1] != 1:
raise ValueError(
"When using `TextVectorization` to tokenize strings, "
"the input rank must be 1 or the last shape dimension "
f"must be 1. Received: inputs.shape={inputs.shape} "
f"with rank={inputs.shape.rank}"
)
else:
inputs = tf.squeeze(inputs, axis=-1)
if self._split == "whitespace":
# This treats multiple whitespaces as one whitespace, and strips
# leading and trailing whitespace.
inputs = tf.strings.split(inputs)
elif self._split == "character":
inputs = tf.strings.unicode_split(inputs, "UTF-8")
elif callable(self._split):
inputs = self._split(inputs)
# Note that 'inputs' here can be either ragged or dense depending on the
# configuration choices for this Layer. The strings.ngrams op, however,
# does support both ragged and dense inputs.
if self._ngrams is not None:
inputs = tf.strings.ngrams(
inputs, ngram_width=self._ngrams, separator=" "
)
return inputs
def call(self, inputs):
if not isinstance(
inputs, (tf.Tensor, tf.RaggedTensor, np.ndarray, list, tuple)
):
inputs = tf.convert_to_tensor(backend.convert_to_numpy(inputs))
inputs = self._preprocess(inputs)
# If we're not doing any output processing, return right away.
if self._output_mode is None:
outputs = inputs
lookup_data = self._lookup_layer.call(inputs)
# For non-int output, we can return directly from the underlying layer.
if self._output_mode != "int":
return backend_utils.convert_tf_tensor(lookup_data)
# If we have a ragged tensor, we can pad during the conversion to dense.
if isinstance(lookup_data, tf.RaggedTensor) and not self._ragged:
shape = lookup_data.shape.as_list()
# If output sequence length is None, to_tensor will pad the last
# dimension to the bounding shape of the ragged dimension.
shape[-1] = self._output_sequence_length
outputs = lookup_data.to_tensor(default_value=0, shape=shape)
# If we have a dense tensor, we need to pad/trim directly.
elif self._output_sequence_length is not None:
# Maybe trim the output.
outputs = lookup_data[..., : self._output_sequence_length]
# Maybe pad the output. We need to be careful to use dynamic shape
# here as required_space_to_batch_paddings requires a fully known
# shape.
if not self._ragged:
shape = tf.shape(outputs)
padded_shape = tf.concat(
(shape[:-1], [self._output_sequence_length]), 0
)
padding, _ = tf.required_space_to_batch_paddings(
shape, padded_shape
)
outputs = tf.pad(outputs, padding)
# Because `tf.pad` used a dynamic shape, the output shape is
# dynamic. Apply the known static `_output_sequence_length`.
static_padded_shape = lookup_data.shape.as_list()
static_padded_shape[-1] = self._output_sequence_length
outputs.set_shape(static_padded_shape)
else:
outputs = lookup_data
return backend_utils.convert_tf_tensor(outputs)
def save_own_variables(self, store):
self._lookup_layer.save_own_variables(store)
def load_own_variables(self, store):
self._lookup_layer.load_own_variables(store)
def save_assets(self, dir_path):
self._lookup_layer.save_assets(dir_path)
def load_assets(self, dir_path):
self._lookup_layer.load_assets(dir_path)
| TextVectorization |
python | huggingface__transformers | src/transformers/models/mistral3/modeling_mistral3.py | {
"start": 2508,
"end": 4063
} | class ____(nn.Module):
"""
Learned merging of spatial_merge_size ** 2 patches
"""
def __init__(self, config: Mistral3Config):
super().__init__()
self.config = config
hidden_size = config.vision_config.hidden_size
self.spatial_merge_size = config.spatial_merge_size
self.patch_size = self.config.vision_config.patch_size
self.merging_layer = nn.Linear(hidden_size * self.spatial_merge_size**2, hidden_size, bias=False)
def forward(self, image_features: torch.Tensor, image_sizes: torch.Tensor) -> torch.Tensor:
image_sizes = [
(image_size[0] // self.patch_size, image_size[1] // self.patch_size) for image_size in image_sizes
]
tokens_per_image = [h * w for h, w in image_sizes]
d = image_features.shape[-1]
permuted_tensor = []
for image_index, image_tokens in enumerate(image_features.split(tokens_per_image)):
# Reshape image_tokens into a 2D grid
h, w = image_sizes[image_index]
image_grid = image_tokens.view(h, w, d).permute(2, 0, 1).unsqueeze(0)
grid = torch.nn.functional.unfold(
image_grid, kernel_size=self.spatial_merge_size, stride=self.spatial_merge_size
)
grid = grid.view(d * self.spatial_merge_size**2, -1).t()
permuted_tensor.append(grid)
image_features = torch.cat(permuted_tensor, dim=0)
image_features = self.merging_layer(image_features)
return image_features
| Mistral3PatchMerger |
python | bottlepy__bottle | bottle.py | {
"start": 98171,
"end": 98749
} | class ____(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self.default
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
new_app = push
@property
def default(self):
try:
return self[-1]
except IndexError:
return self.push()
| AppStack |
python | getsentry__sentry | src/sentry/hybridcloud/rpc/service.py | {
"start": 4636,
"end": 7130
} | class ____(DelegatedBySiloMode["RpcService"]):
def __init__(
self,
base_service_cls: type[RpcService],
constructors: Mapping[SiloMode, Callable[[], RpcService]],
signatures: Mapping[str, RpcMethodSignature],
) -> None:
super().__init__(constructors)
self._base_service_cls = base_service_cls
self._signatures = signatures
@property
def local_mode(self) -> SiloMode:
return self._base_service_cls.local_mode
def __repr__(self) -> str:
return f"{type(self).__name__}({self._base_service_cls.__name__})"
def deserialize_rpc_arguments(
self, method_name: str, serial_arguments: ArgumentDict
) -> pydantic.BaseModel:
signature = self._signatures[method_name]
return signature.deserialize_arguments(serial_arguments)
def deserialize_rpc_response(self, method_name: str, serial_response: Any) -> Any:
signature = self._signatures[method_name]
return signature.deserialize_return_value(serial_response)
def get_all_signatures(self) -> Iterable[RpcMethodSignature]:
return self._signatures.values()
def rpc_method(method: Callable[..., _T]) -> Callable[..., _T]:
"""Decorate methods to be exposed as part of the RPC interface.
Should be applied only to methods of an RpcService subclass.
"""
setattr(method, _IS_RPC_METHOD_ATTR, True)
return method
def regional_rpc_method(
resolve: RegionResolutionStrategy,
return_none_if_mapping_not_found: bool = False,
) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
"""Decorate methods to be exposed as part of the RPC interface.
In addition, resolves the region based on the resolve callback function.
Should be applied only to methods of an RpcService subclass.
The `return_none_if_mapping_not_found` option indicates that, if we fail to find
a region in which to look for the queried object, the decorated method should
return `None` indicating that the queried object does not exist. This should be
set only on methods with an `Optional[...]` return type.
"""
def decorator(method: Callable[..., _T]) -> Callable[..., _T]:
setattr(method, _REGION_RESOLUTION_ATTR, resolve)
setattr(method, _REGION_RESOLUTION_OPTIONAL_RETURN_ATTR, return_none_if_mapping_not_found)
return rpc_method(method)
return decorator
_global_service_registry: dict[str, DelegatingRpcService] = {}
| DelegatingRpcService |
python | pola-rs__polars | py-polars/src/polars/lazyframe/in_process.py | {
"start": 208,
"end": 1133
} | class ____:
"""
A placeholder for an in process query.
This can be used to do something else while a query is running.
The queries can be cancelled. You can peek if the query is finished,
or you can await the result.
"""
def __init__(self, ipq: PyInProcessQuery) -> None:
self._inner = ipq
def cancel(self) -> None:
"""Cancel the query at earliest convenience."""
self._inner.cancel()
def fetch(self) -> DataFrame | None:
"""
Fetch the result.
If it is ready, a materialized DataFrame is returned.
If it is not ready it will return `None`.
"""
if (out := self._inner.fetch()) is not None:
return wrap_df(out)
else:
return None
def fetch_blocking(self) -> DataFrame:
"""Await the result synchronously."""
return wrap_df(self._inner.fetch_blocking())
| InProcessQuery |
python | pytorch__pytorch | torch/export/unflatten.py | {
"start": 4102,
"end": 4474
} | class ____:
_ty: Optional[str]
def type_name(self) -> Optional[str]:
"""
Subclass of this class - InterpreterModule, InterpreterModuleDispatcher, represents
corresponding model in eager model. To get this type information for those modules
in eager model we need to use this method.
"""
return self._ty
| _SubmoduleBase |
python | pypa__warehouse | warehouse/accounts/models.py | {
"start": 1405,
"end": 1695
} | class ____:
def __init__(self, request):
self.request = request
def __getitem__(self, username):
try:
return self.request.db.query(User).filter(User.username == username).one()
except NoResultFound:
raise KeyError from None
| UserFactory |
python | mlflow__mlflow | mlflow/bedrock/stream.py | {
"start": 2583,
"end": 4434
} | class ____(BaseEventStreamWrapper):
"""A wrapper class for a event stream returned by the InvokeModelWithResponseStream API.
This wrapper intercepts streaming events from Bedrock's invoke_model_with_response_stream
API and accumulates token usage information across multiple chunks. It buffers partial
token usage data as it arrives and sets the final aggregated usage on the span when
the stream is exhausted.
Attributes:
_usage_buffer (dict): Internal buffer to accumulate token usage data from
streaming chunks. Uses TokenUsageKey constants as keys.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._usage_buffer = {}
def _buffer_token_usage_from_chunk(self, chunk: dict[str, Any]):
"""Buffer token usage from streaming chunk."""
if usage_data := _extract_token_usage_from_chunk(chunk):
for token_key, token_value in usage_data.items():
self._usage_buffer[token_key] = token_value
@capture_exception("Failed to handle event for the stream")
def _handle_event(self, span, event):
"""Process streaming event and buffer token usage."""
chunk = json.loads(event["chunk"]["bytes"])
self._span.add_event(SpanEvent(name=chunk["type"], attributes={"json": json.dumps(chunk)}))
# Buffer usage information from streaming chunks
self._buffer_token_usage_from_chunk(chunk)
def _close(self):
"""Set accumulated token usage on span and end it."""
# Build a standardized usage dict from buffered data using the utility function
if usage_data := parse_complete_token_usage_from_response(self._usage_buffer):
self._span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage_data)
self._end_span()
| InvokeModelStreamWrapper |
python | run-llama__llama_index | llama-index-core/llama_index/core/extractors/interface.py | {
"start": 517,
"end": 5578
} | class ____(TransformComponent):
"""Metadata extractor."""
is_text_node_only: bool = True
show_progress: bool = Field(default=True, description="Whether to show progress.")
metadata_mode: MetadataMode = Field(
default=MetadataMode.ALL, description="Metadata mode to use when reading nodes."
)
node_text_template: str = Field(
default=DEFAULT_NODE_TEXT_TEMPLATE,
description="Template to represent how node text is mixed with metadata text.",
)
disable_template_rewrite: bool = Field(
default=False, description="Disable the node template rewrite."
)
in_place: bool = Field(
default=True, description="Whether to process nodes in place."
)
num_workers: int = Field(
default=4,
description="Number of workers to use for concurrent async processing.",
)
@classmethod
def from_dict(cls, data: Dict[str, Any], **kwargs: Any) -> Self: # type: ignore
if isinstance(kwargs, dict):
data.update(kwargs)
data.pop("class_name", None)
llm_predictor = data.get("llm_predictor")
if llm_predictor:
from llama_index.core.llm_predictor.loading import load_predictor
llm_predictor = load_predictor(llm_predictor)
data["llm_predictor"] = llm_predictor
llm = data.get("llm")
if llm:
from llama_index.core.llms.loading import load_llm
llm = load_llm(llm)
data["llm"] = llm
return cls(**data)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "MetadataExtractor"
@abstractmethod
async def aextract(self, nodes: Sequence[BaseNode]) -> List[Dict]:
"""
Extracts metadata for a sequence of nodes, returning a list of
metadata dictionaries corresponding to each node.
Args:
nodes (Sequence[Document]): nodes to extract metadata from
"""
def extract(self, nodes: Sequence[BaseNode]) -> List[Dict]:
"""
Extracts metadata for a sequence of nodes, returning a list of
metadata dictionaries corresponding to each node.
Args:
nodes (Sequence[Document]): nodes to extract metadata from
"""
return asyncio_run(self.aextract(nodes))
async def aprocess_nodes(
self,
nodes: Sequence[BaseNode],
excluded_embed_metadata_keys: Optional[List[str]] = None,
excluded_llm_metadata_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> List[BaseNode]:
"""
Post process nodes parsed from documents.
Allows extractors to be chained.
Args:
nodes (List[BaseNode]): nodes to post-process
excluded_embed_metadata_keys (Optional[List[str]]):
keys to exclude from embed metadata
excluded_llm_metadata_keys (Optional[List[str]]):
keys to exclude from llm metadata
"""
if self.in_place:
new_nodes = nodes
else:
new_nodes = [deepcopy(node) for node in nodes]
cur_metadata_list = await self.aextract(new_nodes)
for idx, node in enumerate(new_nodes):
node.metadata.update(cur_metadata_list[idx])
for idx, node in enumerate(new_nodes):
if excluded_embed_metadata_keys is not None:
node.excluded_embed_metadata_keys.extend(excluded_embed_metadata_keys)
if excluded_llm_metadata_keys is not None:
node.excluded_llm_metadata_keys.extend(excluded_llm_metadata_keys)
if not self.disable_template_rewrite:
if isinstance(node, TextNode):
cast(TextNode, node).text_template = self.node_text_template
return new_nodes # type: ignore
def process_nodes(
self,
nodes: Sequence[BaseNode],
excluded_embed_metadata_keys: Optional[List[str]] = None,
excluded_llm_metadata_keys: Optional[List[str]] = None,
**kwargs: Any,
) -> List[BaseNode]:
return asyncio_run(
self.aprocess_nodes(
nodes,
excluded_embed_metadata_keys=excluded_embed_metadata_keys,
excluded_llm_metadata_keys=excluded_llm_metadata_keys,
**kwargs,
)
)
def __call__(self, nodes: Sequence[BaseNode], **kwargs: Any) -> List[BaseNode]:
"""
Post process nodes parsed from documents.
Allows extractors to be chained.
Args:
nodes (List[BaseNode]): nodes to post-process
"""
return self.process_nodes(nodes, **kwargs)
async def acall(self, nodes: Sequence[BaseNode], **kwargs: Any) -> List[BaseNode]:
"""
Post process nodes parsed from documents.
Allows extractors to be chained.
Args:
nodes (List[BaseNode]): nodes to post-process
"""
return await self.aprocess_nodes(nodes, **kwargs)
| BaseExtractor |
python | kamyu104__LeetCode-Solutions | Python/find-the-highest-altitude.py | {
"start": 29,
"end": 296
} | class ____(object):
def largestAltitude(self, gain):
"""
:type gain: List[int]
:rtype: int
"""
result = curr = 0
for g in gain:
curr += g
result = max(result, curr)
return result
| Solution |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_proguard_artifact_releases.py | {
"start": 202,
"end": 8087
} | class ____(APITestCase):
def test_create_proguard_artifact_release_successfully(self) -> None:
project = self.create_project(name="foo")
proguard_uuid = "660f839b-8bfd-580d-9a7c-ea339a6c9867"
url = reverse(
"sentry-api-0-proguard-artifact-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
data = {
"release_name": "test@1.0.0",
"proguard_uuid": proguard_uuid,
}
file = File.objects.create(
name="proguard.txt", type="default", headers={"Content-Type": "text/plain"}
)
ProjectDebugFile.objects.create(
file=file,
object_name="proguard.txt",
cpu_name="x86",
project_id=project.id,
debug_id=proguard_uuid,
)
self.login_as(user=self.user)
response = self.client.post(url, data=data, format="json")
assert response.status_code == 201, response.content
assert ProguardArtifactRelease.objects.count() == 1
proguard_artifact_release = ProguardArtifactRelease.objects.get()
assert proguard_artifact_release.organization_id == project.organization.id
assert proguard_artifact_release.project_id == project.id
def test_create_proguard_artifact_release_with_missing_fields(self) -> None:
project = self.create_project(name="foo")
url = reverse(
"sentry-api-0-proguard-artifact-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
data_missing_uuid = {
"release_name": "test@1.0.0",
}
data_missing_release_name = {
"proguard_uuid": "660f839b-8bfd-580d-9a7c-ea339a6c9867",
}
data_missing_all: dict[str, str] = {}
self.login_as(user=self.user)
response = self.client.post(url, data=data_missing_uuid, format="json")
assert response.status_code == 400, response.content
assert response.data == {"error": "Missing required fields: proguard_uuid"}
response = self.client.post(url, data=data_missing_release_name, format="json")
assert response.status_code == 400, response.content
assert response.data == {"error": "Missing required fields: release_name"}
response = self.client.post(url, data=data_missing_all, format="json")
assert response.status_code == 400, response.content
assert response.data == {"error": "Missing required fields: release_name, proguard_uuid"}
def test_create_proguard_artifact_release_with_conflicting_release_name(self) -> None:
project = self.create_project(name="foo")
url = reverse(
"sentry-api-0-proguard-artifact-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
data = {
"release_name": "test@1.0.0",
"proguard_uuid": "660f839b-8bfd-580d-9a7c-ea339a6c9867",
}
file = File.objects.create(
name="proguard.txt", type="default", headers={"Content-Type": "text/plain"}
)
project_debug_file = ProjectDebugFile.objects.create(
file=file,
object_name="proguard.txt",
cpu_name="x86",
project_id=project.id,
debug_id="660f839b-8bfd-580d-9a7c-ea339a6c9867",
)
ProguardArtifactRelease.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release_name=data["release_name"],
proguard_uuid=data["proguard_uuid"],
project_debug_file=project_debug_file,
)
self.login_as(user=self.user)
response = self.client.post(url, data=data)
assert response.status_code == 409, response.content
assert response.data == {
"error": "Proguard artifact release with this name in this project already exists."
}
def test_list_proguard_artifact_releases_with_uuid_successfully(self) -> None:
project = self.create_project(name="foo")
proguard_uuid = "660f839b-8bfd-580d-9a7c-ea339a6c9867"
file = File.objects.create(
name="proguard.txt", type="default", headers={"Content-Type": "text/plain"}
)
project_debug_file = ProjectDebugFile.objects.create(
file=file,
object_name="proguard.txt",
cpu_name="x86",
project_id=project.id,
debug_id=proguard_uuid,
)
ProguardArtifactRelease.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release_name="test@1.0.0",
proguard_uuid=proguard_uuid,
project_debug_file=project_debug_file,
)
url = reverse(
"sentry-api-0-proguard-artifact-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
self.login_as(user=self.user)
response = self.client.get(url, {"proguard_uuid": proguard_uuid})
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert list(response.data["releases"]) == ["test@1.0.0"]
def test_create_proguard_artifact_release_with_non_existent_uuid(self) -> None:
project = self.create_project(name="foo")
url = reverse(
"sentry-api-0-proguard-artifact-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
data = {
"release_name": "test@1.0.0",
"proguard_uuid": "660f839b-8bfd-580d-9a7c-ea339a6ccccc",
}
file = File.objects.create(
name="proguard.txt", type="default", headers={"Content-Type": "text/plain"}
)
ProjectDebugFile.objects.create(
file=file,
object_name="proguard.txt",
cpu_name="x86",
project_id=project.id,
debug_id="660f839b-8bfd-580d-9a7c-ea339a6cbbbb",
)
self.login_as(user=self.user)
response = self.client.post(url, data=data)
assert response.status_code == 400, response.content
assert response.data == {"error": "No matching proguard mapping file with this uuid found"}
def test_create_proguard_artifact_release_with_invalid_uuid(self) -> None:
project = self.create_project(name="foo")
url = reverse(
"sentry-api-0-proguard-artifact-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
data = {
"release_name": "test@1.0.0",
"proguard_uuid": "invalid-uuid",
}
file = File.objects.create(
name="proguard.txt", type="default", headers={"Content-Type": "text/plain"}
)
ProjectDebugFile.objects.create(
file=file,
object_name="proguard.txt",
cpu_name="x86",
project_id=project.id,
debug_id="660f839b-8bfd-580d-9a7c-ea339a6cbbbb",
)
self.login_as(user=self.user)
response = self.client.post(url, data=data)
assert response.status_code == 400, response.content
assert response.data == {"error": "Invalid proguard_uuid"}
| ProguardArtifactReleasesEndpointTest |
python | scipy__scipy | scipy/datasets/tests/test_data.py | {
"start": 817,
"end": 4213
} | class ____:
@pytest.fixture(scope='module', autouse=True)
def test_download_all(self):
# This fixture requires INTERNET CONNECTION
# test_setup phase
download_all()
yield
@pytest.mark.fail_slow(10)
def test_existence_all(self):
assert len(os.listdir(data_dir)) >= len(registry)
def test_ascent(self):
assert_equal(ascent().shape, (512, 512))
# hash check
assert _has_hash(os.path.join(data_dir, "ascent.dat"),
registry["ascent.dat"])
def test_face(self):
assert_equal(face().shape, (768, 1024, 3))
# hash check
assert _has_hash(os.path.join(data_dir, "face.dat"),
registry["face.dat"])
def test_electrocardiogram(self):
# Test shape, dtype and stats of signal
ecg = electrocardiogram()
assert_equal(ecg.dtype, float)
assert_equal(ecg.shape, (108000,))
assert_almost_equal(ecg.mean(), -0.16510875)
assert_almost_equal(ecg.std(), 0.5992473991177294)
# hash check
assert _has_hash(os.path.join(data_dir, "ecg.dat"),
registry["ecg.dat"])
def test_clear_cache(tmp_path):
# Note: `tmp_path` is a pytest fixture, it handles cleanup
thread_basepath = tmp_path / str(get_ident())
thread_basepath.mkdir()
dummy_basepath = thread_basepath / "dummy_cache_dir"
dummy_basepath.mkdir()
# Create three dummy dataset files for dummy dataset methods
dummy_method_map = {}
for i in range(4):
dummy_method_map[f"data{i}"] = [f"data{i}.dat"]
data_filepath = dummy_basepath / f"data{i}.dat"
data_filepath.write_text("")
# clear files associated to single dataset method data0
# also test callable argument instead of list of callables
def data0():
pass
_clear_cache(datasets=data0, cache_dir=dummy_basepath,
method_map=dummy_method_map)
assert not os.path.exists(dummy_basepath/"data0.dat")
# clear files associated to multiple dataset methods "data3" and "data4"
def data1():
pass
def data2():
pass
_clear_cache(datasets=[data1, data2], cache_dir=dummy_basepath,
method_map=dummy_method_map)
assert not os.path.exists(dummy_basepath/"data1.dat")
assert not os.path.exists(dummy_basepath/"data2.dat")
# clear multiple dataset files "data3_0.dat" and "data3_1.dat"
# associated with dataset method "data3"
def data4():
pass
# create files
(dummy_basepath / "data4_0.dat").write_text("")
(dummy_basepath / "data4_1.dat").write_text("")
dummy_method_map["data4"] = ["data4_0.dat", "data4_1.dat"]
_clear_cache(datasets=[data4], cache_dir=dummy_basepath,
method_map=dummy_method_map)
assert not os.path.exists(dummy_basepath/"data4_0.dat")
assert not os.path.exists(dummy_basepath/"data4_1.dat")
# wrong dataset method should raise ValueError since it
# doesn't exist in the dummy_method_map
def data5():
pass
with pytest.raises(ValueError):
_clear_cache(datasets=[data5], cache_dir=dummy_basepath,
method_map=dummy_method_map)
# remove all dataset cache
_clear_cache(datasets=None, cache_dir=dummy_basepath)
assert not os.path.exists(dummy_basepath)
| TestDatasets |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/coroutines1.py | {
"start": 733,
"end": 1332
} | class ____:
def __aenter__(self):
return self
@coroutine
def __await__(self) -> Generator[Any, None, int]:
yield 3
return 3
async def __aexit__(
self,
t: Optional[type] = None,
exc: Optional[BaseException] = None,
tb: Optional[Any] = None,
) -> bool:
return True
async def consumer2():
a = ScopedClass1()
# This should generate two errors because
# there is no __enter__ or __exit__ method on ScopedClass1.
with a as b:
needs_int(b)
async with a as b:
needs_int(b)
| ScopedClass1 |
python | huggingface__transformers | src/transformers/models/granitemoehybrid/modular_granitemoehybrid.py | {
"start": 7599,
"end": 10933
} | class ____(GraniteMoeSharedModel):
def __init__(self, config: GraniteMoeHybridConfig):
super().__init__(config)
self.layers = nn.ModuleList(
[GraniteMoeHybridDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.embedding_multiplier = config.embedding_multiplier
@auto_docstring
@check_model_inputs()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[GraniteFlashAttentionKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
inputs_embeds = inputs_embeds * self.embedding_multiplier
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
self.config,
inputs_embeds,
attention_mask,
cache_position,
past_key_values,
)
mamba_mask = self._update_mamba_mask(attention_mask, cache_position)
# embed positions
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers:
# Depending on the layer type we opt for 2D base attention mask (Mamba) or 4D causal mask (Attention)
layer_mask = mamba_mask if decoder_layer.layer_type == "mamba" else causal_mask
hidden_states = decoder_layer(
hidden_states,
attention_mask=layer_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
if past_key_values and not past_key_values.has_previous_state:
past_key_values.has_previous_state = True
return MoeModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def _update_mamba_mask(self, attention_mask, cache_position):
"""
No need for zeroing states when
1. Cached forward
2. Attending to all inputs
"""
mamba_mask = attention_mask
if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
mamba_mask = None
return mamba_mask
| GraniteMoeHybridModel |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 32600,
"end": 33737
} | class ____(datetime.time, AnsibleTaggedObject):
__slots__ = _ANSIBLE_TAGGED_OBJECT_SLOTS
@classmethod
def _instance_factory(cls, value: datetime.time, tags_mapping: _AnsibleTagsMapping) -> _AnsibleTaggedTime:
instance = cls(
hour=value.hour,
minute=value.minute,
second=value.second,
microsecond=value.microsecond,
tzinfo=value.tzinfo,
fold=value.fold,
)
instance._ansible_tags_mapping = tags_mapping
return instance
def _native_copy(self) -> datetime.time:
return datetime.time(
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond,
tzinfo=self.tzinfo,
fold=self.fold,
)
def __new__(cls, hour, *args, **kwargs):
return super()._new(hour, *args, **kwargs)
def __reduce_ex__(self, protocol: t.SupportsIndex) -> tuple:
return super()._reduce(super().__reduce_ex__(protocol))
def __repr__(self) -> str:
return self._native_copy().__repr__()
| _AnsibleTaggedTime |
python | facelessuser__pymdown-extensions | pymdownx/superfences.py | {
"start": 6057,
"end": 10241
} | class ____(Extension):
"""SuperFences code block extension."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.superfences = []
self.config = {
'disable_indented_code_blocks': [False, "Disable indented code blocks - Default: False"],
'custom_fences': [[], 'Specify custom fences. Default: See documentation.'],
'css_class': [
'',
"Set class name for wrapper element. The default of CodeHilite or Highlight will be used"
"if nothing is set. - "
"Default: ''"
],
'preserve_tabs': [False, "Preserve tabs in fences - Default: False"],
'relaxed_headers': [False, "Relaxed fenced code headers - Default: False"]
}
super().__init__(*args, **kwargs)
def extend_super_fences(self, name, formatter, validator):
"""Extend SuperFences with the given name, language, and formatter."""
obj = {
"name": name,
"test": functools.partial(_test, test_language=name),
"formatter": formatter,
"validator": validator
}
if name == '*':
self.superfences[0] = obj
else:
self.superfences.append(obj)
def extendMarkdown(self, md):
"""Add fenced block preprocessor to the Markdown instance."""
# Not super yet, so let's make it super
md.registerExtension(self)
config = self.getConfigs()
# Default fenced blocks
self.superfences.insert(
0,
{
"name": "superfences",
"test": _test,
"formatter": None,
"validator": functools.partial(_validator, validator=highlight_validator)
}
)
# Custom Fences
custom_fences = config.get('custom_fences', [])
for custom in custom_fences:
name = custom.get('name')
class_name = custom.get('class')
fence_format = custom.get('format', fence_code_format)
validator = custom.get('validator', default_validator)
if name is not None and class_name is not None:
self.extend_super_fences(
name,
functools.partial(_formatter, class_name=class_name, _fmt=fence_format),
functools.partial(_validator, validator=validator)
)
self.md = md
self.patch_fenced_rule()
self.stash = CodeStash()
def patch_fenced_rule(self):
"""
Patch Python Markdown with our own fenced block extension.
We don't attempt to protect against a user loading the `fenced_code` extension with this.
Most likely they will have issues, but they shouldn't have loaded them together in the first place :).
"""
config = self.getConfigs()
fenced = SuperFencesBlockPreprocessor(self.md)
fenced.config = config
fenced.extension = self
if self.superfences[0]['name'] == "superfences":
self.superfences[0]["formatter"] = fenced.highlight
self.md.preprocessors.register(fenced, "fenced_code_block", 25)
indented_code = SuperFencesCodeBlockProcessor(self.md.parser)
indented_code.config = config
indented_code.extension = self
self.md.parser.blockprocessors.register(indented_code, "code", 80)
if config["preserve_tabs"]:
# Need to squeeze in right after critic.
raw_fenced = SuperFencesRawBlockPreprocessor(self.md)
raw_fenced.config = config
raw_fenced.extension = self
self.md.preprocessors.register(raw_fenced, "fenced_raw_block", 31.05)
self.md.registerExtensions(["pymdownx._bypassnorm"], {})
# Add the highlight extension, but do so in a disabled state so we can just retrieve default configurations
self.md.registerExtensions(["pymdownx.highlight"], {"pymdownx.highlight": {"_enabled": False}})
def reset(self):
"""Clear the stash."""
self.stash.clear_stash()
| SuperFencesCodeExtension |
python | walkccc__LeetCode | solutions/385. Mini Parser/385.py | {
"start": 0,
"end": 643
} | class ____:
def deserialize(self, s: str) -> NestedInteger:
if s[0] != '[':
return NestedInteger(int(s))
stack = []
for i, c in enumerate(s):
if c == '[':
stack.append(NestedInteger())
start = i + 1
elif c == ',':
if i > start:
num = int(s[start:i])
stack[-1].add(NestedInteger(num))
start = i + 1
elif c == ']':
popped = stack.pop()
if i > start:
num = int(s[start:i])
popped.add(NestedInteger(num))
if stack:
stack[-1].add(popped)
else:
return popped
start = i + 1
| Solution |
python | readthedocs__readthedocs.org | readthedocs/embed/views.py | {
"start": 358,
"end": 900
} | class ____(EmbedAPIMixin, APIView):
permission_classes = [AllowAny]
renderer_classes = [JSONRenderer]
def get(self, request):
return Response(
{
"error": (
"Embed API v2 has been deprecated and is no longer available, please use embed API v3 instead. "
"Read our blog post for more information: https://about.readthedocs.com/blog/2024/11/embed-api-v2-deprecated/."
)
},
status=status.HTTP_410_GONE,
)
| EmbedAPI |
python | great-expectations__great_expectations | great_expectations/core/serializer.py | {
"start": 2189,
"end": 2699
} | class ____(AbstractConfigSerializer):
@override
def serialize(self, obj: AbstractConfig) -> dict:
"""Serialize config to json dict.
Args:
obj: AbstractConfig object to serialize.
Returns:
Representation of object as a dict suitable for serializing to json.
"""
config: dict = self.schema.dump(obj)
json_serializable_dict: dict = convert_to_json_serializable(data=config)
return json_serializable_dict
| JsonConfigSerializer |
python | redis__redis-py | redis/commands/search/field.py | {
"start": 3593,
"end": 3818
} | class ____(Field):
"""
GeoField is used to define a geo-indexing field in a schema definition
"""
def __init__(self, name: str, **kwargs):
Field.__init__(self, name, args=[Field.GEO], **kwargs)
| GeoField |
python | jina-ai__jina | jina/orchestrate/flow/asyncio.py | {
"start": 134,
"end": 2175
} | class ____(AsyncPostMixin, AsyncProfileMixin, AsyncHealthCheckMixin, Flow):
"""
Asynchronous version of :class:`jina.Flow`. They share the same interface, except
in :class:`AsyncFlow` :meth:`train`, :meth:`index`, :meth:`search` methods are coroutines
(i.e. declared with the async/await syntax), simply calling them will not schedule them to be executed.
To actually run a coroutine, user need to put them in an eventloop, e.g. via ``asyncio.run()``,
``asyncio.create_task()``.
:class:`AsyncFlow` can be very useful in
the integration settings, where Jina/Jina Flow is NOT the main logic, but rather served as a part of other program.
In this case, users often do not want to let Jina control the ``asyncio.eventloop``. On contrary, :class:`Flow`
is controlling and wrapping the eventloop internally, making the Flow looks synchronous from outside.
In particular, :class:`AsyncFlow` makes Jina usage in Jupyter Notebook more natural and reliable.
For example, the following code
will use the eventloop that already spawned in Jupyter/ipython to run Jina Flow (instead of creating a new one).
.. highlight:: python
.. code-block:: python
from jina import AsyncFlow
from jina.types.document.generators import from_ndarray
import numpy as np
with AsyncFlow().add() as f:
await f.index(from_ndarray(np.random.random([5, 4])), on_done=print)
Notice that the above code will NOT work in standard Python REPL, as only Jupyter/ipython implements "autoawait".
.. seealso::
Asynchronous in REPL: Autoawait
https://ipython.readthedocs.io/en/stable/interactive/autoawait.html
Another example is when using Jina as an integration. Say you have another IO-bounded job ``heavylifting()``, you
can use this feature to schedule Jina ``index()`` and ``heavylifting()`` concurrently.
One can think of :class:`Flow` as Jina-managed eventloop, whereas :class:`AsyncFlow` is self-managed eventloop.
"""
| AsyncFlow |
python | tiangolo__fastapi | docs_src/body_nested_models/tutorial007_py310.py | {
"start": 144,
"end": 329
} | class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
tags: set[str] = set()
images: list[Image] | None = None
| Item |
python | pytorch__pytorch | test/test_serialization.py | {
"start": 40407,
"end": 40526
} | class ____(ClassThatUsesBuildInstructionAllSlots):
x: int
y: int
c: str
| ClassThatUsesBuildInstructionSomeSlots |
python | huggingface__transformers | src/transformers/models/luke/modeling_luke.py | {
"start": 32747,
"end": 33309
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.transform = EntityPredictionHeadTransform(config)
self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
@auto_docstring
| EntityPredictionHead |
python | encode__django-rest-framework | rest_framework/permissions.py | {
"start": 4298,
"end": 4630
} | class ____(BasePermission):
"""
The request is authenticated as a user, or is a read-only request.
"""
def has_permission(self, request, view):
return bool(
request.method in SAFE_METHODS or
request.user and
request.user.is_authenticated
)
| IsAuthenticatedOrReadOnly |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/bedrock.py | {
"start": 12398,
"end": 17116
} | class ____(AwsBaseOperator[BedrockHook]):
"""
Create a fine-tuning job to customize a base model.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BedrockCreateProvisionedModelThroughputOperator`
:param model_units: Number of model units to allocate. (templated)
:param provisioned_model_name: Unique name for this provisioned throughput. (templated)
:param model_id: Name or ARN of the model to associate with this provisioned throughput. (templated)
:param create_throughput_kwargs: Any optional parameters to pass to the API.
:param wait_for_completion: Whether to wait for cluster to stop. (default: True)
:param waiter_delay: Time in seconds to wait between status checks. (default: 60)
:param waiter_max_attempts: Maximum number of attempts to check for job completion. (default: 20)
:param deferrable: If True, the operator will wait asynchronously for the cluster to stop.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = BedrockHook
template_fields: Sequence[str] = aws_template_fields(
"model_units",
"provisioned_model_name",
"model_id",
)
def __init__(
self,
model_units: int,
provisioned_model_name: str,
model_id: str,
create_throughput_kwargs: dict[str, Any] | None = None,
wait_for_completion: bool = True,
waiter_delay: int = 60,
waiter_max_attempts: int = 20,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.model_units = model_units
self.provisioned_model_name = provisioned_model_name
self.model_id = model_id
self.create_throughput_kwargs = create_throughput_kwargs or {}
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
def execute(self, context: Context) -> str:
provisioned_model_id = self.hook.conn.create_provisioned_model_throughput(
modelUnits=self.model_units,
provisionedModelName=self.provisioned_model_name,
modelId=self.model_id,
**self.create_throughput_kwargs,
)["provisionedModelArn"]
if self.deferrable:
self.log.info("Deferring for provisioned throughput.")
self.defer(
trigger=BedrockProvisionModelThroughputCompletedTrigger(
provisioned_model_id=provisioned_model_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
if self.wait_for_completion:
self.log.info("Waiting for provisioned throughput.")
self.hook.get_waiter("provisioned_model_throughput_complete").wait(
provisionedModelId=provisioned_model_id,
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
return provisioned_model_id
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Error while running job: {validated_event}")
self.log.info(
"Bedrock provisioned throughput job `%s` complete.", validated_event["provisioned_model_id"]
)
return validated_event["provisioned_model_id"]
| BedrockCreateProvisionedModelThroughputOperator |
python | django__django | tests/gis_tests/utils.py | {
"start": 954,
"end": 2398
} | class ____:
"""Assert that Func expressions aren't mutated during their as_sql()."""
def setUp(self):
def as_sql_wrapper(original_as_sql):
def inner(*args, **kwargs):
func = original_as_sql.__self__
# Resolve output_field before as_sql() so touching it in
# as_sql() won't change __dict__.
func.output_field
__dict__original = copy.deepcopy(func.__dict__)
result = original_as_sql(*args, **kwargs)
msg = (
"%s Func was mutated during compilation." % func.__class__.__name__
)
self.assertEqual(func.__dict__, __dict__original, msg)
return result
return inner
def __getattribute__(self, name):
if name != vendor_impl:
return __getattribute__original(self, name)
try:
as_sql = __getattribute__original(self, vendor_impl)
except AttributeError:
as_sql = __getattribute__original(self, "as_sql")
return as_sql_wrapper(as_sql)
vendor_impl = "as_" + connection.vendor
__getattribute__original = Func.__getattribute__
func_patcher = mock.patch.object(Func, "__getattribute__", __getattribute__)
func_patcher.start()
self.addCleanup(func_patcher.stop)
super().setUp()
| FuncTestMixin |
python | kamyu104__LeetCode-Solutions | Python/find-the-most-common-response.py | {
"start": 83,
"end": 424
} | class ____(object):
def findCommonResponse(self, responses):
"""
:type responses: List[List[str]]
:rtype: str
"""
cnt = collections.defaultdict(int)
for r in responses:
for x in set(r):
cnt[x] += 1
return min((-c, x) for x, c in cnt.iteritems())[1]
| Solution |
python | conda__conda | conda/models/enums.py | {
"start": 1684,
"end": 2559
} | class ____(Enum):
"""
Refers to if the file in question is hard linked or soft linked. Originally designed to be used
in paths.json
"""
hardlink = "hardlink"
softlink = "softlink"
directory = "directory"
# these additional types should not be included by conda-build in packages
linked_package_record = (
"linked_package_record" # a package's .json file in conda-meta
)
pyc_file = "pyc_file"
unix_python_entry_point = "unix_python_entry_point"
windows_python_entry_point_script = "windows_python_entry_point_script"
windows_python_entry_point_exe = "windows_python_entry_point_exe"
@classproperty
def basic_types(self):
return (PathType.hardlink, PathType.softlink, PathType.directory)
def __str__(self):
return self.name
def __json__(self):
return self.name
| PathType |
python | jazzband__django-redis | tests/test_client.py | {
"start": 582,
"end": 2162
} | class ____:
def test_close_client_disconnect_default(
self,
cache_client: DefaultClient,
mocker: MockerFixture,
):
cache_client._options.clear()
mock = mocker.patch.object(cache_client.connection_factory, "disconnect")
cache_client.close()
assert not mock.called
def test_close_disconnect_settings(
self,
cache_client: DefaultClient,
settings: SettingsWrapper,
mocker: MockerFixture,
):
with override_settings(DJANGO_REDIS_CLOSE_CONNECTION=True):
mock = mocker.patch.object(cache_client.connection_factory, "disconnect")
cache_client.close()
assert mock.called
def test_close_disconnect_settings_cache(
self,
cache_client: DefaultClient,
mocker: MockerFixture,
settings: SettingsWrapper,
):
caches = settings.CACHES
caches[DEFAULT_CACHE_ALIAS]["OPTIONS"]["CLOSE_CONNECTION"] = True
with override_settings(CACHES=caches):
cache_client.set("TestClientClose", 0)
mock = mocker.patch.object(cache_client.connection_factory, "disconnect")
cache_client.close()
assert mock.called
def test_close_disconnect_client_options(
self,
cache_client: DefaultClient,
mocker: MockerFixture,
):
cache_client._options["CLOSE_CONNECTION"] = True
mock = mocker.patch.object(cache_client.connection_factory, "disconnect")
cache_client.close()
assert mock.called
| TestClientClose |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/__init__.py | {
"start": 2382,
"end": 2553
} | class ____:
"""Foo"""
class Inner:
"""Foo"""
def meth(self):
"""Foo"""
# should be documented as an alias
factory = dict
| Outer |
python | cython__cython | Cython/Shadow.py | {
"start": 12728,
"end": 16473
} | class ____(CythonType):
__getitem__ = index_type
def fused_type(*args):
if not args:
raise TypeError("Expected at least one type as argument")
# Find the numeric type with biggest rank if all types are numeric
rank = -1
for type in args:
if type not in (py_int, py_long, py_float, py_complex):
break
if type_ordering.index(type) > rank:
result_type = type
else:
return result_type
# Not a simple numeric type, return a fused type instance. The result
# isn't really meant to be used, as we can't keep track of the context in
# pure-mode. Casting won't do anything in this case.
return _FusedType()
def _specialized_from_args(signatures, args, kwargs):
"Perhaps this should be implemented in a TreeFragment in Cython code"
raise Exception("yet to be implemented")
py_int = typedef(int, "int")
py_long = typedef(int, "long") # for legacy Py2 code only
py_float = typedef(float, "float")
py_complex = typedef(complex, "double complex")
# Predefined types
int_types = [
'char',
'short',
'Py_UNICODE',
'int',
'Py_UCS4',
'long',
'longlong',
'Py_hash_t',
'Py_ssize_t',
'size_t',
'ssize_t',
'ptrdiff_t',
]
float_types = [
'longdouble',
'double',
'float',
]
complex_types = [
'longdoublecomplex',
'doublecomplex',
'floatcomplex',
'complex',
]
other_types = [
'bint',
'void',
'Py_tss_t',
]
to_repr = {
'longlong': 'long long',
'longdouble': 'long double',
'longdoublecomplex': 'long double complex',
'doublecomplex': 'double complex',
'floatcomplex': 'float complex',
}.get
gs = globals()
gs['unicode'] = typedef(str, 'unicode')
for name in int_types:
reprname = to_repr(name, name)
gs[name] = typedef(py_int, reprname)
if name not in ('Py_UNICODE', 'Py_UCS4', 'Py_hash_t', 'ptrdiff_t') and not name.endswith('size_t'):
gs['u'+name] = typedef(py_int, "unsigned " + reprname)
gs['s'+name] = typedef(py_int, "signed " + reprname)
for name in float_types:
gs[name] = typedef(py_float, to_repr(name, name))
for name in complex_types:
gs[name] = typedef(py_complex, to_repr(name, name))
del name, reprname
bint = typedef(bool, "bint")
void = typedef(None, "void")
Py_tss_t = typedef(None, "Py_tss_t")
# Generate const types.
for t in int_types + float_types + complex_types + other_types:
for t in (t, f'u{t}', f's{t}'):
if t in gs:
gs[f"const_{t}"] = const(gs[t], t)
# Generate pointer types: p_int, p_const_char, etc.
for i in range(1, 4):
for const_ in ('', 'const_'):
for t in int_types:
for t in (t, f'u{t}', f's{t}'):
if t in gs:
gs[f"{'p'*i}_{const_}{t}"] = pointer(gs[f"{'p'*(i-1)}{'_' if i > 1 else ''}{const_}{t}"])
for t in float_types + complex_types:
gs[f"{'p'*i}_{const_}{t}"] = pointer(gs[f"{'p'*(i-1)}{'_' if i > 1 else ''}{const_}{t}"])
gs[f"{'p'*i}_const_bint"] = pointer(gs[f"{'p'*(i-1)}{'_' if i > 1 else ''}const_bint"])
for t in other_types:
gs[f"{'p'*i}_{t}"] = pointer(gs[f"{'p'*(i-1)}{'_' if i > 1 else ''}{t}"])
del t, const_, i
NULL = gs['p_void'](0)
del gs
def __getattr__(name):
# looks like 'gs' has some users out there by now...
if name == 'gs':
import warnings
warnings.warn(
"'gs' is not a publicly exposed name in cython.*. Use vars() or globals() instead.",
DeprecationWarning)
return globals()
raise AttributeError(f"'cython' has no attribute {name!r}")
integral = floating = numeric = _FusedType()
type_ordering = [py_int, py_long, py_float, py_complex]
| _FusedType |
python | walkccc__LeetCode | solutions/412. Fizz Buzz/412.py | {
"start": 0,
"end": 179
} | class ____:
def fizzBuzz(self, n: int) -> list[str]:
d = {3: 'Fizz', 5: 'Buzz'}
return [''.join([d[k] for k in d if i % k == 0]) or str(i) for i in range(1, n + 1)]
| Solution |
python | huggingface__transformers | tests/models/owlvit/test_modeling_owlvit.py | {
"start": 7268,
"end": 10591
} | class ____:
def __init__(
self,
parent,
batch_size=12,
num_queries=4,
seq_length=16,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=64,
num_hidden_layers=12,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=16,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.num_queries = num_queries
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size * self.num_queries, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size * self.num_queries, self.seq_length])
if input_mask is not None:
num_text, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(num_text,))
for idx, start_index in enumerate(rnd_start_indices):
input_mask[idx, :start_index] = 1
input_mask[idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return OwlViTTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = OwlViTTextModel(config=config).to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids=input_ids, attention_mask=input_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size * self.num_queries, self.seq_length, self.hidden_size)
)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_queries, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| OwlViTTextModelTester |
python | scipy__scipy | scipy/signal/tests/test_ltisys.py | {
"start": 26184,
"end": 26980
} | class ____:
def test_lti_instantiation(self):
# Test that lti can be instantiated with sequences, scalars.
# See PR-225.
# TransferFunction
s = lti([1], [-1])
assert isinstance(s, TransferFunction)
assert isinstance(s, lti)
assert not isinstance(s, dlti)
assert s.dt is None
# ZerosPolesGain
s = lti(np.array([]), np.array([-1]), 1)
assert isinstance(s, ZerosPolesGain)
assert isinstance(s, lti)
assert not isinstance(s, dlti)
assert s.dt is None
# StateSpace
s = lti([], [-1], 1)
s = lti([1], [-1], 1, 3)
assert isinstance(s, StateSpace)
assert isinstance(s, lti)
assert not isinstance(s, dlti)
assert s.dt is None
| TestLti |
python | langchain-ai__langchain | libs/cli/langchain_cli/integration_template/integration_template/document_loaders.py | {
"start": 176,
"end": 2112
} | class ____(BaseLoader):
# TODO: Replace all TODOs in docstring. See example docstring:
# https://github.com/langchain-ai/langchain/blob/869523ad728e6b76d77f170cce13925b4ebc3c1e/libs/community/langchain_community/document_loaders/recursive_url_loader.py#L54
"""
__ModuleName__ document loader integration
# TODO: Replace with relevant packages, env vars.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
# TODO: Replace with relevant init params.
Instantiate:
```python
from langchain_community.document_loaders import __ModuleName__Loader
loader = __ModuleName__Loader(
# required params = ...
# other params = ...
)
```
Lazy load:
```python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
```
```python
TODO: Example output
```
# TODO: Delete if async load is not implemented
Async load:
```python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
```
```python
TODO: Example output
```
"""
# TODO: This method must be implemented to load documents.
# Do not implement load(), a default implementation is already available.
def lazy_load(self) -> Iterator[Document]:
raise NotImplementedError()
# TODO: Implement if you would like to change default BaseLoader implementation
# async def alazy_load(self) -> AsyncIterator[Document]:
| __ModuleName__Loader |
python | numpy__numpy | numpy/f2py/tests/test_return_character.py | {
"start": 138,
"end": 817
} | class ____(util.F2PyTest):
def check_function(self, t, tname):
if tname in ["t0", "t1", "s0", "s1"]:
assert t("23") == b"2"
r = t("ab")
assert r == b"a"
r = t(array("ab"))
assert r == b"a"
r = t(array(77, "u1"))
assert r == b"M"
elif tname in ["ts", "ss"]:
assert t(23) == b"23"
assert t("123456789abcdef") == b"123456789a"
elif tname in ["t5", "s5"]:
assert t(23) == b"23"
assert t("ab") == b"ab"
assert t("123456789abcdef") == b"12345"
else:
raise NotImplementedError
| TestReturnCharacter |
python | milvus-io__pymilvus | pymilvus/orm/schema.py | {
"start": 24620,
"end": 30420
} | class ____:
def __init__(self):
self.name = ""
self._kwargs = {}
self._fields = []
self._description = ""
self._type_params = {}
# max_capacity will be set when added to CollectionSchema
self.max_capacity = None
def _check_kwargs(self):
"""Check struct-level kwargs."""
def _check_fields(self):
"""Check struct fields restrictions."""
if not self._fields:
raise ParamError(message="Struct field must have at least one field")
for field in self._fields:
if field.is_primary:
raise ParamError(
message=f"Field '{field.name}' in struct '{self.name}' cannot be primary key"
)
if field.is_partition_key:
raise ParamError(
message=f"Field '{field.name}' in struct '{self.name}' cannot be partition key"
)
if field.is_clustering_key:
raise ParamError(
message=f"Field '{field.name}' in struct '{self.name}' cannot be clustering key"
)
if field.is_dynamic:
raise ParamError(
message=f"Field '{field.name}' in struct '{self.name}' cannot be dynamic field"
)
if field.nullable:
raise ParamError(
message=f"Field '{field.name}' in struct '{self.name}' cannot be nullable"
)
if field.auto_id:
raise ParamError(
message=f"Field '{field.name}' in struct '{self.name}' cannot have auto_id"
)
if hasattr(field, "default_value") and field.default_value is not None:
raise ParamError(
message=f"Field '{field.name}' in struct '{self.name}' cannot have default value"
)
# Check field name uniqueness
field_names = [f.name for f in self._fields]
if len(field_names) != len(set(field_names)):
duplicate_names = [name for name in field_names if field_names.count(name) > 1]
raise ParamError(
message=f"Duplicate field names in struct '{self.name}': {set(duplicate_names)}"
)
def add_field(self, field_name: str, datatype: DataType, **kwargs):
if datatype in {DataType.ARRAY, DataType._ARRAY_OF_VECTOR, DataType.STRUCT}:
raise ParamError(
message="Struct field schema does not support Array, ArrayOfVector or Struct"
)
field = FieldSchema(field_name, datatype, **kwargs)
self._fields.append(field)
return self
@property
def fields(self):
return self._fields
@property
def description(self):
return self._description
@property
def params(self):
return self._type_params
@property
def dtype(self) -> DataType:
return DataType.STRUCT
def to_dict(self):
"""Convert StructFieldSchema to dictionary representation."""
struct_dict = {
"name": self.name,
"description": self._description,
"fields": [field.to_dict() for field in self._fields],
}
# Include max_capacity if it's set
if self.max_capacity is not None:
struct_dict["max_capacity"] = self.max_capacity
# Include type_params if not empty
if self._type_params:
struct_dict["params"] = copy.deepcopy(self._type_params)
return struct_dict
@classmethod
def construct_from_dict(cls, raw: Dict):
"""Construct StructFieldSchema from dictionary.
The input can be either:
1. User-friendly format (from convert_struct_fields_to_user_format)
2. Direct struct field format with sub-fields
"""
# Create empty instance
instance = cls()
# Set name and description
instance.name = raw.get("name", "")
instance._description = raw.get("description", "")
# Extract max_capacity if present
if "max_capacity" in raw:
instance.max_capacity = raw["max_capacity"]
elif (
"params" in raw and isinstance(raw["params"], dict) and "max_capacity" in raw["params"]
):
instance.max_capacity = raw["params"]["max_capacity"]
# Extract type_params from params dict
if "params" in raw and isinstance(raw["params"], dict):
for key, value in raw["params"].items():
if key != "max_capacity": # max_capacity is already handled above
instance._type_params[key] = value
# Build fields list
fields = []
if "struct_fields" in raw:
# User format from convert_struct_fields_to_user_format
for field_dict in raw["struct_fields"]:
field_kwargs = {}
if field_dict.get("params"):
field_kwargs.update(field_dict["params"])
field = FieldSchema(
name=field_dict["name"],
dtype=field_dict["type"],
description=field_dict.get("description", ""),
**field_kwargs,
)
fields.append(field)
elif "fields" in raw:
# Direct format with FieldSchema dicts
for field_raw in raw["fields"]:
if isinstance(field_raw, dict):
fields.append(FieldSchema.construct_from_dict(field_raw))
elif isinstance(field_raw, FieldSchema):
fields.append(field_raw)
instance._fields = [copy.deepcopy(field) for field in fields]
return instance
| StructFieldSchema |
python | PyCQA__pylint | tests/functional/u/unbalanced/unbalanced_tuple_unpacking.py | {
"start": 1698,
"end": 2944
} | class ____:
"""Test unbalanced tuple unpacking in instance attributes."""
# pylint: disable=attribute-defined-outside-init, invalid-name, too-few-public-methods
def test(self):
"""unpacking in instance attributes"""
# we're not sure if temp() returns two or three values
# so we shouldn't emit an error
self.a, self.b = temp()
self.a, self.b = temp2()
self.a, self.b = unpack() # [unbalanced-tuple-unpacking]
def issue329(*args):
"""Don't emit unbalanced tuple unpacking if the
rhs of the assignment is a variable-length argument,
because we don't know the actual length of the tuple.
"""
first, second, third = args
return first, second, third
def test_decimal():
"""Test a false positive with decimal.Decimal.as_tuple
See astroid https://bitbucket.org/logilab/astroid/issues/92/
"""
from decimal import Decimal
dec = Decimal(2)
first, second, third = dec.as_tuple()
return first, second, third
def test_issue_559():
"""Test that we don't have a false positive wrt to issue #559."""
from ctypes import c_int
root_x, root_y, win_x, win_y = [c_int()] * 4
return root_x, root_y, win_x, win_y
| UnbalancedUnpacking |
python | pyqtgraph__pyqtgraph | benchmarks/renderImageItem.py | {
"start": 923,
"end": 8085
} | class ____:
unit = "seconds"
param_names = ["size", "acceleration", "use_levels", "dtype", "channels", "lut_length"]
params = Parameters(
[
# (256, 256), # other sizes useful to test for
# (512, 512), # seeing performance scale
# (1024, 1024), # but not helpful for tracking history
# (2048, 2048), # so we test the most taxing size only
# (3072, 3072),
(4096, 4096)
], # size
["numpy"], # acceleration
[True, False], # use_levels
['uint8', 'uint16', 'float32'], # dtype
[1, 3, 4], # channels
['uint8', 'uint16', None] # lut_length
)
def __init__(self):
self.data = np.empty((), dtype=np.uint8)
self.lut = np.empty((), dtype=np.ubyte)
# No need to add acceleration that isn't available
if numba is not None:
self.params.acceleration.append('numba')
if cp is not None:
self.params.acceleration.append('cupy')
self.levels = None
def teardown(self, *args, **kwargs):
# toggle options off
pg.setConfigOption("useNumba", False)
pg.setConfigOption("useCupy", False)
def setup_cache(self) -> dict:
accelerations = [np]
if cp is not None:
accelerations.append(cp)
cache = {}
for xp in accelerations:
cache[xp.__name__] = {"lut": {}, "data": {}}
random_generator = xp.random.default_rng(42) # answer to everything
# handle lut caching
c_map = xp.array([[-500.0, 255.0], [-255.0, 255.0], [0.0, 500.0]])
for lut_length in self.params.lut_lengths:
if lut_length is None:
continue
bits = xp.dtype(lut_length).itemsize * 8
# create the LUT
lut = xp.zeros((2 ** bits, 4), dtype="ubyte")
for i in range(3):
lut[:, i] = xp.clip(xp.linspace(c_map[i][0], c_map[i][1], 2 ** bits), 0, 255)
lut[:, -1] = 255
cache[xp.__name__]["lut"][lut_length] = lut
# handle data caching
for dtype in self.params.dtypes:
cache[xp.__name__]["data"][dtype] = {}
for channels in self.params.channels:
cache[xp.__name__]["data"][dtype][channels] = {}
for size in self.params.sizes:
size_with_channels = (size[0], size[1], channels) if channels != 1 else size
if xp.dtype(dtype) in (xp.float32, xp.float64):
data = random_generator.standard_normal(
size=size_with_channels,
dtype=dtype
)
else:
iinfo = xp.iinfo(dtype)
data = random_generator.integers(
low=iinfo.min,
high=iinfo.max,
size=size_with_channels,
dtype=dtype,
endpoint=True
)
cache[xp.__name__]["data"][dtype][channels][size] = data
return cache
def setup(
self,
cache: dict,
size: tuple[int, int],
acceleration: str,
use_levels: bool,
dtype: npt.DTypeLike,
channels: int,
lut_length: typing.Optional[npt.DTypeLike]
):
xp = np
if acceleration == "numba":
if numba is None:
# if numba is not available, skip it...
raise NotImplementedError("numba not available")
pg.setConfigOption("useNumba", True)
elif acceleration == "cupy":
if cp is None:
# if cupy is not available, skip it...
raise NotImplementedError("cupy not available")
pg.setConfigOption("useCupy", True)
xp = cp # use cupy instead of numpy
# does it even make sense to have a LUT with multiple channels?
if lut_length is not None and channels != 1:
raise NotImplementedError(
f"{lut_length=} and {channels=} not implemented. LUT with multiple channels not supported."
)
# skip when the code paths bypass makeARGB
if acceleration != "numpy":
if xp.dtype(dtype) == xp.ubyte and not use_levels:
if lut_length is None:
# Grayscale8, RGB888 or RGB[AX]8888
raise NotImplementedError(
f"{dtype=} and {use_levels=} not tested for {acceleration=} with {lut_length=}"
)
elif channels == 1 and xp.dtype(lut_length) == xp.uint8:
# Indexed8
raise NotImplementedError(
f"{dtype=} and {use_levels=} not tested with {acceleration=} for {channels=} and {lut_length=}"
)
elif xp.dtype(dtype) == xp.uint16 and not use_levels and lut_length is None:
if channels == 1:
# Grayscale16
raise NotImplementedError(
f"{dtype=} {use_levels=} {lut_length=} and {channels=} not tested for {acceleration=}"
)
elif channels == 4:
# RGBA64
raise NotImplementedError(
f"{dtype=} {use_levels=} {lut_length=} and {channels=} not tested with {acceleration=}"
)
if use_levels:
if xp.dtype(dtype) == xp.float32:
self.levels = (-4.0, 4.0)
elif xp.dtype(dtype) == xp.uint16:
self.levels = (250, 3000)
elif xp.dtype(dtype) == xp.uint8:
self.levels = (20, 220)
else:
raise ValueError(
"dtype needs to be one of {'float32', 'uint8', 'uint16'}"
)
elif xp.dtype(dtype) in (xp.float32, xp.float64):
# float images always need levels
raise NotImplementedError(
f"{use_levels=} {dtype=} is not supported. Float images always need levels."
)
else:
self.levels = None
if lut_length is None:
self.lut = None
else:
self.lut = cache[xp.__name__]["lut"][lut_length]
self.data = cache[xp.__name__]["data"][dtype][channels][size]
if acceleration in {"numba", "cupy"}:
prime(self.data, self.lut, self.levels)
def time_test(self, *args, **kwargs):
kwargs = {}
if self.lut is not None:
kwargs["lut"] = self.lut
if self.levels is not None:
kwargs["levels"] = self.levels
renderQImage(self.data, **kwargs)
| TimeSuite |
python | dask__distributed | distributed/utils_test.py | {
"start": 72547,
"end": 78528
} | class ____(Nanny):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.in_kill = asyncio.Event()
self.wait_kill = asyncio.Event()
async def kill(self, **kwargs):
self.in_kill.set()
await self.wait_kill.wait()
return await super().kill(**kwargs)
async def wait_for_state(
key: Key,
state: str | Collection[str],
dask_worker: Worker | Scheduler,
*,
interval: float = 0.01,
) -> None:
"""Wait for a task to appear on a Worker or on the Scheduler and to be in a specific
state or one of a set of possible states.
"""
tasks: Mapping[Key, SchedulerTaskState | WorkerTaskState]
if isinstance(dask_worker, Worker):
tasks = dask_worker.state.tasks
elif isinstance(dask_worker, Scheduler):
tasks = dask_worker.tasks
else:
raise TypeError(dask_worker) # pragma: nocover
if isinstance(state, str):
state = (state,)
state_str = repr(next(iter(state))) if len(state) == 1 else str(state)
try:
while key not in tasks or tasks[key].state not in state:
await asyncio.sleep(interval)
except (asyncio.CancelledError, asyncio.TimeoutError):
if key in tasks:
msg = (
f"tasks[{key!r}].state={tasks[key].state!r} on {dask_worker.address}; "
f"expected state={state_str}"
)
else:
msg = f"tasks[{key!r}] not found on {dask_worker.address}"
# 99% of the times this is triggered by @gen_cluster timeout, so raising the
# message as an exception wouldn't work.
print(msg)
raise
async def wait_for_stimulus(
type_: type[StateMachineEvent] | tuple[type[StateMachineEvent], ...],
dask_worker: Worker,
*,
interval: float = 0.01,
**matches: Any,
) -> StateMachineEvent:
"""Wait for a specific stimulus to appear in the log of the WorkerState."""
log = dask_worker.state.stimulus_log
last_ev = None
while True:
if log and log[-1] is not last_ev:
last_ev = log[-1]
for ev in log:
if not isinstance(ev, type_):
continue
if all(getattr(ev, k) == v for k, v in matches.items()):
return ev
await asyncio.sleep(interval)
@pytest.fixture
def ws():
"""An empty WorkerState"""
with dask.config.set({"distributed.admin.low-level-log-length": None}):
state = WorkerState(address="127.0.0.1:1", transition_counter_max=50_000)
yield state
if state.validate:
state.validate_state()
@pytest.fixture(params=["executing", "long-running"])
def ws_with_running_task(ws, request):
"""A WorkerState running a single task 'x' with resources {R: 1}.
The task may or may not raise secede(); the tests using this fixture runs twice.
"""
ws.available_resources = {"R": 1}
ws.total_resources = {"R": 1}
instructions = ws.handle_stimulus(
ComputeTaskEvent.dummy(
key="x", resource_restrictions={"R": 1}, stimulus_id="compute"
)
)
assert instructions == [Execute(key="x", stimulus_id="compute")]
if request.param == "long-running":
ws.handle_stimulus(
SecedeEvent(key="x", compute_duration=1.0, stimulus_id="secede")
)
assert ws.tasks["x"].state == request.param
yield ws
@pytest.fixture()
def name_of_test(request):
return f"{request.node.nodeid}"
@pytest.fixture()
def requires_default_ports(name_of_test):
start = time()
@contextmanager
def _bind_port(port):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("", port))
s.listen(1)
yield s
default_ports = [8786]
while time() - start < _TEST_TIMEOUT:
try:
with contextlib.ExitStack() as stack:
for port in default_ports:
stack.enter_context(_bind_port(port=port))
break
except OSError as err:
if err.errno == errno.EADDRINUSE:
print(
f"Address already in use. Waiting before running test {name_of_test}"
)
sleep(1)
continue
else:
raise TimeoutError(f"Default ports didn't open up in time for {name_of_test}")
yield
async def fetch_metrics_body(port: int) -> str:
http_client = AsyncHTTPClient()
response = await http_client.fetch(f"http://localhost:{port}/metrics")
assert response.code == 200
return response.body.decode("utf8")
async def fetch_metrics(port: int, prefix: str | None = None) -> dict[str, Any]:
from prometheus_client.parser import text_string_to_metric_families
txt = await fetch_metrics_body(port)
families = {
family.name: family
for family in text_string_to_metric_families(txt)
if prefix is None or family.name.startswith(prefix)
}
return families
async def fetch_metrics_sample_names(port: int, prefix: str | None = None) -> set[str]:
"""
Get all the names of samples returned by Prometheus.
This mostly matches list of metric families, but when there's `foo` (gauge) and `foo_total` (count)
these will both have `foo` as the family.
"""
from prometheus_client.parser import text_string_to_metric_families
txt = await fetch_metrics_body(port)
sample_names = set().union(
*[
{sample.name for sample in family.samples}
for family in text_string_to_metric_families(txt)
if prefix is None or family.name.startswith(prefix)
]
)
return sample_names
def _get_gc_overhead():
class _CustomObject:
def __sizeof__(self):
return 0
return sys.getsizeof(_CustomObject())
_size_obj = _get_gc_overhead()
| BlockedKillNanny |
python | pypa__pip | tests/unit/test_resolution_legacy_resolver.py | {
"start": 845,
"end": 2441
} | class ____(BaseDistribution):
def __init__(self, metadata: email.message.Message) -> None:
self._canonical_name = cast(NormalizedName, "my-project")
self._metadata = metadata
def __str__(self) -> str:
return f"<distribution {self.canonical_name!r}>"
@property
def canonical_name(self) -> NormalizedName:
return self._canonical_name
@property
def metadata(self) -> email.message.Message:
return self._metadata
def make_fake_dist(
*, klass: type[BaseDistribution] = FakeDist, requires_python: str | None = None
) -> BaseDistribution:
metadata = email.message.Message()
metadata["Name"] = "my-project"
if requires_python is not None:
metadata["Requires-Python"] = requires_python
# Too many arguments for "BaseDistribution"
return klass(metadata) # type: ignore[call-arg]
def make_test_resolver(
monkeypatch: pytest.MonkeyPatch,
mock_candidates: list[InstallationCandidate],
) -> Resolver:
def _find_candidates(project_name: str) -> list[InstallationCandidate]:
return mock_candidates
finder = make_test_finder()
monkeypatch.setattr(finder, "find_all_candidates", _find_candidates)
return Resolver(
finder=finder,
preparer=mock.Mock(), # Not used.
make_install_req=install_req_from_line,
wheel_cache=None,
use_user_site=False,
force_reinstall=False,
ignore_dependencies=False,
ignore_installed=False,
ignore_requires_python=False,
upgrade_strategy="to-satisfy-only",
)
| FakeDist |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qgroupnorm_test.py | {
"start": 306,
"end": 1504
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, dims, num_groups, dtype):
X = (torch.rand(*dims) - 0.5) * 256
num_channels = dims[1]
scale = 1.0
zero_point = 0
self.inputs = {
"qX": torch.quantize_per_tensor(
X, scale=scale, zero_point=zero_point, dtype=dtype
),
"num_groups": num_groups,
"weight": torch.rand(num_channels, dtype=torch.float),
"bias": torch.rand(num_channels, dtype=torch.float),
"eps": 1e-5,
"Y_scale": 0.1,
"Y_zero_point": 0,
}
def forward(
self,
qX,
num_groups: int,
weight,
bias,
eps: float,
Y_scale: float,
Y_zero_point: int,
):
return torch.ops.quantized.group_norm(
qX,
num_groups,
weight=weight,
bias=bias,
eps=eps,
output_scale=Y_scale,
output_zero_point=Y_zero_point,
)
op_bench.generate_pt_test(groupnorm_configs_short, QGroupNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| QGroupNormBenchmark |
python | PrefectHQ__prefect | src/prefect/settings/models/server/services.py | {
"start": 8921,
"end": 13936
} | class ____(ServicesBaseSetting):
"""
Settings for controlling the scheduler service
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(
("server", "services", "scheduler")
)
enabled: bool = Field(
default=True,
description="Whether or not to start the scheduler service in the server application.",
validation_alias=AliasChoices(
AliasPath("enabled"),
"prefect_server_services_scheduler_enabled",
"prefect_api_services_scheduler_enabled",
),
)
loop_seconds: float = Field(
default=60,
description="""
The scheduler loop interval, in seconds. This determines
how often the scheduler will attempt to schedule new flow runs, but has no
impact on how quickly either flow runs or task runs are actually executed.
Defaults to `60`.
""",
validation_alias=AliasChoices(
AliasPath("loop_seconds"),
"prefect_server_services_scheduler_loop_seconds",
"prefect_api_services_scheduler_loop_seconds",
),
)
deployment_batch_size: int = Field(
default=100,
description="""
The number of deployments the scheduler will attempt to
schedule in a single batch. If there are more deployments than the batch
size, the scheduler immediately attempts to schedule the next batch; it
does not sleep for `scheduler_loop_seconds` until it has visited every
deployment once. Defaults to `100`.
""",
validation_alias=AliasChoices(
AliasPath("deployment_batch_size"),
"prefect_server_services_scheduler_deployment_batch_size",
"prefect_api_services_scheduler_deployment_batch_size",
),
)
max_runs: int = Field(
default=100,
description="""
The scheduler will attempt to schedule up to this many
auto-scheduled runs in the future. Note that runs may have fewer than
this many scheduled runs, depending on the value of
`scheduler_max_scheduled_time`. Defaults to `100`.
""",
validation_alias=AliasChoices(
AliasPath("max_runs"),
"prefect_server_services_scheduler_max_runs",
"prefect_api_services_scheduler_max_runs",
),
)
min_runs: int = Field(
default=3,
description="""
The scheduler will attempt to schedule at least this many
auto-scheduled runs in the future. Note that runs may have more than
this many scheduled runs, depending on the value of
`scheduler_min_scheduled_time`. Defaults to `3`.
""",
validation_alias=AliasChoices(
AliasPath("min_runs"),
"prefect_server_services_scheduler_min_runs",
"prefect_api_services_scheduler_min_runs",
),
)
max_scheduled_time: timedelta = Field(
default=timedelta(days=100),
description="""
The scheduler will create new runs up to this far in the
future. Note that this setting will take precedence over
`scheduler_max_runs`: if a flow runs once a month and
`scheduler_max_scheduled_time` is three months, then only three runs will be
scheduled. Defaults to 100 days (`8640000` seconds).
""",
validation_alias=AliasChoices(
AliasPath("max_scheduled_time"),
"prefect_server_services_scheduler_max_scheduled_time",
"prefect_api_services_scheduler_max_scheduled_time",
),
)
min_scheduled_time: timedelta = Field(
default=timedelta(hours=1),
description="""
The scheduler will create new runs at least this far in the
future. Note that this setting will take precedence over `scheduler_min_runs`:
if a flow runs every hour and `scheduler_min_scheduled_time` is three hours,
then three runs will be scheduled even if `scheduler_min_runs` is 1. Defaults to
""",
validation_alias=AliasChoices(
AliasPath("min_scheduled_time"),
"prefect_server_services_scheduler_min_scheduled_time",
"prefect_api_services_scheduler_min_scheduled_time",
),
)
insert_batch_size: int = Field(
default=500,
description="""
The number of runs the scheduler will attempt to insert in a single batch.
Defaults to `500`.
""",
validation_alias=AliasChoices(
AliasPath("insert_batch_size"),
"prefect_server_services_scheduler_insert_batch_size",
"prefect_api_services_scheduler_insert_batch_size",
),
)
recent_deployments_loop_seconds: float = Field(
default=5,
description="""
The number of seconds the recent deployments scheduler will wait between checking for recently updated deployments. Defaults to `5`.
""",
)
| ServerServicesSchedulerSettings |
python | pydantic__pydantic | pydantic/types.py | {
"start": 9569,
"end": 13239
} | class ____(_fields.PydanticMetadata):
"""A field metadata class to indicate that a field should allow `-inf`, `inf`, and `nan`.
Use this class as an annotation via [`Annotated`](https://docs.python.org/3/library/typing.html#typing.Annotated), as seen below.
Attributes:
allow_inf_nan: Whether to allow `-inf`, `inf`, and `nan`. Defaults to `True`.
Example:
```python
from typing import Annotated
from pydantic.types import AllowInfNan
LaxFloat = Annotated[float, AllowInfNan()]
```
"""
allow_inf_nan: bool = True
def __hash__(self) -> int:
return hash(self.allow_inf_nan)
def confloat(
*,
strict: bool | None = None,
gt: float | None = None,
ge: float | None = None,
lt: float | None = None,
le: float | None = None,
multiple_of: float | None = None,
allow_inf_nan: bool | None = None,
) -> type[float]:
"""
!!! warning "Discouraged"
This function is **discouraged** in favor of using
[`Annotated`](https://docs.python.org/3/library/typing.html#typing.Annotated) with
[`Field`][pydantic.fields.Field] instead.
This function will be **deprecated** in Pydantic 3.0.
The reason is that `confloat` returns a type, which doesn't play well with static analysis tools.
=== ":x: Don't do this"
```python
from pydantic import BaseModel, confloat
class Foo(BaseModel):
bar: confloat(strict=True, gt=0)
```
=== ":white_check_mark: Do this"
```python
from typing import Annotated
from pydantic import BaseModel, Field
class Foo(BaseModel):
bar: Annotated[float, Field(strict=True, gt=0)]
```
A wrapper around `float` that allows for additional constraints.
Args:
strict: Whether to validate the float in strict mode.
gt: The value must be greater than this.
ge: The value must be greater than or equal to this.
lt: The value must be less than this.
le: The value must be less than or equal to this.
multiple_of: The value must be a multiple of this.
allow_inf_nan: Whether to allow `-inf`, `inf`, and `nan`.
Returns:
The wrapped float type.
```python
from pydantic import BaseModel, ValidationError, confloat
class ConstrainedExample(BaseModel):
constrained_float: confloat(gt=1.0)
m = ConstrainedExample(constrained_float=1.1)
print(repr(m))
#> ConstrainedExample(constrained_float=1.1)
try:
ConstrainedExample(constrained_float=0.9)
except ValidationError as e:
print(e.errors())
'''
[
{
'type': 'greater_than',
'loc': ('constrained_float',),
'msg': 'Input should be greater than 1',
'input': 0.9,
'ctx': {'gt': 1.0},
'url': 'https://errors.pydantic.dev/2/v/greater_than',
}
]
'''
```
""" # noqa: D212
return Annotated[ # pyright: ignore[reportReturnType]
float,
Strict(strict) if strict is not None else None,
annotated_types.Interval(gt=gt, ge=ge, lt=lt, le=le),
annotated_types.MultipleOf(multiple_of) if multiple_of is not None else None,
AllowInfNan(allow_inf_nan) if allow_inf_nan is not None else None,
]
PositiveFloat = Annotated[float, annotated_types.Gt(0)]
"""A float that must be greater than zero.
```python
from pydantic import BaseModel, PositiveFloat, ValidationError
| AllowInfNan |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 1673,
"end": 3245
} | class ____(LossFunctionWrapper):
"""Computes the mean of squares of errors between labels and predictions.
Formula:
```python
loss = mean(square(y_true - y_pred))
```
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="mean_squared_error",
dtype=None,
):
super().__init__(
mean_squared_error, name=name, reduction=reduction, dtype=dtype
)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.MeanAbsoluteError")
| MeanSquaredError |
python | great-expectations__great_expectations | great_expectations/data_context/types/resource_identifiers.py | {
"start": 17673,
"end": 18210
} | class ____(Schema):
configuration_key = fields.Str()
# noinspection PyUnusedLocal
@post_load
def make_configuration_identifier(self, data, **kwargs):
return ConfigurationIdentifier(**data)
expectationSuiteIdentifierSchema = ExpectationSuiteIdentifierSchema()
validationResultIdentifierSchema = ValidationResultIdentifierSchema()
runIdentifierSchema = RunIdentifierSchema()
batchIdentifierSchema = BatchIdentifierSchema()
configurationIdentifierSchema = ConfigurationIdentifierSchema()
| ConfigurationIdentifierSchema |
python | optuna__optuna | optuna/terminator/terminator.py | {
"start": 669,
"end": 875
} | class ____(metaclass=abc.ABCMeta):
"""Base class for terminators."""
@abc.abstractmethod
def should_terminate(self, study: Study) -> bool:
pass
@experimental_class("3.2.0")
| BaseTerminator |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/core/serialization/serialized_data.py | {
"start": 4239,
"end": 4365
} | class ____:
asset_key: AssetKey
mapped_tasks: AbstractSet[TaskHandle]
@whitelist_for_serdes
@record
| KeyScopedTaskHandles |
python | TheAlgorithms__Python | data_structures/linked_list/doubly_linked_list_two.py | {
"start": 649,
"end": 815
} | class ____[DataType]:
data: DataType
previous: Self | None = None
next: Self | None = None
def __str__(self) -> str:
return f"{self.data}"
| Node |
python | graphql-python__graphene | graphene/relay/connection.py | {
"start": 4191,
"end": 6539
} | class ____(Field):
def __init__(self, type_, *args, **kwargs):
kwargs.setdefault("before", String())
kwargs.setdefault("after", String())
kwargs.setdefault("first", Int())
kwargs.setdefault("last", Int())
super(IterableConnectionField, self).__init__(type_, *args, **kwargs)
@property
def type(self):
type_ = super(IterableConnectionField, self).type
connection_type = type_
if isinstance(type_, NonNull):
connection_type = type_.of_type
if is_node(connection_type):
raise Exception(
"ConnectionFields now need a explicit ConnectionType for Nodes.\n"
"Read more: https://github.com/graphql-python/graphene/blob/v2.0.0/UPGRADE-v2.0.md#node-connections"
)
assert issubclass(
connection_type, Connection
), f'{self.__class__.__name__} type has to be a subclass of Connection. Received "{connection_type}".'
return type_
@classmethod
def resolve_connection(cls, connection_type, args, resolved):
if isinstance(resolved, connection_type):
return resolved
assert isinstance(resolved, Iterable), (
f"Resolved value from the connection field has to be an iterable or instance of {connection_type}. "
f'Received "{resolved}"'
)
connection = connection_from_array(
resolved,
args,
connection_type=partial(connection_adapter, connection_type),
edge_type=connection_type.Edge,
page_info_type=page_info_adapter,
)
connection.iterable = resolved
return connection
@classmethod
def connection_resolver(cls, resolver, connection_type, root, info, **args):
resolved = resolver(root, info, **args)
if isinstance(connection_type, NonNull):
connection_type = connection_type.of_type
on_resolve = partial(cls.resolve_connection, connection_type, args)
return maybe_thenable(resolved, on_resolve)
def wrap_resolve(self, parent_resolver):
resolver = super(IterableConnectionField, self).wrap_resolve(parent_resolver)
return partial(self.connection_resolver, resolver, self.type)
ConnectionField = IterableConnectionField
| IterableConnectionField |
python | Textualize__textual | src/textual/markup.py | {
"start": 674,
"end": 1682
} | class ____(Exception):
"""An error occurred parsing content markup."""
expect_markup_tag = (
Expect(
"markup style value",
end_tag=r"(?<!\\)\]",
key=r"[@a-zA-Z_-][a-zA-Z0-9_-]*=",
percent=PERCENT,
color=COLOR,
token=TOKEN,
variable_ref=VARIABLE_REF,
whitespace=r"\s+",
)
.expect_eof(True)
.expect_semicolon(False)
.extract_text(True)
)
expect_markup = Expect(
"markup tag",
open_closing_tag=r"(?<!\\)\[/",
open_tag=r"(?<!\\)\[",
).extract_text()
expect_markup_expression = (
Expect(
"markup value",
end_tag=r"(?<!\\)\]",
word=r"[\w\.]+",
round_start=r"\(",
round_end=r"\)",
square_start=r"\[",
square_end=r"\]",
curly_start=r"\{",
curly_end=r"\}",
comma=",",
whitespace=r"\s+",
double_string=r"\".*?\"",
single_string=r"'.*?'",
)
.expect_eof(True)
.expect_semicolon(False)
)
| MarkupError |
python | django__django | tests/signing/tests.py | {
"start": 200,
"end": 7892
} | class ____(SimpleTestCase):
def test_signature(self):
"signature() method should generate a signature"
signer = signing.Signer(key="predictable-secret")
signer2 = signing.Signer(key="predictable-secret2")
for s in (
b"hello",
b"3098247:529:087:",
"\u2019".encode(),
):
self.assertEqual(
signer.signature(s),
signing.base64_hmac(
signer.salt + "signer",
s,
"predictable-secret",
algorithm=signer.algorithm,
),
)
self.assertNotEqual(signer.signature(s), signer2.signature(s))
def test_signature_with_salt(self):
signer = signing.Signer(key="predictable-secret", salt="extra-salt")
self.assertEqual(
signer.signature("hello"),
signing.base64_hmac(
"extra-salt" + "signer",
"hello",
"predictable-secret",
algorithm=signer.algorithm,
),
)
self.assertNotEqual(
signing.Signer(key="predictable-secret", salt="one").signature("hello"),
signing.Signer(key="predictable-secret", salt="two").signature("hello"),
)
def test_custom_algorithm(self):
signer = signing.Signer(key="predictable-secret", algorithm="sha512")
self.assertEqual(
signer.signature("hello"),
"Usf3uVQOZ9m6uPfVonKR-EBXjPe7bjMbp3_Fq8MfsptgkkM1ojidN0BxYaT5HAEN1"
"VzO9_jVu7R-VkqknHYNvw",
)
def test_invalid_algorithm(self):
signer = signing.Signer(key="predictable-secret", algorithm="whatever")
msg = "'whatever' is not an algorithm accepted by the hashlib module."
with self.assertRaisesMessage(InvalidAlgorithm, msg):
signer.sign("hello")
def test_sign_unsign(self):
"sign/unsign should be reversible"
signer = signing.Signer(key="predictable-secret")
examples = [
"q;wjmbk;wkmb",
"3098247529087",
"3098247:529:087:",
"jkw osanteuh ,rcuh nthu aou oauh ,ud du",
"\u2019",
]
for example in examples:
signed = signer.sign(example)
self.assertIsInstance(signed, str)
self.assertNotEqual(example, signed)
self.assertEqual(example, signer.unsign(signed))
def test_sign_unsign_non_string(self):
signer = signing.Signer(key="predictable-secret")
values = [
123,
1.23,
True,
datetime.date.today(),
]
for value in values:
with self.subTest(value):
signed = signer.sign(value)
self.assertIsInstance(signed, str)
self.assertNotEqual(signed, value)
self.assertEqual(signer.unsign(signed), str(value))
def test_unsign_detects_tampering(self):
"unsign should raise an exception if the value has been tampered with"
signer = signing.Signer(key="predictable-secret")
value = "Another string"
signed_value = signer.sign(value)
transforms = (
lambda s: s.upper(),
lambda s: s + "a",
lambda s: "a" + s[1:],
lambda s: s.replace(":", ""),
)
self.assertEqual(value, signer.unsign(signed_value))
for transform in transforms:
with self.assertRaises(signing.BadSignature):
signer.unsign(transform(signed_value))
def test_sign_unsign_object(self):
signer = signing.Signer(key="predictable-secret")
tests = [
["a", "list"],
"a string \u2019",
{"a": "dictionary"},
]
for obj in tests:
with self.subTest(obj=obj):
signed_obj = signer.sign_object(obj)
self.assertNotEqual(obj, signed_obj)
self.assertEqual(obj, signer.unsign_object(signed_obj))
signed_obj = signer.sign_object(obj, compress=True)
self.assertNotEqual(obj, signed_obj)
self.assertEqual(obj, signer.unsign_object(signed_obj))
def test_dumps_loads(self):
"dumps and loads be reversible for any JSON serializable object"
objects = [
["a", "list"],
"a string \u2019",
{"a": "dictionary"},
]
for o in objects:
self.assertNotEqual(o, signing.dumps(o))
self.assertEqual(o, signing.loads(signing.dumps(o)))
self.assertNotEqual(o, signing.dumps(o, compress=True))
self.assertEqual(o, signing.loads(signing.dumps(o, compress=True)))
def test_decode_detects_tampering(self):
"loads should raise exception for tampered objects"
transforms = (
lambda s: s.upper(),
lambda s: s + "a",
lambda s: "a" + s[1:],
lambda s: s.replace(":", ""),
)
value = {
"foo": "bar",
"baz": 1,
}
encoded = signing.dumps(value)
self.assertEqual(value, signing.loads(encoded))
for transform in transforms:
with self.assertRaises(signing.BadSignature):
signing.loads(transform(encoded))
def test_works_with_non_ascii_keys(self):
binary_key = b"\xe7" # Set some binary (non-ASCII key)
s = signing.Signer(key=binary_key)
self.assertEqual(
"foo:EE4qGC5MEKyQG5msxYA0sBohAxLC0BJf8uRhemh0BGU",
s.sign("foo"),
)
def test_valid_sep(self):
separators = ["/", "*sep*", ","]
for sep in separators:
signer = signing.Signer(key="predictable-secret", sep=sep)
self.assertEqual(
"foo%sjZQoX_FtSO70jX9HLRGg2A_2s4kdDBxz1QoO_OpEQb0" % sep,
signer.sign("foo"),
)
def test_invalid_sep(self):
"""should warn on invalid separator"""
msg = (
"Unsafe Signer separator: %r (cannot be empty or consist of only A-z0-9-_=)"
)
separators = ["", "-", "abc"]
for sep in separators:
with self.assertRaisesMessage(ValueError, msg % sep):
signing.Signer(sep=sep)
def test_verify_with_non_default_key(self):
old_signer = signing.Signer(key="secret")
new_signer = signing.Signer(
key="newsecret", fallback_keys=["othersecret", "secret"]
)
signed = old_signer.sign("abc")
self.assertEqual(new_signer.unsign(signed), "abc")
def test_sign_unsign_multiple_keys(self):
"""The default key is a valid verification key."""
signer = signing.Signer(key="secret", fallback_keys=["oldsecret"])
signed = signer.sign("abc")
self.assertEqual(signer.unsign(signed), "abc")
@override_settings(
SECRET_KEY="secret",
SECRET_KEY_FALLBACKS=["oldsecret"],
)
def test_sign_unsign_ignore_secret_key_fallbacks(self):
old_signer = signing.Signer(key="oldsecret")
signed = old_signer.sign("abc")
signer = signing.Signer(fallback_keys=[])
with self.assertRaises(signing.BadSignature):
signer.unsign(signed)
@override_settings(
SECRET_KEY="secret",
SECRET_KEY_FALLBACKS=["oldsecret"],
)
def test_default_keys_verification(self):
old_signer = signing.Signer(key="oldsecret")
signed = old_signer.sign("abc")
signer = signing.Signer()
self.assertEqual(signer.unsign(signed), "abc")
| TestSigner |
python | kamyu104__LeetCode-Solutions | Python/convert-integer-to-the-sum-of-two-no-zero-integers.py | {
"start": 473,
"end": 693
} | class ____(object):
def getNoZeroIntegers(self, n):
"""
:type n: int
:rtype: List[int]
"""
return next([a, n-a] for a in xrange(1, n) if '0' not in '{}{}'.format(a, n-a))
| Solution2 |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/panels/templates/panel.py | {
"start": 2271,
"end": 9475
} | class ____(Panel):
"""
A panel that lists all templates used during processing of a response.
"""
is_async = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.templates = []
# An associated list of dictionaries and their prettified
# representation.
self.pformat_layers = []
def _store_template_info(self, sender, **kwargs):
template, context = kwargs["template"], kwargs["context"]
# Skip templates that we are generating through the debug toolbar.
is_debug_toolbar_template = isinstance(template.name, str) and (
template.name.startswith("debug_toolbar/")
or template.name.startswith(
tuple(self.toolbar.config["SKIP_TEMPLATE_PREFIXES"])
)
)
if is_debug_toolbar_template:
return
kwargs["context"] = [
context_layer
for context_layer in context.dicts
if hasattr(context_layer, "items") and context_layer
]
kwargs["context_processors"] = getattr(context, "context_processors", None)
self.templates.append(kwargs)
# Implement the Panel API
nav_title = _("Templates")
@property
def title(self):
num_templates = len(self.get_stats()["templates"])
return _("Templates (%(num_templates)s rendered)") % {
"num_templates": num_templates
}
@property
def nav_subtitle(self):
templates = self.get_stats()["templates"]
if templates:
return templates[0]["template"]["name"]
return ""
template = "debug_toolbar/panels/templates.html"
@classmethod
def get_urls(cls):
return [path("template_source/", views.template_source, name="template_source")]
def enable_instrumentation(self):
template_rendered.connect(self._store_template_info)
def disable_instrumentation(self):
template_rendered.disconnect(self._store_template_info)
def process_context_list(self, context_layers):
context_list = []
for context_layer in context_layers:
# Check if the layer is in the cache.
pformatted = None
for key_values, _pformatted in self.pformat_layers:
if key_values == context_layer:
pformatted = _pformatted
break
if pformatted is None:
temp_layer = {}
for key, value in context_layer.items():
# Do not force evaluating LazyObject
if hasattr(value, "_wrapped"):
# SimpleLazyObject has __repr__ which includes actual value
# if it has been already evaluated
temp_layer[key] = repr(value)
# Replace any request elements - they have a large
# Unicode representation and the request data is
# already made available from the Request panel.
elif isinstance(value, http.HttpRequest):
temp_layer[key] = "<<request>>"
# Replace the debugging sql_queries element. The SQL
# data is already made available from the SQL panel.
elif key == "sql_queries" and isinstance(value, list):
temp_layer[key] = "<<sql_queries>>"
# Replace LANGUAGES, which is available in i18n context
# processor
elif key == "LANGUAGES" and isinstance(value, tuple):
temp_layer[key] = "<<languages>>"
# QuerySet would trigger the database: user can run the
# query from SQL Panel
elif isinstance(value, (QuerySet, RawQuerySet)):
temp_layer[key] = (
f"<<{value.__class__.__name__.lower()} of {value.model._meta.label}>>"
)
else:
token = allow_sql.set(False)
try:
saferepr(value) # this MAY trigger a db query
except SQLQueryTriggered:
temp_layer[key] = "<<triggers database query>>"
except UnicodeEncodeError:
temp_layer[key] = "<<Unicode encode error>>"
except Exception:
temp_layer[key] = "<<unhandled exception>>"
else:
temp_layer[key] = value
finally:
allow_sql.reset(token)
pformatted = pformat(temp_layer)
self.pformat_layers.append((context_layer, pformatted))
context_list.append(pformatted)
return context_list
def generate_stats(self, request, response):
template_context = []
for template_data in self.templates:
info = {}
# Clean up some info about templates
template = template_data["template"]
if hasattr(template, "origin") and template.origin and template.origin.name:
template.origin_name = template.origin.name
template.origin_hash = signing.dumps(template.origin.name)
else:
template.origin_name = _("No origin")
template.origin_hash = ""
info["template"] = {
"name": template.name,
"origin_name": template.origin_name,
"origin_hash": template.origin_hash,
}
# Clean up context for better readability
if self.toolbar.config["SHOW_TEMPLATE_CONTEXT"]:
if "context_list" not in template_data:
template_data["context_list"] = self.process_context_list(
template_data.get("context", [])
)
info["context"] = "\n".join(template_data["context_list"])
template_context.append(info)
# Fetch context_processors/template_dirs from any template
if self.templates:
context_processors = (
{
key: force_str(value)
for key, value in self.templates[0]["context_processors"].items()
}
if self.templates[0]["context_processors"]
else None
)
template = self.templates[0]["template"]
# django templates have the 'engine' attribute, while jinja
# templates use 'backend'
engine_backend = getattr(template, "engine", None) or template.backend
template_dirs = engine_backend.dirs
else:
context_processors = None
template_dirs = []
self.record_stats(
{
"templates": template_context,
"template_dirs": [normpath(x) for x in template_dirs],
"context_processors": context_processors,
}
)
| TemplatesPanel |
python | EpistasisLab__tpot | tpot/builtin_modules/arithmetictransformer.py | {
"start": 12152,
"end": 12798
} | class ____(TransformerMixin, BaseEstimator):
def __init__(self):
"""
A transformer that takes the minimum of all elements in a row.
"""
pass
def fit(self, X, y=None):
return self
def transform(self, X):
transformed_X = np.array(self.transform_helper(np.array(X)))
if transformed_X.dtype != float:
transformed_X = transformed_X.astype(float)
return transformed_X
def transform_helper(self, X):
X = np.array(X)
if len(X.shape) == 1:
X = np.expand_dims(X,0)
return np.expand_dims(np.amin(X,1),1)
| MinTransformer |
python | fluentpython__example-code-2e | 24-class-metaprog/metabunch/from3.6/bunch.py | {
"start": 1130,
"end": 2381
} | class ____(type): # <1>
def __new__(meta_cls, cls_name, bases, cls_dict): # <2>
defaults = {} # <3>
def __init__(self, **kwargs): # <4>
for name, default in defaults.items(): # <5>
setattr(self, name, kwargs.pop(name, default))
if kwargs: # <6>
extra = ', '.join(kwargs)
raise AttributeError(f'No slots left for: {extra!r}')
def __repr__(self): # <7>
rep = ', '.join(f'{name}={value!r}'
for name, default in defaults.items()
if (value := getattr(self, name)) != default)
return f'{cls_name}({rep})'
new_dict = dict(__slots__=[], __init__=__init__, __repr__=__repr__) # <8>
for name, value in cls_dict.items(): # <9>
if name.startswith('__') and name.endswith('__'): # <10>
if name in new_dict:
raise AttributeError(f"Can't set {name!r} in {cls_name!r}")
new_dict[name] = value
else: # <11>
new_dict['__slots__'].append(name)
defaults[name] = value
return super().__new__(meta_cls, cls_name, bases, new_dict) # <12>
| MetaBunch |
python | ray-project__ray | python/ray/dashboard/subprocesses/module.py | {
"start": 1393,
"end": 9187
} | class ____(abc.ABC):
"""
A Dashboard Head Module that runs in a subprocess as a standalone aiohttp server.
"""
def __init__(
self,
config: SubprocessModuleConfig,
):
"""
Initialize current module when DashboardHead loading modules.
:param dashboard_head: The DashboardHead instance.
"""
self._config = config
self._parent_process = multiprocessing.parent_process()
# Lazy init
self._gcs_client = None
self._aiogrpc_gcs_channel = None
self._parent_process_death_detection_task = None
self._http_session = None
async def _detect_parent_process_death(self):
"""
Detect parent process liveness. Only returns when parent process is dead.
"""
while True:
if not self._parent_process.is_alive():
logger.warning(
f"Parent process {self._parent_process.pid} died. Exiting..."
)
return
await asyncio.sleep(1)
@staticmethod
def is_minimal_module():
"""
Currently all SubprocessModule classes should be non-minimal.
We require this because SubprocessModuleHandle tracks aiohttp requests and
responses. To ease this, we can define another SubprocessModuleMinimalHandle
that doesn't track requests and responses, but still provides Queue interface
and health check.
TODO(ryw): If needed, create SubprocessModuleMinimalHandle.
"""
return False
async def run(self):
"""
Start running the module.
This method should be called first before the module starts receiving requests.
"""
app = aiohttp.web.Application(
client_max_size=ray_constants.DASHBOARD_CLIENT_MAX_SIZE,
)
routes: list[aiohttp.web.RouteDef] = [
aiohttp.web.get("/api/healthz", self._internal_module_health_check)
]
handlers = inspect.getmembers(
self,
lambda x: (
inspect.ismethod(x)
and hasattr(x, "__route_method__")
and hasattr(x, "__route_path__")
),
)
for _, handler in handlers:
routes.append(
aiohttp.web.route(
handler.__route_method__,
handler.__route_path__,
handler,
)
)
app.add_routes(routes)
runner = aiohttp.web.AppRunner(app, access_log=None)
await runner.setup()
module_name = self.__class__.__name__
if sys.platform == "win32":
named_pipe_path = get_named_pipe_path(
module_name, self._config.session_name
)
site = aiohttp.web.NamedPipeSite(runner, named_pipe_path)
logger.info(f"Started aiohttp server over {named_pipe_path}.")
else:
socket_path = get_socket_path(self._config.socket_dir, module_name)
site = aiohttp.web.UnixSite(runner, socket_path)
logger.info(f"Started aiohttp server over {socket_path}.")
await site.start()
@property
def gcs_client(self):
if self._gcs_client is None:
if not ray.experimental.internal_kv._internal_kv_initialized():
gcs_client = GcsClient(
address=self._config.gcs_address,
cluster_id=self._config.cluster_id_hex,
)
ray.experimental.internal_kv._initialize_internal_kv(gcs_client)
self._gcs_client = ray.experimental.internal_kv.internal_kv_get_gcs_client()
return self._gcs_client
@property
def aiogrpc_gcs_channel(self):
if self._aiogrpc_gcs_channel is None:
gcs_channel = GcsChannel(gcs_address=self._config.gcs_address, aio=True)
gcs_channel.connect()
self._aiogrpc_gcs_channel = gcs_channel.channel()
return self._aiogrpc_gcs_channel
@property
def session_name(self):
"""
Return the Ray session name. It's not related to the aiohttp session.
"""
return self._config.session_name
@property
def temp_dir(self):
return self._config.temp_dir
@property
def session_dir(self):
return self._config.session_dir
@property
def log_dir(self):
return self._config.log_dir
@property
def http_session(self):
if self._http_session is None:
self._http_session = aiohttp.ClientSession()
return self._http_session
@property
def gcs_address(self):
return self._config.gcs_address
async def _internal_module_health_check(self, request):
return aiohttp.web.Response(
text="success",
content_type="application/text",
)
async def run_module_inner(
cls: type[SubprocessModule],
config: SubprocessModuleConfig,
incarnation: int,
child_conn: multiprocessing.connection.Connection,
):
module_name = cls.__name__
logger.info(
f"Starting module {module_name} with incarnation {incarnation} and config {config}"
)
try:
module = cls(config)
module._parent_process_death_detection_task = asyncio.create_task(
module._detect_parent_process_death()
)
module._parent_process_death_detection_task.add_done_callback(
lambda _: sys.exit()
)
await module.run()
child_conn.send(None)
child_conn.close()
logger.info(f"Module {module_name} initialized, receiving messages...")
except Exception as e:
logger.exception(f"Error creating module {module_name}")
raise e
def run_module(
cls: type[SubprocessModule],
config: SubprocessModuleConfig,
incarnation: int,
child_conn: multiprocessing.connection.Connection,
):
"""
Entrypoint for a subprocess module.
"""
module_name = cls.__name__
current_proctitle = ray._raylet.getproctitle()
ray._raylet.setproctitle(
f"ray-dashboard-{module_name}-{incarnation} ({current_proctitle})"
)
logging_filename = module_logging_filename(module_name, config.logging_filename)
setup_component_logger(
logging_level=config.logging_level,
logging_format=config.logging_format,
log_dir=config.log_dir,
filename=logging_filename,
max_bytes=config.logging_rotate_bytes,
backup_count=config.logging_rotate_backup_count,
)
if config.logging_filename:
stdout_filename = module_logging_filename(
module_name, config.logging_filename, extension=".out"
)
stderr_filename = module_logging_filename(
module_name, config.logging_filename, extension=".err"
)
logging_utils.redirect_stdout_stderr_if_needed(
os.path.join(config.log_dir, stdout_filename),
os.path.join(config.log_dir, stderr_filename),
config.logging_rotate_bytes,
config.logging_rotate_backup_count,
)
loop = asyncio.new_event_loop()
task = loop.create_task(
run_module_inner(
cls,
config,
incarnation,
child_conn,
)
)
# TODO: do graceful shutdown.
# 1. define a stop token.
# 2. join the loop to wait for all pending tasks to finish, up until a timeout.
# 3. close the loop and exit.
def sigterm_handler(signum, frame):
logger.warning(f"Exiting with signal {signum} immediately...")
sys.exit(signum)
ray._private.utils.set_sigterm_handler(sigterm_handler)
loop.run_until_complete(task)
loop.run_forever()
| SubprocessModule |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 31063,
"end": 33957
} | class ____(NonStrictDataModel):
"""
:param cls: Augmentation class
:type cls: str
:param types: Augmentation type
:type types: Sequence[str]
:param strength: Augmentation strength. Range [0,).
:type strength: float
:param arguments: Arguments dictionary per custom augmentation type.
:type arguments: dict
"""
_schema = {
"properties": {
"arguments": {
"additionalProperties": {
"additionalProperties": True,
"type": "object",
},
"description": "Arguments dictionary per custom augmentation type.",
"type": ["object", "null"],
},
"cls": {"description": "Augmentation class", "type": ["string", "null"]},
"strength": {
"description": "Augmentation strength. Range [0,).",
"minimum": 0,
"type": ["number", "null"],
},
"types": {
"description": "Augmentation type",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(self, cls=None, types=None, strength=None, arguments=None, **kwargs):
super(DvAugmentationSet, self).__init__(**kwargs)
self.cls = cls
self.types = types
self.strength = strength
self.arguments = arguments
@schema_property("cls")
def cls(self):
return self._property_cls
@cls.setter
def cls(self, value):
if value is None:
self._property_cls = None
return
self.assert_isinstance(value, "cls", six.string_types)
self._property_cls = value
@schema_property("types")
def types(self):
return self._property_types
@types.setter
def types(self, value):
if value is None:
self._property_types = None
return
self.assert_isinstance(value, "types", (list, tuple))
self.assert_isinstance(value, "types", six.string_types, is_array=True)
self._property_types = value
@schema_property("strength")
def strength(self):
return self._property_strength
@strength.setter
def strength(self, value):
if value is None:
self._property_strength = None
return
self.assert_isinstance(value, "strength", six.integer_types + (float,))
self._property_strength = value
@schema_property("arguments")
def arguments(self):
return self._property_arguments
@arguments.setter
def arguments(self, value):
if value is None:
self._property_arguments = None
return
self.assert_isinstance(value, "arguments", (dict,))
self._property_arguments = value
| DvAugmentationSet |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 171045,
"end": 182186
} | class ____(_fixtures.FixtureTest):
run_setup_mappers = "each"
run_inserts = "each"
__sparse_driver_backend__ = True
def _eagerload_mappings(self, addresses_lazy=True, user_lazy=True):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
lazy=addresses_lazy,
backref=backref("user", lazy=user_lazy),
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
def test_basic(self):
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
q = iter(
sess.query(User)
.yield_per(1)
.from_statement(text("select * from users"))
)
ret = []
eq_(len(sess.identity_map), 0)
ret.append(next(q))
ret.append(next(q))
eq_(len(sess.identity_map), 2)
ret.append(next(q))
ret.append(next(q))
eq_(len(sess.identity_map), 4)
try:
next(q)
assert False
except StopIteration:
pass
def test_we_can_close_cursor(self):
"""test new usecase close() added along with #7274"""
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
stmt = select(User).execution_options(yield_per=15)
result = sess.execute(stmt)
with mock.patch.object(result.raw, "_soft_close") as mock_close:
two_results = result.fetchmany(2)
eq_(len(two_results), 2)
eq_(mock_close.mock_calls, [])
result.close()
eq_(mock_close.mock_calls, [mock.call(hard=True)])
with expect_raises(sa.exc.ResourceClosedError):
result.fetchmany(10)
with expect_raises(sa.exc.ResourceClosedError):
result.fetchone()
with expect_raises(sa.exc.ResourceClosedError):
result.all()
result.close()
@testing.combinations("fetchmany", "fetchone", "fetchall")
def test_cursor_is_closed_on_exhausted(self, fetch_method):
"""test #7274"""
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
stmt = select(User).execution_options(yield_per=15)
result = sess.execute(stmt)
with mock.patch.object(result.raw, "_soft_close") as mock_close:
# call assertions are implementation specific.
# test needs that _soft_close called at least once and without
# the hard=True flag
if fetch_method == "fetchmany":
while True:
buf = result.fetchmany(2)
if not buf:
break
eq_(mock_close.mock_calls, [mock.call()])
elif fetch_method == "fetchall":
eq_(len(result.all()), 4)
eq_(
mock_close.mock_calls, [mock.call(), mock.call(hard=False)]
)
elif fetch_method == "fetchone":
while True:
row = result.fetchone()
if row is None:
break
eq_(
mock_close.mock_calls, [mock.call(), mock.call(hard=False)]
)
else:
assert False
# soft closed, we can still get an empty result
eq_(result.all(), [])
# real closed
result.close()
assert_raises(sa.exc.ResourceClosedError, result.all)
def test_yield_per_close_on_interrupted_iteration_legacy(self):
"""test #8710"""
self._eagerload_mappings()
User = self.classes.User
asserted_result = None
class _Query(Query):
def _iter(self):
nonlocal asserted_result
asserted_result = super(_Query, self)._iter()
return asserted_result
sess = fixture_session(query_cls=_Query)
with expect_raises_message(Exception, "hi"):
for i, row in enumerate(sess.query(User).yield_per(1)):
assert not asserted_result._soft_closed
assert not asserted_result.closed
if i > 1:
raise Exception("hi")
gc_collect() # needed for pypy, #8762
assert asserted_result._soft_closed
assert not asserted_result.closed
def test_yield_per_close_on_interrupted_iteration(self):
"""test #8710"""
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
with expect_raises_message(Exception, "hi"):
result = sess.execute(select(User).execution_options(yield_per=1))
for i, row in enumerate(result):
assert not result._soft_closed
assert not result.closed
if i > 1:
raise Exception("hi")
gc_collect() # not apparently needed, but defensive for pypy re: #8762
assert not result._soft_closed
assert not result.closed
result.close()
assert result._soft_closed
assert result.closed
def test_yield_per_and_execution_options_legacy(self):
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
@event.listens_for(sess, "do_orm_execute")
def check(ctx):
eq_(ctx.load_options._yield_per, 15)
return
eq_(
{
k: v
for k, v in ctx.execution_options.items()
if not k.startswith("_")
},
{
"yield_per": 15,
"foo": "bar",
},
)
q = sess.query(User).yield_per(15)
q = q.execution_options(foo="bar")
eq_(len(q.all()), 4)
def test_yield_per_and_execution_options(self):
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
@event.listens_for(sess, "do_orm_execute")
def check(ctx):
eq_(ctx.load_options._yield_per, 15)
eq_(
{
k: v
for k, v in ctx.execution_options.items()
if not k.startswith("_")
},
{
"yield_per": 15,
},
)
stmt = select(User).execution_options(yield_per=15)
result = sess.execute(stmt)
assert isinstance(
result.raw.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy
)
eq_(result._yield_per, 15)
eq_(result.raw.cursor_strategy._max_row_buffer, 15)
eq_(len(result.all()), 4)
def test_no_joinedload_opt(self):
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
q = sess.query(User).options(joinedload(User.addresses)).yield_per(1)
assert_raises_message(
sa_exc.InvalidRequestError,
"Can't use yield_per with eager loaders that require "
"uniquing or row buffering",
q.all,
)
def test_no_contains_eager_opt(self):
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
q = (
sess.query(User)
.join(User.addresses)
.options(contains_eager(User.addresses))
.yield_per(1)
)
assert_raises_message(
sa_exc.InvalidRequestError,
"Can't use yield_per with eager loaders that require "
"uniquing or row buffering",
q.all,
)
def test_no_subqueryload_opt(self):
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
q = sess.query(User).options(subqueryload(User.addresses)).yield_per(1)
assert_raises_message(
sa_exc.InvalidRequestError,
"Can't use yield_per with eager loaders that require "
"uniquing or row buffering",
q.all,
)
def test_no_subqueryload_mapping(self):
self._eagerload_mappings(addresses_lazy="subquery")
User = self.classes.User
sess = fixture_session()
q = sess.query(User).yield_per(1)
assert_raises_message(
sa_exc.InvalidRequestError,
"Can't use yield_per with eager loaders that require "
"uniquing or row buffering",
q.all,
)
def test_joinedload_m2o_ok(self):
self._eagerload_mappings(user_lazy="joined")
Address = self.classes.Address
sess = fixture_session()
q = sess.query(Address).yield_per(1)
eq_(len(q.all()), 5)
def test_eagerload_opt_disable(self):
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
q = (
sess.query(User)
.options(subqueryload(User.addresses))
.enable_eagerloads(False)
.yield_per(1)
)
eq_(len(q.all()), 4)
q = (
sess.query(User)
.options(joinedload(User.addresses))
.enable_eagerloads(False)
.yield_per(1)
)
eq_(len(q.all()), 4)
@testing.combinations(
"joined",
"subquery",
"selectin",
"select",
"immediate",
argnames="lazy",
)
def test_eagerload_config_disable(self, lazy):
self._eagerload_mappings(addresses_lazy=lazy)
User = self.classes.User
sess = fixture_session()
q = sess.query(User).enable_eagerloads(False).yield_per(1)
objs = q.all()
eq_(len(objs), 4)
for obj in objs:
assert "addresses" not in obj.__dict__
def test_m2o_joinedload_not_others(self):
self._eagerload_mappings(addresses_lazy="joined")
Address = self.classes.Address
sess = fixture_session()
q = (
sess.query(Address)
.options(lazyload("*"), joinedload(Address.user))
.yield_per(1)
.filter_by(id=1)
)
def go():
result = q.all()
assert result[0].user
self.assert_sql_count(testing.db, go, 1)
def test_no_unique_w_yield_per(self):
self._eagerload_mappings()
User = self.classes.User
sess = fixture_session()
stmt = select(User).execution_options(yield_per=10)
result = sess.execute(stmt).unique()
with expect_raises_message(
sa_exc.InvalidRequestError,
r"Can't use the ORM yield_per feature in "
r"conjunction with unique\(\)",
):
next(result)
| YieldTest |
python | pytorch__pytorch | torch/ao/nn/intrinsic/modules/fused.py | {
"start": 7405,
"end": 8005
} | class ____(_FusedModule):
r"""This is a sequential container which calls the BatchNorm 3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, batch_norm, relu):
assert (
type_before_parametrizations(batch_norm) == BatchNorm3d
and type_before_parametrizations(relu) == ReLU
), (
f"Incorrect types for input modules{type_before_parametrizations(batch_norm)}"
f"{type_before_parametrizations(relu)}"
)
super().__init__(batch_norm, relu)
| BNReLU3d |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 102863,
"end": 106356
} | class ____(Response):
"""
Response of events.get_scalar_metric_data endpoint.
:param events: task scalar metric events
:type events: Sequence[dict]
:param returned: amount of events returned
:type returned: int
:param total: amount of events in task
:type total: int
:param scroll_id: Scroll ID of previous call (used for getting more results)
:type scroll_id: str
"""
_service = "events"
_action = "get_scalar_metric_data"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"events": {
"description": "task scalar metric events",
"items": {"type": "object"},
"type": ["array", "null"],
},
"returned": {
"description": "amount of events returned",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID of previous call (used for getting more results)",
"type": ["string", "null"],
},
"total": {
"description": "amount of events in task",
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
events: Optional[List[dict]] = None,
returned: Optional[int] = None,
total: Optional[int] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetScalarMetricDataResponse, self).__init__(**kwargs)
self.events = events
self.returned = returned
self.total = total
self.scroll_id = scroll_id
@schema_property("events")
def events(self) -> Optional[List[dict]]:
return self._property_events
@events.setter
def events(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_events = None
return
self.assert_isinstance(value, "events", (list, tuple))
self.assert_isinstance(value, "events", (dict,), is_array=True)
self._property_events = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("total")
def total(self) -> Optional[int]:
return self._property_total
@total.setter
def total(self, value: Optional[int]) -> None:
if value is None:
self._property_total = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "total", six.integer_types)
self._property_total = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetScalarMetricDataResponse |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp/dagster_gcp/pipes/context_injectors.py | {
"start": 392,
"end": 1906
} | class ____(PipesContextInjector):
"""A context injector that injects context by writing to a temporary GCS location.
Args:
bucket (str): The GCS bucket to write to.
client (google.cloud.storage.Client): A Google Cloud SDK client to use to write to GCS.
key_prefix (Optional[str]): An optional prefix to use for the GCS key.
Will be concatenated with a random string.
"""
def __init__(self, *, bucket: str, client: GCSClient, key_prefix: Optional[str] = None):
super().__init__()
self.bucket = check.str_param(bucket, "bucket")
self.key_prefix = check.opt_str_param(key_prefix, "key_prefix")
self.client = client
@contextmanager
def inject_context(self, context: PipesContextData) -> Iterator[PipesParams]: # pyright: ignore[reportIncompatibleMethodOverride]
key_prefix = (self.key_prefix or "") + "".join(random.choices(string.ascii_letters, k=30))
key = os.path.join(key_prefix, _CONTEXT_FILENAME)
self.client.get_bucket(self.bucket).blob(key).upload_from_string(json.dumps(context))
yield {"bucket": self.bucket, "key": key}
self.client.get_bucket(self.bucket).blob(key).delete()
def no_messages_debug_text(self) -> str:
return (
"Attempted to inject context via a temporary file in GCS. Expected"
" PipesGCSContextLoader to be explicitly passed to open_dagster_pipes in the external"
" process."
)
| PipesGCSContextInjector |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/needs_text_relocation/package.py | {
"start": 216,
"end": 860
} | class ____(Package):
"""A dumy package that encodes its prefix."""
homepage = "https://www.cmake.org"
url = "https://cmake.org/files/v3.4/cmake-3.4.3.tar.gz"
version("0.0.0", md5="12345678qwertyuiasdfghjkzxcvbnm0")
def install(self, spec, prefix):
mkdirp(prefix.bin)
exe = join_path(prefix.bin, "exe")
with open(exe, "w", encoding="utf-8") as f:
f.write(prefix)
set_executable(exe)
otherexe = join_path(prefix.bin, "otherexe")
with open(otherexe, "w", encoding="utf-8") as f:
f.write("Lorem Ipsum")
set_executable(otherexe)
| NeedsTextRelocation |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_gcs.py | {
"start": 2242,
"end": 6143
} | class ____:
def test_gcs_blob_trigger_serialization(self, trigger):
"""
Asserts that the GCSBlobTrigger correctly serializes its arguments
and classpath.
"""
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.gcs.GCSBlobTrigger"
assert kwargs == {
"bucket": TEST_BUCKET,
"object_name": TEST_OBJECT,
"use_glob": False,
"poke_interval": TEST_POLLING_INTERVAL,
"google_cloud_conn_id": TEST_GCP_CONN_ID,
"hook_params": TEST_HOOK_PARAMS,
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.gcs.GCSBlobTrigger._object_exists")
async def test_gcs_blob_trigger_success(self, mock_object_exists, trigger):
"""
Tests that the GCSBlobTrigger is success case
"""
mock_object_exists.return_value = "success"
generator = trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "success", "message": "success"}) == actual
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.gcs.GCSBlobTrigger._object_exists")
async def test_gcs_blob_trigger_pending(self, mock_object_exists, trigger):
"""
Test that GCSBlobTrigger is in loop if file isn't found.
"""
mock_object_exists.return_value = "pending"
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.gcs.GCSBlobTrigger._object_exists")
async def test_gcs_blob_trigger_exception(self, mock_object_exists, trigger):
"""
Tests the GCSBlobTrigger does fire if there is an exception.
"""
mock_object_exists.side_effect = AsyncMock(side_effect=Exception("Test exception"))
task = [i async for i in trigger.run()]
assert len(task) == 1
assert TriggerEvent({"status": "error", "message": "Test exception"}) in task
@pytest.mark.asyncio
@pytest.mark.parametrize(
("exists", "response"),
[
(True, "success"),
(False, "pending"),
],
)
async def test_object_exists(self, exists, response, trigger):
"""
Tests to check if a particular object in Google Cloud Storage
is found or not
"""
hook = AsyncMock(GCSAsyncHook)
storage = AsyncMock(Storage)
hook.get_storage_client.return_value = storage
bucket = AsyncMock(Bucket)
storage.get_bucket.return_value = bucket
bucket.blob_exists.return_value = exists
res = await trigger._object_exists(hook, TEST_BUCKET, TEST_OBJECT)
assert res == response
bucket.blob_exists.assert_called_once_with(blob_name=TEST_OBJECT)
@pytest.mark.asyncio
@pytest.mark.parametrize(
("blob_list", "response"),
[
([TEST_OBJECT], "success"),
([], "pending"),
],
)
async def test_object_exists_using_glob(self, blob_list, response, trigger_using_glob):
"""
Tests to check if a particular object in Google Cloud Storage
is found or not
"""
hook = AsyncMock(GCSAsyncHook)
storage = AsyncMock(Storage)
hook.get_storage_client.return_value = storage
bucket = AsyncMock(Bucket)
storage.get_bucket.return_value = bucket
bucket.list_blobs.return_value = blob_list
res = await trigger_using_glob._object_exists(hook, TEST_BUCKET, TEST_OBJECT)
assert res == response
bucket.list_blobs.assert_called_once_with(match_glob=TEST_OBJECT)
| TestGCSBlobTrigger |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/pkg_c/package.py | {
"start": 217,
"end": 686
} | class ____(Package):
"""Simple package with no dependencies"""
homepage = "http://www.example.com"
url = "http://www.example.com/c-1.0.tar.gz"
# Needed to test CDash reporting
phases = ["configure", "build", "install"]
version("1.0", md5="0123456789abcdef0123456789abcdef")
def configure(self, spec, prefix):
pass
def build(self, spec, prefix):
pass
def install(self, spec, prefix):
touch(prefix.pkg_c)
| PkgC |
python | kamyu104__LeetCode-Solutions | Python/design-excel-sum-formula.py | {
"start": 113,
"end": 2432
} | class ____(object):
def __init__(self, H, W):
"""
:type H: int
:type W: str
"""
self.__exl = [[0 for _ in xrange(ord(W)-ord('A')+1)] \
for _ in xrange(H+1)]
self.__fward = collections.defaultdict(lambda : collections.defaultdict(int))
self.__bward = collections.defaultdict(set)
def set(self, r, c, v):
"""
:type r: int
:type c: str
:type v: int
:rtype: void
"""
self.__reset_dependency(r, c)
self.__update_others(r, c, v)
def get(self, r, c):
"""
:type r: int
:type c: str
:rtype: int
"""
return self.__exl[r][ord(c) - ord('A')]
def sum(self, r, c, strs):
"""
:type r: int
:type c: str
:type strs: List[str]
:rtype: int
"""
self.__reset_dependency(r, c)
result = self.__calc_and_update_dependency(r, c, strs)
self.__update_others(r, c, result)
return result
def __reset_dependency(self, r, c):
key = (r, c)
if key in self.__bward.keys():
for k in self.__bward[key]:
self.__fward[k].pop(key, None)
self.__bward[key] = set()
def __calc_and_update_dependency(self, r, c, strs):
result = 0
for s in strs:
s, e = s.split(':')[0], s.split(':')[1] if ':' in s else s
left, right, top, bottom = ord(s[0])-ord('A'), ord(e[0])-ord('A'), int(s[1:]), int(e[1:])
for i in xrange(top, bottom+1):
for j in xrange(left, right+1):
result += self.__exl[i][j]
self.__fward[(i, chr(ord('A')+j))][(r, c)] += 1
self.__bward[(r, c)].add((i, chr(ord('A')+j)))
return result
def __update_others(self, r, c, v):
prev = self.__exl[r][ord(c)-ord('A')]
self.__exl[r][ord(c)-ord('A')] = v
q = collections.deque()
q.append(((r, c), v-prev))
while q:
key, diff = q.popleft()
if key in self.__fward:
for k, count in self.__fward[key].iteritems():
q.append((k, diff*count))
self.__exl[k[0]][ord(k[1])-ord('A')] += diff*count
| Excel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.