language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-dashscope/llama_index/readers/dashscope/domain/lease_domains.py | {
"start": 246,
"end": 535
} | class ____(Enum):
OSS_PreSignedUrl = "OSS.PreSignedUrl"
@classmethod
def from_value(cls, value):
for member in cls:
if member.value == value:
return member
raise ValueError(f"No enum member found for value '{value}'")
| FileUploadMethod |
python | walkccc__LeetCode | solutions/707. Design Linked List/707.py | {
"start": 47,
"end": 107
} | class ____:
val: int
next: ListNode | None = None
| ListNode |
python | fsspec__filesystem_spec | fsspec/implementations/chained.py | {
"start": 103,
"end": 680
} | class ____(AbstractFileSystem):
"""Chained filesystem base class.
A chained filesystem is designed to be layered over another FS.
This is useful to implement things like caching.
This base class does very little on its own, but is used as a marker
that the class is designed for chaining.
Right now this is only used in `url_to_fs` to provide the path argument
(`fo`) to the chained filesystem from the underlying filesystem.
Additional functionality may be added in the future.
"""
protocol: ClassVar[str] = "chained"
| ChainedFileSystem |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_basic.py | {
"start": 73064,
"end": 80647
} | class ____(fixtures.MappedTest):
"""test overriding of column attributes."""
@classmethod
def define_tables(cls, metadata):
global base, subtable, subtable_two
base = Table(
"base",
metadata,
Column(
"base_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("data", String(255)),
Column("sqlite_fixer", String(10)),
)
subtable = Table(
"subtable",
metadata,
Column(
"base_id",
Integer,
ForeignKey("base.base_id"),
primary_key=True,
),
Column("subdata", String(255)),
)
subtable_two = Table(
"subtable_two",
metadata,
Column("base_id", Integer, primary_key=True),
Column("fk_base_id", Integer, ForeignKey("base.base_id")),
Column("subdata", String(255)),
)
def test_plain(self):
# control case
class Base:
pass
class Sub(Base):
pass
self.mapper_registry.map_imperatively(Base, base)
self.mapper_registry.map_imperatively(Sub, subtable, inherits=Base)
# Sub gets a "base_id" property using the "base_id"
# column of both tables.
eq_(
class_mapper(Sub).get_property("base_id").columns,
[subtable.c.base_id, base.c.base_id],
)
def test_override_explicit(self):
# this pattern is what you see when using declarative
# in particular, here we do a "manual" version of
# what we'd like the mapper to do.
class Base:
pass
class Sub(Base):
pass
self.mapper_registry.map_imperatively(
Base, base, properties={"id": base.c.base_id}
)
self.mapper_registry.map_imperatively(
Sub,
subtable,
inherits=Base,
properties={
# this is the manual way to do it, is not really
# possible in declarative
"id": [base.c.base_id, subtable.c.base_id]
},
)
eq_(
class_mapper(Sub).get_property("id").columns,
[base.c.base_id, subtable.c.base_id],
)
s1 = Sub()
s1.id = 10
sess = fixture_session()
sess.add(s1)
sess.flush()
assert sess.get(Sub, 10) is s1
def test_override_onlyinparent(self):
class Base:
pass
class Sub(Base):
pass
self.mapper_registry.map_imperatively(
Base, base, properties={"id": base.c.base_id}
)
self.mapper_registry.map_imperatively(Sub, subtable, inherits=Base)
eq_(class_mapper(Sub).get_property("id").columns, [base.c.base_id])
eq_(
class_mapper(Sub).get_property("base_id").columns,
[subtable.c.base_id],
)
s1 = Sub()
s1.id = 10
s2 = Sub()
s2.base_id = 15
sess = fixture_session()
sess.add_all([s1, s2])
sess.flush()
# s1 gets '10'
assert sess.get(Sub, 10) is s1
# s2 gets a new id, base_id is overwritten by the ultimate
# PK col
assert s2.id == s2.base_id != 15
def test_override_implicit(self):
# this is originally [ticket:1111].
# the pattern here is now disallowed by [ticket:1892]
class Base:
pass
class Sub(Base):
pass
self.mapper_registry.map_imperatively(
Base, base, properties={"id": base.c.base_id}
)
with expect_raises_message(
sa_exc.InvalidRequestError,
"Implicitly combining column base.base_id with column "
"subtable.base_id under attribute 'id'. Please configure one "
"or more attributes for these same-named columns explicitly.",
):
self.mapper_registry.map_imperatively(
Sub,
subtable,
inherits=Base,
properties={"id": subtable.c.base_id},
)
def test_pk_fk_different(self):
class Base:
pass
class Sub(Base):
pass
self.mapper_registry.map_imperatively(Base, base)
def go():
self.mapper_registry.map_imperatively(
Sub, subtable_two, inherits=Base
)
assert_warns_message(
sa_exc.SAWarning,
"Implicitly combining column base.base_id with "
"column subtable_two.base_id under attribute 'base_id'",
go,
)
def test_plain_descriptor(self):
"""test that descriptors prevent inheritance from propagating
properties to subclasses."""
class Base:
pass
class Sub(Base):
@property
def data(self):
return "im the data"
self.mapper_registry.map_imperatively(Base, base)
self.mapper_registry.map_imperatively(Sub, subtable, inherits=Base)
s1 = Sub()
sess = fixture_session()
sess.add(s1)
sess.flush()
assert sess.query(Sub).one().data == "im the data"
def test_custom_descriptor(self):
"""test that descriptors prevent inheritance from propagating
properties to subclasses."""
class MyDesc:
def __get__(self, instance, owner):
if instance is None:
return self
return "im the data"
class Base:
pass
class Sub(Base):
data = MyDesc()
self.mapper_registry.map_imperatively(Base, base)
self.mapper_registry.map_imperatively(Sub, subtable, inherits=Base)
s1 = Sub()
sess = fixture_session()
sess.add(s1)
sess.flush()
assert sess.query(Sub).one().data == "im the data"
def test_sub_columns_over_base_descriptors(self):
class Base:
@property
def subdata(self):
return "this is base"
class Sub(Base):
pass
self.mapper_registry.map_imperatively(Base, base)
self.mapper_registry.map_imperatively(Sub, subtable, inherits=Base)
sess = fixture_session()
b1 = Base()
assert b1.subdata == "this is base"
s1 = Sub()
s1.subdata = "this is sub"
assert s1.subdata == "this is sub"
sess.add_all([s1, b1])
sess.flush()
sess.expunge_all()
assert sess.get(Base, b1.base_id).subdata == "this is base"
assert sess.get(Sub, s1.base_id).subdata == "this is sub"
def test_base_descriptors_over_base_cols(self):
class Base:
@property
def data(self):
return "this is base"
class Sub(Base):
pass
self.mapper_registry.map_imperatively(Base, base)
self.mapper_registry.map_imperatively(Sub, subtable, inherits=Base)
sess = fixture_session()
b1 = Base()
assert b1.data == "this is base"
s1 = Sub()
assert s1.data == "this is base"
sess.add_all([s1, b1])
sess.flush()
sess.expunge_all()
assert sess.get(Base, b1.base_id).data == "this is base"
assert sess.get(Sub, s1.base_id).data == "this is base"
| OverrideColKeyTest |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/data_adapter.py | {
"start": 38088,
"end": 47274
} | class ____(object):
"""Handles iterating over epoch-level `tf.data.Iterator` objects."""
def __init__(self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
initial_epoch=0,
epochs=1,
shuffle=False,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
model=None,
steps_per_execution=None,
distribute=True):
"""Initializes a `DataHandler`.
Arguments:
x: See `Model.fit`.
y: See `Model.fit`.
sample_weight: See `Model.fit`.
batch_size: See `Model.fit`.
steps_per_epoch: See `Model.fit`.
initial_epoch: See `Model.fit`.
epochs: See `Model.fit`.
shuffle: See `Model.fit`.
class_weight: See `Model.fit`.
max_queue_size: See `Model.fit`.
workers: See `Model.fit`.
use_multiprocessing: See `Model.fit`.
model: The `Model` instance. Needed in order to correctly `build` the
`Model` using generator-like inputs (see `GeneratorDataAdapter`).
steps_per_execution: See `Model.compile`.
distribute: Whether to distribute the `tf.dataset`.
`PreprocessingLayer.adapt` does not support distributed datasets,
`Model` should always set this to `True`.
"""
self._initial_epoch = initial_epoch
self._epochs = epochs
self._insufficient_data = False
self._model = model
# `steps_per_execution_value` is the cached initial value.
# `steps_per_execution` is mutable and may be changed by the DataAdapter
# to handle partial executions.
if steps_per_execution is None:
self._steps_per_execution = 1
self._steps_per_execution_value = 1
else:
self._steps_per_execution = steps_per_execution
self._steps_per_execution_value = steps_per_execution.numpy().item()
adapter_cls = select_data_adapter(x, y)
self._adapter = adapter_cls(
x,
y,
batch_size=batch_size,
steps=steps_per_epoch,
epochs=epochs - initial_epoch,
sample_weights=sample_weight,
shuffle=shuffle,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
distribution_strategy=distribute_lib.get_strategy(),
model=model)
strategy = distribute_lib.get_strategy()
self._current_step = 0
self._step_increment = self._steps_per_execution_value - 1
self._insufficient_data = False
self._configure_dataset_and_inferred_steps(strategy, x, steps_per_epoch,
class_weight, distribute)
def _configure_dataset_and_inferred_steps(self, strategy, x, steps_per_epoch,
class_weight, distribute):
"""Configure the `_dataset` and `_inferred_steps` attributes."""
del x
dataset = self._adapter.get_dataset()
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
self._inferred_steps = self._infer_steps(steps_per_epoch, dataset)
# `PreprocessingLayer.adapt` does not currently support distributed
# datasets, so we pass `distribute=False` there.
if distribute and not _is_distributed_dataset(dataset):
dataset = strategy.experimental_distribute_dataset(dataset)
self._dataset = dataset
self._validate_data_handler()
def enumerate_epochs(self):
"""Yields `(epoch, tf.data.Iterator)`."""
with self._truncate_execution_to_epoch():
data_iterator = iter(self._dataset)
for epoch in range(self._initial_epoch, self._epochs):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
if self._adapter.should_recreate_iterator():
data_iterator = iter(self._dataset)
yield epoch, data_iterator
self._adapter.on_epoch_end()
@contextlib.contextmanager
def _truncate_execution_to_epoch(self):
"""Truncates steps per execution to at most one epoch."""
should_truncate = (
self._inferred_steps is not None and
self._steps_per_execution_value > self._inferred_steps)
original_value = self._steps_per_execution_value
try:
if should_truncate:
self._steps_per_execution.assign(self._inferred_steps)
self._steps_per_execution_value = self._inferred_steps
yield
finally:
if should_truncate:
self._steps_per_execution.assign(original_value)
self._steps_per_execution_value = original_value
def sync(self):
context.async_wait()
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
self.sync()
except (StopIteration, errors.OutOfRangeError):
if self._inferred_steps is None:
self._inferred_steps = self._current_step
else:
self._insufficient_data = True
total_epochs = self._epochs - self._initial_epoch
logging.warning(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate at "
"least `steps_per_epoch * epochs` batches (in this case, "
"{} batches). You may need to use the repeat() function "
"when building your dataset.".format(total_epochs *
self._inferred_steps))
def steps(self):
"""Yields steps for the current epoch."""
self._current_step = 0
# `self._inferred_steps` can be changed by `catch_stop_iteration`.
while (self._inferred_steps is None or
self._current_step < self._inferred_steps):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
can_run_full_execution = (
self._steps_per_execution_value == 1 or
self._inferred_steps is None or
self._inferred_steps - self._current_step >=
self._steps_per_execution_value)
if can_run_full_execution:
self._step_increment = self._steps_per_execution_value - 1
yield self._current_step
self._current_step += self._steps_per_execution_value
else:
# Last partial execution.
steps_remaining = self._inferred_steps - self._current_step
self._steps_per_execution.assign(steps_remaining)
self._step_increment = steps_remaining - 1
yield self._current_step
self._current_step += steps_remaining
self._steps_per_execution.assign(self._steps_per_execution_value)
@property
def step_increment(self):
"""The number to increment the step for `on_batch_end` methods."""
return self._step_increment
@property
def inferred_steps(self):
"""The inferred steps per epoch of the created `Dataset`.
This will be `None` in the case where:
(1) A `Dataset` of unknown cardinality was passed to the `DataHandler`, and
(2) `steps_per_epoch` was not provided, and
(3) The first epoch of iteration has not yet completed.
Returns:
The inferred steps per epoch of the created `Dataset`.
"""
return self._inferred_steps
@property
def should_sync(self):
# Catch OutOfRangeError for Datasets of unknown size.
# This blocks until the batch has finished executing.
# TODO(b/150292341): Allow multiple async steps here.
return self._inferred_steps is None
def _log_indefinite_training_warning(self):
logging.warning("The training loop will run indefinitely since you have "
"set `steps_per_epoch=-1`. Please use batch-level "
"callbacks to save checkpoints or log training progress, "
"etc")
def _infer_steps(self, steps, dataset):
"""Infers steps_per_epoch needed to loop through a dataset."""
if steps == -1:
self._log_indefinite_training_warning()
return None
if steps is not None:
return steps
adapter_steps = self._adapter.get_size()
if adapter_steps is not None:
return adapter_steps
size = cardinality.cardinality(dataset)
if size == cardinality.INFINITE and steps is None:
raise ValueError(
"When passing an infinitely repeating dataset, please specify a "
"`steps_per_epoch` value so that epoch level "
"callbacks continue to work. The value can be arbitrary, or a number "
"that you think correctly defines the size of an epoch. "
"Epoch-level callbacks will then be called at this interval.")
if size >= 0:
return size.numpy().item()
return None
@property
def _samples(self):
return self._adapter.get_samples()
def _validate_data_handler(self):
# TODO(b/152094471): Support this with DistIter.get_next_as_optional.
if self._steps_per_execution_value > 1 and self._inferred_steps is None:
raise ValueError(
"Could not infer the size of the data. With "
"`steps_per_execution > 1`, you must specify the number of steps "
"to run.")
| DataHandler |
python | encode__starlette | starlette/middleware/wsgi.py | {
"start": 2389,
"end": 2705
} | class ____:
def __init__(self, app: Callable[..., Any]) -> None:
self.app = app
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
assert scope["type"] == "http"
responder = WSGIResponder(self.app, scope)
await responder(receive, send)
| WSGIMiddleware |
python | huggingface__transformers | src/transformers/models/dinov3_vit/modeling_dinov3_vit.py | {
"start": 6053,
"end": 10823
} | class ____(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: DINOv3ViTConfig):
super().__init__()
self.config = config
self.base = config.rope_theta
self.head_dim = config.hidden_size // config.num_attention_heads
self.num_patches_h = config.image_size // config.patch_size
self.num_patches_w = config.image_size // config.patch_size
inv_freq = 1 / self.base ** torch.arange(0, 1, 4 / self.head_dim, dtype=torch.float32) # (head_dim / 4,)
self.register_buffer("inv_freq", inv_freq, persistent=False)
def forward(self, pixel_values: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
_, _, height, width = pixel_values.shape
num_patches_h = height // self.config.patch_size
num_patches_w = width // self.config.patch_size
device = pixel_values.device
device_type = device.type if isinstance(device.type, str) and device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
# Although we could precompute static patch_coords from image_size and patch_size in the config,
# the model was trained with random_scale, so it can process images of varying sizes.
# Therefore, it's better to compute patch_coords dynamically (with lru_cache).
patch_coords = get_patches_center_coordinates(
num_patches_h, num_patches_w, dtype=torch.float32, device=device
)
if self.training:
patch_coords = augment_patches_center_coordinates(
patch_coords,
shift=self.config.pos_embed_shift,
jitter=self.config.pos_embed_jitter,
rescale=self.config.pos_embed_rescale,
)
# (height * width, 2, head_dim / 4) -> (height * width, head_dim / 2) -> (height * width, head_dim)
angles = 2 * math.pi * patch_coords[:, :, None] * self.inv_freq[None, None, :]
angles = angles.flatten(1, 2)
angles = angles.tile(2)
cos = torch.cos(angles)
sin = torch.sin(angles)
dtype = pixel_values.dtype
return cos.to(dtype=dtype), sin.to(dtype=dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def apply_rotary_pos_emb(
q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, **kwargs
) -> tuple[torch.Tensor, torch.Tensor]:
"""Applies Rotary Position Embedding to the query and key tensors, but only to the patch tokens,
ignoring the prefix tokens (cls token and register tokens).
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
num_tokens = q.shape[-2]
num_patches = sin.shape[-2]
num_prefix_tokens = num_tokens - num_patches # cls token + register tokens
q_prefix_tokens, q_patches = q.split((num_prefix_tokens, num_patches), dim=-2)
k_prefix_tokens, k_patches = k.split((num_prefix_tokens, num_patches), dim=-2)
# apply rope only to patch tokens
q_patches = (q_patches * cos) + (rotate_half(q_patches) * sin)
k_patches = (k_patches * cos) + (rotate_half(k_patches) * sin)
q = torch.cat((q_prefix_tokens, q_patches), dim=-2)
k = torch.cat((k_prefix_tokens, k_patches), dim=-2)
return q, k
| DINOv3ViTRopePositionEmbedding |
python | pytorch__pytorch | test/functorch/test_control_flow.py | {
"start": 355856,
"end": 359091
} | class ____(torch.nn.Module):
def forward(self, a, b1, b2, c):
a: "b8[]"; b1: "i64[1]"; b2: "i64[1]"; c: "f32[10]";
a, b1, b2, c, = fx_pytree.tree_flatten_spec(([a, b1, b2, c], {}), self._in_spec)
_guards_fn = self._guards_fn(a, b1, b2, c); _guards_fn = None
true_graph_0 = self.true_graph_0
false_graph_0 = self.false_graph_0
cond = torch.ops.higher_order.cond(a, true_graph_0, false_graph_0, (c, b1, b2)); a = true_graph_0 = false_graph_0 = c = b1 = b2 = None
getitem: "f32[10]" = cond[0]; cond = None
mul: "f32[10]" = torch.ops.aten.mul.Tensor(getitem, 2); getitem = None
return pytree.tree_unflatten((mul,), self._out_spec)
class true_graph_0(torch.nn.Module):
def forward(self, c: "f32[10]", b1: "i64[1]", b2: "i64[1]"):
item: "Sym(u0)" = torch.ops.aten.item.default(b1); b1 = None
mul: "f32[10]" = torch.ops.aten.mul.Tensor(c, item); c = item = None
return (mul,)
class false_graph_0(torch.nn.Module):
def forward(self, c: "f32[10]", b1: "i64[1]", b2: "i64[1]"):
item: "Sym(u1)" = torch.ops.aten.item.default(b2); b2 = None
mul: "f32[10]" = torch.ops.aten.mul.Tensor(c, item); c = item = None
return (mul,)
""", # noqa: B950
)
def test_cond_merge_graph_preserves_ph_meta(self):
class M(torch.nn.Module):
def forward(self, x, y, z):
a = y.shape[0]
b = z.shape[0]
def true_fn(x):
return x + a
def false_fn(x):
return x + b * z
return torch.cond(x.sum() > 5, true_fn, false_fn, (x,))
backend = EagerAndRecordGraphs()
_ = torch.compile(M(), backend=backend)(
torch.randn(3, 4), torch.randn(3, 4), torch.randn(3, 4)
)
self.assertEqual(len(backend.graphs), 1)
gm = backend.graphs[0]
subgraph_attr = gm.graph.find_nodes(op="get_attr")[0]
subgm = getattr(gm, subgraph_attr.target)
for ph in subgm.graph.find_nodes(op="placeholder"):
self.assertTrue("example_value" in ph.meta)
@skipIfTorchDynamo("Skip because dynamo cannot trace torch.export.")
def test_cond_symint_closure(self):
from torch.export import Dim
class M(torch.nn.Module):
def forward(self, x, y, z):
a = y.shape[0]
b = z.shape[0]
def true_fn(x):
return x + a
def false_fn(x):
return x + b * z
# When exporting with non-strict: a and b are symints,
# so torch.compile need to wrap and trace symint inputs.
return torch.cond(x.shape[0] > 5, true_fn, false_fn, (x,))
args = (torch.ones(3, 3), torch.ones(5), torch.ones(3, 3))
model = M()
dynamic_shapes = {"x": {0: Dim("d")}, "y": {0: Dim("d1")}, "z": {0: Dim("d")}}
non_strict_graph_str = self._check_export_ret_graph_str(
model, args, dynamic_shapes
)
self.assertExpectedInline(
non_strict_graph_str,
"""\
| GraphModule |
python | PyCQA__pylint | tests/functional/c/consider/consider_using_dict_items.py | {
"start": 1369,
"end": 3762
} | class ____:
c_dict = {}
# Should emit warning when iterating over a dict attribute of a class
for k5 in Foo.c_dict: # [consider-using-dict-items]
val = Foo.c_dict[k5]
c_dict = {}
# Should NOT emit warning whey key used to access a different dict
for k5 in Foo.c_dict: # This is fine
val = B_DICT[k5]
for k5 in Foo.c_dict: # This is fine
val = c_dict[k5]
# Should emit warning within a list/dict comprehension
val = {k9: B_DICT[k9] for k9 in B_DICT} # [consider-using-dict-items]
val = [(k7, B_DICT[k7]) for k7 in B_DICT] # [consider-using-dict-items]
# Should emit warning even when using dict attribute of a class within comprehension
val = [(k7, Foo.c_dict[k7]) for k7 in Foo.c_dict] # [consider-using-dict-items]
val = any(True for k8 in Foo.c_dict if Foo.c_dict[k8]) # [consider-using-dict-items]
# Should emit warning when dict access done in ``if`` portion of comprehension
val = any(True for k8 in B_DICT if B_DICT[k8]) # [consider-using-dict-items]
# Should NOT emit warning whey key used to access a different dict
val = [(k7, B_DICT[k7]) for k7 in Foo.c_dict]
val = any(True for k8 in Foo.c_dict if B_DICT[k8])
# Should NOT emit warning, essentially same check as above
val = [(k7, c_dict[k7]) for k7 in Foo.c_dict]
val = any(True for k8 in Foo.c_dict if c_dict[k8])
# Should emit warning, using .keys() of Foo.c_dict
val = any(True for k8 in Foo.c_dict.keys() if Foo.c_dict[k8]) # [consider-iterating-dictionary,consider-using-dict-items]
# Test false positive described in #4630
# (https://github.com/pylint-dev/pylint/issues/4630)
D = {"key": "value"}
for k in D: # this is fine, with the reassignment of d[k], d[k] is necessary
D[k] += "123"
if "1" in D[k]: # index lookup necessary here, do not emit error
print("found 1")
for k in D: # if this gets rewritten to d.items(), we are back to the above problem
D[k] = D[k] + 1
if "1" in D[k]: # index lookup necessary here, do not emit error
print("found 1")
for k in D: # [consider-using-dict-items]
if "1" in D[k]: # index lookup necessary here, do not emit error
print("found 1")
# False positive in issue #9554
# https://github.com/pylint-dev/pylint/issues/9554
for var in os.environ.keys(): # [consider-iterating-dictionary]
if var.startswith("foo_"):
del os.environ[var] # index lookup necessary here, do not emit error
| Foo |
python | explosion__spaCy | spacy/lang/ur/__init__.py | {
"start": 161,
"end": 374
} | class ____(BaseDefaults):
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}
| UrduDefaults |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1250378,
"end": 1250623
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData):
"""Audit log entry for a org.disable_two_factor_requirement event."""
__schema__ = github_schema
__field_names__ = ()
| OrgDisableTwoFactorRequirementAuditEntry |
python | google__jax | jaxlib/setup.py | {
"start": 1546,
"end": 3625
} | class ____(Distribution):
"""This class makes 'bdist_wheel' include an ABI tag on the wheel."""
def has_ext_modules(self):
return True
setup(
name=project_name,
version=__version__,
cmdclass=_cmdclass,
description='XLA library for JAX',
long_description=_long_description,
long_description_content_type='text/markdown',
author='JAX team',
author_email='jax-dev@google.com',
packages=['jaxlib'],
python_requires='>=3.11',
install_requires=[
'scipy>=1.13',
'numpy>=2.0',
'ml_dtypes>=0.5.0',
],
url='https://github.com/jax-ml/jax',
license='Apache-2.0',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
"Programming Language :: Python :: Free Threading :: 3 - Stable",
],
package_data={
'jaxlib': [
'*.so',
'*.dylib',
'*.dll',
'*.pyd*',
'py.typed',
'cpu/*',
'cuda/*',
'cuda/nvvm/libdevice/libdevice*',
'mosaic/*.py',
'mosaic/dialect/gpu/*.py',
'mosaic/gpu/*.so',
'mosaic/python/*.py',
'mosaic/python/*.so',
'mlir/*.py',
'mlir/*.pyi',
'mlir/dialects/*.py',
'mlir/dialects/gpu/*.py',
'mlir/dialects/gpu/passes/*.py',
'mlir/extras/*.py',
'mlir/_mlir_libs/*.dll',
'mlir/_mlir_libs/*.dylib',
'mlir/_mlir_libs/*.so',
'mlir/_mlir_libs/*.pyd',
'mlir/_mlir_libs/*.py',
'mlir/_mlir_libs/*.pyi',
'rocm/*',
'triton/*.py',
'triton/*.pyi',
'triton/*.pyd',
'triton/*.so',
'include/xla/ffi/api/*.h',
],
},
zip_safe=False,
distclass=BinaryDistribution,
)
| BinaryDistribution |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 28293,
"end": 29657
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self, name: str, aws_key_id: str, aws_secret_key: str, aws_region_name: str, start_date: str
):
"""Airbyte Source for Aws Cloudtrail.
Documentation can be found at https://docs.airbyte.com/integrations/sources/aws-cloudtrail
Args:
name (str): The name of the destination.
aws_key_id (str): AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key.
aws_secret_key (str): AWS CloudTrail Access Key ID. See the docs for more information on how to obtain this key.
aws_region_name (str): The default AWS Region to use, for example, us-west-1 or us-west-2. When specifying a Region inline during client initialization, this property is named region_name.
start_date (str): The date you would like to replicate data. Data in AWS CloudTrail is available for last 90 days only. Format: YYYY-MM-DD.
"""
self.aws_key_id = check.str_param(aws_key_id, "aws_key_id")
self.aws_secret_key = check.str_param(aws_secret_key, "aws_secret_key")
self.aws_region_name = check.str_param(aws_region_name, "aws_region_name")
self.start_date = check.str_param(start_date, "start_date")
super().__init__("Aws Cloudtrail", name)
| AwsCloudtrailSource |
python | django__django | tests/template_tests/test_custom.py | {
"start": 1437,
"end": 9950
} | class ____(TagTestCase):
def test_simple_tags(self):
c = Context({"value": 42})
templates = [
("{% load custom %}{% no_params %}", "no_params - Expected result"),
("{% load custom %}{% one_param 37 %}", "one_param - Expected result: 37"),
(
"{% load custom %}{% explicit_no_context 37 %}",
"explicit_no_context - Expected result: 37",
),
(
"{% load custom %}{% no_params_with_context %}",
"no_params_with_context - Expected result (context value: 42)",
),
(
"{% load custom %}{% params_and_context 37 %}",
"params_and_context - Expected result (context value: 42): 37",
),
(
"{% load custom %}{% simple_two_params 37 42 %}",
"simple_two_params - Expected result: 37, 42",
),
(
"{% load custom %}{% simple_keyword_only_param kwarg=37 %}",
"simple_keyword_only_param - Expected result: 37",
),
(
"{% load custom %}{% simple_keyword_only_default %}",
"simple_keyword_only_default - Expected result: 42",
),
(
"{% load custom %}{% simple_keyword_only_default kwarg=37 %}",
"simple_keyword_only_default - Expected result: 37",
),
(
"{% load custom %}{% simple_one_default 37 %}",
"simple_one_default - Expected result: 37, hi",
),
(
'{% load custom %}{% simple_one_default 37 two="hello" %}',
"simple_one_default - Expected result: 37, hello",
),
(
'{% load custom %}{% simple_one_default one=99 two="hello" %}',
"simple_one_default - Expected result: 99, hello",
),
(
"{% load custom %}{% simple_one_default 37 42 %}",
"simple_one_default - Expected result: 37, 42",
),
(
"{% load custom %}{% simple_unlimited_args 37 %}",
"simple_unlimited_args - Expected result: 37, hi",
),
(
"{% load custom %}{% simple_unlimited_args 37 42 56 89 %}",
"simple_unlimited_args - Expected result: 37, 42, 56, 89",
),
(
"{% load custom %}{% simple_only_unlimited_args %}",
"simple_only_unlimited_args - Expected result: ",
),
(
"{% load custom %}{% simple_only_unlimited_args 37 42 56 89 %}",
"simple_only_unlimited_args - Expected result: 37, 42, 56, 89",
),
(
"{% load custom %}"
'{% simple_unlimited_args_kwargs 37 40|add:2 56 eggs="scrambled" '
"four=1|add:3 %}",
"simple_unlimited_args_kwargs - Expected result: 37, 42, 56 / "
"eggs=scrambled, four=4",
),
]
for entry in templates:
t = self.engine.from_string(entry[0])
self.assertEqual(t.render(c), entry[1])
for entry in templates:
t = self.engine.from_string(
"%s as var %%}Result: {{ var }}" % entry[0][0:-2]
)
self.assertEqual(t.render(c), "Result: %s" % entry[1])
def test_simple_tag_errors(self):
errors = [
(
"'simple_one_default' received unexpected keyword argument 'three'",
'{% load custom %}{% simple_one_default 99 two="hello" three="foo" %}',
),
(
"'simple_two_params' received too many positional arguments",
"{% load custom %}{% simple_two_params 37 42 56 %}",
),
(
"'simple_one_default' received too many positional arguments",
"{% load custom %}{% simple_one_default 37 42 56 %}",
),
(
"'simple_keyword_only_param' did not receive value(s) for the "
"argument(s): 'kwarg'",
"{% load custom %}{% simple_keyword_only_param %}",
),
(
"'simple_keyword_only_param' received multiple values for "
"keyword argument 'kwarg'",
"{% load custom %}{% simple_keyword_only_param kwarg=42 kwarg=37 %}",
),
(
"'simple_keyword_only_default' received multiple values for "
"keyword argument 'kwarg'",
"{% load custom %}{% simple_keyword_only_default kwarg=42 "
"kwarg=37 %}",
),
(
"'simple_unlimited_args_kwargs' received some positional argument(s) "
"after some keyword argument(s)",
"{% load custom %}"
"{% simple_unlimited_args_kwargs 37 40|add:2 "
'eggs="scrambled" 56 four=1|add:3 %}',
),
(
"'simple_unlimited_args_kwargs' received multiple values for keyword "
"argument 'eggs'",
"{% load custom %}"
"{% simple_unlimited_args_kwargs 37 "
'eggs="scrambled" eggs="scrambled" %}',
),
]
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
self.engine.from_string(entry[1])
for entry in errors:
with self.assertRaisesMessage(TemplateSyntaxError, entry[0]):
self.engine.from_string("%s as var %%}" % entry[1][0:-2])
def test_simple_tag_escaping_autoescape_off(self):
c = Context({"name": "Jack & Jill"}, autoescape=False)
t = self.engine.from_string("{% load custom %}{% escape_naive %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_naive_escaping(self):
c = Context({"name": "Jack & Jill"})
t = self.engine.from_string("{% load custom %}{% escape_naive %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_explicit_escaping(self):
# Check we don't double escape
c = Context({"name": "Jack & Jill"})
t = self.engine.from_string("{% load custom %}{% escape_explicit %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_format_html_escaping(self):
# Check we don't double escape
c = Context({"name": "Jack & Jill"})
t = self.engine.from_string("{% load custom %}{% escape_format_html %}")
self.assertEqual(t.render(c), "Hello Jack & Jill!")
def test_simple_tag_registration(self):
# The decorators preserve the decorated function's docstring, name,
# and attributes.
self.verify_tag(custom.no_params, "no_params")
self.verify_tag(custom.one_param, "one_param")
self.verify_tag(custom.explicit_no_context, "explicit_no_context")
self.verify_tag(custom.no_params_with_context, "no_params_with_context")
self.verify_tag(custom.params_and_context, "params_and_context")
self.verify_tag(
custom.simple_unlimited_args_kwargs, "simple_unlimited_args_kwargs"
)
self.verify_tag(
custom.simple_tag_without_context_parameter,
"simple_tag_without_context_parameter",
)
def test_simple_tag_missing_context(self):
# The 'context' parameter must be present when takes_context is True
msg = (
"'simple_tag_without_context_parameter' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string(
"{% load custom %}{% simple_tag_without_context_parameter 123 %}"
)
def test_simple_tag_missing_context_no_params(self):
msg = (
"'simple_tag_takes_context_without_params' is decorated with "
"takes_context=True so it must have a first argument of 'context'"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.from_string(
"{% load custom %}{% simple_tag_takes_context_without_params %}"
)
| SimpleTagTests |
python | lxml__lxml | src/lxml/html/_difflib.py | {
"start": 1741,
"end": 29497
} | class ____:
"""
SequenceMatcher is a flexible class for comparing pairs of sequences of
any type, so long as the sequence elements are hashable. The basic
algorithm predates, and is a little fancier than, an algorithm
published in the late 1980's by Ratcliff and Obershelp under the
hyperbolic name "gestalt pattern matching". The basic idea is to find
the longest contiguous matching subsequence that contains no "junk"
elements (R-O doesn't address junk). The same idea is then applied
recursively to the pieces of the sequences to the left and to the right
of the matching subsequence. This does not yield minimal edit
sequences, but does tend to yield matches that "look right" to people.
SequenceMatcher tries to compute a "human-friendly diff" between two
sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
longest *contiguous* & junk-free matching subsequence. That's what
catches peoples' eyes. The Windows(tm) windiff has another interesting
notion, pairing up elements that appear uniquely in each sequence.
That, and the method here, appear to yield more intuitive difference
reports than does diff. This method appears to be the least vulnerable
to syncing up on blocks of "junk lines", though (like blank lines in
ordinary text files, or maybe "<P>" lines in HTML files). That may be
because this is the only method of the 3 that has a *concept* of
"junk" <wink>.
Example, comparing two strings, and considering blanks to be "junk":
>>> s = SequenceMatcher(lambda x: x == " ",
... "private Thread currentThread;",
... "private volatile Thread currentThread;")
>>>
.ratio() returns a float in [0, 1], measuring the "similarity" of the
sequences. As a rule of thumb, a .ratio() value over 0.6 means the
sequences are close matches:
>>> print(round(s.ratio(), 3))
0.866
>>>
If you're only interested in where the sequences match,
.get_matching_blocks() is handy:
>>> for block in s.get_matching_blocks():
... print("a[%d] and b[%d] match for %d elements" % block)
a[0] and b[0] match for 8 elements
a[8] and b[17] match for 21 elements
a[29] and b[38] match for 0 elements
Note that the last tuple returned by .get_matching_blocks() is always a
dummy, (len(a), len(b), 0), and this is the only case in which the last
tuple element (number of elements matched) is 0.
If you want to know how to change the first sequence into the second,
use .get_opcodes():
>>> for opcode in s.get_opcodes():
... print("%6s a[%d:%d] b[%d:%d]" % opcode)
equal a[0:8] b[0:8]
insert a[8:8] b[8:17]
equal a[8:29] b[17:38]
See the Differ class for a fancy human-friendly file differencer, which
uses SequenceMatcher both to compare sequences of lines, and to compare
sequences of characters within similar (near-matching) lines.
See also function get_close_matches() in this module, which shows how
simple code building on SequenceMatcher can be used to do useful work.
Timing: Basic R-O is cubic time worst case and quadratic time expected
case. SequenceMatcher is quadratic time for the worst case and has
expected-case behavior dependent in a complicated way on how many
elements the sequences have in common; best case time is linear.
"""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
"""Construct a SequenceMatcher.
Optional arg isjunk is None (the default), or a one-argument
function that takes a sequence element and returns true iff the
element is junk. None is equivalent to passing "lambda x: 0", i.e.
no elements are considered to be junk. For example, pass
lambda x: x in " \\t"
if you're comparing lines as sequences of characters, and don't
want to synch up on blanks or hard tabs.
Optional arg a is the first of two sequences to be compared. By
default, an empty string. The elements of a must be hashable. See
also .set_seqs() and .set_seq1().
Optional arg b is the second of two sequences to be compared. By
default, an empty string. The elements of b must be hashable. See
also .set_seqs() and .set_seq2().
Optional arg autojunk should be set to False to disable the
"automatic junk heuristic" that treats popular elements as junk
(see module documentation for more information).
"""
# Members:
# a
# first sequence
# b
# second sequence; differences are computed as "what do
# we need to do to 'a' to change it into 'b'?"
# b2j
# for x in b, b2j[x] is a list of the indices (into b)
# at which x appears; junk and popular elements do not appear
# fullbcount
# for x in b, fullbcount[x] == the number of times x
# appears in b; only materialized if really needed (used
# only for computing quick_ratio())
# matching_blocks
# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
# ascending & non-overlapping in i and in j; terminated by
# a dummy (len(a), len(b), 0) sentinel
# opcodes
# a list of (tag, i1, i2, j1, j2) tuples, where tag is
# one of
# 'replace' a[i1:i2] should be replaced by b[j1:j2]
# 'delete' a[i1:i2] should be deleted
# 'insert' b[j1:j2] should be inserted
# 'equal' a[i1:i2] == b[j1:j2]
# isjunk
# a user-supplied function taking a sequence element and
# returning true iff the element is "junk" -- this has
# subtle but helpful effects on the algorithm, which I'll
# get around to writing up someday <0.9 wink>.
# DON'T USE! Only __chain_b uses this. Use "in self.bjunk".
# bjunk
# the items in b for which isjunk is True.
# bpopular
# nonjunk items in b treated as junk by the heuristic (if used).
self.isjunk = isjunk
self.a = self.b = None
self.autojunk = autojunk
self.set_seqs(a, b)
def set_seqs(self, a, b):
"""Set the two sequences to be compared.
>>> s = SequenceMatcher()
>>> s.set_seqs("abcd", "bcde")
>>> s.ratio()
0.75
"""
self.set_seq1(a)
self.set_seq2(b)
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None
def set_seq2(self, b):
"""Set the second sequence to be compared.
The first sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq2("abcd")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq1().
"""
if b is self.b:
return
self.b = b
self.matching_blocks = self.opcodes = None
self.fullbcount = None
self.__chain_b()
# For each element x in b, set b2j[x] to a list of the indices in
# b where x appears; the indices are in increasing order; note that
# the number of times x appears in b is len(b2j[x]) ...
# when self.isjunk is defined, junk elements don't show up in this
# map at all, which stops the central find_longest_match method
# from starting any matching block at a junk element ...
# b2j also does not contain entries for "popular" elements, meaning
# elements that account for more than 1 + 1% of the total elements, and
# when the sequence is reasonably large (>= 200 elements); this can
# be viewed as an adaptive notion of semi-junk, and yields an enormous
# speedup when, e.g., comparing program files with hundreds of
# instances of "return NULL;" ...
# note that this is only called when b changes; so for cross-product
# kinds of matches, it's best to call set_seq2 once, then set_seq1
# repeatedly
def __chain_b(self):
# Because isjunk is a user-defined (not C) function, and we test
# for junk a LOT, it's important to minimize the number of calls.
# Before the tricks described here, __chain_b was by far the most
# time-consuming routine in the whole module! If anyone sees
# Jim Roskind, thank him again for profile.py -- I never would
# have guessed that.
# The first trick is to build b2j ignoring the possibility
# of junk. I.e., we don't call isjunk at all yet. Throwing
# out the junk later is much cheaper than building b2j "right"
# from the start.
b = self.b
self.b2j = b2j = {}
for i, elt in enumerate(b):
indices = b2j.setdefault(elt, [])
indices.append(i)
# Purge junk elements
self.bjunk = junk = set()
isjunk = self.isjunk
if isjunk:
for elt in b2j.keys():
if isjunk(elt):
junk.add(elt)
for elt in junk: # separate loop avoids separate list of keys
del b2j[elt]
# Purge popular elements that are not junk
self.bpopular = popular = set()
n = len(b)
if self.autojunk and n >= 200:
ntest = n // 100 + 1
for elt, idxs in b2j.items():
if len(idxs) > ntest:
popular.add(elt)
for elt in popular: # ditto; as fast for 1% deletion
del b2j[elt]
def find_longest_match(self, alo=0, ahi_=None, blo=0, bhi_=None):
"""Find longest matching block in a[alo:ahi] and b[blo:bhi].
By default it will find the longest match in the entirety of a and b.
If isjunk is not defined:
Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
alo <= i <= i+k <= ahi
blo <= j <= j+k <= bhi
and for all (i',j',k') meeting those conditions,
k >= k'
i <= i'
and if i == i', j <= j'
In other words, of all maximal matching blocks, return one that
starts earliest in a, and of all those maximal matching blocks that
start earliest in a, return the one that starts earliest in b.
>>> s = SequenceMatcher(None, " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=0, b=4, size=5)
If isjunk is defined, first the longest matching block is
determined as above, but with the additional restriction that no
junk element appears in the block. Then that block is extended as
far as possible by matching (only) junk elements on both sides. So
the resulting block never matches on junk except as identical junk
happens to be adjacent to an "interesting" match.
Here's the same example as before, but considering blanks to be
junk. That prevents " abcd" from matching the " abcd" at the tail
end of the second sequence directly. Instead only the "abcd" can
match, and matches the leftmost "abcd" in the second sequence:
>>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
>>> s.find_longest_match(0, 5, 0, 9)
Match(a=1, b=0, size=4)
If no blocks match, return (alo, blo, 0).
>>> s = SequenceMatcher(None, "ab", "c")
>>> s.find_longest_match(0, 2, 0, 1)
Match(a=0, b=0, size=0)
"""
# CAUTION: stripping common prefix or suffix would be incorrect.
# E.g.,
# ab
# acab
# Longest matching block is "ab", but if common prefix is
# stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
# strip, so ends up claiming that ab is changed to acab by
# inserting "ca" in the middle. That's minimal but unintuitive:
# "it's obvious" that someone inserted "ac" at the front.
# Windiff ends up at the same place as diff, but by pairing up
# the unique 'b's and then matching the first two 'a's.
bjunk: set = self.bjunk
a, b, b2j = self.a, self.b, self.b2j
ahi = len(a) if ahi_ is None else ahi_
bhi = len(b) if bhi_ is None else bhi_
besti, bestj, bestsize = alo, blo, 0
# find longest junk-free match
# during an iteration of the loop, j2len[j] = length of longest
# junk-free match ending with a[i-1] and b[j]
j2len = {}
nothing = []
for i in range(alo, ahi):
# look at all instances of a[i] in b; note that because
# b2j has no junk keys, the loop is skipped if a[i] is junk
newj2len = {}
for j in b2j.get(a[i], nothing):
# a[i] matches b[j]
if j < blo:
continue
if j >= bhi:
break
k = newj2len[j] = j2len.get(j-1, 0) + 1
if k > bestsize:
besti, bestj, bestsize = i-k+1, j-k+1, k
j2len = newj2len
# Extend the best by non-junk elements on each end. In particular,
# "popular" non-junk elements aren't in b2j, which greatly speeds
# the inner loop above, but also means "the best" match so far
# doesn't contain any junk *or* popular non-junk elements.
while besti > alo and bestj > blo and \
b[bestj-1] not in bjunk and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
b[bestj+bestsize] not in bjunk and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize += 1
# Now that we have a wholly interesting match (albeit possibly
# empty!), we may as well suck up the matching junk on each
# side of it too. Can't think of a good reason not to, and it
# saves post-processing the (possibly considerable) expense of
# figuring out what to do with it. In the case of an empty
# interesting match, this is clearly the right thing to do,
# because no other kind of match is possible in the regions.
while besti > alo and bestj > blo and \
b[bestj-1] in bjunk and \
a[besti-1] == b[bestj-1]:
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
while besti+bestsize < ahi and bestj+bestsize < bhi and \
b[bestj+bestsize] in bjunk and \
a[besti+bestsize] == b[bestj+bestsize]:
bestsize = bestsize + 1
return Match(besti, bestj, bestsize)
def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> list(s.get_matching_blocks())
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks
la, lb = len(self.a), len(self.b)
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
# ('queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
alo, ahi, blo, bhi = queue.pop()
i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j))
if i+k < ahi and j+k < bhi:
queue.append((i+k, ahi, j+k, bhi))
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for i2, j2, k2 in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1))
i1, j1, k1 = i2, j2, k2
if k1:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
self.matching_blocks = list(map(Match._make, non_adjacent))
return self.matching_blocks
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
tuple preceding it, and likewise for j1 == the previous j2.
The tags are strings, with these meanings:
'replace': a[i1:i2] should be replaced by b[j1:j2]
'delete': a[i1:i2] should be deleted.
Note that j1==j2 in this case.
'insert': b[j1:j2] should be inserted at a[i1:i1].
Note that i1==i2 in this case.
'equal': a[i1:i2] == b[j1:j2]
>>> a = "qabxcd"
>>> b = "abycdf"
>>> s = SequenceMatcher(None, a, b)
>>> for tag, i1, i2, j1, j2 in s.get_opcodes():
... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
delete a[0:1] (q) b[0:0] ()
equal a[1:3] (ab) b[0:2] (ab)
replace a[3:4] (x) b[2:3] (y)
equal a[4:6] (cd) b[3:5] (cd)
insert a[6:6] () b[5:6] (f)
"""
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = answer = []
for ai, bj, size in self.get_matching_blocks():
# invariant: we've pumped out correct diffs to change
# a[:i] into b[:j], and the next matching block is
# a[ai:ai+size] == b[bj:bj+size]. So we need to pump
# out a diff to change a[i:ai] into b[j:bj], pump out
# the matching block, and move (i,j) beyond the match
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
answer.append( (tag, i, ai, j, bj) )
i, j = ai+size, bj+size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
answer.append( ('equal', ai, i, bj, j) )
return answer
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = list(map(str, range(1,40)))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1 ,j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group
def ratio(self):
"""Return a measure of the sequences' similarity (float in [0,1]).
Where T is the total number of elements in both sequences, and
M is the number of matches, this is 2.0*M / T.
Note that this is 1 if the sequences are identical, and 0 if
they have nothing in common.
.ratio() is expensive to compute if you haven't already computed
.get_matching_blocks() or .get_opcodes(), in which case you may
want to try .quick_ratio() or .real_quick_ratio() first to get an
upper bound.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.quick_ratio()
0.75
>>> s.real_quick_ratio()
1.0
"""
matches: cython.Py_ssize_t
matches = sum(triple[-1] for triple in self.get_matching_blocks())
return _calculate_ratio(matches, len(self.a) + len(self.b))
def quick_ratio(self):
"""Return an upper bound on ratio() relatively quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute.
"""
# viewing a and b as multisets, set matches to the cardinality
# of their intersection; this counts the number of matches
# without regard to order, so is clearly an upper bound
if self.fullbcount is None:
self.fullbcount = fullbcount = {}
for elt in self.b:
fullbcount[elt] = fullbcount.get(elt, 0) + 1
fullbcount = self.fullbcount
# avail[x] is the number of times x appears in 'b' less the
# number of times we've seen it in 'a' so far ... kinda
avail = {}
matches: cython.Py_ssize_t
matches = 0
for elt in self.a:
if elt in avail:
numb = avail[elt]
else:
numb = fullbcount.get(elt, 0)
avail[elt] = numb - 1
if numb > 0:
matches = matches + 1
return _calculate_ratio(matches, len(self.a) + len(self.b))
def real_quick_ratio(self):
"""Return an upper bound on ratio() very quickly.
This isn't defined beyond that it is an upper bound on .ratio(), and
is faster to compute than either .ratio() or .quick_ratio().
"""
la, lb = len(self.a), len(self.b)
# can't have more matches than the number of elements in the
# shorter sequence
return _calculate_ratio(min(la, lb), la + lb)
if GenericAlias is not None:
__class_getitem__ = classmethod(GenericAlias)
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
"""Use SequenceMatcher to return list of the best "good enough" matches.
word is a sequence for which close matches are desired (typically a
string).
possibilities is a list of sequences against which to match word
(typically a list of strings).
Optional arg n (default 3) is the maximum number of close matches to
return. n must be > 0.
Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
that don't score at least that similar to word are ignored.
The best (no more than n) matches among the possibilities are returned
in a list, sorted by similarity score, most similar first.
>>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
['apple', 'ape']
>>> import keyword as _keyword
>>> get_close_matches("wheel", _keyword.kwlist)
['while']
>>> get_close_matches("Apple", _keyword.kwlist)
[]
>>> get_close_matches("accept", _keyword.kwlist)
['except']
"""
if not n > 0:
raise ValueError("n must be > 0: %r" % (n,))
if not 0.0 <= cutoff <= 1.0:
raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
result = []
s = SequenceMatcher()
s.set_seq2(word)
for x in possibilities:
s.set_seq1(x)
if s.real_quick_ratio() >= cutoff and \
s.quick_ratio() >= cutoff and \
s.ratio() >= cutoff:
result.append((s.ratio(), x))
# Move the best scorers to head of list
result = _nlargest(n, result)
# Strip scores for the best n matches
return [x for score, x in result]
def _keep_original_ws(s, tag_s):
"""Replace whitespace with the original whitespace characters in `s`"""
return ''.join(
c if tag_c == " " and c.isspace() else tag_c
for c, tag_c in zip(s, tag_s)
)
| SequenceMatcher |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 40764,
"end": 43258
} | class ____(TestReferrerOnRedirect):
"""
Origin When Cross-Origin policy sends the full URL as "Referer",
unless the target's origin is different (different domain, different protocol)
in which case only the origin is sent.
Redirections to a different origin should strip the "Referer"
to the parent origin.
"""
settings = {"REFERRER_POLICY": POLICY_ORIGIN_WHEN_CROSS_ORIGIN}
scenarii = [
(
"http://scrapytest.org/101", # origin
"http://scrapytest.org/102", # target + redirection
(
# redirections: code, URL
(301, "http://scrapytest.org/103"),
(301, "http://scrapytest.org/104"),
),
b"http://scrapytest.org/101", # expected initial referer
b"http://scrapytest.org/101", # expected referer for the redirection request
),
(
"https://scrapytest.org/201",
"https://scrapytest.org/202",
(
# redirecting to non-secure URL: send origin
(301, "http://scrapytest.org/203"),
),
b"https://scrapytest.org/201",
b"https://scrapytest.org/",
),
(
"https://scrapytest.org/301",
"https://scrapytest.org/302",
(
# redirecting to non-secure URL (different domain): send origin
(301, "http://example.com/303"),
),
b"https://scrapytest.org/301",
b"https://scrapytest.org/",
),
(
"http://scrapy.org/401",
"http://example.com/402",
((301, "http://scrapytest.org/403"),),
b"http://scrapy.org/",
b"http://scrapy.org/",
),
(
"https://scrapy.org/501",
"https://example.com/502",
(
# all different domains: send origin
(301, "https://google.com/503"),
(301, "https://facebook.com/504"),
),
b"https://scrapy.org/",
b"https://scrapy.org/",
),
(
"https://scrapytest.org/301",
"http://scrapytest.org/302", # TLS to non-TLS: send origin
((301, "https://scrapytest.org/303"),), # TLS URL again: send origin (also)
b"https://scrapytest.org/",
b"https://scrapytest.org/",
),
]
| TestReferrerOnRedirectOriginWhenCrossOrigin |
python | gevent__gevent | src/greentest/3.9/test_socket.py | {
"start": 14168,
"end": 15146
} | class ____(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
| ThreadedVSOCKSocketStreamTest |
python | django__django | tests/update_only_fields/models.py | {
"start": 856,
"end": 1016
} | class ____(models.Model):
name = models.CharField(max_length=200)
salary = models.FloatField(default=1000.0)
non_concrete = NonConcreteField()
| Profile |
python | pennersr__django-allauth | allauth/account/migrations/0008_emailaddress_unique_primary_email_fixup.py | {
"start": 1421,
"end": 1649
} | class ____(migrations.Migration):
dependencies = [
("account", "0007_emailaddress_idx_email"),
]
operations = [
migrations.RunPython(code=forwards, reverse_code=migrations.RunPython.noop)
]
| Migration |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-vertex/llama_index/embeddings/vertex/base.py | {
"start": 963,
"end": 3834
} | class ____(str, Enum):
"""
VertexAI embedding mode.
Attributes:
DEFAULT_MODE (str): The default embedding mode, for older models before August 2023,
that does not support task_type
CLASSIFICATION_MODE (str): Optimizes embeddings for classification tasks.
CLUSTERING_MODE (str): Optimizes embeddings for clustering tasks.
SEMANTIC_SIMILARITY_MODE (str): Optimizes embeddings for tasks that require assessments of semantic similarity.
RETRIEVAL_MODE (str): Optimizes embeddings for retrieval tasks, including search and document retrieval.
"""
DEFAULT_MODE = "default"
CLASSIFICATION_MODE = "classification"
CLUSTERING_MODE = "clustering"
SEMANTIC_SIMILARITY_MODE = "similarity"
RETRIEVAL_MODE = "retrieval"
_TEXT_EMBED_TASK_TYPE_MAPPING: Dict[VertexEmbeddingMode, str] = {
VertexEmbeddingMode.CLASSIFICATION_MODE: "CLASSIFICATION",
VertexEmbeddingMode.CLUSTERING_MODE: "CLUSTERING",
VertexEmbeddingMode.SEMANTIC_SIMILARITY_MODE: "SEMANTIC_SIMILARITY",
VertexEmbeddingMode.RETRIEVAL_MODE: "RETRIEVAL_DOCUMENT",
}
_QUERY_EMBED_TASK_TYPE_MAPPING: Dict[VertexEmbeddingMode, str] = {
VertexEmbeddingMode.CLASSIFICATION_MODE: "CLASSIFICATION",
VertexEmbeddingMode.CLUSTERING_MODE: "CLUSTERING",
VertexEmbeddingMode.SEMANTIC_SIMILARITY_MODE: "SEMANTIC_SIMILARITY",
VertexEmbeddingMode.RETRIEVAL_MODE: "RETRIEVAL_QUERY",
}
_UNSUPPORTED_TASK_TYPE_MODEL = {"textembedding-gecko@001"}
def init_vertexai(
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> None:
"""
Init vertexai.
Args:
project: The default GCP project to use when making Vertex API calls.
location: The default location to use when making API calls.
credentials: The default custom
credentials to use when making API calls. If not provided credentials
will be ascertained from the environment.
"""
vertexai.init(
project=project,
location=location,
credentials=credentials,
)
def _get_embedding_request(
texts: List[str], embed_mode: VertexEmbeddingMode, is_query: bool, model_name: str
) -> List[Union[str, TextEmbeddingInput]]:
if model_name in _UNSUPPORTED_TASK_TYPE_MODEL:
# omit the task_type but still return TextEmbeddingInput
texts = [TextEmbeddingInput(text=text) for text in texts]
elif embed_mode != VertexEmbeddingMode.DEFAULT_MODE:
mapping = (
_QUERY_EMBED_TASK_TYPE_MAPPING
if is_query
else _TEXT_EMBED_TASK_TYPE_MAPPING
)
texts = [
TextEmbeddingInput(text=text, task_type=mapping[embed_mode])
for text in texts
]
return texts
| VertexEmbeddingMode |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/plugins/test_listener.py | {
"start": 92056,
"end": 98730
} | class ____:
def setup_method(self):
date = timezone.datetime(2022, 1, 1)
self.dag = DAG(
"test_selective_enable",
schedule=None,
start_date=date,
)
def simple_callable(**kwargs):
return None
self.task_1 = PythonOperator(
task_id="test_task_selective_enable_1", dag=self.dag, python_callable=simple_callable
)
self.task_2 = PythonOperator(
task_id="test_task_selective_enable_2", dag=self.dag, python_callable=simple_callable
)
run_id = str(uuid.uuid1())
self.dagrun = self.dag.create_dagrun(
run_id=run_id,
data_interval=(date, date),
run_type=types.DagRunType.MANUAL,
state=DagRunState.QUEUED,
execution_date=date,
)
self.task_instance_1 = TaskInstance(self.task_1, run_id=run_id, map_index=-1)
self.task_instance_2 = TaskInstance(self.task_2, run_id=run_id, map_index=-1)
self.task_instance_1.dag_run = self.task_instance_2.dag_run = self.dagrun
def teardown_method(self):
clear_db_runs()
@pytest.mark.parametrize(
("selective_enable", "enable_dag", "expected_call_count"),
[
("True", True, 3),
("False", True, 3),
("True", False, 0),
("False", False, 3),
],
)
def test_listener_with_dag_enabled(self, selective_enable, enable_dag, expected_call_count):
"""Tests listener's behaviour with selective-enable on DAG level."""
if enable_dag:
enable_lineage(self.dag)
with conf_vars({("openlineage", "selective_enable"): selective_enable}):
listener = OpenLineageListener()
listener._executor = mock.Mock()
# run all three DagRun-related hooks
listener.on_dag_run_running(self.dagrun, msg="test running")
listener.on_dag_run_failed(self.dagrun, msg="test failure")
listener.on_dag_run_success(self.dagrun, msg="test success")
@pytest.mark.parametrize(
("selective_enable", "enable_task", "expected_dag_call_count", "expected_task_call_count"),
[
("True", True, 3, 3),
("False", True, 3, 3),
("True", False, 0, 0),
("False", False, 3, 3),
],
)
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_listener_with_task_enabled(
self, selective_enable, enable_task, expected_dag_call_count, expected_task_call_count
):
"""Tests listener's behaviour with selective-enable on task level."""
if enable_task:
enable_lineage(self.task_1)
on_task_failed_kwargs = {"error": ValueError("test")}
with conf_vars({("openlineage", "selective_enable"): selective_enable}):
listener = OpenLineageListener()
listener._executor = mock.Mock()
listener.extractor_manager = mock.Mock()
listener.adapter = mock.Mock()
# run all three DagRun-related hooks
listener.on_dag_run_running(self.dagrun, msg="test running")
listener.on_dag_run_failed(self.dagrun, msg="test failure")
listener.on_dag_run_success(self.dagrun, msg="test success")
assert expected_dag_call_count == listener._executor.submit.call_count
# run TaskInstance-related hooks for lineage enabled task
listener.on_task_instance_running(None, self.task_instance_1, None)
listener.on_task_instance_success(None, self.task_instance_1, None)
listener.on_task_instance_failed(
previous_state=None, task_instance=self.task_instance_1, **on_task_failed_kwargs, session=None
)
assert expected_task_call_count == listener.extractor_manager.extract_metadata.call_count
# run TaskInstance-related hooks for lineage disabled task
listener.on_task_instance_running(None, self.task_instance_2, None)
listener.on_task_instance_success(None, self.task_instance_2, None)
listener.on_task_instance_failed(
previous_state=None, task_instance=self.task_instance_2, **on_task_failed_kwargs, session=None
)
# with selective-enable disabled both task_1 and task_2 should trigger metadata extraction
if selective_enable == "False":
expected_task_call_count *= 2
assert expected_task_call_count == listener.extractor_manager.extract_metadata.call_count
@pytest.mark.parametrize(
("selective_enable", "enable_task", "expected_call_count", "expected_task_call_count"),
[
("True", True, 3, 3),
("False", True, 3, 3),
("True", False, 0, 0),
("False", False, 3, 3),
],
)
@mock.patch(
"airflow.providers.openlineage.plugins.listener.OpenLineageListener._execute", new=regular_call
)
def test_listener_with_dag_disabled_task_enabled(
self, selective_enable, enable_task, expected_call_count, expected_task_call_count
):
"""Tests listener's behaviour with selective-enable on task level with DAG disabled."""
disable_lineage(self.dag)
if enable_task:
enable_lineage(self.task_1)
on_task_failed_kwargs = {"error": ValueError("test")}
with conf_vars({("openlineage", "selective_enable"): selective_enable}):
listener = OpenLineageListener()
listener._executor = mock.Mock()
listener.extractor_manager = mock.Mock()
listener.adapter = mock.Mock()
# run all three DagRun-related hooks
listener.on_dag_run_running(self.dagrun, msg="test running")
listener.on_dag_run_failed(self.dagrun, msg="test failure")
listener.on_dag_run_success(self.dagrun, msg="test success")
# run TaskInstance-related hooks for lineage enabled task
listener.on_task_instance_running(None, self.task_instance_1, session=None)
listener.on_task_instance_success(None, self.task_instance_1, session=None)
listener.on_task_instance_failed(
previous_state=None, task_instance=self.task_instance_1, **on_task_failed_kwargs, session=None
)
assert expected_call_count == listener._executor.submit.call_count
assert expected_task_call_count == listener.extractor_manager.extract_metadata.call_count
| TestOpenLineageSelectiveEnableAirflow2 |
python | coleifer__peewee | tests/fields.py | {
"start": 33115,
"end": 33218
} | class ____(TestModel):
id = UUIDField(default=uuid.uuid4, primary_key=True)
name = TextField()
| UU1 |
python | google__python-fire | fire/decorators_test.py | {
"start": 1850,
"end": 2045
} | class ____:
@decorators.SetParseFn(str)
def example7(self, arg1, arg2=None, *varargs, **kwargs): # pylint: disable=keyword-arg-before-vararg
return arg1, arg2, varargs, kwargs
| WithVarArgs |
python | fluentpython__example-code-2e | 22-dyn-attr-prop/bulkfood/bulkfood_v1.py | {
"start": 683,
"end": 933
} | class ____:
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
# end::LINEITEM_V1[]
| LineItem |
python | getsentry__sentry | src/sentry_plugins/github/plugin.py | {
"start": 10613,
"end": 17375
} | class ____(CorePluginMixin, RepositoryProvider):
name = "GitHub"
auth_provider = "github"
logger = logging.getLogger("sentry.plugins.github")
def message_from_error(self, exc: Exception) -> str:
return _message_from_error(exc)
def get_client(self, user: User | RpcUser | AnonymousUser) -> GithubPluginClient:
if not user.is_authenticated:
raise PluginError(API_ERRORS[401])
auth = self.get_auth(user=user)
if auth is None:
raise PluginError(API_ERRORS[401])
else:
return GithubPluginClient(auth=auth)
def get_config(self):
return [
{
"name": "name",
"label": "Repository Name",
"type": "text",
"placeholder": "e.g. getsentry/sentry",
"help": "Enter your repository name, including the owner.",
"required": True,
}
]
def validate_config(self, organization, config, actor=None):
"""
```
if config['foo'] and not config['bar']:
raise PluginError('You cannot configure foo with bar')
return config
```
"""
if config.get("name"):
try:
with self.get_client(actor) as client:
repo = client.get_repo(config["name"])
except Exception as e:
self.raise_error(e)
else:
config["external_id"] = str(repo["id"])
return config
def get_webhook_secret(self, organization):
lock = locks.get(
f"github:webhook-secret:{organization.id}", duration=60, name="github_webhook_secret"
)
with lock.acquire():
# TODO(dcramer): get_or_create would be a useful native solution
secret = OrganizationOption.objects.get_value(
organization=organization, key="github:webhook_secret"
)
if secret is None:
secret = uuid4().hex + uuid4().hex
OrganizationOption.objects.set_value(
organization=organization, key="github:webhook_secret", value=secret
)
return secret
def _build_webhook_config(self, organization):
return {
"name": "web",
"active": True,
"events": WEBHOOK_EVENTS,
"config": {
"url": absolute_uri(f"/plugins/github/organizations/{organization.id}/webhook/"),
"content_type": "json",
"secret": self.get_webhook_secret(organization),
},
}
def _create_webhook(self, client, organization, repo_name):
return client.create_hook(repo_name, self._build_webhook_config(organization))
def _update_webhook(self, client, organization, repo_name, webhook_id):
return client.update_hook(repo_name, webhook_id, self._build_webhook_config(organization))
def create_repository(self, organization, data, actor=None):
if actor is None:
raise NotImplementedError("Cannot create a repository anonymously")
with self.get_client(actor) as client:
try:
resp = self._create_webhook(client, organization, data["name"])
except Exception as e:
self.logger.exception(
"github.webhook.create-failure",
extra={
"organization_id": organization.id,
"repository": data["name"],
"status_code": getattr(e, "code", None),
},
)
self.raise_error(e)
else:
return {
"name": data["name"],
"external_id": data["external_id"],
"url": f"https://github.com/{data['name']}",
"config": {
"name": data["name"],
"webhook_id": resp["id"],
"webhook_events": resp["events"],
},
}
# TODO(dcramer): let's make this core functionality and move the actual database
# updates into Sentry core
def update_repository(self, repo, actor=None):
if actor is None:
raise NotImplementedError("Cannot update a repository anonymously")
org = Organization.objects.get(id=repo.organization_id)
webhook_id = repo.config.get("webhook_id")
with self.get_client(actor) as client:
if not webhook_id:
resp = self._create_webhook(client, org, repo.config["name"])
else:
resp = self._update_webhook(
client, org, repo.config["name"], repo.config["webhook_id"]
)
repo.config.update({"webhook_id": resp["id"], "webhook_events": resp["events"]})
repo.update(config=repo.config)
def delete_repository(self, repo, actor=None):
if actor is None:
raise NotImplementedError("Cannot delete a repository anonymously")
if "webhook_id" in repo.config:
try:
with self.get_client(actor) as client:
client.delete_hook(repo.config["name"], repo.config["webhook_id"])
except ApiError as exc:
if exc.code == 404:
return
raise
def _format_commits(self, repo, commit_list):
return [
{
"id": c["sha"],
"repository": repo.name,
"author_email": c["commit"]["author"].get("email"),
"author_name": c["commit"]["author"].get("name"),
"message": c["commit"]["message"],
}
for c in commit_list
]
def compare_commits(self, repo, start_sha, end_sha, actor=None):
if actor is None:
raise NotImplementedError("Cannot fetch commits anonymously")
# use config name because that is kept in sync via webhooks
name = repo.config["name"]
with self.get_client(actor) as client:
if start_sha is None:
try:
res = client.get_last_commits(name, end_sha)
except Exception as e:
self.raise_error(e)
else:
return self._format_commits(repo, res[:10])
else:
try:
res = client.compare_commits(name, start_sha, end_sha)
except Exception as e:
self.raise_error(e)
else:
return self._format_commits(repo, res["commits"])
| GitHubRepositoryProvider |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_simple.py | {
"start": 183,
"end": 218
} | class ____(int):
pass
| IntSubClass |
python | huggingface__transformers | src/transformers/models/bloom/modeling_bloom.py | {
"start": 6323,
"end": 6588
} | class ____(nn.Module):
"""
Partly copied from Megatron-DeepSpeed code and adapted for our needs
"""
def __init__(self):
super().__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return GeLUFunction.apply(x)
| BloomGelu |
python | pytorch__pytorch | test/test_multiprocessing_spawn.py | {
"start": 8484,
"end": 9929
} | class ____(TestCase):
@unittest.skipIf(
sys.version_info >= (3, 13, 8),
"Python 3.13.8+ changed forkserver module caching behavior",
# https://docs.python.org/3.13/whatsnew/changelog.html
# gh-126631
)
def test_forkserver_perf(self):
start_method = 'forkserver'
expensive = Expensive()
nprocs = 4
orig_paralell_env_val = os.environ.get(mp.ENV_VAR_PARALLEL_START)
# test the non parallel case
os.environ[mp.ENV_VAR_PARALLEL_START] = "0"
start = time.perf_counter()
mp.start_processes(expensive.my_call, nprocs=nprocs, start_method=start_method)
elapsed = time.perf_counter() - start
# the elapsed time should be at least {nprocs}x the sleep time
self.assertGreaterEqual(elapsed, Expensive.SLEEP_SECS * nprocs)
# test the parallel case
os.environ[mp.ENV_VAR_PARALLEL_START] = "1"
start = time.perf_counter()
mp.start_processes(expensive.my_call, nprocs=nprocs, start_method=start_method)
elapsed = time.perf_counter() - start
# the elapsed time should be less than {nprocs}x the sleep time
self.assertLess(elapsed, Expensive.SLEEP_SECS * nprocs)
if orig_paralell_env_val is None:
del os.environ[mp.ENV_VAR_PARALLEL_START]
else:
os.environ[mp.ENV_VAR_PARALLEL_START] = orig_paralell_env_val
| ParallelForkServerPerfTest |
python | plotly__plotly.py | plotly/graph_objs/choroplethmap/_hoverlabel.py | {
"start": 233,
"end": 11283
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "choroplethmap"
_path_str = "choroplethmap.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.choroplethmap.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.choroplethmap.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choroplethmap.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choroplethmap.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmap.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | doocs__leetcode | solution/0700-0799/0792.Number of Matching Subsequences/Solution3.py | {
"start": 0,
"end": 445
} | class ____:
def numMatchingSubseq(self, s: str, words: List[str]) -> int:
def check(w):
i = -1
for c in w:
j = bisect_right(d[c], i)
if j == len(d[c]):
return False
i = d[c][j]
return True
d = defaultdict(list)
for i, c in enumerate(s):
d[c].append(i)
return sum(check(w) for w in words)
| Solution |
python | viewflow__viewflow | viewflow/workflow/flow/views/list.py | {
"start": 1490,
"end": 2625
} | class ____(
mixins.StoreRequestPathMixin,
mixins.ProcessViewTemplateNames,
ListModelView,
):
"""List of current user available tasks of a flow"""
columns = ("task_id", "task_title", "brief", "created")
filterset_class = filters.FlowUserTaskListFilter
flow_class = None
template_filename = "process_tasks_list.html"
title = _("Queue")
def task_id(self, task):
task_url = task.flow_task.reverse("index", args=[task.process_id, task.pk])
return mark_safe(f'<a href="{task_url}">#{task.process_id}/{task.pk}</a>')
task_id.short_description = _("#")
def task_title(self, obj):
return obj.title
task_title.short_description = _("Task")
@property
def model(self):
return self.flow_class.task_class
@viewprop
def queryset(self):
"""List of tasks available to the current user."""
queryset = self.model._default_manager.all()
return (
queryset.user_queue(self.request.user, flow_class=self.flow_class)
.filter(status=STATUS.NEW)
.order_by("-created")
)
| FlowQueueListView |
python | pydantic__pydantic | pydantic/_internal/_fields.py | {
"start": 1318,
"end": 1528
} | class ____(Representation):
"""Base class for annotation markers like `Strict`."""
__slots__ = ()
@dataclasses.dataclass(**slots_true) # TODO: make kw_only when we drop support for 3.9.
| PydanticMetadata |
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 20176,
"end": 20344
} | class ____(models.Model):
date = models.DateField()
text = models.CharField(max_length=10)
class Meta:
ordering = ['-date']
| DjangoFilterOrderingModel |
python | pytorch__pytorch | torch/ao/nn/intrinsic/modules/fused.py | {
"start": 9497,
"end": 9891
} | class ____(_FusedModule):
r"""This is a sequential container which calls the Conv2d modules with extra Add.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, add):
super().__init__(conv)
self.add = add
def forward(self, x1, x2): # type: ignore[override]
return self.add(self[0](x1), x2)
| ConvAdd2d |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_init.py | {
"start": 19126,
"end": 21083
} | class ____(FSDPTestMultiThread):
@property
def world_size(self) -> int:
return 4
@skip_if_lt_x_gpu(1)
def test_shard_dtensor_parameters(self):
dp_size = 2 if self.world_size > 2 else 1
global_mesh = init_device_mesh(
device_type.type,
(dp_size, self.world_size // dp_size),
mesh_dim_names=("dp", "tp"),
)
dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"]
# Use odd dim sizes to test uneven shards
model = MLP(9, dim_multiplier=3)
orig_params = [param.detach().clone() for param in model.parameters()]
orig_param_names = [param_name for param_name, _ in model.named_parameters()]
parallelize_module(
model,
tp_mesh,
{"in_proj": ColwiseParallel(), "out_proj": RowwiseParallel()},
)
fully_shard(model, mesh=dp_mesh)
sharded_params = list(model.parameters())
self.assertEqual(len(orig_params), len(sharded_params))
for orig_param_name, orig_param, sharded_param in zip(
orig_param_names, orig_params, sharded_params
):
self.assertIsInstance(sharded_param, DTensor)
self.assertEqual(sharded_param.device_mesh, global_mesh)
self.assertEqual(sharded_param.size(), orig_param.size())
self.assertEqual(sharded_param.stride(), orig_param.stride())
if "in_proj" in orig_param_name:
expected_placements = (
_StridedShard(0, split_factor=tp_mesh.size()),
Shard(0),
)
elif "out_proj" in orig_param_name and "weight" in orig_param_name:
expected_placements = (Shard(0), Shard(1))
else:
expected_placements = (Shard(0), Replicate())
self.assertEqual(sharded_param._spec.placements, expected_placements)
| TestFullyShardShardedParameterDTensor |
python | doocs__leetcode | solution/2400-2499/2460.Apply Operations to an Array/Solution.py | {
"start": 0,
"end": 377
} | class ____:
def applyOperations(self, nums: List[int]) -> List[int]:
n = len(nums)
for i in range(n - 1):
if nums[i] == nums[i + 1]:
nums[i] <<= 1
nums[i + 1] = 0
ans = [0] * n
i = 0
for x in nums:
if x:
ans[i] = x
i += 1
return ans
| Solution |
python | optuna__optuna | optuna/artifacts/_gcs.py | {
"start": 388,
"end": 2812
} | class ____:
"""An artifact backend for Google Cloud Storage (GCS).
Args:
bucket_name:
The name of the bucket to store artifacts.
client:
A google-cloud-storage ``Client`` to use for storage operations. If not specified, a
new client will be created with default settings.
Example:
.. code-block:: python
import optuna
from optuna.artifacts import GCSArtifactStore, upload_artifact
artifact_backend = GCSArtifactStore("my-bucket")
def objective(trial: optuna.Trial) -> float:
... = trial.suggest_float("x", -10, 10)
file_path = generate_example(...)
upload_artifact(
artifact_store=artifact_store,
file_path=file_path,
study_or_trial=trial,
)
return ...
Before running this code, you will have to install ``gcloud`` and run
.. code-block:: bash
gcloud auth application-default login
so that the Cloud Storage library can automatically find the credential.
"""
def __init__(
self,
bucket_name: str,
client: google.cloud.storage.Client | None = None,
) -> None:
_imports.check()
self.bucket_name = bucket_name
self.client = client or google.cloud.storage.Client()
self.bucket_obj = self.client.bucket(bucket_name)
def open_reader(self, artifact_id: str) -> "BinaryIO":
blob = self.bucket_obj.get_blob(artifact_id)
if blob is None:
raise ArtifactNotFound(
f"Artifact storage with bucket: {self.bucket_name}, artifact_id: {artifact_id} was"
" not found"
)
body = blob.download_as_bytes()
return BytesIO(body)
def write(self, artifact_id: str, content_body: "BinaryIO") -> None:
blob = self.bucket_obj.blob(artifact_id)
data = content_body.read()
blob.upload_from_string(data)
def remove(self, artifact_id: str) -> None:
self.bucket_obj.delete_blob(artifact_id)
if TYPE_CHECKING:
# A mypy-runtime assertion to ensure that GCS3ArtifactStore implements all abstract methods
# in ArtifactStore.
from optuna.artifacts._protocol import ArtifactStore
_: ArtifactStore = GCSArtifactStore("")
| GCSArtifactStore |
python | bokeh__bokeh | tests/unit/bokeh/test_objects.py | {
"start": 4991,
"end": 10058
} | class ____:
def setup_method(self) -> None:
self.maxDiff = None
def test_init(self) -> None:
obj = SomeModel.__new__(SomeModel, id=ID("test_id"))
Model.__init__(obj)
assert obj.id == "test_id"
testObject2 = SomeModel()
assert testObject2.id is not None
assert set(obj.properties()) == {
"name",
"tags",
"js_property_callbacks",
"js_event_callbacks",
"subscribed_events",
"syncable",
"some",
}
assert obj.properties_with_values(include_defaults=True) == dict(
name=None,
tags=[],
js_property_callbacks={},
js_event_callbacks={},
subscribed_events=set(),
syncable=True,
some=0,
)
assert obj.properties_with_values(include_defaults=False) == {}
def test_references_by_ref_by_value(self) -> None:
from bokeh.core.has_props import HasProps
from bokeh.core.properties import Instance, Int
class T(SomeModel):
t = Int(0)
class Y(SomeModel):
t1 = Instance(T)
class Z1(HasProps):
t2 = Instance(T)
class Z2(SomeModel):
t2 = Instance(T)
class X1(SomeModel):
y = Instance(Y)
z1 = Instance(Z1)
class X2(SomeModel):
y = Instance(Y)
z2 = Instance(Z2)
t1, t2 = T(t=1), T(t=2)
y = Y(t1=t1)
z1, z2 = Z1(t2=t2), Z2(t2=t2)
x1 = X1(y=y, z1=z1)
x2 = X2(y=y, z2=z2)
assert x1.references() == {t1, y, t2, x1}
assert x2.references() == {t1, y, t2, z2, x2}
def test_references_in_containers(self) -> None:
from bokeh.core.properties import (
Dict,
Instance,
Int,
List,
String,
Tuple,
)
# XXX: can't use Y, because of:
#
# Warning: Duplicate __view_model__ declaration of 'Y' for class Y.
# Previous definition: <class 'bokeh.tests.test_objects.Y'>
class U(SomeModel):
a = Int()
class V(SomeModel):
u1 = Instance(U)
u2 = List(Instance(U))
u3 = Tuple(Int, Instance(U))
u4 = Dict(String, Instance(U))
u5 = Dict(String, List(Instance(U)))
u1, u2, u3, u4, u5 = U(a=1), U(a=2), U(a=3), U(a=4), U(a=5)
v = V(u1=u1, u2=[u2], u3=(3, u3), u4={"4": u4}, u5={"5": [u5]})
assert v.references() == {v, u1, u2, u3, u4, u5}
def test_list_default(self) -> None:
class HasListDefault(Model):
value = List(String, default=["hello"])
obj = HasListDefault()
assert obj.value == obj.value
# 'value' should not be included because we haven't modified it
assert 'value' not in obj.properties_with_values(include_defaults=False)
# (but should be in include_defaults=True)
assert 'value' in obj.properties_with_values(include_defaults=True)
obj.value.append("world")
# 'value' should now be included
assert 'value' in obj.properties_with_values(include_defaults=False)
def test_dict_default(self) -> None:
class HasDictDefault(Model):
value = Dict(String, Int, default=dict(hello=42))
obj = HasDictDefault()
assert obj.value == obj.value
assert dict(hello=42) == obj.value
# 'value' should not be included because we haven't modified it
assert 'value' not in obj.properties_with_values(include_defaults=False)
# (but should be in include_defaults=True)
assert 'value' in obj.properties_with_values(include_defaults=True)
obj.value['world'] = 57
# 'value' should now be included
assert 'value' in obj.properties_with_values(include_defaults=False)
assert dict(hello=42, world=57) == obj.value
def test_func_default_with_counter(self) -> None:
counter = 0
def next_value() -> int:
nonlocal counter
counter += 1
return counter
class HasFuncDefaultInt(Model):
value = Int(default=next_value)
obj1 = HasFuncDefaultInt()
obj2 = HasFuncDefaultInt()
assert counter == 2
assert obj2.value == obj1.value + 1
# 'value' is a default, but it gets included as a
# non-default because it's unstable.
assert 'value' in obj1.properties_with_values(include_defaults=False)
def test_func_default_with_model(self) -> None:
class HasFuncDefaultModel(Model):
child = Instance(Model, lambda: SomeModel())
obj1 = HasFuncDefaultModel()
obj2 = HasFuncDefaultModel()
assert obj1.child.id != obj2.child.id
# 'child' is a default, but it gets included as a
# non-default because it's unstable.
assert 'child' in obj1.properties_with_values(include_defaults=False)
| TestModel |
python | redis__redis-py | redis/retry.py | {
"start": 383,
"end": 1871
} | class ____(Generic[E], abc.ABC):
"""Retry a specific number of times after a failure"""
_supported_errors: Tuple[Type[E], ...]
def __init__(
self,
backoff: "AbstractBackoff",
retries: int,
supported_errors: Tuple[Type[E], ...],
):
"""
Initialize a `Retry` object with a `Backoff` object
that retries a maximum of `retries` times.
`retries` can be negative to retry forever.
You can specify the types of supported errors which trigger
a retry with the `supported_errors` parameter.
"""
self._backoff = backoff
self._retries = retries
self._supported_errors = supported_errors
@abc.abstractmethod
def __eq__(self, other: Any) -> bool:
return NotImplemented
def __hash__(self) -> int:
return hash((self._backoff, self._retries, frozenset(self._supported_errors)))
def update_supported_errors(self, specified_errors: Iterable[Type[E]]) -> None:
"""
Updates the supported errors with the specified error types
"""
self._supported_errors = tuple(
set(self._supported_errors + tuple(specified_errors))
)
def get_retries(self) -> int:
"""
Get the number of retries.
"""
return self._retries
def update_retries(self, value: int) -> None:
"""
Set the number of retries.
"""
self._retries = value
| AbstractRetry |
python | huggingface__transformers | src/transformers/models/aimv2/modular_aimv2.py | {
"start": 15563,
"end": 15621
} | class ____(CLIPTextEmbeddings):
pass
| Aimv2TextEmbeddings |
python | spack__spack | lib/spack/spack/llnl/util/link_tree.py | {
"start": 14892,
"end": 20324
} | class ____:
"""Class to create trees of symbolic links from a source directory.
LinkTree objects are constructed with a source root. Their
methods allow you to create and delete trees of symbolic links
back to the source tree in specific destination directories.
Trees comprise symlinks only to files; directories are never
symlinked to, to prevent the source directory from ever being
modified.
"""
def __init__(self, source_root):
if not os.path.exists(source_root):
raise OSError("No such file or directory: '%s'", source_root)
self._root = source_root
def find_conflict(self, dest_root, ignore=None, ignore_file_conflicts=False):
"""Returns the first file in dest that conflicts with src"""
ignore = ignore or (lambda x: False)
conflicts = self.find_dir_conflicts(dest_root, ignore)
if not ignore_file_conflicts:
conflicts.extend(
dst
for src, dst in self.get_file_map(dest_root, ignore).items()
if os.path.exists(dst)
)
if conflicts:
return conflicts[0]
def find_dir_conflicts(self, dest_root, ignore):
conflicts = []
kwargs = {"follow_nonexisting": False, "ignore": ignore}
for src, dest in fs.traverse_tree(self._root, dest_root, **kwargs):
if os.path.isdir(src):
if os.path.exists(dest) and not os.path.isdir(dest):
conflicts.append("File blocks directory: %s" % dest)
elif os.path.exists(dest) and os.path.isdir(dest):
conflicts.append("Directory blocks directory: %s" % dest)
return conflicts
def get_file_map(self, dest_root, ignore):
merge_map = {}
kwargs = {"follow_nonexisting": True, "ignore": ignore}
for src, dest in fs.traverse_tree(self._root, dest_root, **kwargs):
if not os.path.isdir(src):
merge_map[src] = dest
return merge_map
def merge_directories(self, dest_root, ignore):
for src, dest in fs.traverse_tree(self._root, dest_root, ignore=ignore):
if os.path.isdir(src):
if not os.path.exists(dest):
fs.mkdirp(dest)
continue
if not os.path.isdir(dest):
raise ValueError("File blocks directory: %s" % dest)
# mark empty directories so they aren't removed on unmerge.
if not os.listdir(dest):
marker = os.path.join(dest, empty_file_name)
fs.touch(marker)
def unmerge_directories(self, dest_root, ignore):
for src, dest in fs.traverse_tree(self._root, dest_root, ignore=ignore, order="post"):
if os.path.isdir(src):
if not os.path.exists(dest):
continue
elif not os.path.isdir(dest):
raise ValueError("File blocks directory: %s" % dest)
# remove directory if it is empty.
if not os.listdir(dest):
shutil.rmtree(dest, ignore_errors=True)
# remove empty dir marker if present.
marker = os.path.join(dest, empty_file_name)
if os.path.exists(marker):
os.remove(marker)
def merge(
self,
dest_root,
ignore_conflicts: bool = False,
ignore: Optional[Callable[[str], bool]] = None,
link: Callable = fs.symlink,
relative: bool = False,
):
"""Link all files in src into dest, creating directories if necessary.
Arguments:
ignore_conflicts: if True, do not break when the target exists; return a list of files
that could not be linked
ignore: callable that returns True if a file is to be ignored in the merge (by default
ignore nothing)
link: function to create links with (defaults to
``spack.llnl.util.filesystem.symlink``)
relative: create all symlinks relative to the target (default False)
"""
if ignore is None:
ignore = lambda x: False
conflict = self.find_conflict(
dest_root, ignore=ignore, ignore_file_conflicts=ignore_conflicts
)
if conflict:
raise SingleMergeConflictError(conflict)
self.merge_directories(dest_root, ignore)
existing = []
for src, dst in self.get_file_map(dest_root, ignore).items():
if os.path.exists(dst):
existing.append(dst)
elif relative:
abs_src = os.path.abspath(src)
dst_dir = os.path.dirname(os.path.abspath(dst))
rel = os.path.relpath(abs_src, dst_dir)
link(rel, dst)
else:
link(src, dst)
for c in existing:
tty.warn("Could not merge: %s" % c)
def unmerge(self, dest_root, ignore=None, remove_file=remove_link):
"""Unlink all files in dest that exist in src.
Unlinks directories in dest if they are empty.
"""
if ignore is None:
ignore = lambda x: False
for src, dst in self.get_file_map(dest_root, ignore).items():
remove_file(src, dst)
self.unmerge_directories(dest_root, ignore)
| LinkTree |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 80579,
"end": 86516
} | class ____:
"""Use this factory class to generate the correct object for use when using the `collections.create()` method. E.g., `.multi_tenancy()` will return a `MultiTenancyConfigCreate` object to be used in the `multi_tenancy_config` argument.
Each class method provides options specific to the named configuration type in the function's name. Under-the-hood data validation steps
will ensure that any mis-specifications are caught before the request is sent to Weaviate.
"""
Generative = _Generative
Reranker = _Reranker
Vectorizer = _Vectorizer
VectorIndex = _VectorIndex
NamedVectors = _NamedVectors
Vectors = _Vectors
MultiVectors = _MultiVectors
@staticmethod
def inverted_index(
bm25_b: Optional[float] = None,
bm25_k1: Optional[float] = None,
cleanup_interval_seconds: Optional[int] = None,
index_timestamps: Optional[bool] = None,
index_property_length: Optional[bool] = None,
index_null_state: Optional[bool] = None,
stopwords_preset: Optional[StopwordsPreset] = None,
stopwords_additions: Optional[List[str]] = None,
stopwords_removals: Optional[List[str]] = None,
) -> _InvertedIndexConfigCreate:
"""Create an `InvertedIndexConfigCreate` object to be used when defining the configuration of the keyword searching algorithm of Weaviate.
Args:
See [the docs](https://weaviate.io/developers/weaviate/configuration/indexes#configure-the-inverted-index) for details!
""" # noqa: D417 (missing argument descriptions in the docstring)
if bm25_b is None and bm25_k1 is not None or bm25_k1 is None and bm25_b is not None:
raise ValueError("bm25_b and bm25_k1 must be specified together")
return _InvertedIndexConfigCreate(
bm25=(
_BM25ConfigCreate(b=bm25_b, k1=bm25_k1)
if bm25_b is not None and bm25_k1 is not None
else None
),
cleanupIntervalSeconds=cleanup_interval_seconds,
indexTimestamps=index_timestamps,
indexPropertyLength=index_property_length,
indexNullState=index_null_state,
stopwords=_StopwordsCreate(
preset=stopwords_preset,
additions=stopwords_additions,
removals=stopwords_removals,
),
)
@staticmethod
def multi_tenancy(
enabled: bool = True,
auto_tenant_creation: Optional[bool] = None,
auto_tenant_activation: Optional[bool] = None,
) -> _MultiTenancyConfigCreate:
"""Create a `MultiTenancyConfigCreate` object to be used when defining the multi-tenancy configuration of Weaviate.
Args:
enabled: Whether multi-tenancy is enabled. Defaults to `True`.
auto_tenant_creation: Automatically create nonexistent tenants during object creation. Defaults to `None`, which uses the server-defined default.
auto_tenant_activation: Automatically turn tenants implicitly HOT when they are accessed. Defaults to `None`, which uses the server-defined default.
"""
return _MultiTenancyConfigCreate(
enabled=enabled,
autoTenantCreation=auto_tenant_creation,
autoTenantActivation=auto_tenant_activation,
)
@staticmethod
def replication(
factor: Optional[int] = None,
async_enabled: Optional[bool] = None,
deletion_strategy: Optional[ReplicationDeletionStrategy] = None,
) -> _ReplicationConfigCreate:
"""Create a `ReplicationConfigCreate` object to be used when defining the replication configuration of Weaviate.
NOTE: `async_enabled` is only available with WeaviateDB `>=v1.26.0`
Args:
factor: The replication factor.
async_enabled: Enabled async replication.
deletion_strategy: How conflicts between different nodes about deleted objects are resolved.
"""
return _ReplicationConfigCreate(
factor=factor,
asyncEnabled=async_enabled,
deletionStrategy=deletion_strategy,
)
@staticmethod
def sharding(
virtual_per_physical: Optional[int] = None,
desired_count: Optional[int] = None,
actual_count: Optional[int] = None,
desired_virtual_count: Optional[int] = None,
actual_virtual_count: Optional[int] = None,
) -> _ShardingConfigCreate:
"""Create a `ShardingConfigCreate` object to be used when defining the sharding configuration of Weaviate.
NOTE: You can only use one of Sharding or Replication, not both.
See [the docs](https://weaviate.io/developers/weaviate/concepts/replication-architecture#replication-vs-sharding) for more details.
Args:
virtual_per_physical: The number of virtual shards per physical shard.
desired_count: The desired number of physical shards.
actual_count: The actual number of physical shards. This is a read-only field so has no effect.
It is kept for backwards compatibility but will be removed in a future release.
desired_virtual_count: The desired number of virtual shards.
actual_virtual_count: The actual number of virtual shards. This is a read-only field so has no effect.
It is kept for backwards compatibility but will be removed in a future release.
"""
if actual_count is not None:
_Warnings.sharding_actual_count_is_deprecated("actual_count")
if actual_virtual_count is not None:
_Warnings.sharding_actual_count_is_deprecated("actual_virtual_count")
return _ShardingConfigCreate(
virtualPerPhysical=virtual_per_physical,
desiredCount=desired_count,
desiredVirtualCount=desired_virtual_count,
)
| Configure |
python | pyca__cryptography | src/cryptography/x509/ocsp.py | {
"start": 6568,
"end": 12699
} | class ____:
def __init__(
self,
response: _SingleResponse | None = None,
responder_id: tuple[x509.Certificate, OCSPResponderEncoding]
| None = None,
certs: list[x509.Certificate] | None = None,
extensions: list[x509.Extension[x509.ExtensionType]] = [],
):
self._response = response
self._responder_id = responder_id
self._certs = certs
self._extensions = extensions
def add_response(
self,
cert: x509.Certificate,
issuer: x509.Certificate,
algorithm: hashes.HashAlgorithm,
cert_status: OCSPCertStatus,
this_update: datetime.datetime,
next_update: datetime.datetime | None,
revocation_time: datetime.datetime | None,
revocation_reason: x509.ReasonFlags | None,
) -> OCSPResponseBuilder:
if self._response is not None:
raise ValueError("Only one response per OCSPResponse.")
if not isinstance(cert, x509.Certificate) or not isinstance(
issuer, x509.Certificate
):
raise TypeError("cert and issuer must be a Certificate")
singleresp = _SingleResponse(
(cert, issuer),
None,
algorithm,
cert_status,
this_update,
next_update,
revocation_time,
revocation_reason,
)
return OCSPResponseBuilder(
singleresp,
self._responder_id,
self._certs,
self._extensions,
)
def add_response_by_hash(
self,
issuer_name_hash: bytes,
issuer_key_hash: bytes,
serial_number: int,
algorithm: hashes.HashAlgorithm,
cert_status: OCSPCertStatus,
this_update: datetime.datetime,
next_update: datetime.datetime | None,
revocation_time: datetime.datetime | None,
revocation_reason: x509.ReasonFlags | None,
) -> OCSPResponseBuilder:
if self._response is not None:
raise ValueError("Only one response per OCSPResponse.")
if not isinstance(serial_number, int):
raise TypeError("serial_number must be an integer")
utils._check_bytes("issuer_name_hash", issuer_name_hash)
utils._check_bytes("issuer_key_hash", issuer_key_hash)
_verify_algorithm(algorithm)
if algorithm.digest_size != len(
issuer_name_hash
) or algorithm.digest_size != len(issuer_key_hash):
raise ValueError(
"issuer_name_hash and issuer_key_hash must be the same length "
"as the digest size of the algorithm"
)
singleresp = _SingleResponse(
None,
(issuer_name_hash, issuer_key_hash, serial_number),
algorithm,
cert_status,
this_update,
next_update,
revocation_time,
revocation_reason,
)
return OCSPResponseBuilder(
singleresp,
self._responder_id,
self._certs,
self._extensions,
)
def responder_id(
self, encoding: OCSPResponderEncoding, responder_cert: x509.Certificate
) -> OCSPResponseBuilder:
if self._responder_id is not None:
raise ValueError("responder_id can only be set once")
if not isinstance(responder_cert, x509.Certificate):
raise TypeError("responder_cert must be a Certificate")
if not isinstance(encoding, OCSPResponderEncoding):
raise TypeError(
"encoding must be an element from OCSPResponderEncoding"
)
return OCSPResponseBuilder(
self._response,
(responder_cert, encoding),
self._certs,
self._extensions,
)
def certificates(
self, certs: Iterable[x509.Certificate]
) -> OCSPResponseBuilder:
if self._certs is not None:
raise ValueError("certificates may only be set once")
certs = list(certs)
if len(certs) == 0:
raise ValueError("certs must not be an empty list")
if not all(isinstance(x, x509.Certificate) for x in certs):
raise TypeError("certs must be a list of Certificates")
return OCSPResponseBuilder(
self._response,
self._responder_id,
certs,
self._extensions,
)
def add_extension(
self, extval: x509.ExtensionType, critical: bool
) -> OCSPResponseBuilder:
if not isinstance(extval, x509.ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = x509.Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return OCSPResponseBuilder(
self._response,
self._responder_id,
self._certs,
[*self._extensions, extension],
)
def sign(
self,
private_key: CertificateIssuerPrivateKeyTypes,
algorithm: hashes.HashAlgorithm | None,
) -> OCSPResponse:
if self._response is None:
raise ValueError("You must add a response before signing")
if self._responder_id is None:
raise ValueError("You must add a responder_id before signing")
return ocsp.create_ocsp_response(
OCSPResponseStatus.SUCCESSFUL, self, private_key, algorithm
)
@classmethod
def build_unsuccessful(
cls, response_status: OCSPResponseStatus
) -> OCSPResponse:
if not isinstance(response_status, OCSPResponseStatus):
raise TypeError(
"response_status must be an item from OCSPResponseStatus"
)
if response_status is OCSPResponseStatus.SUCCESSFUL:
raise ValueError("response_status cannot be SUCCESSFUL")
return ocsp.create_ocsp_response(response_status, None, None, None)
load_der_ocsp_request = ocsp.load_der_ocsp_request
load_der_ocsp_response = ocsp.load_der_ocsp_response
| OCSPResponseBuilder |
python | huggingface__transformers | src/transformers/quantizers/quantizer_mxfp4.py | {
"start": 1102,
"end": 20447
} | class ____(HfQuantizer):
"""
FP4 quantization using fbgemm kernels
"""
requires_parameters_quantization = True
requires_calibration = False
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
self.triton_kernels_hub = None
def _lazy_import_kernels(self):
"""Lazy import and initialize kernels only when needed"""
if self.triton_kernels_hub is None:
try:
from kernels import get_kernel
self.triton_kernels_hub = get_kernel("kernels-community/triton_kernels")
except ImportError:
raise ImportError("kernels package is required for MXFP4 quantization")
return self.triton_kernels_hub
def validate_environment(self, *args, **kwargs):
if not is_torch_available():
raise ImportError(
"Using mxfp4 quantization requires torch"
"Please install the latest version of torch ( pip install --upgrade torch )"
)
if self.quantization_config.dequantize:
return
if not (torch.cuda.is_available() or torch.xpu.is_available()):
if self.pre_quantized:
logger.warning_once(
"Using MXFP4 quantized models requires a GPU, we will default to dequantizing the model to bf16"
)
self.quantization_config.dequantize = True
return
else:
raise RuntimeError("Quantizing a model using MXFP4 requires a GPU")
if not is_accelerate_available():
raise ImportError("Using mxfp4 requires Accelerate: `pip install accelerate`")
if torch.xpu.is_available():
gpu_is_supported = True
kernels_available = is_triton_available("3.5.0") and is_kernels_available()
else:
compute_capability = torch.cuda.get_device_capability()
gpu_is_supported = compute_capability >= (7, 5)
kernels_available = is_triton_available("3.4.0") and is_kernels_available()
if self.pre_quantized:
# On unsupported GPUs or without kernels, we will dequantize the model to bf16
if not gpu_is_supported:
logger.warning_once(
"MXFP4 quantization is only supported on GPUs with compute capability >= 7.5 (e.g T4, A100, L4, H100, or B200) or XPUs (e.g Intel® Data Center GPU Max Series) "
"We will default to dequantizing the model to bf16."
)
self.quantization_config.dequantize = True
return
if not kernels_available:
logger.warning_once(
"MXFP4 quantization requires Triton and kernels installed: CUDA requires Triton >= 3.4.0, XPU requires Triton >= 3.5.0, we will default to dequantizing the model to bf16"
)
self.quantization_config.dequantize = True
return
elif not gpu_is_supported:
# we can't quantize the model in this case so we raise an error
raise ValueError(
"MXFP4 quantization is only supported on GPUs with compute capability >= 7.5 (e.g T4, A100, L4, H100, or B200) or XPUs (e.g Intel® Data Center GPU Max Series) "
)
elif not kernels_available:
# we can't quantize the model in this case so we raise an error
raise ValueError(
"MXFP4 quantization requires Triton and kernels installed: CUDA requires Triton >= 3.4.0, XPU requires Triton >= 3.5.0"
)
if not self.pre_quantized:
self._lazy_import_kernels()
device_map = kwargs.get("device_map")
if device_map is None:
logger.warning_once(
"You have loaded an FP4 model on CPU and have a CUDA/XPU device available, make sure to set "
"your model on a GPU/XPU device in order to run your model. To remove this warning, pass device_map = 'cuda' or device_map = 'xpu'. "
)
elif device_map is not None:
if (
not self.pre_quantized
and isinstance(device_map, dict)
and ("cpu" in device_map.values() or "disk" in device_map.values())
):
raise ValueError(
"You are attempting to load an FP4 model with a device_map that contains a CPU or disk device."
"This is not supported when the model is quantized on the fly. "
"Please use a quantized checkpoint or remove the CPU or disk device from the device_map."
)
def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
if dtype is None:
dtype = torch.bfloat16
logger.info(
"Overriding dtype=%s with `dtype=torch.bfloat16` due to "
"requirements of `fbgemm-gpu` to enable model loading in fp4. "
"Pass your own dtype to specify the dtype of the remaining non-linear layers or pass"
" dtype=torch.bfloat16 to remove this warning.",
dtype,
)
return dtype
def param_needs_quantization(self, model: "PreTrainedModel", param_name: str, **kwargs) -> bool:
from ..integrations import Mxfp4GptOssExperts
from ..models.gpt_oss.modeling_gpt_oss import GptOssExperts
if self.pre_quantized:
return False
# if we are dequantizing, the model doesn't have scales, and blocks only params like gate_up_proj and down_proj so we need to handle this case differently
if self.quantization_config.dequantize and ("blocks" in param_name or "scales" in param_name):
module, tensor_name = get_module_from_name(model, param_name[: -len("_blocks")])
else:
module, tensor_name = get_module_from_name(model, param_name)
if isinstance(module, Mxfp4GptOssExperts) or (
isinstance(module, GptOssExperts) and self.quantization_config.dequantize
):
if tensor_name in ["down_proj_bias", "gate_up_proj_bias"]:
return False
return True
return False
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
**kwargs,
):
from ..integrations import (
Mxfp4GptOssExperts,
dequantize,
load_and_swizzle_mxfp4,
quantize_to_mxfp4,
swizzle_mxfp4,
)
from ..models.gpt_oss.modeling_gpt_oss import GptOssExperts
if not self.pre_quantized:
triton_kernels_hub = self._lazy_import_kernels()
module, _ = get_module_from_name(model, param_name)
with torch.device(target_device):
if isinstance(module, Mxfp4GptOssExperts):
triton_weight_tensor, weight_scale = quantize_to_mxfp4(param_value, triton_kernels_hub)
PrecisionConfig, FlexCtx, InFlexData = (
triton_kernels_hub.matmul_ogs.PrecisionConfig,
triton_kernels_hub.matmul_ogs.FlexCtx,
triton_kernels_hub.matmul_ogs.InFlexData,
)
triton_weight_tensor, weight_scale = swizzle_mxfp4(
triton_weight_tensor, weight_scale, triton_kernels_hub
)
proj = "gate_up_proj" if "gate_up_proj" in param_name else "down_proj"
setattr(module, proj, triton_weight_tensor)
setattr(
module,
f"{proj}_precision_config",
PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())),
)
delattr(module, f"{proj}_blocks")
delattr(module, f"{proj}_scales")
# The params going here are either gate_up_proj_blocks, or down_proj_blocks, or gate_up_proj_scales, or down_proj_scales
else:
# This is when loading a quantized model (blocks and scales exist)
empty_param = kwargs.get("empty_param")
casting_dtype = kwargs.get("casting_dtype")
to_contiguous = kwargs.get("to_contiguous")
rank = kwargs.get("rank")
device_mesh = kwargs.get("device_mesh")
if ("blocks" in param_name or "scales" in param_name) and self.quantization_config.dequantize:
# blocks and scales have the same length that's why this works for both
module, _ = get_module_from_name(model, param_name[: -len("_blocks")])
else:
module, _ = get_module_from_name(model, param_name)
shard_kwargs = {
"empty_param": empty_param,
"casting_dtype": casting_dtype,
"to_contiguous": to_contiguous,
"rank": rank,
"device_mesh": device_mesh,
"model": model,
}
if isinstance(module, Mxfp4GptOssExperts) or (
isinstance(module, GptOssExperts) and self.quantization_config.dequantize
):
if self.quantization_config.dequantize:
# dq_param_name is the name of the parameter without the blocks or scales suffix, it's used in this case since we don't switch linears
# so we only have the original param name
dq_param_name = param_name[: -len("_blocks")]
dequantize(module, param_name, param_value, target_device, dq_param_name, **shard_kwargs)
else:
load_and_swizzle_mxfp4(
module,
param_name,
param_value,
target_device,
self._lazy_import_kernels(),
**shard_kwargs,
)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
# we are not really dequantizing, we are just removing everything related to quantization here
if self.quantization_config.dequantize:
self.remove_quantization_config(model)
# clean cache due to triton ops
if torch.cuda.is_available():
torch.cuda.empty_cache()
elif torch.xpu.is_available():
torch.xpu.empty_cache()
def update_expected_keys(self, model: "PreTrainedModel", expected_keys: list[str], checkpoint_keys: list[str]):
# Replace expected_keys for experts' gate_up_proj and down_proj with their _blocks and _scales variants
new_expected_keys = []
for key in expected_keys:
if key.endswith(".mlp.experts.gate_up_proj"):
base = key[: -len("gate_up_proj")]
new_expected_keys.append(base + "gate_up_proj_blocks")
new_expected_keys.append(base + "gate_up_proj_scales")
elif key.endswith(".mlp.experts.down_proj"):
base = key[: -len("down_proj")]
new_expected_keys.append(base + "down_proj_blocks")
new_expected_keys.append(base + "down_proj_scales")
elif not self.pre_quantized:
# in this case, we are quantizing the model so we need to update the keys as we changed the layers
if key.endswith(".mlp.experts.down_proj_blocks"):
base = key[: -len("down_proj_blocks")]
new_expected_keys.append(base + "down_proj")
elif key.endswith(".mlp.experts.gate_up_proj_blocks"):
base = key[: -len("gate_up_proj_blocks")]
new_expected_keys.append(base + "gate_up_proj")
elif key.endswith("scales"):
# we remove it the scales as the checkpoint don't contain them
continue
else:
new_expected_keys.append(key)
else:
new_expected_keys.append(key)
return new_expected_keys
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: list[str] | None = None,
**kwargs,
):
from ..integrations import replace_with_mxfp4_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
use_kernels = kwargs.get("use_kernels", False)
# if we are using kernels, we can't use the quantized model, since the forward pass is different and needs special handling
if use_kernels:
logger.warning_once(
"You are using full precision kernels, we will dequantize the model to bf16. "
"To use the quantized model with quantization kernels, please set use_kernels=False"
)
self.quantization_config.dequantize = True
config = model.config
model = replace_with_mxfp4_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
config=config,
)
model.config.quantization_config = self.quantization_config
def update_missing_keys(self, model, missing_keys: list[str], prefix: str) -> list[str]:
from ..integrations import Mxfp4GptOssExperts
not_missing_keys = []
for name, module in model.named_modules():
if isinstance(module, Mxfp4GptOssExperts):
for missing in missing_keys:
if (
(name in missing or name in f"{prefix}.{missing}")
and not missing.endswith(".weight")
and not missing.endswith(".bias")
):
not_missing_keys.append(missing)
return [k for k in missing_keys if k not in not_missing_keys]
def update_tp_plan(self, config):
if "GptOssConfig" in config.__class__.__name__:
if getattr(config, "base_model_tp_plan", None) is not None:
config.base_model_tp_plan.update(
{
"layers.*.mlp.experts.gate_up_proj_blocks": "grouped_gemm",
"layers.*.mlp.experts.gate_up_proj_scales": "grouped_gemm",
"layers.*.mlp.experts.down_proj_blocks": "grouped_gemm",
"layers.*.mlp.experts.down_proj_scales": "grouped_gemm",
}
)
return config
def update_ep_plan(self, config):
if "GptOssConfig" in config.__class__.__name__:
if getattr(config, "base_model_ep_plan", None) is not None:
config.base_model_ep_plan.update(
{
"layers.*.mlp.experts.gate_up_proj_blocks": "grouped_gemm",
"layers.*.mlp.experts.gate_up_proj_scales": "grouped_gemm",
"layers.*.mlp.experts.down_proj_blocks": "grouped_gemm",
"layers.*.mlp.experts.down_proj_scales": "grouped_gemm",
}
)
return config
def get_param_name(self, param_name: str) -> str:
if self.quantization_config.dequantize:
if "_blocks" in param_name:
return param_name.replace("_blocks", "")
elif "_scales" in param_name:
return param_name.replace("_scales", "")
elif not self.pre_quantized:
if param_name.endswith("gate_up_proj"):
return param_name.replace("gate_up_proj", "gate_up_proj_blocks")
if param_name.endswith("down_proj"):
return param_name.replace("down_proj", "down_proj_blocks")
return param_name
def get_state_dict_and_metadata(self, model, safe_serialization: bool = False):
from ..integrations import Mxfp4GptOssExperts
state_dict = model.state_dict()
# Get num_local_experts from model config
num_local_experts = getattr(model.config, "num_local_experts", 32)
hidden_size = getattr(model.config, "hidden_size", 2880)
for name, module in model.named_modules():
if (
isinstance(module, Mxfp4GptOssExperts)
and hasattr(module, "gate_up_proj")
and hasattr(module, "down_proj")
):
state_dict[f"{name}.gate_up_proj_blocks"] = (
module.gate_up_proj.storage.layout.unswizzle_data(module.gate_up_proj.storage.data)
.transpose(-1, -2)
.reshape(num_local_experts, -1, 90, 16)
)
state_dict[f"{name}.gate_up_proj_scales"] = (
module.gate_up_proj_precision_config.weight_scale.storage.layout.unswizzle_data(
module.gate_up_proj_precision_config.weight_scale.storage.data
).transpose(-1, -2)
)
state_dict[f"{name}.down_proj_blocks"] = (
module.down_proj.storage.layout.unswizzle_data(module.down_proj.storage.data)
.transpose(-1, -2)
.reshape(num_local_experts, hidden_size, 90, -1)
)
state_dict[f"{name}.down_proj_scales"] = (
module.down_proj_precision_config.weight_scale.storage.layout.unswizzle_data(
module.down_proj_precision_config.weight_scale.storage.data
).transpose(-1, -2)
)
metadata = {}
return state_dict, metadata
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
logger.warning_once(
"MXFP4 quantization don't support training, please consider dequantizing the model first by passing quantization_config=Mxfp4Config(dequantize=True) to .from_pretrained()"
)
return False
def get_quantize_ops(self):
from ..integrations.mxfp4 import Mxfp4Quantize
return Mxfp4Quantize(self)
def get_weight_conversions(self):
from ..integrations.mxfp4 import Mxfp4Dequantize, Mxfp4Deserialize
if self.pre_quantized:
if self.quantization_config.dequantize:
return [
WeightConverter(
source_patterns=["_blocks", "_scales"],
target_patterns="",
operations=[Mxfp4Dequantize(self)],
)
]
else:
return [
WeightConverter(
source_patterns=["_blocks", "_scales"],
target_patterns="",
operations=[Mxfp4Deserialize(self)],
)
]
return []
| Mxfp4HfQuantizer |
python | doocs__leetcode | solution/0000-0099/0035.Search Insert Position/Solution.py | {
"start": 0,
"end": 284
} | class ____:
def searchInsert(self, nums: List[int], target: int) -> int:
l, r = 0, len(nums)
while l < r:
mid = (l + r) >> 1
if nums[mid] >= target:
r = mid
else:
l = mid + 1
return l
| Solution |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/git_top_level/package.py | {
"start": 217,
"end": 413
} | class ____(Package):
"""Mock package that uses git for fetching."""
homepage = "http://www.git-fetch-example.com"
git = "https://example.com/some/git/repo"
version("1.0")
| GitTopLevel |
python | sqlalchemy__sqlalchemy | test/orm/test_subquery_relations.py | {
"start": 103674,
"end": 105392
} | class ____(fixtures.DeclarativeMappedTest):
"""test for [ticket:2887]"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Parent(ComparableEntity, Base):
__tablename__ = "parent"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(20))
children = relationship(
"Child", back_populates="parent", lazy="raise"
)
class Child(ComparableEntity, Base):
__tablename__ = "child"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(20))
parent_id = Column(Integer, ForeignKey("parent.id"))
parent = relationship(
"Parent", back_populates="children", lazy="joined"
)
@classmethod
def insert_data(cls, connection):
Parent = cls.classes.Parent
Child = cls.classes.Child
s = Session(connection)
s.add(Parent(name="parent", children=[Child(name="c1")]))
s.commit()
def test_subqueryload_on_joined_noload(self):
Parent = self.classes.Parent
Child = self.classes.Child
s = fixture_session()
# here we have
# Parent->subqueryload->Child->joinedload->parent->noload->children.
# the actual subqueryload has to emit *after* we've started populating
# Parent->subqueryload->child.
parent = (
s.query(Parent).options([subqueryload(Parent.children)]).first()
)
eq_(parent.children, [Child(name="c1")])
| JoinedNoLoadConflictTest |
python | scikit-image__scikit-image | tests/skimage/morphology/test_skeletonize.py | {
"start": 365,
"end": 8308
} | class ____:
@pytest.mark.parametrize("method", ["zhang", "lee"])
def test_no_foreground(self, method):
image = np.zeros((5, 5))
result = skeletonize(image, method=method)
assert_array_equal(result, np.zeros((5, 5)))
@pytest.mark.parametrize(
"ndim,method", [(1, "zhang"), (3, "zhang"), (1, "lee"), (4, "lee")]
)
def test_wrong_ndim(self, ndim, method):
image = np.zeros((5,) * ndim, dtype=bool)
with pytest.raises(ValueError):
skeletonize(image, method=method)
def test_wrong_method(self):
image = np.ones((5, 5), dtype=bool)
with pytest.raises(ValueError):
skeletonize(image, method="foo")
@pytest.mark.parametrize("method", ["zhang", "lee"])
def test_skeletonize_all_foreground(self, method):
image = np.ones((3, 4), dtype=bool)
result = skeletonize(image, method=method)
if method == "zhang":
expected = np.array([[0, 0, 1, 0], [1, 1, 0, 0], [0, 0, 0, 0]], dtype=bool)
else: # "lee"
expected = np.array([[0, 0, 0, 0], [1, 1, 1, 1], [0, 0, 0, 0]], dtype=bool)
assert_array_equal(result, expected)
@pytest.mark.parametrize("method", ["zhang", "lee"])
def test_single_point(self, method):
image = np.zeros((5, 5), dtype=bool)
image[3, 3] = 1
result = skeletonize(image, method=method)
assert_array_equal(result, image)
@pytest.mark.parametrize("method", ["zhang", "lee"])
def test_vec_1d(self, method):
# Corner case of a 2D image, which is a 1D vector
image = np.ones((5, 1), dtype=bool)
result = skeletonize(image, method=method)
assert_array_equal(result, image)
@pytest.mark.parametrize("method", ["zhang", "lee"])
def test_already_thinned(self, method):
image = np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 1, 1, 1, 0],
[1, 0, 0, 0, 0],
],
dtype=bool,
)
result = skeletonize(image, method=method)
assert_array_equal(result, image)
def test_output(self):
image = io.imread(fetch("data/bw_text.png"), as_gray=True)
# make black the foreground
image = image == 0
result = skeletonize(image)
expected = np.load(fetch("data/bw_text_skeleton.npy"))
assert_array_equal(result, expected)
@pytest.mark.parametrize("method", ["zhang", "lee"])
@pytest.mark.parametrize("dtype", [bool, float, int])
def test_num_neighbors(self, method, dtype):
# an empty image
image = np.zeros((300, 300), dtype=dtype)
# foreground object 1
image[10:-10, 10:100] = 1
image[-100:-10, 10:-10] = 2
image[10:-10, -100:-10] = 3
# foreground object 2
rs, cs = draw.line(250, 150, 10, 280)
for i in range(10):
image[rs + i, cs] = 4
rs, cs = draw.line(10, 150, 250, 280)
for i in range(20):
image[rs + i, cs] = 5
# foreground object 3
ir, ic = np.indices(image.shape)
circle1 = (ic - 135) ** 2 + (ir - 150) ** 2 < 30**2
circle2 = (ic - 135) ** 2 + (ir - 150) ** 2 < 20**2
image[circle1] = 1
image[circle2] = 0
result = skeletonize(image, method=method).astype(np.uint8)
# there should never be a 2x2 block of foreground pixels in a skeleton
mask = np.array([[1, 1], [1, 1]], np.uint8)
blocks = ndi.correlate(result, mask, mode="constant")
assert not np.any(blocks == 4)
def test_lut_fix(self):
image = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
result = skeletonize(image)
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
assert np.all(result == expected)
@pytest.mark.parametrize("ndim,method", [(2, "zhang"), (2, "lee"), (3, "lee")])
@pytest.mark.parametrize("dtype", [bool, np.uint8])
def test_input_not_modified(self, method, ndim, dtype):
# Skeletonize must not modify the input image
image = np.ones((3,) * ndim, dtype=dtype)
image = np.pad(image, 1)
original = image.copy()
_ = skeletonize(image, method=method)
np.testing.assert_array_equal(image, original)
@pytest.mark.parametrize("method", ["zhang", "lee"])
def test_input_float_conv(self, method):
# Check that the floats are correctly handled. Also check non-contiguous input
image = np.random.random((16, 16))[::2, ::2]
image[image < 0.5] = 0.0
original = image.copy()
result = skeletonize(image, method=method)
assert result.dtype == bool
assert_array_equal(image, original)
def test_two_hole_image_vs_fiji(self):
# Test a simple 2D image against FIJI
image = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
expected = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
result = skeletonize(image, method="lee")
assert_array_equal(result, expected)
def test_3d_vs_fiji(self):
# Generate an image with blobs and compare its skeleton
# to the one generated by FIJI (Plugins>Skeleton->Skeletonize)
image = binary_blobs(32, 0.05, n_dim=3, rng=1234)
image = image[:-2, ...]
result = skeletonize(image)
expected = io.imread(fetch("data/_blobs_3d_fiji_skeleton.tif")).astype(bool)
assert_array_equal(result, expected)
| TestSkeletonize |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_get_value.py | {
"start": 71,
"end": 679
} | class ____:
def test_get_set_value_no_partial_indexing(self):
# partial w/ MultiIndex raise exception
index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
df = DataFrame(index=index, columns=range(4))
with pytest.raises(KeyError, match=r"^0$"):
df._get_value(0, 1)
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
result = float_frame._get_value(idx, col)
expected = float_frame[col][idx]
assert result == expected
| TestGetValue |
python | django__django | tests/test_utils/tests.py | {
"start": 39541,
"end": 43011
} | class ____(SimpleTestCase):
def test_needle_msg(self):
msg = (
"False is not true : Couldn't find '<b>Hello</b>' in the following "
"response\n'<p>Test</p>'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertInHTML("<b>Hello</b>", "<p>Test</p>")
def test_msg_prefix(self):
msg = (
"False is not true : Prefix: Couldn't find '<b>Hello</b>' in the following "
'response\n\'<input type="text" name="Hello" />\''
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertInHTML(
"<b>Hello</b>",
'<input type="text" name="Hello" />',
msg_prefix="Prefix",
)
def test_count_msg_prefix(self):
msg = (
"2 != 1 : Prefix: Found 2 instances of '<b>Hello</b>' (expected 1) in the "
"following response\n'<b>Hello</b><b>Hello</b>'"
""
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertInHTML(
"<b>Hello</b>",
"<b>Hello</b><b>Hello</b>",
count=1,
msg_prefix="Prefix",
)
def test_base(self):
haystack = "<p><b>Hello</b> <span>there</span>! Hi <span>there</span>!</p>"
self.assertInHTML("<b>Hello</b>", haystack=haystack)
msg = f"Couldn't find '<p>Howdy</p>' in the following response\n{haystack!r}"
with self.assertRaisesMessage(AssertionError, msg):
self.assertInHTML("<p>Howdy</p>", haystack)
self.assertInHTML("<span>there</span>", haystack=haystack, count=2)
msg = (
"Found 1 instances of '<b>Hello</b>' (expected 2) in the following response"
f"\n{haystack!r}"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertInHTML("<b>Hello</b>", haystack=haystack, count=2)
def test_long_haystack(self):
haystack = (
"<p>This is a very very very very very very very very long message which "
"exceeds the max limit of truncation.</p>"
)
msg = f"Couldn't find '<b>Hello</b>' in the following response\n{haystack!r}"
with self.assertRaisesMessage(AssertionError, msg):
self.assertInHTML("<b>Hello</b>", haystack)
msg = (
"Found 0 instances of '<b>This</b>' (expected 3) in the following response"
f"\n{haystack!r}"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertInHTML("<b>This</b>", haystack, 3)
def test_assert_not_in_html(self):
haystack = "<p><b>Hello</b> <span>there</span>! Hi <span>there</span>!</p>"
self.assertNotInHTML("<b>Hi</b>", haystack=haystack)
msg = (
"'<b>Hello</b>' unexpectedly found in the following response"
f"\n{haystack!r}"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertNotInHTML("<b>Hello</b>", haystack=haystack)
def test_assert_not_in_html_msg_prefix(self):
haystack = "<p>Hello</p>"
msg = (
"1 != 0 : Prefix: '<p>Hello</p>' unexpectedly found in the following "
f"response\n{haystack!r}"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertNotInHTML("<p>Hello</p>", haystack=haystack, msg_prefix="Prefix")
| InHTMLTests |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis14.py | {
"start": 315,
"end": 2149
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis14.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "stock"})
date_format = workbook.add_format({"num_format": 14})
chart.axis_ids = [43814272, 54517760]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column("A:D", 11)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$D$1:$D$5",
}
)
chart.set_y_axis({"min": 0, "max": 30})
chart.set_x_axis({"min": 39083, "max": 39087})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/integrations/repository/issue_alert.py | {
"start": 1927,
"end": 3044
} | class ____(BaseNewNotificationMessage):
rule_fire_history_id: int | None = None
rule_action_uuid: str | None = None
open_period_start: datetime | None = None
def get_validation_error(self) -> Exception | None:
error = super().get_validation_error()
if error is not None:
return error
if self.message_identifier is not None:
# If a message_identifier exists, that means a successful notification happened for a rule action and fire
# This means that neither of them can be empty
if self.rule_fire_history_id is None or self.rule_action_uuid is None:
return RuleFireHistoryAndRuleActionUuidActionValidationError()
# We can create a NotificationMessage if it has both, or neither, of rule fire history and action.
# The following is an XNOR check for rule fire history and action
if (self.rule_fire_history_id is not None) != (self.rule_action_uuid is not None):
return RuleFireHistoryAndRuleActionUuidActionValidationError()
return None
| NewIssueAlertNotificationMessage |
python | python-pillow__Pillow | Tests/test_color_lut.py | {
"start": 16319,
"end": 18289
} | class ____:
def test_wrong_channels_count(self) -> None:
with pytest.raises(ValueError, match="3 or 4 output channels"):
ImageFilter.Color3DLUT.generate(
5, channels=2, callback=lambda r, g, b: (r, g, b)
)
with pytest.raises(ValueError, match="should have either channels"):
ImageFilter.Color3DLUT.generate(5, lambda r, g, b: (r, g, b, r))
with pytest.raises(ValueError, match="should have either channels"):
ImageFilter.Color3DLUT.generate(
5, channels=4, callback=lambda r, g, b: (r, g, b)
)
def test_3_channels(self) -> None:
lut = ImageFilter.Color3DLUT.generate(5, lambda r, g, b: (r, g, b))
assert tuple(lut.size) == (5, 5, 5)
assert lut.name == "Color 3D LUT"
# fmt: off
assert lut.table[:24] == [
0.0, 0.0, 0.0, 0.25, 0.0, 0.0, 0.5, 0.0, 0.0, 0.75, 0.0, 0.0,
1.0, 0.0, 0.0, 0.0, 0.25, 0.0, 0.25, 0.25, 0.0, 0.5, 0.25, 0.0]
# fmt: on
def test_4_channels(self) -> None:
lut = ImageFilter.Color3DLUT.generate(
5, channels=4, callback=lambda r, g, b: (b, r, g, (r + g + b) / 2)
)
assert tuple(lut.size) == (5, 5, 5)
assert lut.name == "Color 3D LUT"
# fmt: off
assert lut.table[:24] == [
0.0, 0.0, 0.0, 0.0, 0.0, 0.25, 0.0, 0.125, 0.0, 0.5, 0.0, 0.25,
0.0, 0.75, 0.0, 0.375, 0.0, 1.0, 0.0, 0.5, 0.0, 0.0, 0.25, 0.125
]
# fmt: on
def test_apply(self) -> None:
lut = ImageFilter.Color3DLUT.generate(5, lambda r, g, b: (r, g, b))
g = Image.linear_gradient("L")
im = Image.merge(
"RGB",
[
g,
g.transpose(Image.Transpose.ROTATE_90),
g.transpose(Image.Transpose.ROTATE_180),
],
)
assert im == im.filter(lut)
| TestGenerateColorLut3D |
python | python__mypy | mypy/test/test_find_sources.py | {
"start": 308,
"end": 1921
} | class ____(FileSystemCache):
def __init__(self, files: set[str]) -> None:
self.files = {os.path.abspath(f) for f in files}
def isfile(self, path: str) -> bool:
return path in self.files
def isdir(self, path: str) -> bool:
if not path.endswith(os.sep):
path += os.sep
return any(f.startswith(path) for f in self.files)
def listdir(self, path: str) -> list[str]:
if not path.endswith(os.sep):
path += os.sep
return list({f[len(path) :].split(os.sep)[0] for f in self.files if f.startswith(path)})
def init_under_package_root(self, path: str) -> bool:
return False
def normalise_path(path: str) -> str:
path = os.path.splitdrive(path)[1]
path = path.replace(os.sep, "/")
return path
def normalise_build_source_list(sources: list[BuildSource]) -> list[tuple[str, str | None]]:
return sorted(
(s.module, (normalise_path(s.base_dir) if s.base_dir is not None else None))
for s in sources
)
def crawl(finder: SourceFinder, f: str) -> tuple[str, str]:
module, base_dir = finder.crawl_up(f)
return module, normalise_path(base_dir)
def find_sources_in_dir(finder: SourceFinder, f: str) -> list[tuple[str, str | None]]:
return normalise_build_source_list(finder.find_sources_in_dir(os.path.abspath(f)))
def find_sources(
paths: list[str], options: Options, fscache: FileSystemCache
) -> list[tuple[str, str | None]]:
paths = [os.path.abspath(p) for p in paths]
return normalise_build_source_list(create_source_list(paths, options, fscache))
| FakeFSCache |
python | plotly__plotly.py | plotly/graph_objs/layout/slider/_transition.py | {
"start": 235,
"end": 3438
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.slider"
_path_str = "layout.slider.transition"
_valid_props = {"duration", "easing"}
@property
def duration(self):
"""
Sets the duration of the slider transition
The 'duration' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["duration"]
@duration.setter
def duration(self, val):
self["duration"] = val
@property
def easing(self):
"""
Sets the easing function of the slider transition
The 'easing' property is an enumeration that may be specified as:
- One of the following enumeration values:
['linear', 'quad', 'cubic', 'sin', 'exp', 'circle',
'elastic', 'back', 'bounce', 'linear-in', 'quad-in',
'cubic-in', 'sin-in', 'exp-in', 'circle-in', 'elastic-in',
'back-in', 'bounce-in', 'linear-out', 'quad-out',
'cubic-out', 'sin-out', 'exp-out', 'circle-out',
'elastic-out', 'back-out', 'bounce-out', 'linear-in-out',
'quad-in-out', 'cubic-in-out', 'sin-in-out', 'exp-in-out',
'circle-in-out', 'elastic-in-out', 'back-in-out',
'bounce-in-out']
Returns
-------
Any
"""
return self["easing"]
@easing.setter
def easing(self, val):
self["easing"] = val
@property
def _prop_descriptions(self):
return """\
duration
Sets the duration of the slider transition
easing
Sets the easing function of the slider transition
"""
def __init__(self, arg=None, duration=None, easing=None, **kwargs):
"""
Construct a new Transition object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.slider.Transition`
duration
Sets the duration of the slider transition
easing
Sets the easing function of the slider transition
Returns
-------
Transition
"""
super().__init__("transition")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.slider.Transition
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.slider.Transition`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("duration", arg, duration)
self._set_property("easing", arg, easing)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Transition |
python | jina-ai__jina | jina/proto/docarray_v1/pb/jina_pb2_grpc.py | {
"start": 14877,
"end": 15393
} | class ____(object):
"""*
jina gRPC service to expose information about running jina version and environment.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self._status = channel.unary_unary(
'/jina.JinaInfoRPC/_status',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=jina__pb2.JinaInfoProto.FromString,
)
| JinaInfoRPCStub |
python | doocs__leetcode | solution/3100-3199/3163.String Compression III/Solution.py | {
"start": 0,
"end": 302
} | class ____:
def compressedString(self, word: str) -> str:
g = groupby(word)
ans = []
for c, v in g:
k = len(list(v))
while k:
x = min(9, k)
ans.append(str(x) + c)
k -= x
return "".join(ans)
| Solution |
python | altair-viz__altair | altair/utils/data.py | {
"start": 3387,
"end": 7037
} | class ____(Exception):
"""Raised when a data model has too many rows."""
def __init__(self, message: str, /) -> None:
self.message = message
super().__init__(self.message)
@classmethod
def from_limit_rows(cls, user_rows: int, max_rows: int, /) -> MaxRowsError:
msg = (
f"The number of rows in your dataset ({user_rows}) is greater "
f"than the maximum allowed ({max_rows}).\n\n"
"Try enabling the VegaFusion data transformer which "
"raises this limit by pre-evaluating data\n"
"transformations in Python.\n"
" >> import altair as alt\n"
' >> alt.data_transformers.enable("vegafusion")\n\n'
"Or, see https://altair-viz.github.io/user_guide/large_datasets.html "
"for additional information\n"
"on how to plot large datasets."
)
return cls(msg)
@overload
def limit_rows(data: None = ..., max_rows: int | None = ...) -> partial: ...
@overload
def limit_rows(data: DataType, max_rows: int | None = ...) -> DataType: ...
def limit_rows(
data: DataType | None = None, max_rows: int | None = 5000
) -> partial | DataType:
"""
Raise MaxRowsError if the data model has more than max_rows.
If max_rows is None, then do not perform any check.
"""
if data is None:
return partial(limit_rows, max_rows=max_rows)
check_data_type(data)
if isinstance(data, SupportsGeoInterface):
if data.__geo_interface__["type"] == "FeatureCollection":
values = data.__geo_interface__["features"]
else:
values = data.__geo_interface__
elif isinstance(data, dict):
if "values" in data:
values = data["values"]
else:
return data
else:
data = to_eager_narwhals_dataframe(data)
values = data
n = len(values)
if max_rows is not None and n > max_rows:
raise MaxRowsError.from_limit_rows(n, max_rows)
return data
@overload
def sample(
data: None = ..., n: int | None = ..., frac: float | None = ...
) -> partial: ...
@overload
def sample(
data: TIntoDataFrame, n: int | None = ..., frac: float | None = ...
) -> TIntoDataFrame: ...
@overload
def sample(
data: DataType, n: int | None = ..., frac: float | None = ...
) -> SampleReturnType: ...
def sample(
data: DataType | None = None,
n: int | None = None,
frac: float | None = None,
) -> partial | SampleReturnType:
"""Reduce the size of the data model by sampling without replacement."""
if data is None:
return partial(sample, n=n, frac=frac)
check_data_type(data)
if is_pandas_dataframe(data):
return data.sample(n=n, frac=frac)
elif isinstance(data, dict):
if "values" in data:
values = data["values"]
if not n:
if frac is None:
msg = "frac cannot be None if n is None and data is a dictionary"
raise ValueError(msg)
n = int(frac * len(values))
values = random.sample(values, n)
return {"values": values}
else:
# Maybe this should raise an error or return something useful?
return None
data = nw.from_native(data, eager_only=True)
if not n:
if frac is None:
msg = "frac cannot be None if n is None with this data input type"
raise ValueError(msg)
n = int(frac * len(data))
indices = random.sample(range(len(data)), n)
return data[indices].to_native()
_FormatType = Literal["csv", "json"]
| MaxRowsError |
python | huggingface__transformers | src/transformers/models/umt5/modeling_umt5.py | {
"start": 16061,
"end": 17069
} | class ____(nn.Module):
def __init__(self, config, layer_idx: Optional[int] = None):
super().__init__()
self.SelfAttention = UMT5Attention(config, has_relative_attention_bias=True, layer_idx=layer_idx)
self.layer_norm = UMT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
past_key_values=None,
cache_position=None,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
| UMT5LayerSelfAttention |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/argparsing/parsers.py | {
"start": 18336,
"end": 19252
} | class ____(Parser, metaclass=abc.ABCMeta):
"""Base class for key/value composite argument parsers."""
@abc.abstractmethod
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
namespace = state.current_namespace
parsers = self.get_parsers(state)
keys = list(parsers)
with state.delimit(PAIR_DELIMITER, required=False) as pair: # type: ParserBoundary
while pair.ready:
with state.delimit(ASSIGNMENT_DELIMITER):
key = ChoicesParser(keys).parse(state)
value = parsers[key].parse(state)
setattr(namespace, key, value)
keys.remove(key)
return namespace
| KeyValueParser |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/node_definition.py | {
"start": 1215,
"end": 8729
} | class ____(NamedConfigurableDefinition):
_name: str
_description: Optional[str]
_tags: Mapping[str, str]
_input_defs: Sequence["InputDefinition"]
_input_dict: Mapping[str, "InputDefinition"]
_output_defs: Sequence["OutputDefinition"]
_output_dict: Mapping[str, "OutputDefinition"]
_positional_inputs: Sequence[str]
def __init__(
self,
name: str,
input_defs: Sequence["InputDefinition"],
output_defs: Sequence["OutputDefinition"],
description: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
positional_inputs: Optional[Sequence[str]] = None,
):
self._name = check_valid_name(name)
self._description = check.opt_str_param(description, "description")
self._tags = normalize_tags(tags)
self._input_defs = input_defs
self._input_dict = {input_def.name: input_def for input_def in input_defs}
check.invariant(len(self._input_defs) == len(self._input_dict), "Duplicate input def names")
self._output_defs = output_defs
self._output_dict = {output_def.name: output_def for output_def in output_defs}
check.invariant(
len(self._output_defs) == len(self._output_dict), "Duplicate output def names"
)
check.opt_sequence_param(positional_inputs, "positional_inputs", str)
self._positional_inputs = (
positional_inputs
if positional_inputs is not None
else [inp.name for inp in self._input_defs]
)
@property
@abstractmethod
def node_type_str(self) -> str: ...
@property
@abstractmethod
def is_graph_job_op_node(self) -> bool: ...
@abstractmethod
def all_dagster_types(self) -> Iterable["DagsterType"]: ...
@property
def name(self) -> str:
return self._name
def describe_node(self) -> str:
return f"{self.node_type_str} '{self.name}'"
@property
def description(self) -> Optional[str]:
return self._description
@property
def tags(self) -> Mapping[str, str]:
return self._tags
@property
def positional_inputs(self) -> Sequence[str]:
return self._positional_inputs
@property
def input_defs(self) -> Sequence["InputDefinition"]:
return self._input_defs
@property
def input_dict(self) -> Mapping[str, "InputDefinition"]:
return self._input_dict
def resolve_input_name_at_position(self, idx: int) -> Optional[str]:
if idx >= len(self._positional_inputs):
if not (
len(self._input_defs) - len(self._positional_inputs) == 1
and idx == len(self._input_defs) - 1
):
return None
# handle special case where there is only 1 non-positional arg that we could resolve to
names = [
inp.name for inp in self._input_defs if inp.name not in self._positional_inputs
]
check.invariant(len(names) == 1, "if check above should prevent this")
return names[0]
return self._positional_inputs[idx]
@property
def output_defs(self) -> Sequence["OutputDefinition"]:
return self._output_defs
@property
def output_dict(self) -> Mapping[str, "OutputDefinition"]:
return self._output_dict
def has_input(self, name: str) -> bool:
check.str_param(name, "name")
return name in self._input_dict
def input_def_named(self, name: str) -> "InputDefinition":
check.str_param(name, "name")
return self._input_dict[name]
def has_output(self, name: str) -> bool:
check.str_param(name, "name")
return name in self._output_dict
def output_def_named(self, name: str) -> "OutputDefinition":
check.str_param(name, "name")
if name not in self._output_dict:
raise DagsterInvariantViolationError(f"{self._name} has no output named {name}.")
return self._output_dict[name]
@abstractmethod
def iterate_node_defs(self) -> Iterable["NodeDefinition"]: ...
@abstractmethod
def iterate_op_defs(self) -> Iterable["OpDefinition"]: ...
@abstractmethod
def resolve_output_to_origin(
self,
output_name: str,
handle: Optional["NodeHandle"],
) -> tuple["OutputDefinition", Optional["NodeHandle"]]: ...
@abstractmethod
def resolve_output_to_origin_op_def(self, output_name: str) -> "OpDefinition": ...
@abstractmethod
def resolve_input_to_destinations(
self, input_handle: "NodeInputHandle"
) -> Sequence["NodeInputHandle"]:
"""Recursively follow input mappings to find all op inputs that correspond to the given input
to this graph.
"""
@abstractmethod
def input_has_default(self, input_name: str) -> bool: ...
@abstractmethod
def default_value_for_input(self, input_name: str) -> object: ...
@abstractmethod
def input_supports_dynamic_output_dep(self, input_name: str) -> bool: ...
def all_input_output_types(self) -> Iterator["DagsterType"]:
for input_def in self._input_defs:
yield input_def.dagster_type
yield from input_def.dagster_type.inner_types
for output_def in self._output_defs:
yield output_def.dagster_type
yield from output_def.dagster_type.inner_types
def get_pending_invocation(
self,
given_alias: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
hook_defs: Optional[AbstractSet[HookDefinition]] = None,
retry_policy: Optional[RetryPolicy] = None,
) -> "PendingNodeInvocation":
from dagster._core.definitions.composition import PendingNodeInvocation
return PendingNodeInvocation(
node_def=self,
given_alias=given_alias,
tags=normalize_tags(tags) if tags else None,
hook_defs=hook_defs,
retry_policy=retry_policy,
)
def __call__(self, *args: object, **kwargs: object) -> object:
return self.get_pending_invocation()(*args, **kwargs)
def alias(self, name: str) -> "PendingNodeInvocation":
return self.get_pending_invocation(given_alias=name)
def tag(self, tags: Optional[Mapping[str, str]]) -> "PendingNodeInvocation":
return self.get_pending_invocation(tags=tags)
def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "PendingNodeInvocation":
hook_defs = frozenset(check.set_param(hook_defs, "hook_defs", of_type=HookDefinition))
return self.get_pending_invocation(hook_defs=hook_defs)
def with_retry_policy(self, retry_policy: RetryPolicy) -> "PendingNodeInvocation":
return self.get_pending_invocation(retry_policy=retry_policy)
@abstractmethod
def get_inputs_must_be_resolved_top_level(
self, asset_layer: "AssetLayer", handle: Optional["NodeHandle"] = None
) -> Sequence["InputDefinition"]: ...
@abstractmethod
def resolve_output_to_destinations(
self, output_name: str, handle: Optional["NodeHandle"]
) -> Sequence["NodeInputHandle"]: ...
@abstractmethod
def get_op_handles(self, parent: "NodeHandle") -> AbstractSet["NodeHandle"]: ...
@abstractmethod
def get_op_output_handles(
self, parent: Optional["NodeHandle"]
) -> AbstractSet["NodeOutputHandle"]: ...
@property
@abstractmethod
def pools(self) -> Set[str]: ...
| NodeDefinition |
python | kamyu104__LeetCode-Solutions | Python/number-of-integers-with-popcount-depth-equal-to-k-i.py | {
"start": 482,
"end": 1158
} | class ____(object):
def popcountDepth(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
def count(c):
result = cnt = 0
for i in reversed(xrange(n.bit_length())):
if not (n&(1<<i)):
continue
if 0 <= c-cnt <= i:
result += NCR[i][c-cnt]
cnt += 1
if cnt == c:
result += 1
return result
if k == 0:
return 1
if k == 1:
return n.bit_length()-1
return sum(count(c) for c in xrange(2, n.bit_length()+1) if D[c] == k-1)
| Solution |
python | numba__llvmlite | llvmlite/binding/newpassmanagers.py | {
"start": 3302,
"end": 3541
} | class ____(IntFlag):
PER_BB = 0b0001 # noqa: E221
DIAMOND = 0b0010 # noqa: E221
FANOUT = 0b0100 # noqa: E221
FANOUT_RAISE = 0b1000
ALL = PER_BB | DIAMOND | FANOUT | FANOUT_RAISE
| RefPruneSubpasses |
python | fastai__fastai | fastai/learner.py | {
"start": 23626,
"end": 24099
} | class ____(Metric):
"Smooth average of the losses (exponentially weighted with `beta`)"
def __init__(self, beta=0.98): self.beta = beta
def reset(self): self.count,self.val = 0,tensor(0.)
def accumulate(self, learn):
self.count += 1
self.val = torch.lerp(to_detach(learn.loss.mean()), self.val, self.beta)
@property
def value(self): return self.val/(1-self.beta**self.count)
# %% ../nbs/13a_learner.ipynb 129
| AvgSmoothLoss |
python | lazyprogrammer__machine_learning_examples | rl3/es_flappy.py | {
"start": 488,
"end": 1805
} | class ____:
def __init__(self):
self.game = FlappyBird(pipe_gap=125)
self.env = PLE(self.game, fps=30, display_screen=False)
self.env.init()
self.env.getGameState = self.game.getGameState # maybe not necessary
# by convention we want to use (0,1)
# but the game uses (None, 119)
self.action_map = self.env.getActionSet() #[None, 119]
def step(self, action):
action = self.action_map[action]
reward = self.env.act(action)
done = self.env.game_over()
obs = self.get_observation()
# don't bother returning an info dictionary like gym
return obs, reward, done
def reset(self):
self.env.reset_game()
return self.get_observation()
def get_observation(self):
# game state returns a dictionary which describes
# the meaning of each value
# we only want the values
obs = self.env.getGameState()
return np.array(list(obs.values()))
def set_display(self, boolean_value):
self.env.display_screen = boolean_value
# make a global environment to be used throughout the script
env = Env()
### neural network
# hyperparameters
D = len(env.reset())*HISTORY_LENGTH
M = 50
K = 2
def softmax(a):
c = np.max(a, axis=1, keepdims=True)
e = np.exp(a - c)
return e / e.sum(axis=-1, keepdims=True)
def relu(x):
return x * (x > 0)
| Env |
python | dagster-io__dagster | docs/sphinx/_ext/dagster-sphinx/dagster_sphinx/docstring_flags.py | {
"start": 3494,
"end": 4565
} | class ____(nodes.Element):
local_attributes = [*nodes.Element.local_attributes, *FLAG_ATTRS]
def visit_flag(self, node: flag):
flag_type, message = [node.attributes[k] for k in FLAG_ATTRS]
# We are currently not parsing the content of the message, so manually sub
# all `references` with `<cite>` tags, which is what the HTML writer does
# for parsed RST.
message = re.sub(r"`(\S+?)`", r"<cite>\1</cite>", message)
header, *body = message.splitlines()
processed_lines = [header, *(f"<p>{line}</>" for line in body)]
message_html = "\n".join(processed_lines)
# The "hidden" elements are not visible on screen, but are picked up by the search
# crawler to provide better structure to search results.
html = f"""
<div class="flag">
<p>
<span class="flag {flag_type}">
<span class="hidden">(</span>
{flag_type}
<span class="hidden">)</span>
</span>
</>
{message_html}
</div>
"""
self.body.append(html)
def depart_flag(self, node: flag): ...
| flag |
python | pypa__setuptools | setuptools/config/setupcfg.py | {
"start": 7607,
"end": 16863
} | class ____(Generic[Target]):
"""Handles metadata supplied in configuration files."""
section_prefix: str
"""Prefix for config sections handled by this handler.
Must be provided by class heirs.
"""
aliases: ClassVar[dict[str, str]] = {}
"""Options aliases.
For compatibility with various packages. E.g.: d2to1 and pbr.
Note: `-` in keys is replaced with `_` by config parser.
"""
def __init__(
self,
target_obj: Target,
options: AllCommandOptions,
ignore_option_errors,
ensure_discovered: expand.EnsurePackagesDiscovered,
) -> None:
self.ignore_option_errors = ignore_option_errors
self.target_obj: Target = target_obj
self.sections = dict(self._section_options(options))
self.set_options: list[str] = []
self.ensure_discovered = ensure_discovered
self._referenced_files = set[str]()
"""After parsing configurations, this property will enumerate
all files referenced by the "file:" directive. Private API for setuptools only.
"""
@classmethod
def _section_options(
cls, options: AllCommandOptions
) -> Iterator[tuple[str, SingleCommandOptions]]:
for full_name, value in options.items():
pre, _sep, name = full_name.partition(cls.section_prefix)
if pre:
continue
yield name.lstrip('.'), value
@property
@abstractmethod
def parsers(self) -> dict[str, Callable]:
"""Metadata item name to parser function mapping."""
raise NotImplementedError(
f'{self.__class__.__name__} must provide .parsers property'
)
def __setitem__(self, option_name, value) -> None:
target_obj = self.target_obj
# Translate alias into real name.
option_name = self.aliases.get(option_name, option_name)
try:
current_value = getattr(target_obj, option_name)
except AttributeError as e:
raise KeyError(option_name) from e
if current_value:
# Already inhabited. Skipping.
return
try:
parsed = self.parsers.get(option_name, lambda x: x)(value)
except (Exception,) * self.ignore_option_errors:
return
simple_setter = functools.partial(target_obj.__setattr__, option_name)
setter = getattr(target_obj, f"set_{option_name}", simple_setter)
setter(parsed)
self.set_options.append(option_name)
@classmethod
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = '='
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise OptionError(f"Unable to parse option value to dict: {value}")
result[key.strip()] = val.strip()
return result
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ('1', 'true', 'yes')
@classmethod
def _exclude_files_parser(cls, key):
"""Returns a parser function to make sure field inputs
are not files.
Parses a value after getting the key so error messages are
more informative.
:param key:
:rtype: callable
"""
def parser(value):
exclude_directive = 'file:'
if value.startswith(exclude_directive):
raise ValueError(
f'Only strings are accepted for the {key} field, '
'files are not accepted'
)
return _static.Str(value)
return parser
def _parse_file(self, value, root_dir: StrPath | None):
"""Represents value as a string, allowing including text
from nearest files using `file:` directive.
Directive is sandboxed and won't reach anything outside
directory with setup.py.
Examples:
file: README.rst, CHANGELOG.md, src/file.txt
:param str value:
:rtype: str
"""
include_directive = 'file:'
if not isinstance(value, str):
return value
if not value.startswith(include_directive):
return _static.Str(value)
spec = value[len(include_directive) :]
filepaths = [path.strip() for path in spec.split(',')]
self._referenced_files.update(filepaths)
# XXX: Is marking as static contents coming from files too optimistic?
return _static.Str(expand.read_files(filepaths, root_dir))
def _parse_attr(self, value, package_dir, root_dir: StrPath):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return _static.Str(value)
attr_desc = value.replace(attr_directive, '')
# Make sure package_dir is populated correctly, so `attr:` directives can work
package_dir.update(self.ensure_discovered.package_dir)
return expand.read_attr(attr_desc, package_dir, root_dir)
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict_with_key(cls, section_options, values_parser):
"""Parses section options into a dictionary.
Applies a given parser to each option in a section.
:param dict section_options:
:param callable values_parser: function with 2 args corresponding to key, value
:rtype: dict
"""
value = {}
for key, (_, val) in section_options.items():
value[key] = values_parser(key, val)
return value
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to each value.
:param dict section_options:
:param callable values_parser: function with 1 arg corresponding to option value
:rtype: dict
"""
parser = (lambda _, v: values_parser(v)) if values_parser else (lambda _, v: v)
return cls._parse_section_to_dict_with_key(section_options, parser)
def parse_section(self, section_options) -> None:
"""Parses configuration file section.
:param dict section_options:
"""
for name, (_, value) in section_options.items():
with contextlib.suppress(KeyError):
# Keep silent for a new option may appear anytime.
self[name] = value
def parse(self) -> None:
"""Parses configuration file items from one
or more related sections.
"""
for section_name, section_options in self.sections.items():
method_postfix = ''
if section_name: # [section.option] variant
method_postfix = f"_{section_name}"
section_parser_method: Callable | None = getattr(
self,
# Dots in section names are translated into dunderscores.
f'parse_section{method_postfix}'.replace('.', '__'),
None,
)
if section_parser_method is None:
raise OptionError(
"Unsupported distribution option section: "
f"[{self.section_prefix}.{section_name}]"
)
section_parser_method(section_options)
def _deprecated_config_handler(self, func, msg, **kw):
"""this function will wrap around parameters that are deprecated
:param msg: deprecation message
:param func: function to be wrapped around
"""
@wraps(func)
def config_handler(*args, **kwargs):
kw.setdefault("stacklevel", 2)
_DeprecatedConfig.emit("Deprecated config in `setup.cfg`", msg, **kw)
return func(*args, **kwargs)
return config_handler
| ConfigHandler |
python | kamyu104__LeetCode-Solutions | Python/minimum-adjacent-swaps-to-make-a-valid-array.py | {
"start": 45,
"end": 383
} | class ____(object):
def minimumSwaps(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
min_idx = min(xrange(len(nums)), key=nums.__getitem__)
max_idx = max(reversed(xrange(len(nums))), key=nums.__getitem__)
return ((len(nums)-1)-max_idx)+min_idx-int(max_idx < min_idx)
| Solution |
python | cookiecutter__cookiecutter | cookiecutter/exceptions.py | {
"start": 1413,
"end": 1620
} | class ____(CookiecutterException):
"""
Exception for invalid configuration file.
Raised if the global configuration file is not valid YAML or is
badly constructed.
"""
| InvalidConfiguration |
python | keon__algorithms | tests/test_dfs.py | {
"start": 2049,
"end": 2511
} | class ____(unittest.TestCase):
def test_walls_and_gates(self):
rooms = [[float("inf"), -1, 0, float("inf")],
[float("inf"), float("inf"), float("inf"), -1],
[float("inf"), -1, float("inf"), -1],
[0, -1, float("inf"), float("inf")]]
walls_and_gates(rooms)
self.assertEqual([[3, -1, 0, 1], [2, 2, 1, -1], [1, -1, 2, -1],
[0, -1, 3, 4]], rooms)
| TestWallsAndGates |
python | pydata__xarray | asv_bench/benchmarks/interp.py | {
"start": 1945,
"end": 2122
} | class ____(Interpolation):
def setup(self, *args, **kwargs):
requires_dask()
super().setup(**kwargs)
self.ds = self.ds.chunk({"t": 50})
| InterpolationDask |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_dynamic/_mock.py | {
"start": 542,
"end": 2794
} | class ____:
"""Used by autodoc_mock_imports."""
__display_name__ = '_MockObject'
__name__ = ''
__sphinx_mock__ = True
__sphinx_decorator_args__: tuple[Any, ...] = ()
# Attributes listed here should not be mocked and rather raise an Attribute error:
__sphinx_empty_attrs__: Set[str] = frozenset(('__typing_subst__',))
def __new__(cls, *args: Any, **kwargs: Any) -> Any: # NoQA: ARG004
if len(args) == 3 and isinstance(args[1], tuple):
superclass = args[1][-1].__class__
if superclass is cls:
# subclassing MockObject
return _make_subclass(
args[0],
superclass.__display_name__,
superclass=superclass,
attributes=args[2],
)
return super().__new__(cls)
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.__qualname__ = self.__name__
def __len__(self) -> int:
return 0
def __contains__(self, key: str) -> bool:
return False
def __iter__(self) -> Iterator[Any]:
return iter(())
def __mro_entries__(self, bases: tuple[Any, ...]) -> tuple[type, ...]:
return (self.__class__,)
def __getitem__(self, key: Any) -> _MockObject:
return _make_subclass(str(key), self.__display_name__, self.__class__)()
def __getattr__(self, key: str) -> _MockObject:
if key in self.__sphinx_empty_attrs__:
raise AttributeError
return _make_subclass(key, self.__display_name__, self.__class__)()
def __call__(self, *args: Any, **kwargs: Any) -> Any:
call = self.__class__()
call.__sphinx_decorator_args__ = args
return call
def __repr__(self) -> str:
return self.__display_name__
def _make_subclass(
name: str,
module: str,
superclass: Any = _MockObject,
attributes: Any = None,
decorator_args: tuple[Any, ...] = (),
) -> Any:
attrs = {
'__module__': module,
'__display_name__': module + '.' + name,
'__name__': name,
'__sphinx_decorator_args__': decorator_args,
}
attrs.update(attributes or {})
return type(name, (superclass,), attrs)
| _MockObject |
python | falconry__falcon | tests/test_status_codes.py | {
"start": 73,
"end": 906
} | class ____:
@pytest.mark.skipif(
sys.version_info < (3, 13), reason='Outdated http statuses definitions'
)
@pytest.mark.parametrize('status', status_codes.__all__)
def test_statuses_are_in_compliance_with_http_from_python313(self, status):
status_code, message = self._status_code_and_message(status)
if status_code >= 700:
pytest.skip('Codes above 700 are not defined in http package')
http_status = http.HTTPStatus(status_code)
if status_code in [418, 422]:
assert http_status.phrase != message
else:
assert http_status.phrase == message
def _status_code_and_message(self, status: str):
status = getattr(status_codes, status)
value, message = status.split(' ', 1)
return int(value), message
| TestStatusCodes |
python | django__django | tests/admin_scripts/tests.py | {
"start": 125502,
"end": 126135
} | class ____(AdminScriptTestCase):
def setUp(self):
super().setUp()
self.write_settings("settings.py")
def test_suggestions(self):
args = ["rnserver", "--settings=test_project.settings"]
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'rnserver'. Did you mean runserver?")
def test_no_suggestions(self):
args = ["abcdef", "--settings=test_project.settings"]
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertNotInOutput(err, "Did you mean")
| DjangoAdminSuggestions |
python | tensorflow__tensorflow | tensorflow/python/summary/plugin_asset_test.py | {
"start": 1394,
"end": 2749
} | class ____(test_util.TensorFlowTestCase):
def testGetPluginAsset(self):
epa = plugin_asset.get_plugin_asset(_ExamplePluginAsset)
self.assertIsInstance(epa, _ExamplePluginAsset)
epa2 = plugin_asset.get_plugin_asset(_ExamplePluginAsset)
self.assertIs(epa, epa2)
opa = plugin_asset.get_plugin_asset(_OtherExampleAsset)
self.assertIsNot(epa, opa)
def testUnnamedPluginFails(self):
with self.assertRaises(ValueError):
plugin_asset.get_plugin_asset(_UnnamedPluginAsset)
def testPluginCollisionDetected(self):
plugin_asset.get_plugin_asset(_ExamplePluginAsset)
with self.assertRaises(ValueError):
plugin_asset.get_plugin_asset(_ExamplePluginThatWillCauseCollision)
def testGetAllPluginAssets(self):
epa = plugin_asset.get_plugin_asset(_ExamplePluginAsset)
opa = plugin_asset.get_plugin_asset(_OtherExampleAsset)
self.assertItemsEqual(plugin_asset.get_all_plugin_assets(), [epa, opa])
def testRespectsGraphArgument(self):
g1 = ops.Graph()
g2 = ops.Graph()
e1 = plugin_asset.get_plugin_asset(_ExamplePluginAsset, g1)
e2 = plugin_asset.get_plugin_asset(_ExamplePluginAsset, g2)
self.assertEqual(e1, plugin_asset.get_all_plugin_assets(g1)[0])
self.assertEqual(e2, plugin_asset.get_all_plugin_assets(g2)[0])
if __name__ == "__main__":
googletest.main()
| PluginAssetTest |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 74288,
"end": 79887
} | class ____(BaseDataset):
def test_get_access_list(self):
""" Test H5Dget_access_plist """
ds = self.f.create_dataset(make_name(), (4,))
p_list = ds.id.get_access_plist()
def test_dapl(self):
""" Test the dapl keyword to h5d.open """
name = make_name()
dapl = h5py.h5p.create(h5py.h5p.DATASET_ACCESS)
dset = self.f.create_dataset(name, (100,))
del dset
dsid = h5py.h5d.open(self.f.id, name.encode('utf-8'), dapl)
self.assertIsInstance(dsid, h5py.h5d.DatasetID)
def test_get_chunk_details():
from io import BytesIO
buf = BytesIO()
name = make_name()
with h5py.File(buf, 'w') as fout:
fout.create_dataset(name, shape=(100, 100), chunks=(10, 10), dtype='i4')
fout[name][:] = 1
buf.seek(0)
with h5py.File(buf, 'r') as fin:
ds = fin[name].id
assert ds.get_num_chunks() == 100
for j in range(100):
offset = tuple(np.array(np.unravel_index(j, (10, 10))) * 10)
si = ds.get_chunk_info(j)
assert si.chunk_offset == offset
assert si.filter_mask == 0
assert si.byte_offset is not None
assert si.size > 0
si = ds.get_chunk_info_by_coord((0, 0))
assert si.chunk_offset == (0, 0)
assert si.filter_mask == 0
assert si.byte_offset is not None
assert si.size > 0
@ut.skipUnless(h5py.version.hdf5_version_tuple >= (1, 12, 3) or
(h5py.version.hdf5_version_tuple >= (1, 10, 10) and h5py.version.hdf5_version_tuple < (1, 10, 99)),
"chunk iteration requires HDF5 1.10.10 and later 1.10, or 1.12.3 and later")
def test_chunk_iter():
"""H5Dchunk_iter() for chunk information"""
from io import BytesIO
buf = BytesIO()
name = make_name()
with h5py.File(buf, 'w') as f:
f.create_dataset(name, shape=(100, 100), chunks=(10, 10), dtype='i4')
f[name][:] = 1
buf.seek(0)
with h5py.File(buf, 'r') as f:
dsid = f[name].id
num_chunks = dsid.get_num_chunks()
assert num_chunks == 100
ci = {}
for j in range(num_chunks):
si = dsid.get_chunk_info(j)
ci[si.chunk_offset] = si
def callback(chunk_info):
known = ci[chunk_info.chunk_offset]
assert chunk_info.chunk_offset == known.chunk_offset
assert chunk_info.filter_mask == known.filter_mask
assert chunk_info.byte_offset == known.byte_offset
assert chunk_info.size == known.size
dsid.chunk_iter(callback)
def test_empty_shape(writable_file):
ds = writable_file.create_dataset(make_name(), dtype='int32')
assert ds.shape is None
assert ds.maxshape is None
def test_zero_storage_size():
# https://github.com/h5py/h5py/issues/1475
from io import BytesIO
buf = BytesIO()
with h5py.File(buf, 'w') as fout:
fout.create_dataset('empty', dtype='uint8')
buf.seek(0)
with h5py.File(buf, 'r') as fin:
assert fin['empty'].chunks is None
assert fin['empty'].id.get_offset() is None
assert fin['empty'].id.get_storage_size() == 0
def test_python_int_uint64(writable_file):
# https://github.com/h5py/h5py/issues/1547
data = [np.iinfo(np.int64).max, np.iinfo(np.int64).max + 1]
# Check creating a new dataset
ds = writable_file.create_dataset(make_name(), data=data, dtype=np.uint64)
assert ds.dtype == np.dtype(np.uint64)
np.testing.assert_array_equal(ds[:], np.array(data, dtype=np.uint64))
# Check writing to an existing dataset
ds[:] = data
np.testing.assert_array_equal(ds[:], np.array(data, dtype=np.uint64))
def test_setitem_fancy_indexing(writable_file):
# https://github.com/h5py/h5py/issues/1593
arr = writable_file.create_dataset(make_name(), (5, 1000, 2), dtype=np.uint8)
block = np.random.randint(255, size=(5, 3, 2))
arr[:, [0, 2, 4], ...] = block
def test_vlen_spacepad():
with File(get_data_file_path("vlen_string_dset.h5")) as f:
assert f["DS1"][0] == b"Parting"
def test_vlen_nullterm():
with File(get_data_file_path("vlen_string_dset_utc.h5")) as f:
assert f["ds1"][0] == b"2009-12-20T10:16:18.662409Z"
def test_allow_unknown_filter(writable_file):
# apparently 256-511 are reserved for testing purposes
fake_filter_id = 256
ds = writable_file.create_dataset(
make_name(), shape=(10, 10), dtype=np.uint8, compression=fake_filter_id,
allow_unknown_filter=True
)
assert str(fake_filter_id) in ds._filters
def test_dset_chunk_cache():
"""Chunk cache configuration for individual datasets."""
from io import BytesIO
buf = BytesIO()
name = make_name()
with h5py.File(buf, 'w') as fout:
ds = fout.create_dataset(
name, shape=(10, 20), chunks=(5, 4), dtype='i4',
rdcc_nbytes=2 * 1024 * 1024, rdcc_w0=0.2, rdcc_nslots=997)
ds_chunk_cache = ds.id.get_access_plist().get_chunk_cache()
assert fout.id.get_access_plist().get_cache()[1:] != ds_chunk_cache
assert ds_chunk_cache == (997, 2 * 1024 * 1024, 0.2)
buf.seek(0)
with h5py.File(buf, 'r') as fin:
ds = fin.require_dataset(
name, shape=(10, 20), dtype='i4',
rdcc_nbytes=3 * 1024 * 1024, rdcc_w0=0.67, rdcc_nslots=709)
ds_chunk_cache = ds.id.get_access_plist().get_chunk_cache()
assert fin.id.get_access_plist().get_cache()[1:] != ds_chunk_cache
assert ds_chunk_cache == (709, 3 * 1024 * 1024, 0.67)
| TestLowOpen |
python | doocs__leetcode | lcci/03.06.Animal Shelter/Solution.py | {
"start": 0,
"end": 786
} | class ____:
def __init__(self):
self.q = [deque(), deque()]
def enqueue(self, animal: List[int]) -> None:
i, j = animal
self.q[j].append(i)
def dequeueAny(self) -> List[int]:
if not self.q[0] or (self.q[1] and self.q[1][0] < self.q[0][0]):
return self.dequeueDog()
return self.dequeueCat()
def dequeueDog(self) -> List[int]:
return [-1, -1] if not self.q[1] else [self.q[1].popleft(), 1]
def dequeueCat(self) -> List[int]:
return [-1, -1] if not self.q[0] else [self.q[0].popleft(), 0]
# Your AnimalShelf object will be instantiated and called as such:
# obj = AnimalShelf()
# obj.enqueue(animal)
# param_2 = obj.dequeueAny()
# param_3 = obj.dequeueDog()
# param_4 = obj.dequeueCat()
| AnimalShelf |
python | allegroai__clearml | clearml/utilities/proxy_object.py | {
"start": 14687,
"end": 17819
} | class ____(six.with_metaclass(WrapperBase)):
# This class acts as a proxy for the wrapped instance it is passed. All
# access to its attributes are delegated to the wrapped class, except
# those contained in __overrides__.
__slots__ = ["_wrapped", "_callback", "_remote_reference", "__weakref__"]
_remote_reference_calls = []
def __init__(self, callback: Callable, remote_reference: Optional[Callable] = None) -> None:
object.__setattr__(self, "_wrapped", None)
object.__setattr__(self, "_callback", callback)
object.__setattr__(self, "_remote_reference", remote_reference)
if remote_reference:
LazyEvalWrapper._remote_reference_calls.append(remote_reference)
def _remoteref(self) -> Any:
func = object.__getattribute__(self, "_remote_reference")
if func and func in LazyEvalWrapper._remote_reference_calls:
LazyEvalWrapper._remote_reference_calls.remove(func)
return func() if callable(func) else func
def __getattribute__(self, attr: str) -> Any:
if attr in ("__isabstractmethod__",):
return None
if attr in ("_remoteref", "_remote_reference"):
return object.__getattribute__(self, attr)
return getattr(LazyEvalWrapper._load_object(self), attr)
def __setattr__(self, attr: str, value: Any) -> None:
setattr(LazyEvalWrapper._load_object(self), attr, value)
def __delattr__(self, attr: str) -> None:
delattr(LazyEvalWrapper._load_object(self), attr)
def __nonzero__(self) -> bool:
return bool(LazyEvalWrapper._load_object(self))
def __bool__(self) -> bool:
return bool(LazyEvalWrapper._load_object(self))
@staticmethod
def _load_object(self) -> Any:
obj = object.__getattribute__(self, "_wrapped")
if obj is None:
cb = object.__getattribute__(self, "_callback")
obj = cb()
object.__setattr__(self, "_wrapped", obj)
return obj
@classmethod
def trigger_all_remote_references(cls) -> None:
for func in cls._remote_reference_calls:
if callable(func):
func()
cls._remote_reference_calls = []
def lazy_eval_wrapper_spec_class(class_type: type) -> type:
class TypedLazyEvalWrapper(six.with_metaclass(WrapperBase)):
_base_class_ = class_type
__slots__ = ["_wrapped", "_callback", "__weakref__"]
def __init__(self, callback: Callable) -> None:
object.__setattr__(self, "_wrapped", None)
object.__setattr__(self, "_callback", callback)
def __nonzero__(self) -> bool:
return bool(LazyEvalWrapper._load_object(self))
def __bool__(self) -> bool:
return bool(LazyEvalWrapper._load_object(self))
def __getattribute__(self, attr: str) -> Any:
if attr == "__isabstractmethod__":
return None
if attr == "__class__":
return class_type
return getattr(LazyEvalWrapper._load_object(self), attr)
return TypedLazyEvalWrapper
| LazyEvalWrapper |
python | apache__airflow | providers/teradata/src/airflow/providers/teradata/operators/teradata.py | {
"start": 2613,
"end": 3719
} | class ____(BaseOperator):
"""
Executes stored procedure in a specific Teradata database.
:param procedure: name of stored procedure to call (templated)
:param teradata_conn_id: The :ref:`Teradata connection id <howto/connection:teradata>`
reference to a specific Teradata database.
:param parameters: (optional, templated) the parameters provided in the call
"""
template_fields: Sequence[str] = (
"procedure",
"parameters",
)
ui_color = "#ededed"
def __init__(
self,
*,
procedure: str,
teradata_conn_id: str = TeradataHook.default_conn_name,
parameters: dict | list | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.teradata_conn_id = teradata_conn_id
self.procedure = procedure
self.parameters = parameters
def execute(self, context: Context):
hook = TeradataHook(teradata_conn_id=self.teradata_conn_id)
return hook.callproc(self.procedure, autocommit=True, parameters=self.parameters)
| TeradataStoredProcedureOperator |
python | ansible__ansible | lib/ansible/plugins/callback/default.py | {
"start": 925,
"end": 17522
} | class ____(CallbackBase):
"""
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'default'
def __init__(self):
self._play = None
self._last_task_banner = None
self._last_task_name = None
self._task_type_cache = {}
super(CallbackModule, self).__init__()
def v2_runner_on_failed(self, result: CallbackTaskResult, ignore_errors: bool = False) -> None:
host_label = self.host_label(result)
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
self._handle_warnings_and_exception(result)
# FIXME: this method should not exist, delegate "suggested keys to display" to the plugin or something... As-is, the placement of this
# call obliterates `results`, which causes a task summary to be printed on loop failures, which we don't do anywhere else.
self._clean_results(result.result, result.task.action)
if result.task.loop and 'results' in result.result:
self._process_items(result)
else:
if self._display.verbosity < 2 and self.get_option('show_task_path_on_failure'):
self._print_task_path(result.task)
msg = "fatal: [%s]: FAILED! => %s" % (host_label, self._dump_results(result.result))
self._display.display(msg, color=C.COLOR_ERROR, stderr=self.get_option('display_failed_stderr'))
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_ok(self, result: CallbackTaskResult) -> None:
host_label = self.host_label(result)
if isinstance(result.task, TaskInclude):
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
return
elif result.result.get('changed', False):
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
msg = "changed: [%s]" % (host_label,)
color = C.COLOR_CHANGED
else:
if not self.get_option('display_ok_hosts'):
return
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
msg = "ok: [%s]" % (host_label,)
color = C.COLOR_OK
self._handle_warnings_and_exception(result)
if result.task.loop and 'results' in result.result:
self._process_items(result)
else:
self._clean_results(result.result, result.task.action)
if self._run_is_verbose(result):
msg += " => %s" % (self._dump_results(result.result),)
self._display.display(msg, color=color)
def v2_runner_on_skipped(self, result: CallbackTaskResult) -> None:
if self.get_option('display_skipped_hosts'):
self._clean_results(result.result, result.task.action)
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
self._handle_warnings_and_exception(result)
if result.task.loop is not None and 'results' in result.result:
self._process_items(result)
msg = "skipping: [%s]" % result.host.get_name()
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result.result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result: CallbackTaskResult) -> None:
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
self._handle_warnings_and_exception(result)
host_label = self.host_label(result)
msg = "fatal: [%s]: UNREACHABLE! => %s" % (host_label, self._dump_results(result.result))
self._display.display(msg, color=C.COLOR_UNREACHABLE, stderr=self.get_option('display_failed_stderr'))
if result.task.ignore_unreachable:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_matched(self):
self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_remaining(self):
self._display.banner("NO MORE HOSTS LEFT")
def v2_playbook_on_task_start(self, task, is_conditional):
self._task_start(task, prefix='TASK')
def _task_start(self, task, prefix=None):
# Cache output prefix for task if provided
# This is needed to properly display 'RUNNING HANDLER' and similar
# when hiding skipped/ok task results
if prefix is not None:
self._task_type_cache[task._uuid] = prefix
# Preserve task name, as all vars may not be available for templating
# when we need it later
if self._play.strategy in add_internal_fqcns(('free', 'host_pinned')):
# Explicitly set to None for strategy free/host_pinned to account for any cached
# task title from a previous non-free play
self._last_task_name = None
else:
self._last_task_name = task.get_name().strip()
# Display the task banner immediately if we're not doing any filtering based on task result
if self.get_option('display_skipped_hosts') and self.get_option('display_ok_hosts'):
self._print_task_banner(task)
def _print_task_banner(self, task):
# args can be specified as no_log in several places: in the task or in
# the argument spec. We can check whether the task is no_log but the
# argument spec can't be because that is only run on the target
# machine and we haven't run it there yet at this time.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
args = ''
# FIXME: the no_log value is not templated at this point, so any template will be considered truthy
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = u', '.join(u'%s=%s' % a for a in task.args.items())
args = u' %s' % args
prefix = self._task_type_cache.get(task._uuid, 'TASK')
# Use cached task name
task_name = self._last_task_name
if task_name is None:
task_name = task.get_name().strip()
if task.check_mode and self.get_option('check_mode_markers'):
checkmsg = " [CHECK MODE]"
else:
checkmsg = ""
self._display.banner(u"%s [%s%s]%s" % (prefix, task_name, args, checkmsg))
if self._display.verbosity >= 2:
self._print_task_path(task)
self._last_task_banner = task._uuid
def v2_playbook_on_handler_task_start(self, task):
self._task_start(task, prefix='RUNNING HANDLER')
def v2_runner_on_start(self, host, task):
if self.get_option('show_per_host_start'):
self._display.display(" [started %s on %s]" % (task, host), color=C.COLOR_OK)
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if play.check_mode and self.get_option('check_mode_markers'):
checkmsg = " [CHECK MODE]"
else:
checkmsg = ""
if not name:
msg = u"PLAY%s" % checkmsg
else:
msg = u"PLAY [%s]%s" % (name, checkmsg)
self._play = play
self._display.banner(msg)
def v2_on_file_diff(self, result: CallbackTaskResult) -> None:
if result.task.loop and 'results' in result.result:
for res in result.result['results']:
if 'diff' in res and res['diff'] and res.get('changed', False):
diff = self._get_diff(res['diff'])
if diff:
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
self._display.display(diff)
elif 'diff' in result.result and result.result['diff'] and result.result.get('changed', False):
diff = self._get_diff(result.result['diff'])
if diff:
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
self._display.display(diff)
def v2_runner_item_on_ok(self, result: CallbackTaskResult) -> None:
host_label = self.host_label(result)
if isinstance(result.task, TaskInclude):
return
elif result.result.get('changed', False):
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
msg = 'changed'
color = C.COLOR_CHANGED
else:
if not self.get_option('display_ok_hosts'):
return
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
msg = 'ok'
color = C.COLOR_OK
self._handle_warnings_and_exception(result)
msg = "%s: [%s] => (item=%s)" % (msg, host_label, self._get_item_label(result.result))
self._clean_results(result.result, result.task.action)
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result.result)
self._display.display(msg, color=color)
def v2_runner_item_on_failed(self, result: CallbackTaskResult) -> None:
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
self._handle_warnings_and_exception(result)
host_label = self.host_label(result)
msg = "failed: [%s]" % (host_label,)
self._clean_results(result.result, result.task.action)
self._display.display(
msg + " (item=%s) => %s" % (self._get_item_label(result.result), self._dump_results(result.result)),
color=C.COLOR_ERROR,
stderr=self.get_option('display_failed_stderr')
)
def v2_runner_item_on_skipped(self, result: CallbackTaskResult) -> None:
if self.get_option('display_skipped_hosts'):
if self._last_task_banner != result.task._uuid:
self._print_task_banner(result.task)
self._handle_warnings_and_exception(result)
self._clean_results(result.result, result.task.action)
msg = "skipping: [%s] => (item=%s) " % (result.host.get_name(), self._get_item_label(result.result))
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result.result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_include(self, included_file):
msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
label = self._get_item_label(included_file._vars)
if label:
msg += " => (item=%s)" % label
self._display.display(msg, color=C.COLOR_INCLUDED)
def v2_playbook_on_stats(self, stats):
self._display.banner("PLAY RECAP")
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(
u"%s : %s %s %s %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR),
colorize(u'skipped', t['skipped'], C.COLOR_SKIP),
colorize(u'rescued', t['rescued'], C.COLOR_OK),
colorize(u'ignored', t['ignored'], C.COLOR_WARN),
),
screen_only=True
)
self._display.display(
u"%s : %s %s %s %s %s %s %s" % (
hostcolor(h, t, False),
colorize(u'ok', t['ok'], None),
colorize(u'changed', t['changed'], None),
colorize(u'unreachable', t['unreachable'], None),
colorize(u'failed', t['failures'], None),
colorize(u'skipped', t['skipped'], None),
colorize(u'rescued', t['rescued'], None),
colorize(u'ignored', t['ignored'], None),
),
log_only=True
)
self._display.display("", screen_only=True)
# print custom stats if required
if stats.custom and self.get_option('show_custom_stats'):
self._display.banner("CUSTOM STATS: ")
# per host
# TODO: come up with 'pretty format'
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
self._display.display("", screen_only=True)
if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
self._display.banner("DRY RUN")
def v2_playbook_on_start(self, playbook):
if self._display.verbosity > 1:
from os.path import basename
self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name))
# show CLI arguments
if self._display.verbosity > 3:
if context.CLIARGS.get('args'):
self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']),
color=C.COLOR_VERBOSE, screen_only=True)
for argument in (a for a in context.CLIARGS if a != 'args'):
val = context.CLIARGS[argument]
if val:
self._display.display('%s: %s' % (argument, val), color=C.COLOR_VERBOSE, screen_only=True)
if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
self._display.banner("DRY RUN")
def v2_runner_retry(self, result: CallbackTaskResult) -> None:
task_name = result.task_name or result.task
host_label = self.host_label(result)
msg = "FAILED - RETRYING: [%s]: %s (%d retries left)." % (host_label, task_name, result.result['retries'] - result.result['attempts'])
if self._run_is_verbose(result, verbosity=2):
msg += "Result was: %s" % self._dump_results(result.result)
self._display.display(msg, color=C.COLOR_DEBUG)
def v2_runner_on_async_poll(self, result: CallbackTaskResult) -> None:
host = result.host.get_name()
jid = result.result.get('ansible_job_id')
started = result.result.get('started')
finished = result.result.get('finished')
self._display.display(
'ASYNC POLL on %s: jid=%s started=%s finished=%s' % (host, jid, started, finished),
color=C.COLOR_DEBUG
)
def v2_runner_on_async_ok(self, result: CallbackTaskResult) -> None:
host = result.host.get_name()
jid = result.result.get('ansible_job_id')
self._display.display("ASYNC OK on %s: jid=%s" % (host, jid), color=C.COLOR_DEBUG)
def v2_runner_on_async_failed(self, result: CallbackTaskResult) -> None:
host = result.host.get_name()
# Attempt to get the async job ID. If the job does not finish before the
# async timeout value, the ID may be within the unparsed 'async_result' dict.
jid = result.result.get('ansible_job_id')
if not jid and 'async_result' in result.result:
jid = result.result['async_result'].get('ansible_job_id')
self._display.display("ASYNC FAILED on %s: jid=%s" % (host, jid), color=C.COLOR_DEBUG)
def v2_playbook_on_notify(self, handler, host):
if self._display.verbosity > 1:
self._display.display("NOTIFIED HANDLER %s for %s" % (handler.get_name(), host), color=C.COLOR_VERBOSE, screen_only=True)
| CallbackModule |
python | doocs__leetcode | solution/3000-3099/3099.Harshad Number/Solution.py | {
"start": 0,
"end": 197
} | class ____:
def sumOfTheDigitsOfHarshadNumber(self, x: int) -> int:
s, y = 0, x
while y:
s += y % 10
y //= 10
return s if x % s == 0 else -1
| Solution |
python | ray-project__ray | python/ray/dashboard/modules/aggregator/publisher/async_publisher_client.py | {
"start": 497,
"end": 812
} | class ____:
"""Data class that represents stats of publishing a batch of events."""
# Whether the publish was successful
is_publish_successful: bool
# Number of events published
num_events_published: int
# Number of events filtered out
num_events_filtered_out: int
@dataclass
| PublishStats |
python | pypa__twine | tests/test_settings.py | {
"start": 6133,
"end": 6925
} | class ____:
@staticmethod
def parse_args(args):
parser = argparse.ArgumentParser()
settings.Settings.register_argparse_arguments(parser)
return parser.parse_args(args)
def test_non_interactive_flag(self):
args = self.parse_args(["--non-interactive"])
assert args.non_interactive
def test_non_interactive_environment(self, monkeypatch):
monkeypatch.setenv("TWINE_NON_INTERACTIVE", "1")
args = self.parse_args([])
assert args.non_interactive
monkeypatch.setenv("TWINE_NON_INTERACTIVE", "0")
args = self.parse_args([])
assert not args.non_interactive
def test_attestations_flag(self):
args = self.parse_args(["--attestations"])
assert args.attestations
| TestArgumentParsing |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/validators/base/data_source.py | {
"start": 391,
"end": 698
} | class ____(Generic[T]):
def __init__(self, create_fn: Callable[[], T]):
self._create_fn = create_fn
self._instance: T | None = None
def create(self) -> T:
if self._instance is None:
self._instance = self._create_fn()
return self._instance
| DataSourceCreator |
python | huggingface__transformers | src/transformers/models/longformer/modeling_longformer.py | {
"start": 23684,
"end": 26290
} | class ____(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor inputs_embeds:
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
| LongformerEmbeddings |
python | tensorflow__tensorflow | tensorflow/compiler/tests/depthwise_conv_op_test.py | {
"start": 5414,
"end": 28881
} | class ____(xla_test.XLATestCase):
# This is testing that depthwise_conv2d and depthwise_conv2d_native
# produce the same results. It also tests that NCHW and NWHC
# formats agree, by comparing the depthwise_conv2d_native with
# 'NCHW' format (with transposition) matches the 'NHWC' format using
# the higher level interface.
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_type,
data_format="NHWC"):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
data_type: The data type to use.
data_format: The data_format of the input. "NHWC" or "NCHW".
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input and filter tensor with numbers incrementing from 1.
x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
dtype=data_type).reshape(tensor_in_sizes)
x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
dtype=data_type).reshape(filter_in_sizes)
with self.session() as sess:
if data_type == np.float32:
tolerance = 1e-4
else:
self.assertEqual(data_type, np.float64)
tolerance = 1e-8
t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)
t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)
native_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
# Transpose from NWHC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with self.test_scope():
conv_native = nn_ops.depthwise_conv2d_native(
native_t1,
t2,
strides=strides,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
with ops.device("CPU"):
conv_interface = ReferenceDepthwiseConv2D(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
native_result = sess.run(conv_native, {t1: x1, t2: x2})
interface_result = sess.run(conv_interface, {t1: x1, t2: x2})
print("data_type:", data_type, "max diff = ",
np.amax(np.absolute(native_result - interface_result)))
self.assertAllClose(
np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)
@test_util.run_without_tensor_float_32(
"DepthwiseConv2D may use TF32 when available.")
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2D,", index, "th config:", input_size, "*",
filter_size, "stride:", stride, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValues(
input_size, filter_size, stride, padding, data_type)
@test_util.run_without_tensor_float_32(
"DepthwiseConv2D may use TF32 when available.")
def testDepthwiseConv2DFormat(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFormat,", index, "th config:", input_size,
"*", filter_size, "stride:", stride, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
data_format="NCHW")
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
dtype=np.float32).reshape(tensor_in_sizes)
x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
dtype=np.float32).reshape(filter_in_sizes)
with self.session() as sess:
t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32)
t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=np.float32)
with self.test_scope():
conv = nn_ops.depthwise_conv2d_native(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv, {t1: x1, t2: x2})
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-4)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output)
# This is testing that depthwise_conv2d with dilation produces
# the same results between CPU and TPU. It also tests that NCHW
# and NWHC formats agree.
def _VerifyValuesWithDilation(self,
tensor_in_sizes,
filter_in_sizes,
stride,
dilation,
padding,
data_type,
data_format="NHWC"):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [filter_rows, filter_cols,
input_depth, depth_multiplier].
stride: Stride.
dilation: Dilation.
padding: Padding type.
data_type: The data type to use.
data_format: The data_format of the input. "NHWC" or "NCHW".
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input and filter tensor with numbers incrementing from 1.
x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
dtype=data_type).reshape(tensor_in_sizes)
x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
dtype=data_type).reshape(filter_in_sizes)
with self.session() as sess:
if data_type == np.float32:
# TODO(b/64210055): Tolerance for TPU is high.
tolerance = 1e-2
else:
self.assertEqual(data_type, np.float64)
tolerance = 1e-8
t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)
t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)
native_t1 = t1
strides = [1, stride, stride, 1]
dilations = [dilation, dilation]
if data_format == "NCHW":
# Transpose from NWHC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with self.test_scope():
conv_native = nn_impl.depthwise_conv2d(
native_t1,
t2,
strides=strides,
rate=dilations,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
with ops.device("CPU"):
# CPU only support NHWC format
strides = [1, stride, stride, 1]
conv_interface = nn_impl.depthwise_conv2d(
t1, t2, strides=strides, rate=dilations, padding=padding)
native_result = sess.run(conv_native, {t1: x1, t2: x2})
interface_result = sess.run(conv_interface, {t1: x1, t2: x2})
print("data_type:", data_type, "max diff = ",
np.amax(np.absolute(native_result - interface_result)))
self.assertAllClose(
np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)
def testDilationDepthwiseConv2DWith(self):
for index, (input_size, filter_size, _, stride, dilation,
padding) in enumerate(ConfigsWithDilationsToTest()):
print("Testing DilationDepthwiseConv2D,", index, "th config:", input_size,
"*", filter_size, "stride:", stride, "dilation: ", dilation,
"padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValuesWithDilation(input_size, filter_size, stride,
dilation, padding, data_type)
def testDilationDepthwiseConv2DWithFormat(self):
for index, (input_size, filter_size, _, stride, dilation,
padding) in enumerate(ConfigsWithDilationsToTest()):
print("Testing DilationDepthwiseConv2DFormat,", index, "th config:",
input_size, "*", filter_size, "stride:", stride, "dilation:",
dilation, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValuesWithDilation(
input_size,
filter_size,
stride,
dilation,
padding,
data_type,
data_format="NCHW")
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_xla):
with self.session():
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = array_ops.placeholder(np.float32, shape=filter_sizes)
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
if use_xla:
with self.test_scope():
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
else:
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval({t1: x1, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_xla=True)
cpu_value = _GetVal(use_xla=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-3, atol=1e-3)
def testDepthwiseConv2DInputGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DInputGradCompare,", index, "th config:",
input_size, "*", filter_size, "stride:", stride, "padding:",
padding)
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
padding)
def _CompareBackpropFilter(self,
input_sizes,
filter_sizes,
output_sizes,
stride,
padding,
data_format="NHWC"):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_xla):
with self.session():
t0 = array_ops.placeholder(np.float32, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
native_t0 = t0
native_t2 = t2
strides = [1, stride, stride, 1]
if use_xla:
if data_format == "NCHW":
# Transpose from NWHC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t0 = array_ops.transpose(t0, [0, 3, 1, 2])
native_t2 = array_ops.transpose(t2, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with self.test_scope():
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
native_t0,
t1,
native_t2,
strides=strides,
padding=padding,
data_format=data_format)
else:
# For CPU, the format NCHW is not supported. Therefore we always use
# NHWC here.
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
native_t0, t1, native_t2, strides=strides, padding=padding)
ret = backprop.eval({t0: x0, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_xla=True)
cpu_value = _GetVal(use_xla=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
@test_util.run_without_tensor_float_32(
"DepthwiseConv2DFilterGrad may use TF32 when available.")
def testDepthwiseConv2DFilterGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFilterGradCompare,", index, "th config:",
input_size, "*", filter_size, "producing output", output_size,
"stride:", stride, "padding:", padding)
self._CompareBackpropFilter(input_size, filter_size, output_size,
stride, padding)
@test_util.run_without_tensor_float_32(
"DepthwiseConv2DFilterGrad may use TF32 when available.")
def testDepthwiseConv2DFilterGradFormatNCHWCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFilterGradFormatNCHWCompare,", index,
"th config:", input_size, "*", filter_size, "producing output",
output_size, "stride:", stride, "padding:", padding)
self._CompareBackpropFilter(
input_size,
filter_size,
output_size,
stride,
padding,
data_format="NCHW")
def _CompareBackpropInputWithDilation(self, input_sizes, filter_sizes,
output_sizes, stride, dilation,
padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_xla):
with self.session():
t1 = array_ops.placeholder(np.float32, shape=filter_sizes)
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
if use_xla:
with self.test_scope():
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0,
t1,
t2,
strides=[1, stride, stride, 1],
dilations=[1, dilation, dilation, 1],
padding=padding)
else:
# TODO(wangtao): figure out gradient with stride > 1.
# depthwise_conv2d_native_backprop_input on CPU doesn't support
# dilation.
t3 = array_ops.space_to_batch(
t2, block_size=dilation, paddings=[[0, 0], [0, 0]])
input_sizes_transform = [
input_sizes[0] * dilation * dilation, input_sizes[1] // dilation,
input_sizes[2] // dilation, input_sizes[3]
]
t0 = constant_op.constant(
input_sizes_transform, shape=[len(input_sizes)])
backprop_naive = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t3, strides=[1, stride, stride, 1], padding=padding)
backprop = array_ops.batch_to_space(
backprop_naive, [[0, 0], [0, 0]], block_size=dilation)
ret = backprop.eval({t1: x1, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_xla=True)
cpu_value = _GetVal(use_xla=False)
# TODO (b/64210055): Tolerance for TPU is high.
self.assertAllClose(cpu_value, gpu_value, rtol=1e-2, atol=1e-3)
def testDilationDepthwiseConv2DInputGradWithCompare(self):
for index, (input_size, filter_size, output_size, stride, dilation,
padding) in enumerate(ConfigsWithDilationsToTest()):
print("Testing DilationDepthwiseConv2DInputGradWithDilationCompare,",
index, "th config:", input_size, "*", filter_size, "stride:",
stride, "dilation:", dilation, "padding:", padding)
# TODO(wangtao): implement CPU grad computation with stride > 1.
if stride == 1:
self._CompareBackpropInputWithDilation(input_size, filter_size,
output_size, stride, dilation,
padding)
def _CompareBackpropFilterWithDilation(self,
input_sizes,
filter_sizes,
output_sizes,
stride,
dilation,
padding,
data_format="NHWC"):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_xla):
with self.session():
t0 = array_ops.placeholder(np.float32, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
native_t0 = t0
native_t2 = t2
strides = [1, stride, stride, 1]
dilations = [1, dilation, dilation, 1]
if use_xla:
if data_format == "NCHW":
# Transpose from NWHC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t0 = array_ops.transpose(t0, [0, 3, 1, 2])
native_t2 = array_ops.transpose(t2, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
dilations = [1, 1, dilation, dilation]
with self.test_scope():
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
native_t0,
t1,
native_t2,
strides=strides,
padding=padding,
dilations=dilations,
data_format=data_format)
else:
# For CPU, the format NCHW is not supported. Therefore we always use
# NHWC here.
# depthwise_conv2d_native_backprop_filter on CPU doesn't support
# dilation.
native_t3 = array_ops.space_to_batch(
native_t2, block_size=dilation, paddings=[[0, 0], [0, 0]])
native_t0_transform = array_ops.space_to_batch(
native_t0, block_size=dilation, paddings=[[0, 0], [0, 0]])
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
native_t0_transform,
t1,
native_t3,
strides=strides,
padding=padding)
ret = backprop.eval({t0: x0, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_xla=True)
cpu_value = _GetVal(use_xla=False)
# TODO(b/64210055): Tolerance for TPU is high.
self.assertAllClose(cpu_value, gpu_value, rtol=1e-3, atol=1e-4)
def testDilationDepthwiseConv2DFilterGradCompare(self):
for index, (input_size, filter_size, output_size, stride, dilation,
padding) in enumerate(ConfigsWithDilationsToTest()):
print("Testing DilationDepthwiseConv2DFilterGradCompare,", index,
"th config:", input_size, "*", filter_size, "producing output",
output_size, "stride:", stride, "dilation:", dilation, "padding:",
padding)
if stride == 1:
# TODO(wangtao): implement CPU grad computation with stride > 1.
self._CompareBackpropFilterWithDilation(input_size, filter_size,
output_size, stride, dilation,
padding)
if __name__ == "__main__":
test.main()
| DepthwiseConv2DTest |
python | rq__rq | tests/test_timeouts.py | {
"start": 290,
"end": 519
} | class ____(SimpleWorker):
death_penalty_class = TimerDeathPenalty
def thread_friendly_sleep_func(seconds):
end_at = time.time() + seconds
while True:
if time.time() > end_at:
break
| TimerBasedWorker |
python | numba__numba | numba/cuda/tests/cudadrv/test_cuda_devicerecord.py | {
"start": 849,
"end": 3502
} | class ____(CUDATestCase):
"""
Tests the DeviceRecord class with np.void host types.
"""
def setUp(self):
super().setUp()
self._create_data(np.zeros)
def _create_data(self, array_ctor):
self.dtype = np.dtype([('a', np.int32), ('b', np.float32)], align=True)
self.hostz = array_ctor(1, self.dtype)[0]
self.hostnz = array_ctor(1, self.dtype)[0]
self.hostnz['a'] = 10
self.hostnz['b'] = 11.0
def _check_device_record(self, reference, rec):
self.assertEqual(rec.shape, tuple())
self.assertEqual(rec.strides, tuple())
self.assertEqual(rec.dtype, reference.dtype)
self.assertEqual(rec.alloc_size, reference.dtype.itemsize)
self.assertIsNotNone(rec.gpu_data)
self.assertNotEqual(rec.device_ctypes_pointer, ctypes.c_void_p(0))
numba_type = numpy_support.from_dtype(reference.dtype)
self.assertEqual(rec._numba_type_, numba_type)
def test_device_record_interface(self):
hostrec = self.hostz.copy()
devrec = DeviceRecord(self.dtype)
self._check_device_record(hostrec, devrec)
def test_device_record_copy(self):
hostrec = self.hostz.copy()
devrec = DeviceRecord(self.dtype)
devrec.copy_to_device(hostrec)
# Copy back and check values are all zeros
hostrec2 = self.hostnz.copy()
devrec.copy_to_host(hostrec2)
np.testing.assert_equal(self.hostz, hostrec2)
# Copy non-zero values to GPU and back and check values
hostrec3 = self.hostnz.copy()
devrec.copy_to_device(hostrec3)
hostrec4 = self.hostz.copy()
devrec.copy_to_host(hostrec4)
np.testing.assert_equal(hostrec4, self.hostnz)
def test_from_record_like(self):
# Create record from host record
hostrec = self.hostz.copy()
devrec = from_record_like(hostrec)
self._check_device_record(hostrec, devrec)
# Create record from device record and check for distinct data
devrec2 = from_record_like(devrec)
self._check_device_record(devrec, devrec2)
self.assertNotEqual(devrec.gpu_data, devrec2.gpu_data)
def test_auto_device(self):
# Create record from host record
hostrec = self.hostnz.copy()
devrec, new_gpu_obj = auto_device(hostrec)
self._check_device_record(hostrec, devrec)
self.assertTrue(new_gpu_obj)
# Copy data back and check it is equal to auto_device arg
hostrec2 = self.hostz.copy()
devrec.copy_to_host(hostrec2)
np.testing.assert_equal(hostrec2, hostrec)
| TestCudaDeviceRecord |
python | mkdocs__mkdocs | mkdocs/contrib/search/search_index.py | {
"start": 5804,
"end": 7945
} | class ____(HTMLParser):
"""
Given a block of HTML, group the content under the preceding
heading tags which can then be used for creating an index
for that section.
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.data: list[ContentSection] = []
self.section: ContentSection | None = None
self.is_header_tag = False
self._stripped_html: list[str] = []
def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
"""Called at the start of every HTML tag."""
# We only care about the opening tag for headings.
if tag not in _HEADER_TAGS:
return
# We are dealing with a new header, create a new section
# for it and assign the ID if it has one.
self.is_header_tag = True
self.section = ContentSection()
self.data.append(self.section)
for attr in attrs:
if attr[0] == "id":
self.section.id = attr[1]
def handle_endtag(self, tag: str) -> None:
"""Called at the end of every HTML tag."""
# We only care about the opening tag for headings.
if tag not in _HEADER_TAGS:
return
self.is_header_tag = False
def handle_data(self, data: str) -> None:
"""Called for the text contents of each tag."""
self._stripped_html.append(data)
if self.section is None:
# This means we have some content at the start of the
# HTML before we reach a heading tag. We don't actually
# care about that content as it will be added to the
# overall page entry in the search. So just skip it.
return
# If this is a header, then the data is the title.
# Otherwise it is content of something under that header
# section.
if self.is_header_tag:
self.section.title = data
else:
self.section.text.append(data.rstrip('\n'))
@property
def stripped_html(self) -> str:
return '\n'.join(self._stripped_html)
| ContentParser |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/clients/test_base.py | {
"start": 345,
"end": 16896
} | class ____(TestCase):
client_id = "someclientid"
uri = "https://example.com/path?query=world"
body = "not=empty"
headers = {}
access_token = "token"
mac_key = "secret"
bearer_query = uri + "&access_token=" + access_token
bearer_header = {
"Authorization": "Bearer " + access_token
}
bearer_body = body + "&access_token=" + access_token
mac_00_header = {
"Authorization": 'MAC id="' + access_token + '", nonce="0:abc123",' +
' bodyhash="Yqyso8r3hR5Nm1ZFv+6AvNHrxjE=",' +
' mac="0X6aACoBY0G6xgGZVJ1IeE8dF9k="'
}
mac_01_header = {
"Authorization": 'MAC id="' + access_token + '", ts="123456789",' +
' nonce="abc123", mac="Xuk+9oqaaKyhitkgh1CD0xrI6+s="'
}
def test_add_bearer_token(self):
"""Test a number of bearer token placements"""
# Invalid token type
client = Client(self.client_id, token_type="invalid")
self.assertRaises(ValueError, client.add_token, self.uri)
# Case-insensitive token type
client = Client(self.client_id, access_token=self.access_token, token_type="bEAreR")
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.bearer_header)
# Non-HTTPS
insecure_uri = 'http://example.com/path?query=world'
client = Client(self.client_id, access_token=self.access_token, token_type="Bearer")
self.assertRaises(InsecureTransportError, client.add_token, insecure_uri,
body=self.body,
headers=self.headers)
# Missing access token
client = Client(self.client_id)
self.assertRaises(ValueError, client.add_token, self.uri)
# Expired token
expired = 523549800
expired_token = {
'expires_at': expired,
}
client = Client(self.client_id, token=expired_token, access_token=self.access_token, token_type="Bearer")
self.assertRaises(TokenExpiredError, client.add_token, self.uri,
body=self.body, headers=self.headers)
# The default token placement, bearer in auth header
client = Client(self.client_id, access_token=self.access_token)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.bearer_header)
# Setting default placements of tokens
client = Client(self.client_id, access_token=self.access_token,
default_token_placement=AUTH_HEADER)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.bearer_header)
client = Client(self.client_id, access_token=self.access_token,
default_token_placement=URI_QUERY)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers)
self.assertURLEqual(uri, self.bearer_query)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.headers)
client = Client(self.client_id, access_token=self.access_token,
default_token_placement=BODY)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.bearer_body)
self.assertEqual(headers, self.headers)
# Asking for specific placement in the add_token method
client = Client(self.client_id, access_token=self.access_token)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers, token_placement=AUTH_HEADER)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.bearer_header)
client = Client(self.client_id, access_token=self.access_token)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers, token_placement=URI_QUERY)
self.assertURLEqual(uri, self.bearer_query)
self.assertFormBodyEqual(body, self.body)
self.assertEqual(headers, self.headers)
client = Client(self.client_id, access_token=self.access_token)
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers, token_placement=BODY)
self.assertURLEqual(uri, self.uri)
self.assertFormBodyEqual(body, self.bearer_body)
self.assertEqual(headers, self.headers)
# Invalid token placement
client = Client(self.client_id, access_token=self.access_token)
self.assertRaises(ValueError, client.add_token, self.uri, body=self.body,
headers=self.headers, token_placement="invalid")
client = Client(self.client_id, access_token=self.access_token,
default_token_placement="invalid")
self.assertRaises(ValueError, client.add_token, self.uri, body=self.body,
headers=self.headers)
def test_add_mac_token(self):
# Missing access token
client = Client(self.client_id, token_type="MAC")
self.assertRaises(ValueError, client.add_token, self.uri)
# Invalid hash algorithm
client = Client(self.client_id, token_type="MAC",
access_token=self.access_token, mac_key=self.mac_key,
mac_algorithm="hmac-sha-2")
self.assertRaises(ValueError, client.add_token, self.uri)
orig_generate_timestamp = common.generate_timestamp
orig_generate_nonce = common.generate_nonce
orig_generate_age = utils.generate_age
self.addCleanup(setattr, common, 'generage_timestamp', orig_generate_timestamp)
self.addCleanup(setattr, common, 'generage_nonce', orig_generate_nonce)
self.addCleanup(setattr, utils, 'generate_age', orig_generate_age)
common.generate_timestamp = lambda: '123456789'
common.generate_nonce = lambda: 'abc123'
utils.generate_age = lambda *args: 0
# Add the Authorization header (draft 00)
client = Client(self.client_id, token_type="MAC",
access_token=self.access_token, mac_key=self.mac_key,
mac_algorithm="hmac-sha-1")
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers, issue_time=datetime.datetime.now())
self.assertEqual(uri, self.uri)
self.assertEqual(body, self.body)
self.assertEqual(headers, self.mac_00_header)
# Non-HTTPS
insecure_uri = 'http://example.com/path?query=world'
self.assertRaises(InsecureTransportError, client.add_token, insecure_uri,
body=self.body,
headers=self.headers,
issue_time=datetime.datetime.now())
# Expired Token
expired = 523549800
expired_token = {
'expires_at': expired,
}
client = Client(self.client_id, token=expired_token, token_type="MAC",
access_token=self.access_token, mac_key=self.mac_key,
mac_algorithm="hmac-sha-1")
self.assertRaises(TokenExpiredError, client.add_token, self.uri,
body=self.body,
headers=self.headers,
issue_time=datetime.datetime.now())
# Add the Authorization header (draft 01)
client = Client(self.client_id, token_type="MAC",
access_token=self.access_token, mac_key=self.mac_key,
mac_algorithm="hmac-sha-1")
uri, headers, body = client.add_token(self.uri, body=self.body,
headers=self.headers, draft=1)
self.assertEqual(uri, self.uri)
self.assertEqual(body, self.body)
self.assertEqual(headers, self.mac_01_header)
# Non-HTTPS
insecure_uri = 'http://example.com/path?query=world'
self.assertRaises(InsecureTransportError, client.add_token, insecure_uri,
body=self.body,
headers=self.headers,
draft=1)
# Expired Token
expired = 523549800
expired_token = {
'expires_at': expired,
}
client = Client(self.client_id, token=expired_token, token_type="MAC",
access_token=self.access_token, mac_key=self.mac_key,
mac_algorithm="hmac-sha-1")
self.assertRaises(TokenExpiredError, client.add_token, self.uri,
body=self.body,
headers=self.headers,
draft=1)
def test_revocation_request(self):
client = Client(self.client_id)
url = 'https://example.com/revoke'
token = 'foobar'
# Valid request
u, h, b = client.prepare_token_revocation_request(url, token)
self.assertEqual(u, url)
self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(b, 'token=%s&token_type_hint=access_token' % token)
# Non-HTTPS revocation endpoint
self.assertRaises(InsecureTransportError,
client.prepare_token_revocation_request,
'http://example.com/revoke', token)
u, h, b = client.prepare_token_revocation_request(
url, token, token_type_hint='refresh_token')
self.assertEqual(u, url)
self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(b, 'token=%s&token_type_hint=refresh_token' % token)
# JSONP
u, h, b = client.prepare_token_revocation_request(
url, token, callback='hello.world')
self.assertURLEqual(u, url + '?callback=hello.world&token=%s&token_type_hint=access_token' % token)
self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertEqual(b, '')
def test_prepare_authorization_request(self):
redirect_url = 'https://example.com/callback/'
scopes = 'read'
auth_url = 'https://example.com/authorize/'
state = 'fake_state'
client = Client(self.client_id, redirect_url=redirect_url, scope=scopes, state=state)
# Non-HTTPS
self.assertRaises(InsecureTransportError,
client.prepare_authorization_request, 'http://example.com/authorize/')
# NotImplementedError
self.assertRaises(NotImplementedError, client.prepare_authorization_request, auth_url)
def test_prepare_token_request(self):
redirect_url = 'https://example.com/callback/'
scopes = 'read'
token_url = 'https://example.com/token/'
state = 'fake_state'
client = Client(self.client_id, scope=scopes, state=state)
# Non-HTTPS
self.assertRaises(InsecureTransportError,
client.prepare_token_request, 'http://example.com/token/')
# NotImplementedError
self.assertRaises(NotImplementedError, client.prepare_token_request, token_url)
def test_prepare_refresh_token_request(self):
client = Client(self.client_id)
url = 'https://example.com/revoke'
token = 'foobar'
scope = 'extra_scope'
u, h, b = client.prepare_refresh_token_request(url, token)
self.assertEqual(u, url)
self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertFormBodyEqual(b, 'grant_type=refresh_token&refresh_token=%s' % token)
# Non-HTTPS revocation endpoint
self.assertRaises(InsecureTransportError,
client.prepare_refresh_token_request,
'http://example.com/revoke', token)
# provide extra scope
u, h, b = client.prepare_refresh_token_request(url, token, scope=scope)
self.assertEqual(u, url)
self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertFormBodyEqual(b, 'grant_type=refresh_token&scope={}&refresh_token={}'.format(scope, token))
# provide scope while init
client = Client(self.client_id, scope=scope)
u, h, b = client.prepare_refresh_token_request(url, token, scope=scope)
self.assertEqual(u, url)
self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
self.assertFormBodyEqual(b, 'grant_type=refresh_token&scope={}&refresh_token={}'.format(scope, token))
def test_create_code_verifier_min_length(self):
client = Client(self.client_id)
length = 43
code_verifier = client.create_code_verifier(length=length)
self.assertEqual(client.code_verifier, code_verifier)
def test_create_code_verifier_max_length(self):
client = Client(self.client_id)
length = 128
code_verifier = client.create_code_verifier(length=length)
self.assertEqual(client.code_verifier, code_verifier)
def test_create_code_verifier_length(self):
client = Client(self.client_id)
length = 96
code_verifier = client.create_code_verifier(length=length)
self.assertEqual(len(code_verifier), length)
def test_create_code_challenge_plain(self):
client = Client(self.client_id)
code_verifier = client.create_code_verifier(length=128)
code_challenge_plain = client.create_code_challenge(code_verifier=code_verifier)
# if no code_challenge_method specified, code_challenge = code_verifier
self.assertEqual(code_challenge_plain, client.code_verifier)
self.assertEqual(client.code_challenge_method, "plain")
def test_create_code_challenge_s256(self):
client = Client(self.client_id)
code_verifier = client.create_code_verifier(length=128)
code_challenge_s256 = client.create_code_challenge(code_verifier=code_verifier, code_challenge_method='S256')
self.assertEqual(code_challenge_s256, client.code_challenge)
def test_parse_token_response_expires_at_types(self):
for title, fieldjson, expected, generated in [
('int', 1661185148, 1661185148, 1661185148),
('float', 1661185148.6437678, 1661185148.6437678, 1661185148.6437678),
('str', "\"2006-01-02T15:04:05Z\"", "2006-01-02T15:04:05Z", None),
('str-as-int', "\"1661185148\"", 1661185148, 1661185148),
('str-as-float', "\"1661185148.42\"", 1661185148.42, 1661185148.42),
]:
with self.subTest(msg=title):
token_json = ('{{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
' "token_type":"example",'
' "expires_at":{expires_at},'
' "scope":"/profile",'
' "example_parameter":"example_value"}}'.format(expires_at=fieldjson))
client = Client(self.client_id)
response = client.parse_request_body_response(token_json, scope=["/profile"])
self.assertEqual(response['expires_at'], expected, "response attribute wrong")
self.assertEqual(client.expires_at, expected, "client attribute wrong")
if generated:
self.assertEqual(client._expires_at, generated, "internal expiration wrong")
@patch('time.time')
def test_parse_token_response_generated_expires_at_is_int(self, t):
t.return_value = 1661185148.6437678
expected_expires_at = round(t.return_value) + 3600
token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
' "token_type":"example",'
' "expires_in":3600,'
' "scope":"/profile",'
' "example_parameter":"example_value"}')
client = Client(self.client_id)
response = client.parse_request_body_response(token_json, scope=["/profile"])
self.assertEqual(response['expires_at'], expected_expires_at)
self.assertEqual(client._expires_at, expected_expires_at)
| ClientTest |
python | Textualize__textual | src/textual/renderables/gradient.py | {
"start": 265,
"end": 1226
} | class ____:
"""Draw a vertical gradient."""
def __init__(self, color1: str, color2: str) -> None:
self._color1 = Color.parse(color1)
self._color2 = Color.parse(color2)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
width = options.max_width
height = options.height or options.max_height
color1 = self._color1
color2 = self._color2
default_color = Color(0, 0, 0).rich_color
from_color = Style.from_color
blend = color1.blend
rich_color1 = color1.rich_color
for y in range(height):
line_color = from_color(
default_color,
(
blend(color2, y / (height - 1)).rich_color
if height > 1
else rich_color1
),
)
yield Segment(f"{width * ' '}\n", line_color)
| VerticalGradient |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/runtime_wrappers.py | {
"start": 27541,
"end": 31634
} | class ____(CompilerWrapper):
def post_compile(
self,
compiled_fn,
_aot_config,
*,
runtime_metadata: ViewAndMutationMeta,
):
num_tokens = len(runtime_metadata.tokens)
@wraps(compiled_fn)
def inner_fn(args: list[Any]):
if num_tokens > 0:
# Pass in forward effect tokens (See Note [Side-Effectful Tokens in AOTAutograd])
old_args = args
args = [*([None] * num_tokens), *args]
old_args.clear()
outs = compiled_fn(args)
# Inductor cache DummyModule can return None
if outs is None:
return None
# Toss out the effect tokens (See Note [Side-Effectful Tokens in AOTAutograd])
return outs[num_tokens:] if num_tokens != 0 else outs
# box it
inner_fn._boxed_call = True # type: ignore[attr-defined]
return inner_fn
# MOTIVATION:
#
# When tracing functions for future execution, one must be careful not to pass
# in the same input tensor multiple times (e.g., f(x, x), as this can result
# in graphs that are ONLY valid if you later pass a new tensor in exactly the
# same way (e.g., f(y, y)). (NB: we really mean duplicate; two distinct
# tensors that alias each other is a different situation that is covered by
# aot_dispatch_deduplicated_autograd). Here are two examples:
#
# (1) Suppose you have a function:
#
# def f(x, y):
# return x + y
#
# If you make_fx(f)(x, x), you will trace out:
#
# def f(x, y):
# return y + y
#
# Oops!
#
# (2) For most tensors x and y, you can compute f's gradient with respect to
# these to inputs by saying torch.autograd.grad(f(x, y), (x, y)). However,
# if x is y, you will trace out a program that gets incorrect gradients:
#
# >>> x = torch.randn(1, requires_grad=True)
# >>> torch.autograd.grad(x + x, (x, x))
# (tensor([2.]), tensor([2.]))
#
# In other words, the gradient is double-counted. Deduplicating the arguments
# gives you an appropriate gradient:
#
# >>> y = torch.randn(1, requires_grad=True)
# >>> torch.autograd.grad(x + y, (x, y))
# (tensor([1.]), tensor([1.]))
#
# HOW TO DEDUPLICATE:
#
# There are a few strategies, in order of preference:
#
# 1. For every duplicate argument to the function, detach it into
# a separate leaf tensor, so that it is no longer duplicated.
#
# PRO: The resulting compiled graph works for any configuration
# of duplicated arguments.
#
# CON: It does not (naively) work if you mutate the metadata of inputs:
#
# def f(x, y):
# x.transpose_(0, 1)
# y.transpose_(0, 2)
#
# x = torch.randn(2, 3, 4)
# f(x, x)
#
# The ordering of the transposes inside f dictates whether or not
# you get [4, 2, 3] or [3, 4, 2]. This means that you cannot precompute
# what metadata mutations should get applied to each input; you need to
# assume they aren't duplicates (what we do today) or preserve
# the original metadata mutations exactly in order, so that they work
# for any duplicate configuration.
#
# CON: It does not (naively) work if you mutate the data of inputs.
# In particular, leaf tensors that require grad cannot be mutated,
# this makes it impossible to differentiate with respect to the original
# base.
#
# 2. For every duplicate argument to the function, remove it, so it is
# no longer part of the "true" signature:
#
# PRO: Implemented naively, it still works for metadata/data mutation.
#
# CON: The resulting compiled graph is duplicate-specialized: it only
# works if future calls duplicate arguments in exactly the same way.
# Horribly, Dynamo doesn't guard on this at the moment. But even if
# it did, you could still end up recompiling a bunch of each duplicate.
#
# Our strategy is to do (1) if we can, and do (2) otherwise, erroring if
# Dynamo's guards are not enough. In practice, this seems to cover
# everything.
#
@dataclass
| EffectTokensWrapper |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 216635,
"end": 219394
} | class ____:
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], np.array(val, T))
def test_ip_types(self):
unchecked_types = [bytes, str, np.void]
x = np.random.random(1000) * 100
mask = x < 40
for val in [-100, 0, 15]:
for types in np._core.sctypes.values():
for T in types:
if T not in unchecked_types:
if val < 0 and np.dtype(T).kind == "u":
val = np.iinfo(T).max - 99
self.tst_basic(x.copy().astype(T), T, mask, val)
# Also test string of a length which uses an untypical length
dt = np.dtype("S3")
self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3])
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
@pytest.mark.parametrize('dtype', ('>i4', '<i4'))
def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_overlaps(self):
# gh-6272 check overlap
x = np.array([True, False, True, False])
np.putmask(x[1:4], [True, True, True], x[:3])
assert_equal(x, np.array([True, True, False, True]))
x = np.array([True, False, True, False])
np.putmask(x[1:4], x[:3], [True, False, True])
assert_equal(x, np.array([True, True, True, True]))
def test_writeable(self):
a = np.arange(5)
a.flags.writeable = False
with pytest.raises(ValueError):
np.putmask(a, a >= 2, 3)
def test_kwargs(self):
x = np.array([0, 0])
np.putmask(x, [0, 1], [-1, -2])
assert_array_equal(x, [0, -2])
x = np.array([0, 0])
np.putmask(x, mask=[0, 1], values=[-1, -2])
assert_array_equal(x, [0, -2])
x = np.array([0, 0])
np.putmask(x, values=[-1, -2], mask=[0, 1])
assert_array_equal(x, [0, -2])
with pytest.raises(TypeError):
np.putmask(a=x, values=[-1, -2], mask=[0, 1])
| TestPutmask |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 18064,
"end": 19186
} | class ____(LRTBGlyph):
''' Render horizontal bars, given a center coordinate, ``height`` and
(``left``, ``right``) coordinates.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/HBar.py"
_args = ('y', 'height', 'right', 'left')
y = NumberSpec(default=field("y"), help="""
The y-coordinates of the centers of the horizontal bars.
""")
height = DistanceSpec(default=1, help="""
The heights of the vertical bars.
""")
left = NumberSpec(default=0, help="""
The x-coordinates of the left edges.
""")
right = NumberSpec(default=field("right"), help="""
The x-coordinates of the right edges.
""")
line_props = Include(LineProps, help="""
The {prop} values for the horizontal bars.
""")
fill_props = Include(FillProps, help="""
The {prop} values for the horizontal bars.
""")
hatch_props = Include(HatchProps, help="""
The {prop} values for the horizontal bars.
""")
| HBar |
python | pypa__setuptools | setuptools/_vendor/importlib_metadata/_compat.py | {
"start": 898,
"end": 1314
} | class ____:
"""
A "Finder" (aka "MetaPathFinder") that never finds any modules,
but may find distributions.
"""
@staticmethod
def find_spec(*args, **kwargs):
return None
def pypy_partial(val):
"""
Adjust for variable stacklevel on partial under PyPy.
Workaround for #327.
"""
is_pypy = platform.python_implementation() == 'PyPy'
return val + is_pypy
| NullFinder |
python | getsentry__sentry | src/sentry/api/event_search.py | {
"start": 21908,
"end": 23043
} | class ____(NamedTuple):
key: SearchKey
operator: str
value: SearchValue
def __str__(self) -> str:
return f"{self.key.name}{self.operator}{self.value.raw_value}"
def to_query_string(self) -> str:
if self.operator == "IN":
return f"{self.key.name}:{self.value.to_query_string()}"
elif self.operator == "NOT IN":
return f"!{self.key.name}:{self.value.to_query_string()}"
else:
return f"{self.key.name}:{self.operator}{self.value.to_query_string()}"
@property
def is_negation(self) -> bool:
# Negations are mostly just using != operators. But we also have
# negations on has: filters, which translate to = '', so handle that
# case as well.
return bool(
self.operator == "!="
and self.value.raw_value != ""
or self.operator == "="
and self.value.raw_value == ""
or self.operator == "NOT IN"
and self.value.raw_value
)
@property
def is_in_filter(self) -> bool:
return self.operator in ("IN", "NOT IN")
| SearchFilter |
python | kamyu104__LeetCode-Solutions | Python/check-if-there-is-a-valid-parentheses-string-path.py | {
"start": 626,
"end": 1541
} | class ____(object):
def hasValidPath(self, grid):
"""
:type grid: List[List[str]]
:rtype: bool
"""
if (len(grid)+len(grid[0])-1)%2:
return False
dp = [[float("inf"), float("-inf")] for _ in xrange(len(grid[0])+1)]
for i in xrange(len(grid)):
dp[0] = [0, 0] if not i else [float("inf"), float("-inf")]
for j in xrange(len(grid[0])):
d = 1 if grid[i][j] == '(' else -1
dp[j+1] = [min(dp[j+1][0], dp[j][0])+d, max(dp[j+1][1], dp[j][1])+d]
# bitset pattern is like xxx1010101xxxx (in fact, it is not always true in this problem where some paths are invalid)
if dp[j+1][1] < 0:
dp[j+1] = [float("inf"), float("-inf")]
else:
dp[j+1][0] = max(dp[j+1][0], dp[j+1][1]%2)
return dp[-1][0] == 0
| Solution_WA |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-make-all-characters-equal.py | {
"start": 38,
"end": 244
} | class ____(object):
def minimumCost(self, s):
"""
:type s: str
:rtype: int
"""
return sum(min(i+1, len(s)-(i+1)) for i in xrange(len(s)-1) if s[i] != s[i+1])
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchMapping1.py | {
"start": 3515,
"end": 4391
} | class ____(TypedDict):
type: NotRequired[Literal["Complex"]]
complex_value: complex
def test_negative_narrowing1(value: IntValue | StrValue | ComplexValue | int) -> None:
match value:
case {"type": "Int"}:
reveal_type(value, expected_text="IntValue")
case {"type": "Str" | "Complex"}:
reveal_type(value, expected_text="StrValue | ComplexValue")
case _:
reveal_type(value, expected_text="ComplexValue | int")
def test_negative_narrowing2(value: StrValue | ComplexValue) -> None:
if "type" not in value:
raise
match value:
case {"type": "Str"}:
reveal_type(value, expected_text="StrValue")
case {"type": "Complex"}:
reveal_type(value, expected_text="ComplexValue")
case _:
reveal_type(value, expected_text="Never")
| ComplexValue |
python | facebook__pyre-check | client/commands/infer.py | {
"start": 21976,
"end": 36407
} | class ____:
full_stub_path: str
full_code_path: str
options: StubGenerationOptions
@staticmethod
def _annotated_code(
code_path: str,
stub: str,
code: str,
options: StubGenerationOptions,
) -> Optional[str]:
"""
Merge inferred annotations from stubs with source code to get
annotated code.
"""
if "@" "generated" in code:
LOG.warning(f"Skipping generated file {code_path}")
return
context = CodemodContext()
ApplyTypeAnnotationsVisitor.store_stub_in_context(
context=context,
stub=libcst.parse_module(stub),
use_future_annotations=options.use_future_annotations,
)
modified_tree = ApplyTypeAnnotationsVisitor(context).transform_module(
libcst.parse_module(code)
)
return modified_tree.code
@staticmethod
def annotate_code(
stub_path: str,
code_path: str,
options: StubGenerationOptions,
) -> None:
"Merge a stub file of inferred annotations with a code file in place."
try:
stub = Path(stub_path).read_text()
code = Path(code_path).read_text()
annotated_code = AnnotateModuleInPlace._annotated_code(
code_path=code_path,
stub=stub,
code=code,
options=options,
)
if annotated_code is not None:
Path(code_path).write_text(annotated_code)
LOG.info(f"Annotated {code_path}")
except Exception as error:
LOG.warning(f"Failed to annotate {code_path}")
if options.debug_infer:
LOG.warning(f"\tError: {error}")
def run(self) -> None:
return self.annotate_code(
stub_path=self.full_stub_path,
code_path=self.full_code_path,
options=self.options,
)
@staticmethod
def run_task(task: "AnnotateModuleInPlace") -> None:
"Wrap `run` in a static method to use with multiprocessing"
return task.run()
# For Python<3.9 compatibility
def _remove_prefix(text: str, prefix: str) -> str:
if text.startswith(prefix):
return text[len(prefix) :]
return text
def create_infer_arguments(
configuration: frontend_configuration.Base,
infer_arguments: command_arguments.InferArguments,
) -> Arguments:
"""
Translate client configurations to backend check configurations.
This API is not pure since it needs to access filesystem to filter out
nonexistent directories. It is idempotent though, since it does not alter
any filesystem state.
"""
source_paths = backend_arguments.get_source_path_for_check(
configuration,
kill_buck_after_build=infer_arguments.kill_buck_after_build,
number_of_buck_threads=infer_arguments.number_of_buck_threads,
)
log_directory = configuration.get_log_directory()
profiling_output = (
backend_arguments.get_profiling_log_path(log_directory)
if infer_arguments.enable_profiling
else None
)
memory_profiling_output = (
backend_arguments.get_profiling_log_path(log_directory)
if infer_arguments.enable_memory_profiling
else None
)
logger = configuration.get_remote_logger()
remote_logging = (
backend_arguments.RemoteLogging(
logger=logger, identifier=infer_arguments.log_identifier or ""
)
if logger is not None
else None
)
return Arguments(
base_arguments=backend_arguments.BaseArguments(
log_path=str(log_directory),
global_root=str(configuration.get_global_root()),
checked_directory_allowlist=backend_arguments.get_checked_directory_allowlist(
configuration, source_paths
),
checked_directory_blocklist=(configuration.get_ignore_all_errors()),
debug=infer_arguments.debug_infer,
excludes=configuration.get_excludes(),
extensions=configuration.get_valid_extension_suffixes(),
relative_local_root=configuration.get_relative_local_root(),
memory_profiling_output=memory_profiling_output,
number_of_workers=configuration.get_number_of_workers(),
parallel=not infer_arguments.sequential,
profiling_output=profiling_output,
python_version=configuration.get_python_version(),
shared_memory=configuration.get_shared_memory(),
remote_logging=remote_logging,
search_paths=configuration.get_existent_search_paths(),
source_paths=source_paths,
),
paths_to_modify=infer_arguments.paths_to_modify,
)
@contextlib.contextmanager
def create_infer_arguments_and_cleanup(
configuration: frontend_configuration.Base,
infer_arguments: command_arguments.InferArguments,
) -> Iterator[Arguments]:
arguments = create_infer_arguments(configuration, infer_arguments)
try:
yield arguments
finally:
# It is safe to clean up source paths after infer command since
# any created artifact directory won't be reused by other commands.
arguments.base_arguments.source_paths.cleanup()
def _check_working_directory(
working_directory: Path, global_root: Path, relative_local_root: Optional[str]
) -> None:
candidate_locations: List[str] = []
if working_directory == global_root:
return
candidate_locations.append(f"`{global_root}` with `--local-configuration` set")
if relative_local_root is not None:
local_root = global_root / relative_local_root
if working_directory == local_root:
return
candidate_locations.append(f"`{local_root}`")
valid_locations = " or from ".join(candidate_locations)
raise ValueError(
f"Infer must run from {valid_locations}. "
f"Cannot run from current working directory `{working_directory}`."
)
def _run_infer_command_get_output(command: Sequence[str]) -> str:
with backend_arguments.backend_log_file(prefix="pyre_infer") as log_file:
with start.background_logging(Path(log_file.name)):
# lint-ignore: NoUnsafeExecRule
result = subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=log_file.file,
universal_newlines=True,
errors="replace",
)
return_code = result.returncode
# Interpretation of the return code needs to be kept in sync with
# `source/command/inferCommand.ml`.
if return_code == 0:
return result.stdout
elif return_code == 1:
raise commands.ClientException(
message="Pyre encountered an internal failure",
exit_code=commands.ExitCode.FAILURE,
)
elif return_code == 2:
raise commands.ClientException(
message="Pyre encountered a failure within buck.",
exit_code=commands.ExitCode.BUCK_INTERNAL_ERROR,
)
elif return_code == 3:
raise commands.ClientException(
message="Pyre encountered an error when building the buck targets.",
exit_code=commands.ExitCode.BUCK_USER_ERROR,
)
else:
raise commands.ClientException(
message=(
"Infer command exited with unexpected return code: "
f"{return_code}."
),
exit_code=commands.ExitCode.FAILURE,
)
def _get_infer_command_output(
    configuration: frontend_configuration.Base,
    infer_arguments: command_arguments.InferArguments,
) -> str:
    """Invoke the backend `infer` binary and return its raw output.

    Raises ``InvalidConfiguration`` when no Pyre binary can be located.
    """
    start_command = configuration.get_server_start_command(download_if_needed=True)
    if start_command is None:
        raise configuration_module.InvalidConfiguration(
            "Cannot locate a Pyre binary to run."
        )
    binary_location = start_command.get_pyre_binary_location()
    LOG.info(f"Pyre binary is located at `{binary_location}`")
    with create_infer_arguments_and_cleanup(
        configuration, infer_arguments
    ) as arguments, backend_arguments.temporary_argument_file(
        arguments
    ) as argument_file_path:
        return _run_infer_command_get_output(
            command=[str(binary_location), "infer", str(argument_file_path)]
        )
def _load_output(
    configuration: frontend_configuration.Base,
    infer_arguments: command_arguments.InferArguments,
) -> str:
    """Return raw infer output, read from stdin or produced by the backend."""
    if not infer_arguments.read_stdin:
        return _get_infer_command_output(configuration, infer_arguments)
    return sys.stdin.read()
def _relativize_path(path: str, against: Path) -> Optional[str]:
given_path = Path(path)
return (
None
if against not in given_path.parents
else str(given_path.relative_to(against))
)
def create_module_annotations(
    infer_output: RawInferOutput, base_path: Path, options: StubGenerationOptions
) -> List[ModuleAnnotations]:
    """Build per-module annotations from raw infer output.

    Paths are relativized against `base_path`; inferences for files that do
    not live under `base_path` are dropped.
    """
    annotations: List[ModuleAnnotations] = []
    for path, data_for_path in infer_output.split_by_path().items():
        relative_path = _relativize_path(path, against=base_path)
        if relative_path is None:
            # File lies outside of `base_path`; skip it.
            continue
        annotations.append(
            ModuleAnnotations.from_infer_output(
                path=relative_path,
                infer_output=data_for_path,
                options=options,
            )
        )
    return annotations
def _print_inferences(
    infer_output: RawInferOutput, module_annotations: Sequence[ModuleAnnotations]
) -> None:
    """Log the raw infer output and the stubs generated from it."""
    LOG.log(log.SUCCESS, "Raw Infer Outputs:")
    LOG.log(log.SUCCESS, json.dumps(infer_output.to_dict(), indent=2))
    LOG.log(log.SUCCESS, "Generated Stubs:")
    stub_sections = [
        f"*{module.path}*\n{module.to_stubs()}" for module in module_annotations
    ]
    LOG.log(log.SUCCESS, "\n\n".join(stub_sections))
def _get_type_directory(log_directory: Path) -> Path:
return log_directory / "types"
def _write_stubs(
    type_directory: Path, module_annotations: Sequence[ModuleAnnotations]
) -> None:
    """Write one stub file per module into a freshly-created `type_directory`.

    Any pre-existing `type_directory` is deleted first so stale stubs from a
    previous run cannot linger.
    """
    if type_directory.exists():
        LOG.log(log.SUCCESS, f"Deleting {type_directory}")
        shutil.rmtree(type_directory)
    type_directory.mkdir(parents=True, exist_ok=True)
    LOG.log(log.SUCCESS, f"Outputting inferred stubs to {type_directory}...")
    for module in module_annotations:
        module.write_stubs(type_directory=type_directory)
def should_annotate_in_place(
    path: Path,
    paths_to_modify: Optional[Set[Path]],
) -> bool:
    """Decide whether `path` may be modified in place.

    When `paths_to_modify` is None there is no restriction and every path is
    allowed. Otherwise, `path` is allowed when it — or any of its ancestor
    directories — appears in `paths_to_modify`.
    """
    if paths_to_modify is None:
        return True
    # Fix: the original generator re-used the name `path` for its loop
    # variable, shadowing the parameter inside `(path, *path.parents)` and
    # relying on the iterable being evaluated eagerly in the outer scope.
    return any(
        candidate in paths_to_modify for candidate in (path, *path.parents)
    )
def _annotate_in_place(
    working_directory: Path,
    type_directory: Path,
    paths_to_modify: Optional[Set[Path]],
    options: StubGenerationOptions,
    number_of_workers: int,
) -> None:
    """Apply generated stubs to their corresponding source files in parallel.

    Every `.pyi` found under `type_directory` is matched to the `.py` file at
    the same relative path under `working_directory`; code paths excluded by
    `paths_to_modify` (when given) are skipped.
    """
    tasks: List[AnnotateModuleInPlace] = []
    for stub_path in type_directory.rglob("*.pyi"):
        relative_code_path = stub_path.relative_to(type_directory).with_suffix(".py")
        code_path = working_directory / relative_code_path
        if not should_annotate_in_place(code_path, paths_to_modify):
            continue
        tasks.append(
            AnnotateModuleInPlace(
                full_stub_path=str(stub_path),
                full_code_path=str(code_path),
                options=options,
            )
        )
    with multiprocessing.Pool(number_of_workers) as pool:
        # Drain the iterator so every task actually runs; results are unused.
        for _ in pool.imap_unordered(AnnotateModuleInPlace.run_task, tasks):
            pass
def run(
    configuration: frontend_configuration.Base,
    infer_arguments: command_arguments.InferArguments,
) -> commands.ExitCode:
    """Entry point for `pyre infer`.

    Either re-applies previously generated stubs
    (`--annotate-from-existing-stubs`, which requires `--in-place`), or runs
    the backend to produce fresh inferences which are then printed, written
    out as stubs, and/or applied in place depending on the flags.
    """
    working_directory = infer_arguments.working_directory
    _check_working_directory(
        working_directory=working_directory,
        global_root=configuration.get_global_root(),
        relative_local_root=configuration.get_relative_local_root(),
    )
    type_directory = _get_type_directory(configuration.get_log_directory())
    in_place = infer_arguments.in_place
    options = StubGenerationOptions(
        annotate_attributes=infer_arguments.annotate_attributes,
        use_future_annotations=infer_arguments.use_future_annotations,
        dequalify=infer_arguments.dequalify,
        quote_annotations=infer_arguments.quote_annotations,
        simple_annotations=infer_arguments.simple_annotations,
        debug_infer=infer_arguments.debug_infer,
    )
    if infer_arguments.annotate_from_existing_stubs:
        # Reuse stubs from a previous run instead of invoking the backend.
        if not in_place:
            raise ValueError(
                "`--annotate-from-existing-stubs` cannot be used without the"
                " `--in-place` flag"
            )
        _annotate_in_place(
            working_directory=working_directory,
            type_directory=type_directory,
            paths_to_modify=infer_arguments.paths_to_modify,
            options=options,
            number_of_workers=configuration.get_number_of_workers(),
        )
        return commands.ExitCode.SUCCESS
    # Fresh inference: the backend emits a one-element JSON list whose first
    # entry holds the raw output.
    raw_output = json.loads(_load_output(configuration, infer_arguments))
    infer_output = RawInferOutput.create_from_json(raw_output[0])
    module_annotations = create_module_annotations(
        infer_output=infer_output,
        base_path=working_directory,
        options=options,
    )
    if infer_arguments.print_only:
        _print_inferences(infer_output, module_annotations)
        return commands.ExitCode.SUCCESS
    _write_stubs(type_directory, module_annotations)
    if in_place:
        _annotate_in_place(
            working_directory=working_directory,
            type_directory=type_directory,
            paths_to_modify=infer_arguments.paths_to_modify,
            options=options,
            number_of_workers=configuration.get_number_of_workers(),
        )
    return commands.ExitCode.SUCCESS
| AnnotateModuleInPlace |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.