language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/swin/configuration_swin.py | {
"start": 893,
"end": 7405
} | class ____(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SwinModel`]. It is used to instantiate a Swin
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Swin
[microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
encoder_stride (`int`, *optional*, defaults to 32):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import SwinConfig, SwinModel
>>> # Initializing a Swin microsoft/swin-tiny-patch4-window7-224 style configuration
>>> configuration = SwinConfig()
>>> # Initializing a model (with random weights) from the microsoft/swin-tiny-patch4-window7-224 style configuration
>>> model = SwinModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "swin"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(
self,
image_size=224,
patch_size=4,
num_channels=3,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
use_absolute_embeddings=False,
initializer_range=0.02,
layer_norm_eps=1e-5,
encoder_stride=32,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.encoder_stride = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
__all__ = ["SwinConfig"]
| SwinConfig |
python | django__django | tests/admin_views/test_forms.py | {
"start": 475,
"end": 948
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create_user(
username="inactive", password="password", is_active=False
)
def test_inactive_user(self):
data = {
"username": "inactive",
"password": "password",
}
form = AdminAuthenticationForm(None, data)
self.assertEqual(form.non_field_errors(), ["This account is inactive."])
| AdminAuthenticationFormTests |
python | ray-project__ray | python/ray/_private/worker.py | {
"start": 4343,
"end": 4688
} | class ____(HasOptions, Generic[R, T0]):
def __init__(self, function: Callable[[T0], R]) -> None:
pass
def remote(
self,
__arg0: "Union[T0, ObjectRef[T0]]",
) -> "ObjectRef[R]":
...
def bind(
self,
__arg0: "Union[T0, DAGNode[T0]]",
) -> "DAGNode[R]":
...
| RemoteFunction0 |
python | Textualize__textual | docs/examples/widgets/switch.py | {
"start": 130,
"end": 991
} | class ____(App):
def compose(self) -> ComposeResult:
yield Static("[b]Example switches\n", classes="label")
yield Horizontal(
Static("off: ", classes="label"),
Switch(animate=False),
classes="container",
)
yield Horizontal(
Static("on: ", classes="label"),
Switch(value=True),
classes="container",
)
focused_switch = Switch()
focused_switch.focus()
yield Horizontal(
Static("focused: ", classes="label"), focused_switch, classes="container"
)
yield Horizontal(
Static("custom: ", classes="label"),
Switch(id="custom-design"),
classes="container",
)
app = SwitchApp(css_path="switch.tcss")
if __name__ == "__main__":
app.run()
| SwitchApp |
python | bokeh__bokeh | src/bokeh/models/renderers/renderer.py | {
"start": 5216,
"end": 5911
} | class ____(Renderer):
""" A base class for all guide renderer types. ``GuideRenderer`` is
not generally useful to instantiate on its own.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
level = Override(default="guide")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| GuideRenderer |
python | huggingface__transformers | src/transformers/models/nllb_moe/configuration_nllb_moe.py | {
"start": 755,
"end": 11222
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`NllbMoeModel`]. It is used to instantiate an
NLLB-MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the NLLB-MoE
[facebook/nllb-moe-54b](https://huggingface.co/facebook/nllb-moe-54b) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the NllbMoe model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`NllbMoeModel`] or
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
second_expert_policy ( `str`, *optional*, default to `"all"`):
The policy used for the sampling the probability of being sampled to a second expert for each token.
normalize_router_prob_before_dropping (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the router probabilities before applying a mask based on the experts capacity
(capacity dropping).
batch_prioritized_routing (`bool`, *optional*, defaults to `True`):
Whether or not to orders the tokens by their router probabilities before capacity dropping. This means that
the tokens that have the highest probabilities will be routed before other tokens that might be further in
the sequence.
moe_eval_capacity_token_fraction (`float`, *optional*, defaults to 1.0):
Fraction of tokens as capacity during validation, if set to negative, uses the same as training. Should be
in range: (0.0, 1.0].
num_experts (`int`, *optional*, defaults to 128):
Number of experts for each NllbMoeSparseMlp layer.
expert_capacity (`int`, *optional*, defaults to 64):
Number of tokens that can be stored in each expert.
encoder_sparse_step (`int`, *optional*, defaults to 4):
Frequency of the sparse layers in the encoder. 4 means that one out of 4 layers will be sparse.
decoder_sparse_step (`int`, *optional*, defaults to 4):
Frequency of the sparse layers in the decoder. 4 means that one out of 4 layers will be sparse.
router_dtype (`str`, *optional*, default to `"float32"`):
The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
*selective precision* discussion in [the paper](https://huggingface.co/papers/2101.03961).
router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
Whether to ignore padding tokens when routing. if `False`, the padding tokens are not routed to any
experts.
router_bias (`bool`, *optional*, defaults to `False`):
Whether or not the classifier of the router should have a bias.
moe_token_dropout (`float`, *optional*, default to 0.2):
Masking rate for MoE expert output masking (EOM), which is implemented via a Dropout2d on the expert
outputs.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not to return the router logits. Only set to `True` to get the auxiliary loss when training.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
Example:
```python
>>> from transformers import NllbMoeModel, NllbMoeConfig
>>> # Initializing a NllbMoe facebook/nllb-moe-54b style configuration
>>> configuration = NllbMoeConfig()
>>> # Initializing a model from the facebook/nllb-moe-54b style configuration
>>> model = NllbMoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "nllb-moe"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=128112,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.05,
decoder_layerdrop=0.05,
use_cache=True,
is_encoder_decoder=True,
activation_function="relu",
d_model=1024,
dropout=0.1,
attention_dropout=0.1,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=2,
scale_embedding=True,
router_bias=False,
router_dtype="float32",
router_ignore_padding_tokens=False,
num_experts=128,
expert_capacity=64,
encoder_sparse_step=4,
decoder_sparse_step=4,
router_z_loss_coef=0.001,
router_aux_loss_coef=0.001,
second_expert_policy="all",
normalize_router_prob_before_dropping=False,
batch_prioritized_routing=False,
moe_eval_capacity_token_fraction=1.0,
moe_token_dropout=0.2,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
output_router_logits=False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.router_z_loss_coef = router_z_loss_coef
self.router_aux_loss_coef = router_aux_loss_coef
self.decoder_sparse_step = decoder_sparse_step
self.encoder_sparse_step = encoder_sparse_step
self.num_experts = num_experts
self.expert_capacity = expert_capacity
self.router_bias = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
self.router_dtype = router_dtype
self.router_ignore_padding_tokens = router_ignore_padding_tokens
self.batch_prioritized_routing = batch_prioritized_routing
self.second_expert_policy = second_expert_policy
self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
self.moe_token_dropout = moe_token_dropout
self.output_router_logits = output_router_logits
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
__all__ = ["NllbMoeConfig"]
| NllbMoeConfig |
python | pytorch__pytorch | functorch/dim/_enable_all_layers.py | {
"start": 174,
"end": 4932
} | class ____:
"""
RAII-style context manager for enabling functorch vmap layers.
It manages the creation and cleanup of functorch dynamic layers.
This is probably one of the more algorithmically important parts of first
class dims. Intuitively, FCD can be thought of as another way of using
vmap, where you don't actually have to vmap at the top level, instead the
vmaps are implicitly determined by inspecting the bound dimensions on the
FCD tensors involved in a compute (this is similar to our concept of
non-lexical modes that we spent a long time talking about years ago). But
under the hood you still need to actually enable the vmap mode. So once
FCD has determined all of the dims we are batching over, it needs to
enable all those layers so functorch can actually apply the batching
rules. Therefore enable all layers!
"""
levels_start: int
levels_to_dim: list[Dim]
def __init__(self, levels: list[DimEntry]):
"""
Initialize and push dynamic layers for all first-class dimensions.
Args:
levels: List of dimension entries to create layers for
"""
from . import Dim
self.levels_start = 0
self.levels_to_dim = []
for l in levels:
if not l.is_positional():
d = l.dim()
assert isinstance(d, Dim)
self.levels_to_dim.append(d)
# Sort by level for stable ordering
self.levels_to_dim.sort(key=lambda d: d._level)
def __enter__(self) -> EnableAllLayers: # noqa: PYI034
# Create functorch dynamic layers
for i, dim in enumerate(self.levels_to_dim):
batch_size = dim.size
level = torch._C._functorch._vmap_increment_nesting(batch_size, "different")
if i == 0:
self.levels_start = level
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
"""Clean up dynamic layers in reverse order."""
to_remove = self.levels_start + len(self.levels_to_dim) - 1
for i in range(len(self.levels_to_dim)):
popped = torch._C._functorch._vmap_decrement_nesting()
assert popped == to_remove - i, (
f"Expected layer {to_remove - i}, got {popped}"
)
def from_batched(self, batchedtensor: torch.Tensor, has_device: bool) -> Tensor:
"""
Create a Tensor from a batched tensor by unwrapping functorch layers.
Args:
batchedtensor: Batched tensor from functorch operation
has_device: Whether tensor has device info
Returns:
Tensor with appropriate levels
"""
# Create positional levels for base dimensions
levels: list[DimEntry] = []
for i in range(-batchedtensor.dim(), 0):
levels.append(DimEntry(i))
tensor = batchedtensor
while torch._C._functorch.is_batchedtensor(tensor):
level = torch._C._functorch.maybe_get_level(tensor)
assert level is not None
assert level >= self.levels_start and level < self.levels_start + len(
self.levels_to_dim
)
dim = DimEntry(self.levels_to_dim[level - self.levels_start])
bdim = torch._C._functorch.maybe_get_bdim(tensor)
assert bdim is not None
levels.insert(bdim, dim)
tensor = torch._C._functorch.get_unwrapped(tensor)
from . import Tensor
result = Tensor()
result._tensor = tensor
result._batchtensor = batchedtensor
result._has_device = has_device
result._levels = levels
return result
def inplace_update_layers(
self, batchtensor: torch.Tensor, levels: list[DimEntry]
) -> None:
"""
Update the levels of a batched tensor in place.
This requires the _maybe_unsafe_set_level binding that we'll add to functorch.
Args:
batchtensor: Batched tensor to update
levels: New levels to set
"""
# Check if tensor is batched
if not torch._C._functorch.is_batchedtensor(batchtensor):
return
impl = batchtensor
for i in reversed(range(len(self.levels_to_dim))):
if impl is None:
break
if any(l == DimEntry(self.levels_to_dim[i]) for l in levels):
# This is very interesting! The level on batch tensor is
# meaningless! We set it RIGHT before we go into vmap
torch._C._functorch._maybe_unsafe_set_level(impl, self.levels_start + i)
impl = torch._C._functorch.get_unwrapped(impl)
| EnableAllLayers |
python | django__django | django/db/backends/oracle/base.py | {
"start": 3216,
"end": 3918
} | class ____:
def __get__(self, instance, cls=None):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__["operators"]
def _get_decimal_column(data):
if data["max_digits"] is None and data["decimal_places"] is None:
return "NUMBER"
return "NUMBER(%(max_digits)s, %(decimal_places)s)" % data
| _UninitializedOperatorsDescriptor |
python | automl__auto-sklearn | test/test_metalearning/test_metalearning.py | {
"start": 494,
"end": 622
} | class ____(object):
def __init__(self, name, value):
self.name = name
self.value = value
| MetafeatureValueDummy |
python | django__django | tests/inline_formsets/tests.py | {
"start": 8565,
"end": 9937
} | class ____(TestCase):
def test_constraint_refs_inline_foreignkey_field(self):
"""
Constraints that reference an InlineForeignKeyField should not be
skipped from validation (#35676).
"""
ChildFormSet = inlineformset_factory(
Parent,
Child,
fk_name="mother",
fields="__all__",
extra=1,
)
father = Parent.objects.create(name="James")
school = School.objects.create(name="Hogwarts")
mother = Parent.objects.create(name="Lily")
Child.objects.create(name="Harry", father=father, mother=mother, school=school)
data = {
"mothers_children-TOTAL_FORMS": "1",
"mothers_children-INITIAL_FORMS": "0",
"mothers_children-MIN_NUM_FORMS": "0",
"mothers_children-MAX_NUM_FORMS": "1000",
"mothers_children-0-id": "",
"mothers_children-0-father": str(father.pk),
"mothers_children-0-school": str(school.pk),
"mothers_children-0-name": "Mary",
}
formset = ChildFormSet(instance=mother, data=data, queryset=None)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors,
[{"__all__": ["Constraint “unique_parents” is violated."]}],
)
| InlineFormsetConstraintsValidationTests |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_performance.py | {
"start": 8806,
"end": 10880
} | class ____(OrganizationEventsSpansEndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, organization: Organization) -> Response:
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
return Response(status=404)
serializer = SpanSerializer(data=request.GET)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
serialized = serializer.validated_data
query = serialized.get("query")
span = serialized["span"]
min_exclusive_time = serialized.get("min_exclusive_time")
max_exclusive_time = serialized.get("max_exclusive_time")
direction, orderby_column = self.get_orderby_column(request)
def data_fn(offset: int, limit: int) -> list[_Example]:
example_transactions = query_example_transactions(
snuba_params,
query,
direction,
orderby_column,
span,
limit,
offset,
min_exclusive_time,
max_exclusive_time,
)
return [
{
"op": span.op,
"group": span.group,
"examples": [
get_example_transaction(
event,
span.op,
span.group,
min_exclusive_time,
max_exclusive_time,
).serialize()
for event in example_transactions.get(span, [])
],
}
]
with handle_query_errors():
return self.paginate(
request,
paginator=SpanExamplesPaginator(data_fn=data_fn),
default_per_page=3,
max_per_page=10,
)
| OrganizationEventsSpansExamplesEndpoint |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/transfers/mongo_to_s3.py | {
"start": 1290,
"end": 6072
} | class ____(BaseOperator):
"""
Move data from MongoDB to S3.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MongoToS3Operator`
:param mongo_conn_id: reference to a specific mongo connection
:param aws_conn_id: reference to a specific S3 connection
:param mongo_collection: reference to a specific collection in your mongo db
:param mongo_query: query to execute. A list including a dict of the query
:param mongo_projection: optional parameter to filter the returned fields by
the query. It can be a list of fields names to include or a dictionary
for excluding fields (e.g ``projection={"_id": 0}`` )
:param s3_bucket: reference to a specific S3 bucket to store the data
:param s3_key: in which S3 key the file will be stored
:param mongo_db: reference to a specific mongo database
:param replace: whether or not to replace the file in S3 if it previously existed
:param allow_disk_use: enables writing to temporary files in the case you are handling large dataset.
This only takes effect when `mongo_query` is a list - running an aggregate pipeline
:param compression: type of compression to use for output file in S3. Currently only gzip is supported.
"""
template_fields: Sequence[str] = ("s3_bucket", "s3_key", "mongo_query", "mongo_collection")
ui_color = "#589636"
template_fields_renderers = {"mongo_query": "json"}
def __init__(
self,
*,
mongo_conn_id: str = "mongo_default",
aws_conn_id: str | None = "aws_default",
mongo_collection: str,
mongo_query: list | dict,
s3_bucket: str,
s3_key: str,
mongo_db: str | None = None,
mongo_projection: list | dict | None = None,
replace: bool = False,
allow_disk_use: bool = False,
compression: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.mongo_conn_id = mongo_conn_id
self.aws_conn_id = aws_conn_id
self.mongo_db = mongo_db
self.mongo_collection = mongo_collection
# Grab query and determine if we need to run an aggregate pipeline
self.mongo_query = mongo_query
self.is_pipeline = isinstance(self.mongo_query, list)
self.mongo_projection = mongo_projection
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.replace = replace
self.allow_disk_use = allow_disk_use
self.compression = compression
def execute(self, context: Context):
"""Is written to depend on transform method."""
s3_conn = S3Hook(self.aws_conn_id)
# Grab collection and execute query according to whether or not it is a pipeline
if self.is_pipeline:
results: CommandCursor[Any] | Cursor = MongoHook(self.mongo_conn_id).aggregate(
mongo_collection=self.mongo_collection,
aggregate_query=cast("list", self.mongo_query),
mongo_db=self.mongo_db,
allowDiskUse=self.allow_disk_use,
)
else:
results = MongoHook(self.mongo_conn_id).find(
mongo_collection=self.mongo_collection,
query=cast("dict", self.mongo_query),
projection=self.mongo_projection,
mongo_db=self.mongo_db,
find_one=False,
)
# Performs transform then stringifies the docs results into json format
docs_str = self._stringify(self.transform(results))
s3_conn.load_string(
string_data=docs_str,
key=self.s3_key,
bucket_name=self.s3_bucket,
replace=self.replace,
compression=self.compression,
)
@staticmethod
def _stringify(iterable: Iterable, joinable: str = "\n") -> str:
"""
Stringify an iterable of dicts.
This dumps each dict with JSON, and joins them with ``joinable``.
"""
return joinable.join(json.dumps(doc, default=json_util.default) for doc in iterable)
@staticmethod
def transform(docs: Any) -> Any:
"""
Transform the data for transfer.
This method is meant to be extended by child classes to perform
transformations unique to those operators needs. Processes pyMongo
cursor and returns an iterable with each element being a JSON
serializable dictionary
The default implementation assumes no processing is needed, i.e. input
is a pyMongo cursor of documents and just needs to be passed through.
Override this method for custom transformations.
"""
return docs
| MongoToS3Operator |
python | readthedocs__readthedocs.org | readthedocs/integrations/migrations/0014_add_index_speedup.py | {
"start": 150,
"end": 576
} | class ____(migrations.Migration):
safe = Safe.always()
dependencies = [
("integrations", "0013_set_timestamp_fields_as_no_null"),
]
operations = [
migrations.AddIndex(
model_name="httpexchange",
index=models.Index(
fields=["content_type", "object_id", "date"],
name="integration_content_5d4e98_idx",
),
),
]
| Migration |
python | Textualize__textual | src/textual/widgets/_data_table.py | {
"start": 4057,
"end": 4278
} | class ____(StringKey):
"""Uniquely identifies a row in the DataTable.
Even if the visual location
of the row changes due to sorting or other modifications, a key will always
refer to the same row."""
| RowKey |
python | tensorflow__tensorflow | tensorflow/python/ops/data_flow_ops.py | {
"start": 78824,
"end": 92106
} | class ____(BaseStagingArea):
"""A `MapStagingArea` is a TensorFlow data structure that stores tensors
across multiple steps, and exposes operations that can put and get tensors.
Each `MapStagingArea` element is a (key, value) pair.
Only int64 keys are supported, other types should be
hashed to produce a key.
Values are a tuple of one or more tensors.
Each tuple component has a static dtype,
and may have a static shape.
The capacity of a `MapStagingArea` may be bounded or unbounded.
It supports multiple concurrent producers and consumers; and
provides exactly-once delivery.
Each value tuple of a `MapStagingArea` is a fixed-length tuple of tensors
whose
dtypes are described by `dtypes`, and whose shapes are optionally described
by the `shapes` argument.
If the `shapes` argument is specified, each component of a staging area
element must have the respective fixed shape. If it is
unspecified, different elements may have different shapes,
It behaves like an associative container with support for:
- put(key, values)
- peek(key) like dict.get(key)
- get(key) like dict.pop(key)
- get(key=None) like dict.popitem()
- size()
- clear()
If ordered a tree structure ordered by key will be used and
get(key=None) will remove (key, value) pairs in increasing key order.
Otherwise a hashtable
It can be configured with a capacity in which case
put(key, values) will block until space becomes available.
Similarly, it can be configured with a memory limit which
will block put(key, values) until space is available.
This is mostly useful for limiting the number of tensors on
devices such as GPUs.
All get() and peek() commands block if the requested
(key, value) pair is not present in the staging area.
Partial puts are supported and will be placed in an incomplete
map until such time as all values associated with the key have
been inserted. Once completed, this (key, value) pair will be
inserted into the map. Data in the incomplete map
counts towards the memory limit, but not towards capacity limit.
Partial gets from the map are also supported.
This removes the partially requested tensors from the entry,
but the entry is only removed from the map once all tensors
associated with it are removed.
"""
def __init__(self,
             dtypes,
             shapes=None,
             names=None,
             shared_name=None,
             ordered=False,
             capacity=0,
             memory_limit=0):
    """Constructs a map-based staging area.

    Args:
      dtypes: A list of types. The length of dtypes must equal the number
        of tensors in each element.
      shapes: (Optional.) Constraints on the shapes of tensors in an element.
        A list of shape tuples or None. This list is the same length
        as dtypes. If the shape of any tensors in the element are constrained,
        all must be; shapes can be None if the shapes should not be
        constrained.
      names: (Optional.) If provided, the `get()` and
        `put()` methods will use dictionaries with these names as keys.
        Must be None or a list or tuple of the same length as `dtypes`.
      shared_name: (Optional.) A name to be used for the shared object. By
        passing the same name to two different python objects they will share
        the underlying staging area. Must be a string.
      ordered: (Optional.) If True the underlying data structure
        is a tree ordered on key. Otherwise assume a hashtable.
      capacity: (Optional.) Maximum number of elements.
        An integer. If zero, the Staging Area is unbounded.
      memory_limit: (Optional.) Maximum number of bytes of all tensors
        in the Staging Area (excluding keys).
        An integer. If zero, the Staging Area is unbounded.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    super().__init__(dtypes, shapes, names, shared_name, capacity,
                     memory_limit)
    self._ordered = ordered
    # The ordered and unordered map kernels share the same op-name suffixes
    # ("stage", "unstage", ...), differing only in their prefix. Select the
    # whole family by prefix instead of duplicating the assignment list in
    # two branches.
    prefix = "ordered_map_" if ordered else "map_"
    self._put_fn = getattr(gen_data_flow_ops, prefix + "stage")
    self._pop_fn = getattr(gen_data_flow_ops, prefix + "unstage")
    self._popitem_fn = getattr(gen_data_flow_ops, prefix + "unstage_no_key")
    self._peek_fn = getattr(gen_data_flow_ops, prefix + "peek")
    self._size_fn = getattr(gen_data_flow_ops, prefix + "size")
    self._incomplete_size_fn = getattr(gen_data_flow_ops,
                                       prefix + "incomplete_size")
    self._clear_fn = getattr(gen_data_flow_ops, prefix + "clear")
def put(self, key, vals, indices=None, name=None):
    """Creates an op that stores the ``(key, vals)`` pair in the staging area.

    Partial (incomplete) puts are supported. Prefer passing ``vals`` as a
    dictionary, since the appropriate dtypes and shapes can then be
    inferred from the value names. When ``vals`` is a list or tuple,
    ``indices`` must also be supplied so the op knows at which element
    positions to perform the insert.

    The created op blocks while the capacity or memory limit of this
    container is reached.

    Args:
      key: Key associated with the data.
      vals: Tensor (or a dict/tuple of Tensors) to place
        into the staging area.
      indices: (Optional) if vals is a tuple/list, this is required.
      name: A name for the operation (optional).

    Returns:
      The created op.

    Raises:
      ValueError: If the number or type of inputs don't match the staging
        area.
    """
    scope_values = self._scope_vals(vals)
    with ops.name_scope(name, "%s_put" % self._name, scope_values) as scope:
        vals, indices = self._check_put_dtypes(vals, indices)
        # Keep the staging op on the same device as the staging area itself.
        with ops.colocate_with(self._coloc_op):
            put_op = self._put_fn(
                key,
                indices,
                vals,
                dtypes=self._dtypes,
                shared_name=self._name,
                name=scope,
                capacity=self._capacity,
                memory_limit=self._memory_limit)
    return put_op
def _get_indices_and_dtypes(self, indices=None):
if indices is None:
indices = list(range(len(self._dtypes)))
if not isinstance(indices, (tuple, list)):
raise TypeError(f"Invalid indices type {type(indices)}")
if len(indices) == 0:
raise ValueError("Empty indices")
if all(isinstance(i, str) for i in indices):
if self._names is None:
raise ValueError(f"String indices provided {indices}, but "
"this Staging Area was not created with names.")
try:
indices = [self._names.index(n) for n in indices]
except ValueError:
raise ValueError(f"Named index not in "
f"Staging Area names {self._names}")
elif all(isinstance(i, int) for i in indices):
pass
else:
raise TypeError(f"Mixed types in indices {indices}. "
"May only be str or int")
dtypes = [self._dtypes[i] for i in indices]
return indices, dtypes
def peek(self, key, indices=None, name=None):
    """Peeks at staging area data associated with the key.

    If the key is not in the staging area, it will block
    until the associated (key, value) is inserted. Unlike `get()`, the
    entry is not removed.

    Args:
      key: Key associated with the required data.
      indices: Partial list of tensors to retrieve (optional).
        A list of integer or string indices.
        String indices are only valid if the Staging Area
        has names associated with it.
      name: A name for the operation (optional).

    Returns:
      The created op.
    """
    if name is None:
        # Fix: the default name was previously "%s_pop", which wrongly
        # suggested a destructive get; peek does not remove the entry.
        # `_pop`/`_popitem` use "_get"/"_get_nokey" for their defaults.
        name = "%s_peek" % self._name

    indices, dtypes = self._get_indices_and_dtypes(indices)

    with ops.colocate_with(self._coloc_op):
        result = self._peek_fn(
            key,
            shared_name=self._name,
            indices=indices,
            dtypes=dtypes,
            name=name,
            capacity=self._capacity,
            memory_limit=self._memory_limit)

    return self._get_return_value(result, indices)
def get(self, key=None, indices=None, name=None):
    """Returns a (key, value) pair from the staging area.

    When `key` is given, the matching pair is removed and returned,
    blocking until that key has been inserted. When `key` is None, an
    ordered staging area yields the pair with the smallest key, while an
    unordered one yields an arbitrary pair; in either case the call
    blocks until the staging area is non-empty.

    Args:
      key: Key associated with the required data (Optional).
      indices: Partial list of tensors to retrieve (optional).
        A list of integer or string indices; string indices are only
        valid if the Staging Area has names associated with it.
      name: A name for the operation (optional).

    Returns:
      The created op.
    """
    if key is not None:
        return self._pop(key, indices=indices, name=name)
    return self._popitem(indices=indices, name=name)
def _pop(self, key, indices=None, name=None):
    """Removes and returns the (key, value) pair associated with `key`.

    Blocks until a value associated with `key` has been inserted into
    the staging area.

    Args:
      key: Key associated with the required data.
      indices: Partial list of tensors to retrieve (optional).
        A list of integer or string indices; string indices are only
        valid if the Staging Area has names associated with it.
      name: A name for the operation (optional).

    Returns:
      The created op.
    """
    op_name = "%s_get" % self._name if name is None else name
    indices, dtypes = self._get_indices_and_dtypes(indices)
    # Keep the unstage op colocated with the staging area's device.
    with ops.colocate_with(self._coloc_op):
        popped = self._pop_fn(
            key,
            shared_name=self._name,
            indices=indices,
            dtypes=dtypes,
            name=op_name,
            capacity=self._capacity,
            memory_limit=self._memory_limit)
    return key, self._get_return_value(popped, indices)
def _popitem(self, indices=None, name=None):
    """Removes and returns a (key, value) pair without a caller-chosen key.

    For an ordered staging area the entry with the smallest key is
    returned; otherwise an arbitrary entry is chosen. If the staging
    area is empty when this operation executes, it blocks until there
    is an element to dequeue.

    Args:
      indices: Partial list of tensors to retrieve (optional).
        A list of integer or string indices; string indices are only
        valid if the Staging Area has names associated with it.
      name: A name for the operation (optional).

    Returns:
      The created op.
    """
    op_name = "%s_get_nokey" % self._name if name is None else name
    indices, dtypes = self._get_indices_and_dtypes(indices)
    with ops.colocate_with(self._coloc_op):
        key, vals = self._popitem_fn(
            shared_name=self._name,
            indices=indices,
            dtypes=dtypes,
            name=op_name,
            capacity=self._capacity,
            memory_limit=self._memory_limit)
    # Separate the key and the values out of the underlying namedtuple,
    # inserting any required device transfers.
    key = self._create_device_transfers(key)[0]
    return key, self._get_return_value(vals, indices)
def size(self, name=None):
    """Returns an op reporting the number of elements in the staging area.

    Args:
      name: A name for the operation (optional).

    Returns:
      The created op.
    """
    op_name = "%s_size" % self._name if name is None else name
    return self._size_fn(
        shared_name=self._name,
        name=op_name,
        dtypes=self._dtypes,
        capacity=self._capacity,
        memory_limit=self._memory_limit)
def incomplete_size(self, name=None):
    """Returns an op reporting the number of incomplete elements.

    Incomplete elements are partial puts whose remaining components have
    not yet been inserted.

    Args:
      name: A name for the operation (optional).

    Returns:
      The created op.
    """
    op_name = "%s_incomplete_size" % self._name if name is None else name
    return self._incomplete_size_fn(
        shared_name=self._name,
        name=op_name,
        dtypes=self._dtypes,
        capacity=self._capacity,
        memory_limit=self._memory_limit)
def clear(self, name=None):
    """Returns an op that removes every element from the staging area.

    Args:
      name: A name for the operation (optional).

    Returns:
      The created op.
    """
    op_name = "%s_clear" % self._name if name is None else name
    return self._clear_fn(
        shared_name=self._name,
        name=op_name,
        dtypes=self._dtypes,
        capacity=self._capacity,
        memory_limit=self._memory_limit)
| MapStagingArea |
python | PyCQA__pycodestyle | testing/support.py | {
"start": 193,
"end": 997
} | class ____(BaseReport):
"""
Collect the results in memory, without printing anything.
"""
def __init__(self, options):
super().__init__(options)
self.in_memory_errors = []
def error(self, line_number, offset, text, check):
"""
Report an error, according to options.
"""
code = text[:4]
self.in_memory_errors.append(f'{code}:{line_number}:{offset + 1}')
return super().error(line_number, offset, text, check)
def errors_from_src(src: str) -> list[str]:
guide = StyleGuide(select=('E', 'W'), max_doc_length=72)
reporter = guide.init_report(InMemoryReport)
guide.input_file(
filename='in-memory-test-file.py',
lines=src.splitlines(True),
)
return reporter.in_memory_errors
| InMemoryReport |
python | django__django | tests/timezones/forms.py | {
"start": 272,
"end": 378
} | class ____(forms.ModelForm):
class Meta:
model = Event
fields = "__all__"
| EventModelForm |
python | pytorch__pytorch | benchmarks/dynamo/runner.py | {
"start": 43697,
"end": 47590
} | class ____:
"""
Plots progress of different metrics over time to detect regressions.
"""
def __init__(self, args):
self.args = args
self.suites = self.args.suites
self.lookup_file = os.path.join(self.args.dashboard_archive_path, "lookup.csv")
assert os.path.exists(self.lookup_file)
self.k = 10
def find_last_k(self):
"""
Find the last k pairs of (day number, log_path)
"""
dtype = self.args.dtypes[0]
df = pd.read_csv(self.lookup_file, names=("day", "mode", "prec", "path"))
df = df[df["mode"] == "performance"]
df = df[df["prec"] == dtype]
log_infos = []
for day, path in zip(df["day"], df["path"]):
log_infos.append(LogInfo(day, path))
assert len(log_infos) >= self.k
log_infos = log_infos[len(log_infos) - self.k :]
return log_infos
def generate_comment(self):
title = "## Metrics over time ##\n"
str_io = io.StringIO()
if not self.args.update_dashboard_test and not self.args.no_graphs:
for name in glob.glob(self.args.output_dir + "/*over_time.png"):
output = (
subprocess.check_output([self.args.dashboard_image_uploader, name])
.decode("ascii")
.rstrip()
)
str_io.write(f"\n{name} : \n")
comment = generate_dropdown_comment(title, str_io.getvalue())
with open(f"{self.args.output_dir}/gh_regression.txt", "w") as gh_fh:
gh_fh.write(comment)
def diff(self):
log_infos = self.find_last_k()
for metric in ["geomean", "passrate", "comp_time", "memory"]:
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15, 5))
for idx, suite in enumerate(self.suites):
dfs = []
for log_info in log_infos:
dir_path = os.path.join(
self.args.dashboard_archive_path, log_info.dir_path
)
assert os.path.exists(dir_path)
gmean_filename = os.path.join(dir_path, f"{metric}.csv")
if not os.path.exists(gmean_filename):
continue
df = pd.read_csv(gmean_filename)
if suite not in df:
continue
if metric == "geomean" or metric == "memory":
df[suite] = df[suite].str.replace("x", "").astype(float)
elif metric == "passrate":
df[suite] = df[suite].str.split("%").str[0].astype(float)
df.insert(0, "day", get_date(log_info))
df = df.pivot(index="day", columns="Compiler", values=suite)
# Interim stage when both inductor_cudagraphs and inductor exist
df = df.rename(columns={"inductor_cudagraphs": "inductor"})
for col_name in df.columns:
if col_name not in self.args.compilers:
df = df.drop(columns=[col_name])
dfs.append(df)
df = pd.concat(dfs)
df = df.interpolate(method="linear")
ax = df.plot(
ax=axes[idx],
kind="line",
ylabel=metric,
xlabel="Date",
grid=True,
ylim=0 if metric == "passrate" else 0.8,
title=suite,
style=".-",
legend=False,
)
ax.legend(loc="lower right", ncol=2)
plt.tight_layout()
plt.savefig(os.path.join(output_dir, f"{metric}_over_time.png"))
self.generate_comment()
| RegressionTracker |
python | pandas-dev__pandas | pandas/tests/indexing/test_scalar.py | {
"start": 571,
"end": 2115
} | class ____:
@pytest.mark.parametrize("dtype", [np.int64, np.uint64])
def test_iat_set_ints(self, dtype, frame_or_series):
f = frame_or_series(range(3), index=Index([0, 1, 2], dtype=dtype))
indices = generate_indices(f, True)
for i in indices:
f.iat[i] = 1
expected = f.values[i]
tm.assert_almost_equal(expected, 1)
@pytest.mark.parametrize(
"index",
[
Index(list("abcd"), dtype=object),
date_range("20130101", periods=4),
Index(range(0, 8, 2), dtype=np.float64),
],
)
def test_iat_set_other(self, index, frame_or_series):
f = frame_or_series(range(len(index)), index=index)
msg = "iAt based indexing can only have integer indexers"
idx = next(generate_indices(f, False))
with pytest.raises(ValueError, match=msg):
f.iat[idx] = 1
@pytest.mark.parametrize(
"index",
[
Index(list("abcd"), dtype=object),
date_range("20130101", periods=4),
Index(range(0, 8, 2), dtype=np.float64),
Index(range(0, 8, 2), dtype=np.uint64),
Index(range(0, 8, 2), dtype=np.int64),
],
)
def test_at_set_ints_other(self, index, frame_or_series):
f = frame_or_series(range(len(index)), index=index)
indices = generate_indices(f, False)
for i in indices:
f.at[i] = 1
expected = f.loc[i]
tm.assert_almost_equal(expected, 1)
| TestScalar |
python | huggingface__transformers | src/transformers/models/qwen3/modeling_qwen3.py | {
"start": 23388,
"end": 23493
} | class ____(GenericForSequenceClassification, Qwen3PreTrainedModel):
pass
| Qwen3ForSequenceClassification |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 328932,
"end": 329560
} | class ____(Matrix):
"""Identity matrix [1, 0, 0, 1, 0, 0]"""
def __hash__(self):
return hash((1,0,0,1,0,0))
def __init__(self):
Matrix.__init__(self, 1.0, 1.0)
def __repr__(self):
return "IdentityMatrix(1.0, 0.0, 0.0, 1.0, 0.0, 0.0)"
def __setattr__(self, name, value):
if name in "ad":
self.__dict__[name] = 1.0
elif name in "bcef":
self.__dict__[name] = 0.0
else:
self.__dict__[name] = value
def checkargs(*args):
raise NotImplementedError("Identity is readonly")
Identity = IdentityMatrix()
| IdentityMatrix |
python | doocs__leetcode | solution/0400-0499/0460.LFU Cache/Solution.py | {
"start": 890,
"end": 2427
} | class ____:
def __init__(self, capacity: int):
self.capacity = capacity
self.min_freq = 0
self.map = defaultdict(Node)
self.freq_map = defaultdict(DoublyLinkedList)
def get(self, key: int) -> int:
if self.capacity == 0 or key not in self.map:
return -1
node = self.map[key]
self.incr_freq(node)
return node.value
def put(self, key: int, value: int) -> None:
if self.capacity == 0:
return
if key in self.map:
node = self.map[key]
node.value = value
self.incr_freq(node)
return
if len(self.map) == self.capacity:
ls = self.freq_map[self.min_freq]
node = ls.remove_last()
self.map.pop(node.key)
node = Node(key, value)
self.add_node(node)
self.map[key] = node
self.min_freq = 1
def incr_freq(self, node: Node) -> None:
freq = node.freq
ls = self.freq_map[freq]
ls.remove(node)
if ls.is_empty():
self.freq_map.pop(freq)
if freq == self.min_freq:
self.min_freq += 1
node.freq += 1
self.add_node(node)
def add_node(self, node: Node) -> None:
freq = node.freq
ls = self.freq_map[freq]
ls.add_first(node)
self.freq_map[freq] = ls
# Your LFUCache object will be instantiated and called as such:
# obj = LFUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
| LFUCache |
python | pypa__installer | tests/test_sources.py | {
"start": 275,
"end": 1978
} | class ____:
def test_takes_two_arguments(self):
WheelSource("distribution", "version")
WheelSource(distribution="distribution", version="version")
def test_correctly_computes_properties(self):
source = WheelSource(distribution="distribution", version="version")
assert source.data_dir == "distribution-version.data"
assert source.dist_info_dir == "distribution-version.dist-info"
def test_raises_not_implemented_error(self):
source = WheelSource(distribution="distribution", version="version")
with pytest.raises(NotImplementedError):
_ = source.dist_info_filenames
with pytest.raises(NotImplementedError):
source.read_dist_info("METADATA")
with pytest.raises(NotImplementedError):
source.get_contents()
with pytest.raises(NotImplementedError):
source.validate_record()
def replace_file_in_zip(path: str, filename: str, content: "str | None") -> None:
"""Helper function for replacing a file in the zip.
Exists because ZipFile doesn't support remove.
"""
files = {}
# Copy everything except `filename`, and replace it with `content`.
with zipfile.ZipFile(path) as archive:
for file in archive.namelist():
if file == filename:
if content is None:
continue # Remove the file
files[file] = content.encode()
else:
files[file] = archive.read(file)
# Replace original archive
with zipfile.ZipFile(path, mode="w") as archive:
for name, content in files.items():
archive.writestr(name, content)
| TestWheelSource |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels17.py | {
"start": 315,
"end": 2118
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels17.xlsx")
self.ignore_elements = {"xl/charts/chart1.xml": ["<c:formatCode"]}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "stock"})
date_format = workbook.add_format({"num_format": 14})
chart.axis_ids = [45740032, 45747200]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column("A:D", 11)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$D$1:$D$5",
"data_labels": {"value": 1, "position": "right"},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/4-keys-keyboard.py | {
"start": 30,
"end": 560
} | class ____(object):
def maxA(self, N):
"""
:type N: int
:rtype: int
"""
if N < 7:
return N
if N == 10:
return 20 # the following rule doesn't hold when N = 10
n = N // 5 + 1 # n3 + n4 increases one every 5 keys
# (1) n = n3 + n4
# (2) N + 1 = 4 * n3 + 5 * n4
# 5 x (1) - (2) => 5*n - N - 1 = n3
n3 = 5*n - N - 1
n4 = n - n3
return 3**n3 * 4**n4
# Time: O(n)
# Space: O(1)
| Solution |
python | astropy__astropy | astropy/table/np_utils.py | {
"start": 329,
"end": 5992
} | class ____(ValueError):
pass
def get_col_name_map(
arrays, common_names, uniq_col_name="{col_name}_{table_name}", table_names=None
):
"""
Find the column names mapping when merging the list of structured ndarrays
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names
will be present, while for the other non-key columns the value will be (col_name_0,
None, ..) or (None, col_name_1, ..) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.dtype.names:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.dtype.names for other in others):
out_name = uniq_col_name.format(
table_name=table_name, col_name=name
)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError(
f"Merging column names resulted in duplicates: {repeated_names}. "
"Change uniq_col_name or table_names args to fix this."
)
# Convert col_name_map to a regular dict with tuple (immutable) values
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the superset of all dtypes in in_arrays
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError(
f"The '{names[0]}' columns have incompatible types: "
f"{tme._incompat_types}"
) from tme
# Make sure all input shapes are the same
uniq_shapes = {col.shape[1:] for col in in_cols}
if len(uniq_shapes) != 1:
raise TableMergeError("Key columns have different shape")
shape = uniq_shapes.pop()
if out_name is not None:
out_name = str(out_name)
out_descrs.append((out_name, dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of structured ndarray columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = {
tuple(issubclass(col.dtype.type, np_type) for np_type in np_types)
for col in cols
}
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [col.dtype.name for col in cols]
tme = TableMergeError(f"Columns have incompatible types {incompat_types}")
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=col.dtype) for col in cols]
# For string-type arrays need to explicitly fill in non-zero
# values or the final arr_common = .. step is unpredictable.
for arr in arrs:
if arr.dtype.kind in ("S", "U"):
arr[0] = "0" * arr.itemsize
arr_common = np.array([arr[0] for arr in arrs])
return arr_common.dtype.str
def _check_for_sequence_of_structured_arrays(arrays):
err = "`arrays` arg must be a sequence (e.g. list) of structured arrays"
if not isinstance(arrays, Sequence):
raise TypeError(err)
for array in arrays:
# Must be structured array
if not isinstance(array, np.ndarray) or array.dtype.names is None:
raise TypeError(err)
if len(arrays) == 0:
raise ValueError("`arrays` arg must include at least one array")
| TableMergeError |
python | great-expectations__great_expectations | tests/integration/test_utils/data_source_config/snowflake.py | {
"start": 831,
"end": 1639
} | class ____(DataSourceTestConfig):
@property
@override
def label(self) -> str:
return "snowflake"
@property
@override
def pytest_mark(self) -> pytest.MarkDecorator:
return pytest.mark.snowflake
@override
def create_batch_setup(
self,
request: pytest.FixtureRequest,
data: pd.DataFrame,
extra_data: Mapping[str, pd.DataFrame],
context: AbstractDataContext,
engine_manager: Optional[SessionSQLEngineManager] = None,
) -> BatchTestSetup:
return SnowflakeBatchTestSetup(
data=data,
config=self,
extra_data=extra_data,
table_name=self.table_name,
context=context,
engine_manager=engine_manager,
)
| SnowflakeDatasourceTestConfig |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 48113,
"end": 48567
} | class ____(DelegatingLexer):
"""
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`XmlLexer`.
.. versionadded:: 1.1
"""
name = 'XML+Evoque'
aliases = ['xml+evoque']
filenames = ['*.xml']
mimetypes = ['application/xml+evoque']
def __init__(self, **options):
super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
**options)
| EvoqueXmlLexer |
python | pypa__pip | src/pip/_vendor/rich/progress.py | {
"start": 28809,
"end": 29120
} | class ____(ProgressColumn):
"""Renders total filesize."""
def render(self, task: "Task") -> Text:
"""Show data completed."""
data_size = filesize.decimal(int(task.total)) if task.total is not None else ""
return Text(data_size, style="progress.filesize.total")
| TotalFileSizeColumn |
python | kubernetes-client__python | kubernetes/client/models/v1_azure_file_persistent_volume_source.py | {
"start": 383,
"end": 7209
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'read_only': 'bool',
'secret_name': 'str',
'secret_namespace': 'str',
'share_name': 'str'
}
attribute_map = {
'read_only': 'readOnly',
'secret_name': 'secretName',
'secret_namespace': 'secretNamespace',
'share_name': 'shareName'
}
def __init__(self, read_only=None, secret_name=None, secret_namespace=None, share_name=None, local_vars_configuration=None): # noqa: E501
"""V1AzureFilePersistentVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._read_only = None
self._secret_name = None
self._secret_namespace = None
self._share_name = None
self.discriminator = None
if read_only is not None:
self.read_only = read_only
self.secret_name = secret_name
if secret_namespace is not None:
self.secret_namespace = secret_namespace
self.share_name = share_name
@property
def read_only(self):
"""Gets the read_only of this V1AzureFilePersistentVolumeSource. # noqa: E501
readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
:return: The read_only of this V1AzureFilePersistentVolumeSource. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this V1AzureFilePersistentVolumeSource.
readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. # noqa: E501
:param read_only: The read_only of this V1AzureFilePersistentVolumeSource. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def secret_name(self):
"""Gets the secret_name of this V1AzureFilePersistentVolumeSource. # noqa: E501
secretName is the name of secret that contains Azure Storage Account Name and Key # noqa: E501
:return: The secret_name of this V1AzureFilePersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._secret_name
@secret_name.setter
def secret_name(self, secret_name):
"""Sets the secret_name of this V1AzureFilePersistentVolumeSource.
secretName is the name of secret that contains Azure Storage Account Name and Key # noqa: E501
:param secret_name: The secret_name of this V1AzureFilePersistentVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and secret_name is None: # noqa: E501
raise ValueError("Invalid value for `secret_name`, must not be `None`") # noqa: E501
self._secret_name = secret_name
@property
def secret_namespace(self):
"""Gets the secret_namespace of this V1AzureFilePersistentVolumeSource. # noqa: E501
secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod # noqa: E501
:return: The secret_namespace of this V1AzureFilePersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._secret_namespace
@secret_namespace.setter
def secret_namespace(self, secret_namespace):
"""Sets the secret_namespace of this V1AzureFilePersistentVolumeSource.
secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod # noqa: E501
:param secret_namespace: The secret_namespace of this V1AzureFilePersistentVolumeSource. # noqa: E501
:type: str
"""
self._secret_namespace = secret_namespace
@property
def share_name(self):
"""Gets the share_name of this V1AzureFilePersistentVolumeSource. # noqa: E501
shareName is the azure Share Name # noqa: E501
:return: The share_name of this V1AzureFilePersistentVolumeSource. # noqa: E501
:rtype: str
"""
return self._share_name
@share_name.setter
def share_name(self, share_name):
"""Sets the share_name of this V1AzureFilePersistentVolumeSource.
shareName is the azure Share Name # noqa: E501
:param share_name: The share_name of this V1AzureFilePersistentVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and share_name is None: # noqa: E501
raise ValueError("Invalid value for `share_name`, must not be `None`") # noqa: E501
self._share_name = share_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1AzureFilePersistentVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1AzureFilePersistentVolumeSource):
return True
return self.to_dict() != other.to_dict()
| V1AzureFilePersistentVolumeSource |
python | joke2k__faker | faker/providers/lorem/__init__.py | {
"start": 154,
"end": 11460
} | class ____(BaseProvider):
"""Implement default lorem provider for Faker.
.. important::
The default locale of the lorem provider is ``la``. When using a locale
without a localized lorem provider, the ``la`` lorem provider will be
used, so generated words will be in pseudo-Latin. The locale used for
the standard provider docs was ``en_US``, and ``en_US`` has a localized
lorem provider which is why the samples here show words in American
English.
"""
word_connector = " "
sentence_punctuation = "."
def get_words_list(
self,
part_of_speech: Optional[str] = None,
ext_word_list: Optional[Sequence[str]] = None,
) -> List[str]:
"""Get list of words.
``ext_word_list`` is a parameter that allows the user to provide a list
of words to be used instead of the built-in word list. If ``ext_word_list``
is provided, then the value of ``part_of_speech`` is ignored.
``part_of_speech`` is a parameter that defines to what part of speech
the returned word belongs. If ``ext_word_list`` is not ``None``, then
``part_of_speech`` is ignored. If the value of ``part_of_speech`` does
not correspond to an existent part of speech according to the set locale,
then an exception is raised.
:sample: part_of_speech="abc", ext_word_list=['abc', 'def', 'ghi', 'jkl']
:sample: part_of_speech="abc"
:sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']
.. warning::
Depending on the length of a locale provider's built-in word list or
on the length of ``ext_word_list`` if provided, a large ``nb`` can
exhaust said lists if ``unique`` is ``True``, raising an exception.
"""
if ext_word_list is not None:
word_list = ext_word_list
elif part_of_speech:
if part_of_speech not in self.parts_of_speech: # type: ignore[attr-defined]
raise ValueError(f"{part_of_speech} is not recognized as a part of speech.")
else:
word_list = self.parts_of_speech[part_of_speech] # type: ignore[attr-defined]
else:
word_list = self.word_list # type: ignore[attr-defined]
return list(word_list)
def words(
self,
nb: int = 3,
ext_word_list: Optional[List[str]] = None,
part_of_speech: Optional[str] = None,
unique: bool = False,
) -> List[str]:
"""Generate a tuple of words.
The ``nb`` argument controls the number of words in the resulting list,
and if ``ext_word_list`` is provided, words from that list will be used
instead of those from the locale provider's built-in word list.
if ``word_list`` is not provided, the method will use a default value of None,
which will result in the method calling the ``get_words_list`` method to get the
word list. If ``word_list`` is provided, the method will use the provided list.
If ``unique`` is ``True``, this method will return a list containing
unique words. Under the hood, |random_sample| will be used for sampling
without replacement. If ``unique`` is ``False``, |random_choices| is
used instead, and the list returned may contain duplicates.
:sample:
:sample: nb=5
:sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
:sample: nb=4, ext_word_list=['abc', 'def', 'ghi', 'jkl'], unique=True
"""
word_list = self.get_words_list(part_of_speech=part_of_speech, ext_word_list=ext_word_list)
if unique:
unique_samples = cast(List[str], self.random_sample(word_list, length=nb))
return unique_samples
samples = cast(List[str], self.random_choices(word_list, length=nb))
return samples
def word(self, part_of_speech: Optional[str] = None, ext_word_list: Optional[Sequence[str]] = None) -> str:
"""Generate a word.
This method uses |words| under the hood with the ``nb`` argument set to
``1`` to generate the result.
:sample:
:sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']
"""
word_list = self.get_words_list(part_of_speech, ext_word_list)
return self.words(1, word_list)[0]
def sentence(
self, nb_words: int = 6, variable_nb_words: bool = True, ext_word_list: Optional[Sequence[str]] = None
) -> str:
"""Generate a sentence.
The ``nb_words`` argument controls how many words the sentence will
contain, and setting ``variable_nb_words`` to ``False`` will generate
the exact amount, while setting it to ``True`` (default) will generate
a random amount (+/-40%, minimum of 1) using |randomize_nb_elements|.
Under the hood, |words| is used to generate the words, so the argument
``ext_word_list`` works in the same way here as it would in that method.
:sample: nb_words=10
:sample: nb_words=10, variable_nb_words=False
:sample: nb_words=10, ext_word_list=['abc', 'def', 'ghi', 'jkl']
:sample: nb_words=10, variable_nb_words=True,
ext_word_list=['abc', 'def', 'ghi', 'jkl']
"""
if nb_words <= 0:
return ""
if variable_nb_words:
nb_words = self.randomize_nb_elements(nb_words, min=1)
word_list = self.get_words_list(ext_word_list=ext_word_list)
words = list(self.words(nb=nb_words, ext_word_list=word_list))
words[0] = words[0].title()
return self.word_connector.join(words) + self.sentence_punctuation
def sentences(self, nb: int = 3, ext_word_list: Optional[Sequence[str]] = None) -> List[str]:
"""Generate a list of sentences.
This method uses |sentence| under the hood to generate sentences, and
the ``nb`` argument controls exactly how many sentences the list will
contain. The ``ext_word_list`` argument works in exactly the same way
as well.
:sample:
:sample: nb=5
:sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
"""
return [self.sentence(ext_word_list=ext_word_list) for _ in range(0, nb)]
def paragraph(
self, nb_sentences: int = 3, variable_nb_sentences: bool = True, ext_word_list: Optional[Sequence[str]] = None
) -> str:
"""Generate a paragraph.
The ``nb_sentences`` argument controls how many sentences the paragraph
will contain, and setting ``variable_nb_sentences`` to ``False`` will
generate the exact amount, while setting it to ``True`` (default) will
generate a random amount (+/-40%, minimum of 1) using
|randomize_nb_elements|.
Under the hood, |sentences| is used to generate the sentences, so the
argument ``ext_word_list`` works in the same way here as it would in
that method.
:sample: nb_sentences=5
:sample: nb_sentences=5, variable_nb_sentences=False
:sample: nb_sentences=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
:sample: nb_sentences=5, variable_nb_sentences=False,
ext_word_list=['abc', 'def', 'ghi', 'jkl']
"""
if nb_sentences <= 0:
return ""
if variable_nb_sentences:
nb_sentences = self.randomize_nb_elements(nb_sentences, min=1)
para = self.word_connector.join(self.sentences(nb_sentences, ext_word_list=ext_word_list))
return para
def paragraphs(self, nb: int = 3, ext_word_list: Optional[Sequence[str]] = None) -> List[str]:
"""Generate a list of paragraphs.
This method uses |paragraph| under the hood to generate paragraphs, and
the ``nb`` argument controls exactly how many sentences the list will
contain. The ``ext_word_list`` argument works in exactly the same way
as well.
:sample: nb=5
:sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
"""
return [self.paragraph(ext_word_list=ext_word_list) for _ in range(0, nb)]
def text(self, max_nb_chars: int = 200, ext_word_list: Optional[Sequence[str]] = None) -> str:
"""Generate a text string.
The ``max_nb_chars`` argument controls the approximate number of
characters the text string will have, and depending on its value, this
method may use either |words|, |sentences|, or |paragraphs| for text
generation. The ``ext_word_list`` argument works in exactly the same way
it would in any of those methods.
:sample: max_nb_chars=20
:sample: max_nb_chars=80
:sample: max_nb_chars=160
:sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']
"""
text: List[str] = []
if max_nb_chars < 5:
raise ValueError("text() can only generate text of at least 5 characters")
if max_nb_chars < 25:
# join words
while not text:
size = 0
# determine how many words are needed to reach the $max_nb_chars
# once;
while size < max_nb_chars:
word = (self.word_connector if size else "") + self.word(ext_word_list=ext_word_list)
text.append(word)
size += len(word)
text.pop()
text[0] = text[0][0].upper() + text[0][1:]
last_index = len(text) - 1
text[last_index] += self.sentence_punctuation
elif max_nb_chars < 100:
# join sentences
while not text:
size = 0
# determine how many sentences are needed to reach the
# $max_nb_chars once
while size < max_nb_chars:
sentence = (self.word_connector if size else "") + self.sentence(ext_word_list=ext_word_list)
text.append(sentence)
size += len(sentence)
text.pop()
else:
# join paragraphs
while not text:
size = 0
# determine how many paragraphs are needed to reach the
# $max_nb_chars once
while size < max_nb_chars:
paragraph = ("\n" if size else "") + self.paragraph(ext_word_list=ext_word_list)
text.append(paragraph)
size += len(paragraph)
text.pop()
return "".join(text)
def texts(
self, nb_texts: int = 3, max_nb_chars: int = 200, ext_word_list: Optional[Sequence[str]] = None
) -> List[str]:
"""Generate a list of text strings.
The ``nb_texts`` argument controls how many text strings the list will
contain, and this method uses |text| under the hood for text generation,
so the two remaining arguments, ``max_nb_chars`` and ``ext_word_list``
will work in exactly the same way as well.
:sample: nb_texts=5
:sample: nb_texts=5, max_nb_chars=50
:sample: nb_texts=5, max_nb_chars=50,
ext_word_list=['abc', 'def', 'ghi', 'jkl']
"""
return [self.text(max_nb_chars, ext_word_list) for _ in range(0, nb_texts)]
| Provider |
python | tensorflow__tensorflow | tensorflow/python/util/custom_nest_protocol.py | {
"start": 810,
"end": 4521
} | class ____(Protocol):
"""Protocol for adding custom tf.nest support in user-defined classes.
User classes should implement the two methods defined in this protocol in
order to be supported by nest functions.
- `__tf_flatten__` for generating the flattened components and the metadata
of the current object.
- `__tf_unflatten__` for creating a new object based on the input metadata
and the components.
See the method doc for details.
In terms of support level, classes implementing this protocol
- are supported by tf.nest and tf.data functions.
- have limited support from tf.function, which requires writing a custom
TraceType subclass to be used as the input or output of a tf.function.
- are NOT supported by SavedModel.
Code Examples:
>>> import dataclasses
>>> @dataclasses.dataclass
... class MaskedTensor:
... mask: bool
... value: tf.Tensor
...
... def __tf_flatten__(self):
... metadata = (self.mask,) # static config.
... components = (self.value,) # dynamic values.
... return metadata, components
...
... @classmethod
... def __tf_unflatten__(cls, metadata, components):
... mask = metadata[0]
... value = components[0]
... return MaskedTensor(mask=mask, value=value)
...
>>> mt = MaskedTensor(mask=True, value=tf.constant([1]))
>>> mt
MaskedTensor(mask=True, value=<tf.Tensor: ... numpy=array([1], dtype=int32)>)
>>> tf.nest.is_nested(mt)
True
>>> mt2 = MaskedTensor(mask=False, value=tf.constant([2]))
>>> tf.nest.assert_same_structure(mt, mt2)
>>> leaves = tf.nest.flatten(mt)
>>> leaves
[<tf.Tensor: shape=(1,), dtype=int32, numpy=array([1], dtype=int32)>]
>>> mt3 = tf.nest.pack_sequence_as(mt, leaves)
>>> mt3
MaskedTensor(mask=True, value=<tf.Tensor: ... numpy=array([1], dtype=int32)>)
>>> bool(mt == mt3)
True
>>> tf.nest.map_structure(lambda x: x * 2, mt)
MaskedTensor(mask=True, value=<tf.Tensor: ... numpy=array([2], dtype=int32)>)
More examples are available in the unit tests (nest_test.py).
"""
def __tf_flatten__(self):
"""Flatten current object into (metadata, components).
Returns:
A `tuple` of (metadata, components), where
- metadata is a custom Python object that stands for the static config
of the current object, which is supposed to be fixed and not affected
by data transformation.
- components is a `tuple` that contains the modifiable fields of the
current object.
Implementation Note:
- This method should not invoke any TensorFlow ops.
- This method only needs to flatten the current level. If current object has
an attribute that also need custom flattening, nest functions (such as
`nest.flatten`) will utilize this method to do recursive flattening.
- Components must be a `tuple`, not a `list`
"""
@classmethod
def __tf_unflatten__(cls, metadata, components):
"""Create a user-defined object from (metadata, components).
Args:
metadata: a custom Python object that stands for the static config for
reconstructing a new object of the current class.
components: a `tuple` that contains the dynamic data fields of the current
class, for object reconstruction.
Returns:
The user-defined object, with the same class of the current object.
Implementation Note:
- This method should not invoke any TensorFlow ops.
- This method only needs to unflatten the current level. If the object has
an attribute that also need custom unflattening, nest functions will
utilize this method to do recursive unflattening.
"""
| CustomNestProtocol |
python | great-expectations__great_expectations | great_expectations/experimental/metric_repository/metric_repository.py | {
"start": 273,
"end": 662
} | class ____:
"""A repository for storing and retrieving MetricRuns.
Args:
data_store: The DataStore to use for storing and retrieving MetricRuns.
"""
def __init__(self, data_store: DataStore):
self._data_store = data_store
def add_metric_run(self, metric_run: MetricRun) -> uuid.UUID:
return self._data_store.add(value=metric_run)
| MetricRepository |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/variables.py | {
"start": 2136,
"end": 2305
} | class ____(BaseModel):
"""Variable Collection serializer for responses."""
variables: Iterable[VariableResponse]
total_entries: int
| VariableCollectionResponse |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/visitors.py | {
"start": 18791,
"end": 18889
} | class ____(Protocol):
def __call__(self, element: _ET, **kw: Any) -> _ET: ...
| _CloneCallableType |
python | arrow-py__arrow | tests/test_parser.py | {
"start": 51416,
"end": 53299
} | class ____:
def test_parse_local(self):
from datetime import datetime
assert self.parser.parse("local") == datetime.now().astimezone().tzinfo
def test_parse_utc(self):
assert self.parser.parse("utc") == timezone.utc
assert self.parser.parse("UTC") == timezone.utc
def test_parse_utc_withoffset(self):
assert self.parser.parse("(UTC+01:00") == timezone(timedelta(seconds=3600))
assert self.parser.parse("(UTC-01:00") == timezone(timedelta(seconds=-3600))
assert self.parser.parse("(UTC+01:00") == timezone(timedelta(seconds=3600))
assert self.parser.parse(
"(UTC+01:00) Amsterdam, Berlin, Bern, Rom, Stockholm, Wien"
) == timezone(timedelta(seconds=3600))
def test_parse_iso(self):
assert self.parser.parse("01:00") == timezone(timedelta(seconds=3600))
assert self.parser.parse("11:35") == timezone(
timedelta(seconds=11 * 3600 + 2100)
)
assert self.parser.parse("+01:00") == timezone(timedelta(seconds=3600))
assert self.parser.parse("-01:00") == timezone(timedelta(seconds=-3600))
assert self.parser.parse("0100") == timezone(timedelta(seconds=3600))
assert self.parser.parse("+0100") == timezone(timedelta(seconds=3600))
assert self.parser.parse("-0100") == timezone(timedelta(seconds=-3600))
assert self.parser.parse("01") == timezone(timedelta(seconds=3600))
assert self.parser.parse("+01") == timezone(timedelta(seconds=3600))
assert self.parser.parse("-01") == timezone(timedelta(seconds=-3600))
def test_parse_str(self):
assert self.parser.parse("US/Pacific") == ZoneInfo("US/Pacific")
def test_parse_fails(self):
with pytest.raises(parser.ParserError):
self.parser.parse("fail")
@pytest.mark.usefixtures("dt_parser")
| TestTzinfoParser |
python | walkccc__LeetCode | solutions/969. Pancake Sorting/969.py | {
"start": 0,
"end": 313
} | class ____:
def pancakeSort(self, arr: list[int]) -> list[int]:
ans = []
for target in range(len(arr), 0, -1):
index = arr.index(target)
arr[:index + 1] = arr[:index + 1][::-1]
arr[:target] = arr[:target][::-1]
ans.append(index + 1)
ans.append(target)
return ans
| Solution |
python | PyCQA__pydocstyle | src/pydocstyle/config.py | {
"start": 945,
"end": 4575
} | class ____:
"""ConfigParser that partially mimics RawConfigParser but for toml files.
See RawConfigParser for more info. Also, please note that not all
RawConfigParser functionality is implemented, but only the subset that is
currently used by pydocstyle.
"""
def __init__(self):
"""Create a toml parser."""
self._config = {}
def read(self, filenames, encoding=None):
"""Read and parse a filename or an iterable of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify an iterable of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the iterable will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, (str, bytes, os.PathLike)):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename, "rb") as fp:
if not tomllib:
log.warning(
"The %s configuration file was ignored, "
"because the `tomli` package is not installed.",
filename,
)
continue
self._config.update(tomllib.load(fp))
except OSError:
continue
if isinstance(filename, os.PathLike):
filename = os.fspath(filename)
read_ok.append(filename)
return read_ok
def _get_section(self, section, allow_none=False):
try:
current = reduce(
operator.getitem,
section.split('.'),
self._config['tool'],
)
except KeyError:
current = None
if isinstance(current, dict):
return current
elif allow_none:
return None
else:
raise NoSectionError(section)
def has_section(self, section):
"""Indicate whether the named section is present in the configuration."""
return self._get_section(section, allow_none=True) is not None
def options(self, section):
"""Return a list of option names for the given section name."""
current = self._get_section(section)
return list(current.keys())
def get(self, section, option, *, _conv=None):
"""Get an option value for a given section."""
d = self._get_section(section)
option = option.lower()
try:
value = d[option]
except KeyError:
raise NoOptionError(option, section)
if isinstance(value, dict):
raise TypeError(
f"Expected {section}.{option} to be an option, not a section."
)
# toml should convert types automatically
# don't manually convert, just check, that the type is correct
if _conv is not None and not isinstance(value, _conv):
raise TypeError(
f"The type of {section}.{option} should be {_conv}"
)
return value
def getboolean(self, section, option):
"""Get a boolean option value for a given section."""
return self.get(section, option, _conv=bool)
def getint(self, section, option):
"""Get an integer option value for a given section."""
return self.get(section, option, _conv=int)
| TomlParser |
python | uqfoundation__dill | dill/tests/test_objects.py | {
"start": 828,
"end": 1931
} | class ____:
def _method(self):
pass
# objects that *fail* if imported
special = {}
special['LambdaType'] = _lambda = lambda x: lambda y: x
special['MethodType'] = _method = _class()._method
special['UnboundMethodType'] = _class._method
objects.update(special)
def pickles(name, exact=False, verbose=True):
"""quick check if object pickles with dill"""
obj = objects[name]
try:
pik = pickle.loads(pickle.dumps(obj))
if exact:
try:
assert pik == obj
except AssertionError:
assert type(obj) == type(pik)
if verbose: print ("weak: %s %s" % (name, type(obj)))
else:
assert type(obj) == type(pik)
except Exception:
if verbose: print ("fails: %s %s" % (name, type(obj)))
def test_objects(verbose=True):
for member in objects.keys():
#pickles(member, exact=True, verbose=verbose)
pickles(member, exact=False, verbose=verbose)
if __name__ == '__main__':
import warnings
warnings.simplefilter('ignore')
test_objects(verbose=False)
| _class |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 14559,
"end": 14865
} | class ____(VOTableSpecWarning):
"""
To avoid local-dependent number parsing differences, ``astropy.io.votable``
may require a string or unicode string where a numeric type may
make more sense.
"""
message_template = "'{}' must be a str or bytes object"
default_args = ("x",)
| W08 |
python | streamlit__streamlit | lib/tests/streamlit/web/server/routes_test.py | {
"start": 3888,
"end": 8808
} | class ____(tornado.testing.AsyncHTTPTestCase):
def setUp(self) -> None:
self._tmpdir = tempfile.TemporaryDirectory()
self._tmpfile = tempfile.NamedTemporaryFile(dir=self._tmpdir.name, delete=False)
self._tmp_js_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="script.js", delete=False
)
self._tmp_mjs_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="module.mjs", delete=False
)
self._tmp_html_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="file.html", delete=False
)
self._tmp_css_file = tempfile.NamedTemporaryFile(
dir=self._tmpdir.name, suffix="stylesheet.css", delete=False
)
# The manifest file must not have a prefix - create it manually in the tmpdir
self._tmp_manifest_filename = os.path.join(self._tmpdir.name, "manifest.json")
Path(self._tmp_manifest_filename).touch()
self._filename = os.path.basename(self._tmpfile.name)
self._js_filename = os.path.basename(self._tmp_js_file.name)
self._mjs_filename = os.path.basename(self._tmp_mjs_file.name)
self._html_filename = os.path.basename(self._tmp_html_file.name)
self._css_filename = os.path.basename(self._tmp_css_file.name)
self._manifest_filename = os.path.basename(self._tmp_manifest_filename)
super().setUp()
def tearDown(self) -> None:
super().tearDown()
self._tmpfile.close()
self._tmpdir.cleanup()
def get_app(self):
return tornado.web.Application(
[
(
r"/(.*)",
StaticFileHandler,
{
"path": self._tmpdir.name,
"default_filename": self._filename,
"reserved_paths": [
NEW_HEALTH_ENDPOINT,
HOST_CONFIG_ENDPOINT,
],
},
)
]
)
def test_parse_url_path_200(self):
responses = [
self.fetch("/"),
self.fetch(f"/{self._filename}"),
self.fetch("/page1/"),
self.fetch(f"/page1/{self._filename}"),
self.fetch("/page2/"),
self.fetch(f"/page2/{self._filename}"),
]
for r in responses:
assert r.code == 200
def test_nonexistent_urls_return_default_page(self):
responses = [
self.fetch("/nonexistent"),
self.fetch("/page2/nonexistent"),
self.fetch(f"/page3/{self._filename}"),
]
for r in responses:
assert r.code == 200
def test_reserved_paths_serve_404(self):
responses = [
self.fetch("/nonexistent/_stcore/health"),
self.fetch("/page2/_stcore/host-config"),
]
for r in responses:
assert r.code == 404
def test_cache_control_header(self):
r = self.fetch(f"/{self._html_filename}")
assert r.headers["Cache-Control"] == "no-cache"
r = self.fetch(f"/{self._manifest_filename}")
assert r.headers["Cache-Control"] == "no-cache"
r = self.fetch(f"/nested/{self._manifest_filename}")
assert r.headers["Cache-Control"] == "public, immutable, max-age=31536000"
r = self.fetch(f"/{self._js_filename}")
assert r.headers["Cache-Control"] == "public, immutable, max-age=31536000"
r = self.fetch(f"/{self._css_filename}")
assert r.headers["Cache-Control"] == "public, immutable, max-age=31536000"
def test_mimetype_is_overridden_by_server(self):
"""Test get_content_type function."""
mimetypes.add_type("custom/html", ".html")
mimetypes.add_type("custom/js", ".js")
mimetypes.add_type("custom/mjs", ".mjs")
mimetypes.add_type("custom/css", ".css")
r = self.fetch(f"/{self._html_filename}")
assert r.headers["Content-Type"] == "custom/html"
r = self.fetch(f"/{self._js_filename}")
assert r.headers["Content-Type"] == "custom/js"
r = self.fetch(f"/{self._mjs_filename}")
assert r.headers["Content-Type"] == "custom/mjs"
r = self.fetch(f"/{self._css_filename}")
assert r.headers["Content-Type"] == "custom/css"
Server.initialize_mimetypes()
r = self.fetch(f"/{self._html_filename}")
assert r.headers["Content-Type"] == "text/html"
r = self.fetch(f"/{self._js_filename}")
assert r.headers["Content-Type"] == "application/javascript"
r = self.fetch(f"/{self._mjs_filename}")
assert r.headers["Content-Type"] == "application/javascript"
r = self.fetch(f"/{self._css_filename}")
assert r.headers["Content-Type"] == "text/css"
| StaticFileHandlerTest |
python | dateutil__dateutil | tests/_common.py | {
"start": 4433,
"end": 5373
} | class ____(TZContextBase):
"""
Context manager for changing local time zone on Windows.
Because the effect of this is system-wide and global, it may have
unintended side effect. Set the ``DATEUTIL_MAY_CHANGE_TZ`` environment
variable to a truthy value before using this context manager.
"""
def get_current_tz(self):
p = subprocess.Popen(['tzutil', '/g'], stdout=subprocess.PIPE)
ctzname, err = p.communicate()
ctzname = ctzname.decode() # Popen returns
if p.returncode:
raise OSError('Failed to get current time zone: ' + err)
return ctzname
def set_current_tz(self, tzname):
p = subprocess.Popen('tzutil /s "' + tzname + '"')
out, err = p.communicate()
if p.returncode:
raise OSError('Failed to set current time zone: ' +
(err or 'Unknown error.'))
###
# Utility classes
| TZWinContext |
python | great-expectations__great_expectations | great_expectations/core/config_provider.py | {
"start": 3886,
"end": 4331
} | class ____(_AbstractConfigurationProvider):
"""
Responsible for the management of the runtime_environment dictionary provided at runtime.
"""
def __init__(self, runtime_environment: Dict[str, str]) -> None:
self._runtime_environment = runtime_environment
super().__init__()
@override
def get_values(self) -> Dict[str, str]:
return self._runtime_environment
| _RuntimeEnvironmentConfigurationProvider |
python | scrapy__scrapy | tests/test_squeues.py | {
"start": 1753,
"end": 1841
} | class ____(MarshalFifoDiskQueueTest):
chunksize = 2
| ChunkSize2MarshalFifoDiskQueueTest |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/runtime_wrappers.py | {
"start": 19003,
"end": 21982
} | class ____(InductorWrapper):
# TODO: I would love to get rid of this argument, but it's
# Wrapped pretty tightly around our aot_dispatch_autograd logic.
# Specifically, tensors_saved_for_backwards_slice's value is both used for calculating indices
# for setting placeholder strides(which is done before runtime, before this wrapper runs)
# and for saving tensors for backward (which is done during runtime, after this wrapper runs)
# So in aot_dispatch_autograd, this wrapper can't edit the set of outs without making one
# of those two indices incorrect.
return_new_outs: bool = True
def pre_compile(
self,
flat_fn: torch.fx.GraphModule,
flat_args,
aot_config,
*,
fw_metadata,
) -> None:
if config.functionalize_rng_ops:
# Update example inputs for the fw_compiler
fake_mode = detect_fake_mode()
assert fake_mode is not None
seed, offset = CUDARngStateHelper.get_torch_state_as_tuple(fake_mode)
flat_args.extend([seed, offset])
# We are not clearing flat_args here because
# 1) There is a check in the debug compiler at the end
# 2) It does not matter as these are fake tensors
def post_compile(
self,
compiled_fn,
aot_config: AOTConfig,
*,
runtime_metadata: ViewAndMutationMeta,
):
@wraps(compiled_fn)
def wrapper(runtime_args: list[Any]):
if runtime_metadata.is_rng_op_functionalized:
# Add the seed and offset to args
seed, offset = CUDARngStateHelper.get_torch_state_as_tuple()
runtime_args.extend([seed, offset])
out = compiled_fn(runtime_args)
out = self._functionalized_rng_runtime_epilogue(
runtime_metadata,
out,
# TODO: this won't be right for the backward when we convert the call_compiled_backward to use the wrapper
runtime_metadata.num_forward_returns,
)
return out
return compiled_fn(runtime_args)
return wrapper
# Calling convention: If we are running functionalized RNG, then outs consists
# of (user_outs, rng_offset)
def _functionalized_rng_runtime_epilogue(
self,
metadata: ViewAndMutationMeta,
outs,
offset_index,
):
if metadata.is_rng_op_functionalized:
assert metadata.num_outputs_rng_offset == 1
new_rng_offset = outs[offset_index]
CUDARngStateHelper.set_new_offset(new_rng_offset)
if self.return_new_outs:
user_outs = outs[:offset_index] + outs[offset_index + 1 :]
return user_outs
else:
return outs
return outs
# WARNING: this does NOT operate on TraceFn
@dataclass
| FunctionalizedRngRuntimeWrapper |
python | google__jax | jax/_src/custom_transpose.py | {
"start": 2247,
"end": 5777
} | class ____:
fun: Callable
transpose: Callable | None = None
def __init__(self, fun: Callable):
functools.update_wrapper(self, fun)
self.fun = fun
__getattr__ = custom_api_util.forward_attr
def def_transpose(self, transpose: Callable):
self.transpose = transpose
return transpose
@traceback_util.api_boundary
def __call__(self, out_types, res_arg, lin_arg):
_, res_tree = tree_flatten(res_arg)
_, lin_tree = tree_flatten(lin_arg)
args_flat, in_tree = tree_flatten((res_arg, lin_arg))
# TODO(frostig,mattjj): check that out_trees match
# TODO(frostig,mattjj): could, and should, we avoid flattening
# self.fun at this point?
flat_fun, out_tree2 = flatten_fun_nokwargs(
lu.wrap_init(
self.fun,
debug_info=api_util.debug_info("custom_transpose fun", self.fun,
(res_arg, lin_arg), {})),
in_tree)
out_types_flat, out_tree = tree_flatten(out_types)
transpose_wrapped = lu.wrap_init(
self.transpose,
debug_info=api_util.debug_info("custom_transpose transpose_fun",
self.transpose,
(res_arg, out_types), {}) )
out_flat = custom_transpose_p.bind(flat_fun, *args_flat,
transpose=transpose_wrapped,
out_types=tuple(out_types_flat),
lin_tree=lin_tree,
res_tree=res_tree,
out_tree=out_tree)
return tree_unflatten(out_tree, out_flat)
### utils
def tree_fill(x, treedef):
return tree_unflatten(treedef, [x] * treedef.num_leaves)
def tree_fill_like(x, tree):
return tree_fill(x, tree_structure(tree))
def tree_broadcast(full_treedef, tree, is_leaf=None):
full_tree = tree_fill(0, full_treedef)
return tree_map(tree_fill_like, tree, full_tree, is_leaf=is_leaf)
def is_treedef_prefix(entire, prefix):
entire = tree_fill(0, entire)
prefix = tree_fill(0, prefix)
try:
tree_map(lambda x, y: x, prefix, entire)
except ValueError:
return False
return True
def rule_name(rule):
return getattr(rule, '__name__', '<unnamed transpose rule>')
def check_transpose_rule_trees(rule: lu.WrappedFun,
lin_tree: PyTreeDef,
rule_out_tree: PyTreeDef):
if not is_treedef_prefix(lin_tree, rule_out_tree):
rule_name = rule.debug_info.func_src_info if rule.debug_info else "<unknown>"
raise TypeError(
'structure of custom transpose rule\'s output does not prefix-match '
'structure of primal function\'s linear inputs under '
f'custom transpose rule ({rule_name}).\n'
f'Transpose rule output: {rule_out_tree}\n'
f'Linear primal inputs: {lin_tree}')
def make_transpose_from_thunk(thunk: Callable,
lin_tree: PyTreeDef) -> lu.WrappedFun:
transpose_jaxpr, transpose_consts = thunk()
transpose_jaxpr = core.ClosedJaxpr(
pe.convert_constvars_jaxpr(transpose_jaxpr), ())
def transpose(res_arg, ct_out):
args_flat = tree_leaves((res_arg, ct_out))
ct_ins = core.jaxpr_as_fun(transpose_jaxpr)(*transpose_consts, *args_flat)
return tree_unflatten(lin_tree, ct_ins)
return lu.wrap_init(transpose, debug_info=transpose_jaxpr.jaxpr.debug_info)
### custom_transpose primitive and rules
| custom_transpose |
python | sanic-org__sanic | sanic/models/server_types.py | {
"start": 238,
"end": 2409
} | class ____:
"""
Local and remote addresses and SSL status info.
"""
__slots__ = (
"client_port",
"client",
"client_ip",
"ctx",
"lost",
"peername",
"server_port",
"server",
"server_name",
"sockname",
"ssl",
"cert",
"network_paths",
)
def __init__(self, transport: TransportProtocol, unix=None):
self.ctx = SimpleNamespace()
self.lost = False
self.peername: Optional[tuple[str, int]] = None
self.server = self.client = ""
self.server_port = self.client_port = 0
self.client_ip = ""
self.sockname = addr = transport.get_extra_info("sockname")
self.ssl = False
self.server_name = ""
self.cert: dict[str, Any] = {}
self.network_paths: list[Any] = []
sslobj: Optional[SSLObject] = transport.get_extra_info("ssl_object") # type: ignore
sslctx: Optional[SSLContext] = transport.get_extra_info("ssl_context") # type: ignore
if sslobj:
self.ssl = True
self.server_name = getattr(sslobj, "sanic_server_name", None) or ""
self.cert = dict(getattr(sslobj.context, "sanic", {}))
if sslctx and not self.cert:
self.cert = dict(getattr(sslctx, "sanic", {}))
if isinstance(addr, str): # UNIX socket
self.server = unix or addr
return
# IPv4 (ip, port) or IPv6 (ip, port, flowinfo, scopeid)
if isinstance(addr, tuple):
self.server = addr[0] if len(addr) == 2 else f"[{addr[0]}]"
self.server_port = addr[1]
# self.server gets non-standard port appended
if addr[1] != (443 if self.ssl else 80):
self.server = f"{self.server}:{addr[1]}"
self.peername = addr = transport.get_extra_info("peername")
self.network_paths = transport.get_extra_info("network_paths") # type: ignore
if isinstance(addr, tuple):
self.client = addr[0] if len(addr) == 2 else f"[{addr[0]}]"
self.client_ip = addr[0]
self.client_port = addr[1]
| ConnInfo |
python | pytorch__pytorch | test/distributed/algorithms/test_join.py | {
"start": 4282,
"end": 16753
} | class ____(MultiProcessTestCase):
r"""Test cases for the generic join context."""
def setUp(self):
super().setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["BACKEND"] = BACKEND
self._spawn_processes()
@property
def device(self):
return (
torch.device(self.rank)
if BACKEND == dist.Backend.NCCL
else torch.device("cpu")
)
@property
def world_size(self):
return WORLD_SIZE
@property
def process_group(self):
return dist.group.WORLD
def tearDown(self):
try:
dist.destroy_process_group()
except AssertionError:
pass
try:
os.remove(self.file_name)
except OSError:
pass
def dist_init(self, rank, world_size, backend=BACKEND):
store = dist.FileStore(self.file_name, world_size)
return dist.init_process_group(
backend=backend, store=store, rank=rank, world_size=world_size
)
def construct_uneven_inputs(self, base, offset, device=None):
r"""
Returns uneven inputs: rank i gets ``base`` + i * ``offset`` inputs.
"""
if device is None:
device = self.device
return [torch.zeros(1, device=device) for _ in range(base + self.rank * offset)]
def construct_even_inputs(self, base, device=None):
r"""Returns even inputs: each rank gets ``base`` inputs."""
if device is None:
device = self.device
return [torch.zeros(1, device=device) for _ in range(base)]
@property
def base_num_inputs(self):
r"""Base number of inputs to be used by all ranks."""
return 3
@property
def offset(self):
r"""Rank i gets i * ``offset`` additional inputs."""
return 1
def _test_join_base(
self,
uneven_inputs: bool,
num_joinables: int,
enable: bool,
throw_on_early_termination: bool,
num_allreduces: int,
run_post_hooks: bool,
expected_total: Optional[int] = None,
):
r"""
Skeleton for all :class:`Join` tests.
Arguments:
uneven_inputs (bool): ``True`` to use uneven inputs; ``False``
otherwise.
num_joinables (int): number of :class:`AllReducer` s to construct.
enable (bool): ``True`` to enable the join context manager;
``False`` otherwise.
throw_on_early_termination (bool): ``True`` to raise an exception
upon detecting uneven inputs; ``False`` otherwise.
num_allreduces (int): number of all-reduces to perform per input.
run_post_hooks (bool): ``True`` to run post-hooks; ``False``
otherwise.
expected_total (Optional[int]): ``None`` to not check the expected
all-reduce total; otherwise, the expected total; default is
``None``.
"""
self.dist_init(self.rank, self.world_size)
allreducers = [
AllReducer(self.device, self.process_group) for _ in range(num_joinables)
]
for allreducer in allreducers:
self.assertEqual(allreducer.post_hook_tensor.item(), BEFORE_CONSTANT)
inputs = (
self.construct_uneven_inputs(self.base_num_inputs, self.offset)
if uneven_inputs
else self.construct_even_inputs(self.base_num_inputs)
)
allreduce_total = 0
# Expect a `RuntimeError` if `throw_on_early_termination=True`
# Rank 0 exhausts its inputs first
expected_msg = (
"Rank 0 exhausted all inputs."
if self.rank == 0
else "Detected at least one rank that exhausted inputs. "
"Throwing across all ranks."
)
with (
self.assertRaisesRegex(RuntimeError, expected_msg)
if throw_on_early_termination
else contextlib.nullcontext()
):
with Join(
allreducers,
enable=enable,
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
):
for _ in inputs:
for allreducer in allreducers:
allreduce_total += allreducer(num_allreduces)
if throw_on_early_termination:
return
# Check `expected_total` if not `None`
if expected_total:
self.assertEqual(allreduce_total, expected_total)
# All `AllReduce` instances should receive the updated
# `post_hook_tensor` from the last-joined process
if run_post_hooks:
for allreducer in allreducers:
self.assertEqual(allreducer.post_hook_tensor.item(), AFTER_CONSTANT)
@require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_single_joinable_main_hooks(self):
r"""Tests the main hooks of a single :class:`Joinable`."""
num_joinables = 1
num_allreduces = 1
run_post_hooks = False
# Non-joined processes all-reduce a 1, so this rank's all-reduce total
# should be precisely equal to the total number of inputs processed
# before it joined
expected_total = self.world_size * self.base_num_inputs
# Rank i runs for i additional iterations
for num_joined in range(1, self.rank + 1):
expected_total += (self.world_size - num_joined) * self.offset
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total,
)
@require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_single_joinable_post_hooks(self):
r"""Tests the post-hooks of a single :class:`Joinable`."""
num_joinables = 1
num_allreduces = 0 # set to 0 to skip the main hooks
run_post_hooks = False
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=None,
)
@require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_single_joinable(self):
r"""
Tests the main hooks and post-hooks of a single :class:`Joinable`
together.
This combines ``test_single_joinable_main_hooks()`` and
``test_single_joinable_post_hooks()`` into a single test to ensure that
main hooks and post-hooks operate correctly together.
"""
num_joinables = 1
num_allreduces = 1
run_post_hooks = True
expected_total = self.world_size * self.base_num_inputs
for num_joined in range(1, self.rank + 1):
expected_total += (self.world_size - num_joined) * self.offset
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total,
)
@require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_multiple_joinables(self):
r"""
Tests the main hooks and post-hooks of multiple :class:`Joinable` s
together.
This generalizes ``test_single_joinable()`` to multiple
:class:`Joinable` s.
"""
num_joinables = 3
num_allreduces = 1
run_post_hooks = True
expected_total = self.world_size * self.base_num_inputs
for num_joined in range(1, self.rank + 1):
expected_total += (self.world_size - num_joined) * self.offset
# The expected total is now multiplied by a factor of `NUM_JOINABLES`
expected_total *= num_joinables
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total,
)
@require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_single_joinable_disable(self):
r"""Tests ``enable=False`` for a single :class:`Joinable`."""
num_joinables = 1
num_allreduces = 1
uneven_inputs = False
enable = False
run_post_hooks = False
expected_total = self.world_size * self.base_num_inputs
self._test_join_base(
uneven_inputs=uneven_inputs,
num_joinables=num_joinables,
enable=enable,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total,
)
@require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_multiple_joinable_disable(self):
r"""
Tests ``enable=False`` for multiple :class:`Joinable` s.
This generalizes ``test_single_joinable_disable`` to multiple
:class:`Joinable` s.
"""
num_joinables = 3
num_allreduces = 1
uneven_inputs = False
enable = False
run_post_hooks = False
expected_total = self.world_size * self.base_num_inputs * num_joinables
self._test_join_base(
uneven_inputs=uneven_inputs,
num_joinables=num_joinables,
enable=enable,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total,
)
@require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_single_joinable_throw(self):
r"""
Tests ``throw_on_early_termination=True`` for a single
:class:`Joinable`.
"""
num_joinables = 1
num_allreduces = 1
throw_on_early_termination = True
run_post_hooks = False
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=None,
)
@require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_multiple_joinables_throw(self):
r"""
Tests ``throw_on_early_termination=True`` for multiple
:class:`Joinable` s together.
This generalizes ``test_single_joinable_throw`` to multiple
:class:`Joinable` s.
"""
num_joinables = 3
num_allreduces = 1
throw_on_early_termination = True
run_post_hooks = False
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=throw_on_early_termination,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=None,
)
@require_n_gpus_for_nccl_backend(WORLD_SIZE, BACKEND)
def test_join_kwargs(self):
r"""
Tests passing keyword arguments to the context manager.
"""
num_joinables = 1
num_allreduces = 2
run_post_hooks = False
expected_total = self.world_size * self.base_num_inputs
for num_joined in range(1, self.rank + 1):
expected_total += (self.world_size - num_joined) * self.offset
# The expected total is now multiplied by a factor of `NUM_ALLREDUCES`
expected_total *= num_allreduces
self._test_join_base(
uneven_inputs=True,
num_joinables=num_joinables,
enable=True,
throw_on_early_termination=False,
num_allreduces=num_allreduces,
run_post_hooks=run_post_hooks,
expected_total=expected_total,
)
if __name__ == "__main__":
run_tests()
| TestJoin |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/schemas.py | {
"start": 53326,
"end": 53498
} | class ____(Protocol):
def __call__(
self,
*args: FxValue,
) -> tuple[tuple[list[FxValue], list[bool]], list[AOTOutput]]: ...
| PreppedForAutogradTraceFn |
python | pytorch__pytorch | torch/_inductor/async_compile.py | {
"start": 4967,
"end": 6955
} | class ____:
"""
In memory cache for storing compiled triton kernels.
Each triton kernel is keyed by the hash of its source code. Each value stored
in the cache is a return value of AsyncCompile.triton().
Currently, the cache stores Future objects, but it should be generalizable for any kernels.
"""
_cache: dict[str, CodeCacheFuture] = {}
@staticmethod
def key(kernel_src: str):
"""
Generates a cache key given a triton kernel's full source code.
This source includes the inductor meta, compilation metadata, the kernel itself, etc.
`kernel_src` should be the exact string passed to async_compile.triton()'s first argument.
"""
# Hashes the kernel source with torch_key into a single hash key
return code_hash(kernel_src, extra=torch_key())
@staticmethod
def save(kernel_src: str, future: CodeCacheFuture):
"""
Saves a compiled triton kernel to the cache.
TODO: We store a LambdaFuture as that's the callable returned by async_compile.triton,
but the real type we want to return here is actually an abstract triton kernel.
TODO: Source code here is not just the kernel's source code, but also includes the inductor preamble, etc.
so it could be less strict.
"""
key = CompiledTritonKernels.key(kernel_src)
CompiledTritonKernels._cache[key] = future
@staticmethod
def get(kernel_src: str) -> Optional[CodeCacheFuture]:
key = CompiledTritonKernels.key(kernel_src)
return CompiledTritonKernels._cache.get(key, None)
@staticmethod
def cache_clear():
CompiledTritonKernels._cache = {}
@staticmethod
def remove_future(kernel_src: str) -> None:
key = CompiledTritonKernels.key(kernel_src)
# Delete the LambdaFuture if there is one
if key in CompiledTritonKernels._cache:
del CompiledTritonKernels._cache[key]
| CompiledTritonKernels |
python | pytest-dev__pytest | testing/example_scripts/unittest/test_setup_skip.py | {
"start": 180,
"end": 284
} | class ____(unittest.TestCase):
def setUp(self):
assert 0
@unittest.skip("skip all tests")
| Base |
python | scipy__scipy | scipy/stats/tests/test_multivariate.py | {
"start": 145903,
"end": 155040
} | class ____:
@pytest.mark.parametrize(
"x, m, n, expected",
[
# Ground truth value from R dmvhyper
([3, 4], [5, 10], 7, -1.119814),
# test for `n=0`
([3, 4], [5, 10], 0, -np.inf),
# test for `x < 0`
([-3, 4], [5, 10], 7, -np.inf),
# test for `m < 0` (RuntimeWarning issue)
([3, 4], [-5, 10], 7, np.nan),
# test for all `m < 0` and `x.sum() != n`
([[1, 2], [3, 4]], [[-4, -6], [-5, -10]],
[3, 7], [np.nan, np.nan]),
# test for `x < 0` and `m < 0` (RuntimeWarning issue)
([-3, 4], [-5, 10], 1, np.nan),
# test for `x > m`
([1, 11], [10, 1], 12, np.nan),
# test for `m < 0` (RuntimeWarning issue)
([1, 11], [10, -1], 12, np.nan),
# test for `n < 0`
([3, 4], [5, 10], -7, np.nan),
# test for `x.sum() != n`
([3, 3], [5, 10], 7, -np.inf)
]
)
def test_logpmf(self, x, m, n, expected):
vals = multivariate_hypergeom.logpmf(x, m, n)
assert_allclose(vals, expected, rtol=1e-6)
def test_reduces_hypergeom(self):
# test that the multivariate_hypergeom pmf reduces to the
# hypergeom pmf in the 2d case.
val1 = multivariate_hypergeom.pmf(x=[3, 1], m=[10, 5], n=4)
val2 = hypergeom.pmf(k=3, M=15, n=4, N=10)
assert_allclose(val1, val2, rtol=1e-8)
val1 = multivariate_hypergeom.pmf(x=[7, 3], m=[15, 10], n=10)
val2 = hypergeom.pmf(k=7, M=25, n=10, N=15)
assert_allclose(val1, val2, rtol=1e-8)
def test_rvs(self):
# test if `rvs` is unbiased and large sample size converges
# to the true mean.
rv = multivariate_hypergeom(m=[3, 5], n=4)
rvs = rv.rvs(size=1000, random_state=123)
assert_allclose(rvs.mean(0), rv.mean(), rtol=1e-2)
def test_rvs_broadcasting(self):
rv = multivariate_hypergeom(m=[[3, 5], [5, 10]], n=[4, 9])
rvs = rv.rvs(size=(1000, 2), random_state=123)
assert_allclose(rvs.mean(0), rv.mean(), rtol=1e-2)
@pytest.mark.parametrize('m, n', (
([0, 0, 20, 0, 0], 5), ([0, 0, 0, 0, 0], 0),
([0, 0], 0), ([0], 0)
))
def test_rvs_gh16171(self, m, n):
res = multivariate_hypergeom.rvs(m, n)
m = np.asarray(m)
res_ex = m.copy()
res_ex[m != 0] = n
assert_equal(res, res_ex)
@pytest.mark.parametrize(
"x, m, n, expected",
[
([5], [5], 5, 1),
([3, 4], [5, 10], 7, 0.3263403),
# Ground truth value from R dmvhyper
([[[3, 5], [0, 8]], [[-1, 9], [1, 1]]],
[5, 10], [[8, 8], [8, 2]],
[[0.3916084, 0.006993007], [0, 0.4761905]]),
# test with empty arrays.
(np.array([], dtype=int), np.array([], dtype=int), 0, []),
([1, 2], [4, 5], 5, 0),
# Ground truth value from R dmvhyper
([3, 3, 0], [5, 6, 7], 6, 0.01077354)
]
)
def test_pmf(self, x, m, n, expected):
vals = multivariate_hypergeom.pmf(x, m, n)
assert_allclose(vals, expected, rtol=1e-7)
@pytest.mark.parametrize(
"x, m, n, expected",
[
([3, 4], [[5, 10], [10, 15]], 7, [0.3263403, 0.3407531]),
([[1], [2]], [[3], [4]], [1, 3], [1., 0.]),
([[[1], [2]]], [[3], [4]], [1, 3], [[1., 0.]]),
([[1], [2]], [[[[3]]]], [1, 3], [[[1., 0.]]])
]
)
def test_pmf_broadcasting(self, x, m, n, expected):
vals = multivariate_hypergeom.pmf(x, m, n)
assert_allclose(vals, expected, rtol=1e-7)
def test_cov(self):
cov1 = multivariate_hypergeom.cov(m=[3, 7, 10], n=12)
cov2 = [[0.64421053, -0.26526316, -0.37894737],
[-0.26526316, 1.14947368, -0.88421053],
[-0.37894737, -0.88421053, 1.26315789]]
assert_allclose(cov1, cov2, rtol=1e-8)
def test_cov_broadcasting(self):
cov1 = multivariate_hypergeom.cov(m=[[7, 9], [10, 15]], n=[8, 12])
cov2 = [[[1.05, -1.05], [-1.05, 1.05]],
[[1.56, -1.56], [-1.56, 1.56]]]
assert_allclose(cov1, cov2, rtol=1e-8)
cov3 = multivariate_hypergeom.cov(m=[[4], [5]], n=[4, 5])
cov4 = [[[0.]], [[0.]]]
assert_allclose(cov3, cov4, rtol=1e-8)
cov5 = multivariate_hypergeom.cov(m=[7, 9], n=[8, 12])
cov6 = [[[1.05, -1.05], [-1.05, 1.05]],
[[0.7875, -0.7875], [-0.7875, 0.7875]]]
assert_allclose(cov5, cov6, rtol=1e-8)
def test_var(self):
# test with hypergeom
var0 = multivariate_hypergeom.var(m=[10, 5], n=4)
var1 = hypergeom.var(M=15, n=4, N=10)
assert_allclose(var0, var1, rtol=1e-8)
def test_var_broadcasting(self):
var0 = multivariate_hypergeom.var(m=[10, 5], n=[4, 8])
var1 = multivariate_hypergeom.var(m=[10, 5], n=4)
var2 = multivariate_hypergeom.var(m=[10, 5], n=8)
assert_allclose(var0[0], var1, rtol=1e-8)
assert_allclose(var0[1], var2, rtol=1e-8)
var3 = multivariate_hypergeom.var(m=[[10, 5], [10, 14]], n=[4, 8])
var4 = [[0.6984127, 0.6984127], [1.352657, 1.352657]]
assert_allclose(var3, var4, rtol=1e-8)
var5 = multivariate_hypergeom.var(m=[[5], [10]], n=[5, 10])
var6 = [[0.], [0.]]
assert_allclose(var5, var6, rtol=1e-8)
def test_mean(self):
# test with hypergeom
mean0 = multivariate_hypergeom.mean(m=[10, 5], n=4)
mean1 = hypergeom.mean(M=15, n=4, N=10)
assert_allclose(mean0[0], mean1, rtol=1e-8)
mean2 = multivariate_hypergeom.mean(m=[12, 8], n=10)
mean3 = [12.*10./20., 8.*10./20.]
assert_allclose(mean2, mean3, rtol=1e-8)
def test_mean_broadcasting(self):
mean0 = multivariate_hypergeom.mean(m=[[3, 5], [10, 5]], n=[4, 8])
mean1 = [[3.*4./8., 5.*4./8.], [10.*8./15., 5.*8./15.]]
assert_allclose(mean0, mean1, rtol=1e-8)
def test_mean_edge_cases(self):
mean0 = multivariate_hypergeom.mean(m=[0, 0, 0], n=0)
assert_equal(mean0, [0., 0., 0.])
mean1 = multivariate_hypergeom.mean(m=[1, 0, 0], n=2)
assert_equal(mean1, [np.nan, np.nan, np.nan])
mean2 = multivariate_hypergeom.mean(m=[[1, 0, 0], [1, 0, 1]], n=2)
assert_allclose(mean2, [[np.nan, np.nan, np.nan], [1., 0., 1.]],
rtol=1e-17)
mean3 = multivariate_hypergeom.mean(m=np.array([], dtype=int), n=0)
assert_equal(mean3, [])
assert_(mean3.shape == (0, ))
def test_var_edge_cases(self):
var0 = multivariate_hypergeom.var(m=[0, 0, 0], n=0)
assert_allclose(var0, [0., 0., 0.], rtol=1e-16)
var1 = multivariate_hypergeom.var(m=[1, 0, 0], n=2)
assert_equal(var1, [np.nan, np.nan, np.nan])
var2 = multivariate_hypergeom.var(m=[[1, 0, 0], [1, 0, 1]], n=2)
assert_allclose(var2, [[np.nan, np.nan, np.nan], [0., 0., 0.]],
rtol=1e-17)
var3 = multivariate_hypergeom.var(m=np.array([], dtype=int), n=0)
assert_equal(var3, [])
assert_(var3.shape == (0, ))
def test_cov_edge_cases(self):
cov0 = multivariate_hypergeom.cov(m=[1, 0, 0], n=1)
cov1 = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
assert_allclose(cov0, cov1, rtol=1e-17)
cov3 = multivariate_hypergeom.cov(m=[0, 0, 0], n=0)
cov4 = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
assert_equal(cov3, cov4)
cov5 = multivariate_hypergeom.cov(m=np.array([], dtype=int), n=0)
cov6 = np.array([], dtype=np.float64).reshape(0, 0)
assert_allclose(cov5, cov6, rtol=1e-17)
assert_(cov5.shape == (0, 0))
def test_frozen(self):
# The frozen distribution should agree with the regular one
n = 12
m = [7, 9, 11, 13]
x = [[0, 0, 0, 12], [0, 0, 1, 11], [0, 1, 1, 10],
[1, 1, 1, 9], [1, 1, 2, 8]]
x = np.asarray(x, dtype=int)
mhg_frozen = multivariate_hypergeom(m, n)
assert_allclose(mhg_frozen.pmf(x),
multivariate_hypergeom.pmf(x, m, n))
assert_allclose(mhg_frozen.logpmf(x),
multivariate_hypergeom.logpmf(x, m, n))
assert_allclose(mhg_frozen.var(), multivariate_hypergeom.var(m, n))
assert_allclose(mhg_frozen.cov(), multivariate_hypergeom.cov(m, n))
def test_invalid_params(self):
assert_raises(ValueError, multivariate_hypergeom.pmf, 5, 10, 5)
assert_raises(ValueError, multivariate_hypergeom.pmf, 5, [10], 5)
assert_raises(ValueError, multivariate_hypergeom.pmf, [5, 4], [10], 5)
assert_raises(TypeError, multivariate_hypergeom.pmf, [5.5, 4.5],
[10, 15], 5)
assert_raises(TypeError, multivariate_hypergeom.pmf, [5, 4],
[10.5, 15.5], 5)
assert_raises(TypeError, multivariate_hypergeom.pmf, [5, 4],
[10, 15], 5.5)
| TestMultivariateHypergeom |
python | kamyu104__LeetCode-Solutions | Python/iterator-for-combination.py | {
"start": 719,
"end": 2480
} | class ____(object):
def __init__(self, characters, combinationLength):
"""
:type characters: str
:type combinationLength: int
"""
self.__characters = characters
self.__combinationLength = combinationLength
self.__it = self.__iterative_backtracking()
self.__curr = None
self.__last = characters[-combinationLength:]
def __iterative_backtracking(self):
def conquer():
if len(curr) == self.__combinationLength:
return curr
def prev_divide(c):
curr.append(c)
def divide(i):
if len(curr) != self.__combinationLength:
for j in reversed(xrange(i, len(self.__characters)-(self.__combinationLength-len(curr)-1))):
stk.append(functools.partial(post_divide))
stk.append(functools.partial(divide, j+1))
stk.append(functools.partial(prev_divide, self.__characters[j]))
stk.append(functools.partial(conquer))
def post_divide():
curr.pop()
curr = []
stk = [functools.partial(divide, 0)]
while stk:
result = stk.pop()()
if result is not None:
yield result
def next(self):
"""
:rtype: str
"""
self.__curr = "".join(next(self.__it))
return self.__curr
def hasNext(self):
"""
:rtype: bool
"""
return self.__curr != self.__last
# Your CombinationIterator object will be instantiated and called as such:
# obj = CombinationIterator(characters, combinationLength)
# param_1 = obj.next()
# param_2 = obj.hasNext()
| CombinationIterator2 |
python | apache__airflow | providers/google/tests/unit/google/cloud/sensors/test_datafusion.py | {
"start": 1450,
"end": 4878
} | class ____:
@pytest.mark.parametrize(
("expected_status", "current_status", "sensor_return"),
[
(PipelineStates.COMPLETED, PipelineStates.COMPLETED, True),
(PipelineStates.COMPLETED, PipelineStates.RUNNING, False),
],
)
@mock.patch("airflow.providers.google.cloud.sensors.datafusion.DataFusionHook")
def test_poke(self, mock_hook, expected_status, current_status, sensor_return):
mock_hook.return_value.get_instance.return_value = {"apiEndpoint": INSTANCE_URL}
task = CloudDataFusionPipelineStateSensor(
task_id="test_task_id",
pipeline_name=PIPELINE_NAME,
pipeline_id=PIPELINE_ID,
project_id=PROJECT_ID,
expected_statuses={expected_status},
instance_name=INSTANCE_NAME,
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_pipeline_workflow.return_value = {"status": current_status}
result = task.poke(mock.MagicMock())
assert sensor_return == result
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_instance.assert_called_once_with(
instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
)
@mock.patch("airflow.providers.google.cloud.sensors.datafusion.DataFusionHook")
def test_assertion(self, mock_hook):
mock_hook.return_value.get_instance.return_value = {"apiEndpoint": INSTANCE_URL}
task = CloudDataFusionPipelineStateSensor(
task_id="test_task_id",
pipeline_name=PIPELINE_NAME,
pipeline_id=PIPELINE_ID,
project_id=PROJECT_ID,
expected_statuses={PipelineStates.COMPLETED},
failure_statuses=FAILURE_STATUSES,
instance_name=INSTANCE_NAME,
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_pipeline_workflow.return_value = {"status": "FAILED"}
with pytest.raises(
AirflowException,
match=f"Pipeline with id '{PIPELINE_ID}' state is: FAILED. Terminating sensor...",
):
task.poke(mock.MagicMock())
@mock.patch("airflow.providers.google.cloud.sensors.datafusion.DataFusionHook")
def test_not_found_exception(self, mock_hook):
mock_hook.return_value.get_instance.return_value = {"apiEndpoint": INSTANCE_URL}
mock_hook.return_value.get_pipeline_workflow.side_effect = AirflowNotFoundException()
task = CloudDataFusionPipelineStateSensor(
task_id="test_task_id",
pipeline_name=PIPELINE_NAME,
pipeline_id=PIPELINE_ID,
project_id=PROJECT_ID,
expected_statuses={PipelineStates.COMPLETED},
failure_statuses=FAILURE_STATUSES,
instance_name=INSTANCE_NAME,
location=LOCATION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
with pytest.raises(
AirflowException,
match="Specified Pipeline ID was not found.",
):
task.poke(mock.MagicMock())
| TestCloudDataFusionPipelineStateSensor |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 39090,
"end": 39261
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("TIMESTAMP",)
| SponsorsActivityOrderField |
python | gevent__gevent | src/gevent/tests/test__os.py | {
"start": 5087,
"end": 5963
} | class ____(greentest.TestCase):
__timeout__ = greentest.LARGE_TIMEOUT
def test_waitpid_all(self):
# Cover this specific case.
pid = os.fork_and_watch()
if pid:
os.waitpid(-1, 0)
# Can't assert on what the found pid actually was,
# our testrunner may have spawned multiple children.
os._reap_children(0) # make the leakchecker happy
else: # pragma: no cover
gevent.sleep(2)
# The test framework will catch a regular SystemExit
# from sys.exit(), we need to just kill the process.
os._exit(0)
def test_waitpid_wrong_neg(self):
self.assertRaises(OSError, os.waitpid, -2, 0)
def test_waitpid_wrong_pos(self):
self.assertRaises(OSError, os.waitpid, 1, 0)
if __name__ == '__main__':
greentest.main()
| TestForkAndWatch |
python | pydantic__pydantic | pydantic/functional_validators.py | {
"start": 21249,
"end": 21692
} | class ____(Protocol):
"""A `@model_validator` decorated function signature. This is used when `mode='before'`."""
def __call__( # noqa: D102
self,
# this can be a dict, a model instance
# or anything else that gets passed to validate_python
# thus validators _must_ handle all cases
value: Any,
info: core_schema.ValidationInfo[Any],
/,
) -> Any: ...
| FreeModelBeforeValidator |
python | prabhupant__python-ds | data_structures/binary_trees/print_nodes_at_k_distance_from_root.py | {
"start": 0,
"end": 290
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def print_k(root, k):
if root is None:
return
if k == 0:
print(root.val)
else:
print_k(root.left, k-1)
print_k(root.right, k-1)
| Node |
python | tensorflow__tensorflow | tensorflow/python/framework/dtypes.py | {
"start": 1637,
"end": 1884
} | class ____:
"""Holds resource/variant tensor specific data."""
shape_inference: Optional[
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData
] = None
alias_id: Optional[int] = None
@tf_export("dtypes.DType", "DType")
| HandleData |
python | scikit-learn__scikit-learn | sklearn/metrics/_plot/regression.py | {
"start": 312,
"end": 14706
} | class ____:
"""Visualization of the prediction error of a regression model.
This tool can display "residuals vs predicted" or "actual vs predicted"
using scatter plots to qualitatively assess the behavior of a regressor,
preferably on held-out data points.
See the details in the docstrings of
:func:`~sklearn.metrics.PredictionErrorDisplay.from_estimator` or
:func:`~sklearn.metrics.PredictionErrorDisplay.from_predictions` to
create a visualizer. All parameters are stored as attributes.
For general information regarding `scikit-learn` visualization tools, read
more in the :ref:`Visualization Guide <visualizations>`.
For details regarding interpreting these plots, refer to the
:ref:`Model Evaluation Guide <visualization_regression_evaluation>`.
.. versionadded:: 1.2
Parameters
----------
y_true : ndarray of shape (n_samples,)
True values.
y_pred : ndarray of shape (n_samples,)
Prediction values.
Attributes
----------
line_ : matplotlib Artist
Optimal line representing `y_true == y_pred`. Therefore, it is a
diagonal line for `kind="predictions"` and a horizontal line for
`kind="residuals"`.
errors_lines_ : matplotlib Artist or None
Residual lines. If `with_errors=False`, then it is set to `None`.
scatter_ : matplotlib Artist
Scatter data points.
ax_ : matplotlib Axes
Axes with the different matplotlib axis.
figure_ : matplotlib Figure
Figure containing the scatter and lines.
See Also
--------
PredictionErrorDisplay.from_estimator : Prediction error visualization
given an estimator and some data.
PredictionErrorDisplay.from_predictions : Prediction error visualization
given the true and predicted targets.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.linear_model import Ridge
>>> from sklearn.metrics import PredictionErrorDisplay
>>> X, y = load_diabetes(return_X_y=True)
>>> ridge = Ridge().fit(X, y)
>>> y_pred = ridge.predict(X)
>>> display = PredictionErrorDisplay(y_true=y, y_pred=y_pred)
>>> display.plot()
<...>
>>> plt.show()
"""
def __init__(self, *, y_true, y_pred):
self.y_true = y_true
self.y_pred = y_pred
def plot(
self,
ax=None,
*,
kind="residual_vs_predicted",
scatter_kwargs=None,
line_kwargs=None,
):
"""Plot visualization.
Extra keyword arguments will be passed to matplotlib's ``plot``.
Parameters
----------
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
kind : {"actual_vs_predicted", "residual_vs_predicted"}, \
default="residual_vs_predicted"
The type of plot to draw:
- "actual_vs_predicted" draws the observed values (y-axis) vs.
the predicted values (x-axis).
- "residual_vs_predicted" draws the residuals, i.e. difference
between observed and predicted values, (y-axis) vs. the predicted
values (x-axis).
scatter_kwargs : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.scatter`
call.
line_kwargs : dict, default=None
Dictionary with keyword passed to the `matplotlib.pyplot.plot`
call to draw the optimal line.
Returns
-------
display : :class:`~sklearn.metrics.PredictionErrorDisplay`
Object that stores computed values.
"""
check_matplotlib_support(f"{self.__class__.__name__}.plot")
expected_kind = ("actual_vs_predicted", "residual_vs_predicted")
if kind not in expected_kind:
raise ValueError(
f"`kind` must be one of {', '.join(expected_kind)}. "
f"Got {kind!r} instead."
)
import matplotlib.pyplot as plt
if scatter_kwargs is None:
scatter_kwargs = {}
if line_kwargs is None:
line_kwargs = {}
default_scatter_kwargs = {"color": "tab:blue", "alpha": 0.8}
default_line_kwargs = {"color": "black", "alpha": 0.7, "linestyle": "--"}
scatter_kwargs = _validate_style_kwargs(default_scatter_kwargs, scatter_kwargs)
line_kwargs = _validate_style_kwargs(default_line_kwargs, line_kwargs)
scatter_kwargs = {**default_scatter_kwargs, **scatter_kwargs}
line_kwargs = {**default_line_kwargs, **line_kwargs}
if ax is None:
_, ax = plt.subplots()
if kind == "actual_vs_predicted":
max_value = max(np.max(self.y_true), np.max(self.y_pred))
min_value = min(np.min(self.y_true), np.min(self.y_pred))
self.line_ = ax.plot(
[min_value, max_value], [min_value, max_value], **line_kwargs
)[0]
x_data, y_data = self.y_pred, self.y_true
xlabel, ylabel = "Predicted values", "Actual values"
self.scatter_ = ax.scatter(x_data, y_data, **scatter_kwargs)
# force to have a squared axis
ax.set_aspect("equal", adjustable="datalim")
ax.set_xticks(np.linspace(min_value, max_value, num=5))
ax.set_yticks(np.linspace(min_value, max_value, num=5))
else: # kind == "residual_vs_predicted"
self.line_ = ax.plot(
[np.min(self.y_pred), np.max(self.y_pred)],
[0, 0],
**line_kwargs,
)[0]
self.scatter_ = ax.scatter(
self.y_pred, self.y_true - self.y_pred, **scatter_kwargs
)
xlabel, ylabel = "Predicted values", "Residuals (actual - predicted)"
ax.set(xlabel=xlabel, ylabel=ylabel)
self.ax_ = ax
self.figure_ = ax.figure
return self
@classmethod
def from_estimator(
cls,
estimator,
X,
y,
*,
kind="residual_vs_predicted",
subsample=1_000,
random_state=None,
ax=None,
scatter_kwargs=None,
line_kwargs=None,
):
"""Plot the prediction error given a regressor and some data.
For general information regarding `scikit-learn` visualization tools,
read more in the :ref:`Visualization Guide <visualizations>`.
For details regarding interpreting these plots, refer to the
:ref:`Model Evaluation Guide <visualization_regression_evaluation>`.
.. versionadded:: 1.2
Parameters
----------
estimator : estimator instance
Fitted regressor or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a regressor.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
kind : {"actual_vs_predicted", "residual_vs_predicted"}, \
default="residual_vs_predicted"
The type of plot to draw:
- "actual_vs_predicted" draws the observed values (y-axis) vs.
the predicted values (x-axis).
- "residual_vs_predicted" draws the residuals, i.e. difference
between observed and predicted values, (y-axis) vs. the predicted
values (x-axis).
subsample : float, int or None, default=1_000
Sampling the samples to be shown on the scatter plot. If `float`,
it should be between 0 and 1 and represents the proportion of the
original dataset. If `int`, it represents the number of samples
display on the scatter plot. If `None`, no subsampling will be
applied. by default, 1000 samples or less will be displayed.
random_state : int or RandomState, default=None
Controls the randomness when `subsample` is not `None`.
See :term:`Glossary <random_state>` for details.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
scatter_kwargs : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.scatter`
call.
line_kwargs : dict, default=None
Dictionary with keyword passed to the `matplotlib.pyplot.plot`
call to draw the optimal line.
Returns
-------
display : :class:`~sklearn.metrics.PredictionErrorDisplay`
Object that stores the computed values.
See Also
--------
PredictionErrorDisplay : Prediction error visualization for regression.
PredictionErrorDisplay.from_predictions : Prediction error visualization
given the true and predicted targets.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.linear_model import Ridge
>>> from sklearn.metrics import PredictionErrorDisplay
>>> X, y = load_diabetes(return_X_y=True)
>>> ridge = Ridge().fit(X, y)
>>> disp = PredictionErrorDisplay.from_estimator(ridge, X, y)
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_estimator")
y_pred = estimator.predict(X)
return cls.from_predictions(
y_true=y,
y_pred=y_pred,
kind=kind,
subsample=subsample,
random_state=random_state,
ax=ax,
scatter_kwargs=scatter_kwargs,
line_kwargs=line_kwargs,
)
@classmethod
def from_predictions(
cls,
y_true,
y_pred,
*,
kind="residual_vs_predicted",
subsample=1_000,
random_state=None,
ax=None,
scatter_kwargs=None,
line_kwargs=None,
):
"""Plot the prediction error given the true and predicted targets.
For general information regarding `scikit-learn` visualization tools,
read more in the :ref:`Visualization Guide <visualizations>`.
For details regarding interpreting these plots, refer to the
:ref:`Model Evaluation Guide <visualization_regression_evaluation>`.
.. versionadded:: 1.2
Parameters
----------
y_true : array-like of shape (n_samples,)
True target values.
y_pred : array-like of shape (n_samples,)
Predicted target values.
kind : {"actual_vs_predicted", "residual_vs_predicted"}, \
default="residual_vs_predicted"
The type of plot to draw:
- "actual_vs_predicted" draws the observed values (y-axis) vs.
the predicted values (x-axis).
- "residual_vs_predicted" draws the residuals, i.e. difference
between observed and predicted values, (y-axis) vs. the predicted
values (x-axis).
subsample : float, int or None, default=1_000
Sampling the samples to be shown on the scatter plot. If `float`,
it should be between 0 and 1 and represents the proportion of the
original dataset. If `int`, it represents the number of samples
display on the scatter plot. If `None`, no subsampling will be
applied. by default, 1000 samples or less will be displayed.
random_state : int or RandomState, default=None
Controls the randomness when `subsample` is not `None`.
See :term:`Glossary <random_state>` for details.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
scatter_kwargs : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.scatter`
call.
line_kwargs : dict, default=None
Dictionary with keyword passed to the `matplotlib.pyplot.plot`
call to draw the optimal line.
Returns
-------
display : :class:`~sklearn.metrics.PredictionErrorDisplay`
Object that stores the computed values.
See Also
--------
PredictionErrorDisplay : Prediction error visualization for regression.
PredictionErrorDisplay.from_estimator : Prediction error visualization
given an estimator and some data.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.linear_model import Ridge
>>> from sklearn.metrics import PredictionErrorDisplay
>>> X, y = load_diabetes(return_X_y=True)
>>> ridge = Ridge().fit(X, y)
>>> y_pred = ridge.predict(X)
>>> disp = PredictionErrorDisplay.from_predictions(y_true=y, y_pred=y_pred)
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_predictions")
random_state = check_random_state(random_state)
n_samples = len(y_true)
if isinstance(subsample, numbers.Integral):
if subsample <= 0:
raise ValueError(
f"When an integer, subsample={subsample} should be positive."
)
elif isinstance(subsample, numbers.Real):
if subsample <= 0 or subsample >= 1:
raise ValueError(
f"When a floating-point, subsample={subsample} should"
" be in the (0, 1) range."
)
subsample = int(n_samples * subsample)
if subsample is not None and subsample < n_samples:
indices = random_state.choice(np.arange(n_samples), size=subsample)
y_true = _safe_indexing(y_true, indices, axis=0)
y_pred = _safe_indexing(y_pred, indices, axis=0)
viz = cls(
y_true=y_true,
y_pred=y_pred,
)
return viz.plot(
ax=ax,
kind=kind,
scatter_kwargs=scatter_kwargs,
line_kwargs=line_kwargs,
)
| PredictionErrorDisplay |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/ViewBox.py | {
"start": 845,
"end": 2503
} | class ____(QtWidgets.QGraphicsRectItem):
def __init__(self, *args):
QtWidgets.QGraphicsRectItem.__init__(self, *args)
self.setAcceptHoverEvents(True)
def hoverEnterEvent(self, ev):
self.savedPen = self.pen()
self.setPen(pg.mkPen(255, 255, 255))
ev.ignore()
def hoverLeaveEvent(self, ev):
self.setPen(self.savedPen)
ev.ignore()
def mousePressEvent(self, ev):
if ev.button() == QtCore.Qt.MouseButton.LeftButton:
ev.accept()
self.pressDelta = self.mapToParent(ev.pos()) - self.pos()
else:
ev.ignore()
def mouseMoveEvent(self, ev):
self.setPos(self.mapToParent(ev.pos()) - self.pressDelta)
rect = movableRect(QtCore.QRectF(0, 0, 1, 1))
rect.setPen(pg.mkPen(100, 200, 100))
vb.addItem(rect)
l.addItem(vb, 0, 1)
gv.centralWidget.setLayout(l)
xScale = pg.AxisItem(orientation='bottom', linkView=vb)
l.addItem(xScale, 1, 1)
yScale = pg.AxisItem(orientation='left', linkView=vb)
l.addItem(yScale, 0, 0)
xScale.setLabel(text="<span style='color: #ff0000; font-weight: bold'>X</span> <i>Axis</i>", units="s")
yScale.setLabel('Y Axis', units='V')
def rand(n):
data = np.random.random(n)
data[int(n*0.1):int(n*0.13)] += .5
data[int(n*0.18)] += 2
data[int(n*0.1):int(n*0.13)] *= 5
data[int(n*0.18)] *= 20
return data, np.arange(n, n+len(data)) / float(n)
def updateData():
yd, xd = rand(10000)
p1.setData(y=yd, x=xd)
yd, xd = rand(10000)
updateData()
vb.autoRange()
t = QtCore.QTimer()
t.timeout.connect(updateData)
t.start(50)
if __name__ == '__main__':
pg.exec()
| movableRect |
python | nedbat__coveragepy | tests/test_execfile.py | {
"start": 755,
"end": 6811
} | class ____(CoverageTest):
"""Test cases for `run_python_file`."""
@pytest.fixture(autouse=True)
def clean_up(self) -> Iterator[None]:
"""These tests all run in-process. Clean up global changes."""
yield
sys.excepthook = sys.__excepthook__
def test_run_python_file(self) -> None:
run_python_file([TRY_EXECFILE, "arg1", "arg2"])
mod_globs = json.loads(self.stdout())
# The file should think it is __main__
assert mod_globs["__name__"] == "__main__"
# It should seem to come from a file named try_execfile.py
dunder_file = os.path.basename(mod_globs["__file__"])
assert dunder_file == "try_execfile.py"
# It should have its correct module data.
assert mod_globs["__doc__"].splitlines()[0] == "Test file for run_python_file."
assert mod_globs["DATA"] == "xyzzy"
assert mod_globs["FN_VAL"] == "my_fn('fooey')"
# It must be self-importable as __main__.
assert mod_globs["__main__.DATA"] == "xyzzy"
# Argv should have the proper values.
assert mod_globs["argv0"] == TRY_EXECFILE
assert mod_globs["argv1-n"] == ["arg1", "arg2"]
# __builtins__ should have the right values, like open().
assert mod_globs["__builtins__.has_open"] is True
def test_no_extra_file(self) -> None:
# Make sure that running a file doesn't create an extra compiled file.
self.make_file(
"xxx",
"""\
desc = "a non-.py file!"
""",
)
assert os.listdir(".") == ["xxx"]
run_python_file(["xxx"])
assert os.listdir(".") == ["xxx"]
def test_universal_newlines(self) -> None:
# Make sure we can read any sort of line ending.
pylines = """# try newlines|print('Hello, world!')|""".split("|")
for nl in ["\n", "\r\n", "\r"]:
with open("nl.py", "wb") as fpy:
fpy.write(nl.join(pylines).encode("utf-8"))
run_python_file(["nl.py"])
assert self.stdout() == "Hello, world!\n" * 3
def test_missing_final_newline(self) -> None:
# Make sure we can deal with a Python file with no final newline.
self.make_file(
"abrupt.py",
"""\
if 1:
a = 1
print(f"a is {a!r}")
#""",
)
with open("abrupt.py", encoding="utf-8") as f:
abrupt = f.read()
assert abrupt[-1] == "#"
run_python_file(["abrupt.py"])
assert self.stdout() == "a is 1\n"
def test_no_such_file(self) -> None:
path = python_reported_file("xyzzy.py")
msg = re.escape(f"No file to run: '{path}'")
with pytest.raises(NoSource, match=msg):
run_python_file(["xyzzy.py"])
def test_directory_with_main(self) -> None:
self.make_file(
"with_main/__main__.py",
"""\
print("I am __main__")
""",
)
run_python_file(["with_main"])
assert self.stdout() == "I am __main__\n"
def test_directory_without_main(self) -> None:
self.make_file("without_main/__init__.py", "")
with pytest.raises(NoSource, match="Can't find '__main__' module in 'without_main'"):
run_python_file(["without_main"])
def test_code_throws(self) -> None:
self.make_file(
"throw.py",
"""\
class MyException(Exception):
pass
def f1():
print("about to raise..")
raise MyException("hey!")
def f2():
f1()
f2()
""",
)
with pytest.raises(SystemExit) as exc_info:
run_python_file(["throw.py"])
assert exc_info.value.args == (1,)
assert self.stdout() == "about to raise..\n"
assert self.stderr() == ""
def test_code_exits(self) -> None:
self.make_file(
"exit.py",
"""\
import sys
def f1():
print("about to exit..")
sys.exit(17)
def f2():
f1()
f2()
""",
)
with pytest.raises(SystemExit) as exc_info:
run_python_file(["exit.py"])
assert exc_info.value.args == (17,)
assert self.stdout() == "about to exit..\n"
assert self.stderr() == ""
def test_excepthook_exit(self) -> None:
self.make_file(
"excepthook_exit.py",
"""\
import sys
def excepthook(*args):
print('in excepthook')
sys.exit(0)
sys.excepthook = excepthook
raise RuntimeError('Error Outside')
""",
)
with pytest.raises(SystemExit):
run_python_file(["excepthook_exit.py"])
cov_out = self.stdout()
assert cov_out == "in excepthook\n"
def test_excepthook_throw(self) -> None:
self.make_file(
"excepthook_throw.py",
"""\
import sys
def excepthook(*args):
# Write this message to stderr so that we don't have to deal
# with interleaved stdout/stderr comparisons in the assertions
# in the test.
sys.stderr.write('in excepthook\\n')
raise RuntimeError('Error Inside')
sys.excepthook = excepthook
raise RuntimeError('Error Outside')
""",
)
with pytest.raises(_ExceptionDuringRun) as exc_info:
run_python_file(["excepthook_throw.py"])
# The _ExceptionDuringRun exception has the RuntimeError as its argument.
assert exc_info.value.args[1].args[0] == "Error Outside"
stderr = self.stderr()
assert "in excepthook\n" in stderr
assert "Error in sys.excepthook:\n" in stderr
assert "RuntimeError: Error Inside" in stderr
| RunFileTest |
python | django-haystack__django-haystack | test_haystack/test_indexes.py | {
"start": 426,
"end": 654
} | class ____(indexes.SearchIndex, indexes.Indexable):
author = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
def get_model(self):
return MockModel
| BadSearchIndex1 |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/widgets/base.py | {
"start": 33342,
"end": 33648
} | class ____:
"""
A simple vertical line with a width of 1.
"""
def __init__(self) -> None:
self.window = Window(
char=Border.VERTICAL, style="class:line,vertical-line", width=1
)
def __pt_container__(self) -> Container:
return self.window
| VerticalLine |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_on_conflict.py | {
"start": 531,
"end": 19318
} | class ____(fixtures.TablesTest):
__only_on__ = ("sqlite >= 3.24.0",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
)
Table(
"users_w_key",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(50), key="name_keyed"),
)
class SpecialType(sqltypes.TypeDecorator):
impl = String
cache_ok = True
def process_bind_param(self, value, dialect):
return value + " processed"
Table(
"bind_targets",
metadata,
Column("id", Integer, primary_key=True),
Column("data", SpecialType()),
)
users_xtra = Table(
"users_xtra",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
Column("login_email", String(50)),
Column("lets_index_this", String(50)),
)
cls.unique_partial_index = schema.Index(
"idx_unique_partial_name",
users_xtra.c.name,
users_xtra.c.lets_index_this,
unique=True,
sqlite_where=users_xtra.c.lets_index_this == "unique_name",
)
cls.unique_constraint = schema.UniqueConstraint(
users_xtra.c.login_email, name="uq_login_email"
)
cls.bogus_index = schema.Index(
"idx_special_ops",
users_xtra.c.lets_index_this,
sqlite_where=users_xtra.c.lets_index_this > "m",
)
def test_bad_args(self):
with expect_raises(ValueError):
insert(self.tables.users).on_conflict_do_update()
def test_on_conflict_do_no_call_twice(self):
users = self.tables.users
for stmt in (
insert(users).on_conflict_do_nothing(),
insert(users).on_conflict_do_update(
index_elements=[users.c.id], set_=dict(name="foo")
),
):
for meth in (
stmt.on_conflict_do_nothing,
stmt.on_conflict_do_update,
):
with testing.expect_raises_message(
exc.InvalidRequestError,
"This Insert construct already has an "
"ON CONFLICT clause established",
):
meth()
def test_on_conflict_do_nothing(self, connection):
users = self.tables.users
conn = connection
result = conn.execute(
insert(users).on_conflict_do_nothing(),
dict(id=1, name="name1"),
)
eq_(result.inserted_primary_key, (1,))
result = conn.execute(
insert(users).on_conflict_do_nothing(),
dict(id=1, name="name2"),
)
eq_(result.inserted_primary_key, (1,))
eq_(
conn.execute(users.select().where(users.c.id == 1)).fetchall(),
[(1, "name1")],
)
def test_on_conflict_do_nothing_connectionless(self, connection):
users = self.tables.users_xtra
result = connection.execute(
insert(users).on_conflict_do_nothing(
index_elements=["login_email"]
),
dict(name="name1", login_email="email1"),
)
eq_(result.inserted_primary_key, (1,))
result = connection.execute(
insert(users).on_conflict_do_nothing(
index_elements=["login_email"]
),
dict(name="name2", login_email="email1"),
)
eq_(result.inserted_primary_key, (1,))
eq_(
connection.execute(
users.select().where(users.c.id == 1)
).fetchall(),
[(1, "name1", "email1", None)],
)
@testing.provide_metadata
def test_on_conflict_do_nothing_target(self, connection):
users = self.tables.users
conn = connection
result = conn.execute(
insert(users).on_conflict_do_nothing(
index_elements=users.primary_key.columns
),
dict(id=1, name="name1"),
)
eq_(result.inserted_primary_key, (1,))
result = conn.execute(
insert(users).on_conflict_do_nothing(
index_elements=users.primary_key.columns
),
dict(id=1, name="name2"),
)
eq_(result.inserted_primary_key, (1,))
eq_(
conn.execute(users.select().where(users.c.id == 1)).fetchall(),
[(1, "name1")],
)
@testing.combinations(
("with_dict", True),
("issue_5939", False),
id_="ia",
argnames="with_dict",
)
def test_on_conflict_do_update_one(self, connection, with_dict):
users = self.tables.users
conn = connection
conn.execute(users.insert(), dict(id=1, name="name1"))
i = insert(users)
i = i.on_conflict_do_update(
index_elements=[users.c.id],
set_=dict(name=i.excluded.name) if with_dict else i.excluded,
)
result = conn.execute(i, dict(id=1, name="name1"))
eq_(result.inserted_primary_key, (1,))
eq_(
conn.execute(users.select().where(users.c.id == 1)).fetchall(),
[(1, "name1")],
)
def test_on_conflict_do_update_two(self, connection):
users = self.tables.users
conn = connection
conn.execute(users.insert(), dict(id=1, name="name1"))
i = insert(users)
i = i.on_conflict_do_update(
index_elements=[users.c.id],
set_=dict(id=i.excluded.id, name=i.excluded.name),
)
result = conn.execute(i, dict(id=1, name="name2"))
eq_(result.inserted_primary_key, (1,))
eq_(
conn.execute(users.select().where(users.c.id == 1)).fetchall(),
[(1, "name2")],
)
def test_on_conflict_do_update_three(self, connection):
users = self.tables.users
conn = connection
conn.execute(users.insert(), dict(id=1, name="name1"))
i = insert(users)
i = i.on_conflict_do_update(
index_elements=users.primary_key.columns,
set_=dict(name=i.excluded.name),
)
result = conn.execute(i, dict(id=1, name="name3"))
eq_(result.inserted_primary_key, (1,))
eq_(
conn.execute(users.select().where(users.c.id == 1)).fetchall(),
[(1, "name3")],
)
def test_on_conflict_do_update_four(self, connection):
users = self.tables.users
conn = connection
conn.execute(users.insert(), dict(id=1, name="name1"))
i = insert(users)
i = i.on_conflict_do_update(
index_elements=users.primary_key.columns,
set_=dict(id=i.excluded.id, name=i.excluded.name),
).values(id=1, name="name4")
result = conn.execute(i)
eq_(result.inserted_primary_key, (1,))
eq_(
conn.execute(users.select().where(users.c.id == 1)).fetchall(),
[(1, "name4")],
)
def test_on_conflict_do_update_five(self, connection):
users = self.tables.users
conn = connection
conn.execute(users.insert(), dict(id=1, name="name1"))
i = insert(users)
i = i.on_conflict_do_update(
index_elements=users.primary_key.columns,
set_=dict(id=10, name="I'm a name"),
).values(id=1, name="name4")
result = conn.execute(i)
eq_(result.inserted_primary_key, (1,))
eq_(
conn.execute(users.select().where(users.c.id == 10)).fetchall(),
[(10, "I'm a name")],
)
def test_on_conflict_do_update_column_keys(self, connection):
users = self.tables.users
conn = connection
conn.execute(users.insert(), dict(id=1, name="name1"))
i = insert(users)
i = i.on_conflict_do_update(
index_elements=users.primary_key.columns,
set_={users.c.id: 10, users.c.name: "I'm a name"},
).values(id=1, name="name4")
result = conn.execute(i)
eq_(result.inserted_primary_key, (1,))
eq_(
conn.execute(users.select().where(users.c.id == 10)).fetchall(),
[(10, "I'm a name")],
)
def test_on_conflict_do_update_clauseelem_keys(self, connection):
users = self.tables.users
class MyElem:
def __init__(self, expr):
self.expr = expr
def __clause_element__(self):
return self.expr
conn = connection
conn.execute(users.insert(), dict(id=1, name="name1"))
i = insert(users)
i = i.on_conflict_do_update(
index_elements=users.primary_key.columns,
set_={MyElem(users.c.id): 10, MyElem(users.c.name): "I'm a name"},
).values({MyElem(users.c.id): 1, MyElem(users.c.name): "name4"})
result = conn.execute(i)
eq_(result.inserted_primary_key, (1,))
eq_(
conn.execute(users.select().where(users.c.id == 10)).fetchall(),
[(10, "I'm a name")],
)
def test_on_conflict_do_update_multivalues(self, connection):
users = self.tables.users
conn = connection
conn.execute(users.insert(), dict(id=1, name="name1"))
conn.execute(users.insert(), dict(id=2, name="name2"))
i = insert(users)
i = i.on_conflict_do_update(
index_elements=users.primary_key.columns,
set_=dict(name="updated"),
where=(i.excluded.name != "name12"),
).values(
[
dict(id=1, name="name11"),
dict(id=2, name="name12"),
dict(id=3, name="name13"),
dict(id=4, name="name14"),
]
)
result = conn.execute(i)
eq_(result.inserted_primary_key, (None,))
eq_(
conn.execute(users.select().order_by(users.c.id)).fetchall(),
[(1, "updated"), (2, "name2"), (3, "name13"), (4, "name14")],
)
def _exotic_targets_fixture(self, conn):
users = self.tables.users_xtra
conn.execute(
insert(users),
dict(
id=1,
name="name1",
login_email="name1@gmail.com",
lets_index_this="not",
),
)
conn.execute(
users.insert(),
dict(
id=2,
name="name2",
login_email="name2@gmail.com",
lets_index_this="not",
),
)
eq_(
conn.execute(users.select().where(users.c.id == 1)).fetchall(),
[(1, "name1", "name1@gmail.com", "not")],
)
def test_on_conflict_do_update_exotic_targets_two(self, connection):
users = self.tables.users_xtra
conn = connection
self._exotic_targets_fixture(conn)
# try primary key constraint: cause an upsert on unique id column
i = insert(users)
i = i.on_conflict_do_update(
index_elements=users.primary_key.columns,
set_=dict(
name=i.excluded.name, login_email=i.excluded.login_email
),
)
result = conn.execute(
i,
dict(
id=1,
name="name2",
login_email="name1@gmail.com",
lets_index_this="not",
),
)
eq_(result.inserted_primary_key, (1,))
eq_(
conn.execute(users.select().where(users.c.id == 1)).fetchall(),
[(1, "name2", "name1@gmail.com", "not")],
)
def test_on_conflict_do_update_exotic_targets_three(self, connection):
users = self.tables.users_xtra
conn = connection
self._exotic_targets_fixture(conn)
# try unique constraint: cause an upsert on target
# login_email, not id
i = insert(users)
i = i.on_conflict_do_update(
index_elements=["login_email"],
set_=dict(
id=i.excluded.id,
name=i.excluded.name,
login_email=i.excluded.login_email,
),
)
# note: lets_index_this value totally ignored in SET clause.
result = conn.execute(
i,
dict(
id=42,
name="nameunique",
login_email="name2@gmail.com",
lets_index_this="unique",
),
)
eq_(result.inserted_primary_key, (42,))
eq_(
conn.execute(
users.select().where(users.c.login_email == "name2@gmail.com")
).fetchall(),
[(42, "nameunique", "name2@gmail.com", "not")],
)
def test_on_conflict_do_update_exotic_targets_four(self, connection):
users = self.tables.users_xtra
conn = connection
self._exotic_targets_fixture(conn)
# try unique constraint by name: cause an
# upsert on target login_email, not id
i = insert(users)
i = i.on_conflict_do_update(
index_elements=["login_email"],
set_=dict(
id=i.excluded.id,
name=i.excluded.name,
login_email=i.excluded.login_email,
),
)
# note: lets_index_this value totally ignored in SET clause.
result = conn.execute(
i,
dict(
id=43,
name="nameunique2",
login_email="name2@gmail.com",
lets_index_this="unique",
),
)
eq_(result.inserted_primary_key, (43,))
eq_(
conn.execute(
users.select().where(users.c.login_email == "name2@gmail.com")
).fetchall(),
[(43, "nameunique2", "name2@gmail.com", "not")],
)
def test_on_conflict_do_update_exotic_targets_four_no_pk(self, connection):
users = self.tables.users_xtra
conn = connection
self._exotic_targets_fixture(conn)
# try unique constraint by name: cause an
# upsert on target login_email, not id
i = insert(users)
i = i.on_conflict_do_update(
index_elements=[users.c.login_email],
set_=dict(
id=i.excluded.id,
name=i.excluded.name,
login_email=i.excluded.login_email,
),
)
conn.execute(i, dict(name="name3", login_email="name1@gmail.com"))
eq_(
conn.execute(users.select().where(users.c.id == 1)).fetchall(),
[],
)
eq_(
conn.execute(users.select().order_by(users.c.id)).fetchall(),
[
(2, "name2", "name2@gmail.com", "not"),
(3, "name3", "name1@gmail.com", "not"),
],
)
def test_on_conflict_do_update_exotic_targets_five(self, connection):
users = self.tables.users_xtra
conn = connection
self._exotic_targets_fixture(conn)
# try bogus index
i = insert(users)
i = i.on_conflict_do_update(
index_elements=self.bogus_index.columns,
index_where=self.bogus_index.dialect_options["sqlite"]["where"],
set_=dict(
name=i.excluded.name, login_email=i.excluded.login_email
),
)
assert_raises(
exc.OperationalError,
conn.execute,
i,
dict(
id=1,
name="namebogus",
login_email="bogus@gmail.com",
lets_index_this="bogus",
),
)
def test_on_conflict_do_update_exotic_targets_six(self, connection):
users = self.tables.users_xtra
conn = connection
conn.execute(
insert(users),
dict(
id=1,
name="name1",
login_email="mail1@gmail.com",
lets_index_this="unique_name",
),
)
i = insert(users)
i = i.on_conflict_do_update(
index_elements=self.unique_partial_index.columns,
index_where=self.unique_partial_index.dialect_options["sqlite"][
"where"
],
set_=dict(
name=i.excluded.name, login_email=i.excluded.login_email
),
)
conn.execute(
i,
[
dict(
name="name1",
login_email="mail2@gmail.com",
lets_index_this="unique_name",
)
],
)
eq_(
conn.execute(users.select()).fetchall(),
[(1, "name1", "mail2@gmail.com", "unique_name")],
)
def test_on_conflict_do_update_no_row_actually_affected(self, connection):
users = self.tables.users_xtra
conn = connection
self._exotic_targets_fixture(conn)
i = insert(users)
i = i.on_conflict_do_update(
index_elements=[users.c.login_email],
set_=dict(name="new_name"),
where=(i.excluded.name == "other_name"),
)
result = conn.execute(
i, dict(name="name2", login_email="name1@gmail.com")
)
# The last inserted primary key should be 2 here
# it is taking the result from the exotic fixture
eq_(result.inserted_primary_key, (2,))
eq_(
conn.execute(users.select()).fetchall(),
[
(1, "name1", "name1@gmail.com", "not"),
(2, "name2", "name2@gmail.com", "not"),
],
)
def test_on_conflict_do_update_special_types_in_set(self, connection):
bind_targets = self.tables.bind_targets
conn = connection
i = insert(bind_targets)
conn.execute(i, {"id": 1, "data": "initial data"})
eq_(
conn.scalar(sql.select(bind_targets.c.data)),
"initial data processed",
)
i = insert(bind_targets)
i = i.on_conflict_do_update(
index_elements=[bind_targets.c.id],
set_=dict(data="new updated data"),
)
conn.execute(i, {"id": 1, "data": "new inserted data"})
eq_(
conn.scalar(sql.select(bind_targets.c.data)),
"new updated data processed",
)
| OnConflictTest |
python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {
"start": 21024,
"end": 22412
} | class ____(Request):
"""
Adds a task entry to the queue.
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "add_task"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"queue": {"description": "Queue id", "type": "string"},
"task": {"description": "Task id", "type": "string"},
},
"required": ["queue", "task"],
"type": "object",
}
def __init__(self, queue: str, task: str, **kwargs: Any) -> None:
super(AddTaskRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| AddTaskRequest |
python | scipy__scipy | scipy/signal/tests/test_signaltools.py | {
"start": 192850,
"end": 196640
} | class ____:
def test_real_no_repeat(self, xp):
p = xp.asarray([-1.0, -0.5, 0.3, 1.2, 10.0])
unique, multiplicity = unique_roots(p)
assert_almost_equal(unique, p, decimal=15)
xp_assert_equal(multiplicity, xp.ones(len(p), dtype=int))
def test_real_repeat(self, xp):
p = xp.asarray([-1.0, -0.95, -0.89, -0.8, 0.5, 1.0, 1.05])
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min')
assert_almost_equal(unique, xp.asarray([-1.0, -0.89, 0.5, 1.0]), decimal=15)
xp_assert_equal(multiplicity, xp.asarray([2, 2, 1, 2]))
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max')
assert_almost_equal(unique, xp.asarray([-0.95, -0.8, 0.5, 1.05]), decimal=15)
xp_assert_equal(multiplicity, xp.asarray([2, 2, 1, 2]))
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg')
assert_almost_equal(unique, xp.asarray([-0.975, -0.845, 0.5, 1.025]),
decimal=15)
xp_assert_equal(multiplicity, xp.asarray([2, 2, 1, 2]))
def test_complex_no_repeat(self, xp):
p = xp.asarray([-1.0, 1.0j, 0.5 + 0.5j, -1.0 - 1.0j, 3.0 + 2.0j])
unique, multiplicity = unique_roots(p)
assert_almost_equal(unique, p, decimal=15)
xp_assert_equal(multiplicity, xp.ones(len(p), dtype=int))
def test_complex_repeat(self, xp):
p = xp.asarray([-1.0, -1.0 + 0.05j, -0.95 + 0.15j, -0.90 + 0.15j, 0.0,
0.5 + 0.5j, 0.45 + 0.55j])
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min')
assert_almost_equal(unique,
xp.asarray([-1.0, -0.95 + 0.15j, 0.0, 0.45 + 0.55j]),
decimal=15)
xp_assert_equal(multiplicity, xp.asarray([2, 2, 1, 2]))
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max')
assert_almost_equal(
unique,
xp.asarray(
[-1.0 + 0.05j, -0.90 + 0.15j, 0.0, 0.5 + 0.5j]
),
decimal=15,
)
xp_assert_equal(multiplicity, xp.asarray([2, 2, 1, 2]))
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg')
assert_almost_equal(
unique,
xp.asarray([-1.0 + 0.025j, -0.925 + 0.15j, 0.0, 0.475 + 0.525j]),
decimal=15,
)
xp_assert_equal(multiplicity, xp.asarray([2, 2, 1, 2]))
def test_gh_4915(self, xp):
p = xp.asarray(np.roots(np.convolve(np.ones(5), np.ones(5))))
true_roots = xp.asarray(
[-(-1)**(1/5), (-1)**(4/5), -(-1)**(3/5), (-1)**(2/5)]
)
unique, multiplicity = unique_roots(p)
unique = xp.sort(unique)
assert_almost_equal(xp.sort(unique), true_roots, decimal=7)
xp_assert_equal(multiplicity, xp.asarray([2, 2, 2, 2]))
def test_complex_roots_extra(self, xp):
unique, multiplicity = unique_roots(xp.asarray([1.0, 1.0j, 1.0]))
assert_almost_equal(unique, xp.asarray([1.0, 1.0j]), decimal=15)
xp_assert_equal(multiplicity, xp.asarray([2, 1]))
unique, multiplicity = unique_roots(
xp.asarray([1, 1 + 2e-9, 1e-9 + 1j]), tol=0.1
)
assert_almost_equal(unique, xp.asarray([1.0, 1e-9 + 1.0j]), decimal=15)
xp_assert_equal(multiplicity, xp.asarray([2, 1]))
def test_single_unique_root(self, xp):
p = xp.asarray(np.random.rand(100) + 1j * np.random.rand(100))
unique, multiplicity = unique_roots(p, 2)
assert_almost_equal(unique, xp.asarray([np.min(p)]), decimal=15)
xp_assert_equal(multiplicity, xp.asarray([100]))
def test_gh_22684():
actual = signal.resample_poly(np.arange(2000, dtype=np.complex64), 6, 4)
assert actual.dtype == np.complex64
| TestUniqueRoots |
python | getsentry__sentry | src/sentry/notifications/validators.py | {
"start": 1108,
"end": 1716
} | class ____(
UserNotificationSettingsOptionsDetailsSerializer
):
value = serializers.CharField()
def validate_value(self, value):
if value not in NOTIFICATION_SETTING_CHOICES:
raise serializers.ValidationError("Invalid value")
return value
def validate(self, data):
try:
enum_type = validate_type(data["type"])
validate_value(enum_type, data["value"])
except ParameterValidationError:
raise serializers.ValidationError("Invalid type for value")
return data
| UserNotificationSettingOptionWithValueSerializer |
python | plotly__plotly.py | plotly/graph_objs/_deprecations.py | {
"start": 7219,
"end": 8091
} | class ____(dict):
"""
plotly.graph_objs.ErrorY is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.scatter.ErrorY
- plotly.graph_objs.histogram.ErrorY
- etc.
"""
def __init__(self, *args, **kwargs):
"""
plotly.graph_objs.ErrorY is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.scatter.ErrorY
- plotly.graph_objs.histogram.ErrorY
- etc.
"""
warnings.warn(
"""plotly.graph_objs.ErrorY is deprecated.
Please replace it with one of the following more specific types
- plotly.graph_objs.scatter.ErrorY
- plotly.graph_objs.histogram.ErrorY
- etc.
""",
DeprecationWarning,
)
super().__init__(*args, **kwargs)
| ErrorY |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 45309,
"end": 45547
} | class ____(TestSubsets, TestCase):
left = OrderedSet()
right = OrderedSet()
name = "both empty"
cases = "==", "<=", ">="
# ------------------------------------------------------------------------------
| TestSubsetEqualEmpty |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mysqldb.py | {
"start": 3316,
"end": 3387
} | class ____(MySQLExecutionContext):
pass
| MySQLExecutionContext_mysqldb |
python | Pylons__pyramid | docs/quick_tutorial/databases/tutorial/models.py | {
"start": 378,
"end": 536
} | class ____(Base):
__tablename__ = 'wikipages'
uid = Column(Integer, primary_key=True)
title = Column(Text, unique=True)
body = Column(Text)
| Page |
python | zarr-developers__zarr-python | src/zarr/core/sync.py | {
"start": 6323,
"end": 7319
} | class ____:
def _sync(self, coroutine: Coroutine[Any, Any, T]) -> T:
# TODO: refactor this to to take *args and **kwargs and pass those to the method
# this should allow us to better type the sync wrapper
return sync(
coroutine,
timeout=config.get("async.timeout"),
)
def _sync_iter(self, async_iterator: AsyncIterator[T]) -> list[T]:
async def iter_to_list() -> list[T]:
return [item async for item in async_iterator]
return self._sync(iter_to_list())
async def _with_semaphore(
func: Callable[[], Awaitable[T]], semaphore: asyncio.Semaphore | None = None
) -> T:
"""
Await the result of invoking the no-argument-callable ``func`` within the context manager
provided by a Semaphore, if one is provided. Otherwise, just await the result of invoking
``func``.
"""
if semaphore is None:
return await func()
async with semaphore:
return await func()
| SyncMixin |
python | pytest-dev__pytest-django | tests/test_environment.py | {
"start": 8956,
"end": 13223
} | class ____:
"""Test that Django's code to setup and teardown the databases uses
pytest's verbosity level."""
@pytest.fixture
def pytester(self, django_pytester: DjangoPytester) -> pytest.Pytester:
django_pytester.create_test_module(
"""
import pytest
@pytest.mark.django_db
def test_inner_testrunner():
pass
"""
)
return django_pytester
def test_default(self, pytester: pytest.Pytester) -> None:
"""Not verbose by default."""
result = pytester.runpytest_subprocess("-s")
result.stdout.fnmatch_lines(["tpkg/test_the_test.py .*"])
def test_vq_verbosity_0(self, pytester: pytest.Pytester) -> None:
"""-v and -q results in verbosity 0."""
result = pytester.runpytest_subprocess("-s", "-v", "-q")
result.stdout.fnmatch_lines(["tpkg/test_the_test.py .*"])
def test_verbose_with_v(self, pytester: pytest.Pytester) -> None:
"""Verbose output with '-v'."""
result = pytester.runpytest_subprocess("-s", "-v")
result.stdout.fnmatch_lines_random(["tpkg/test_the_test.py:*", "*PASSED*"])
result.stderr.fnmatch_lines(["*Destroying test database for alias 'default'*"])
def test_more_verbose_with_vv(self, pytester: pytest.Pytester) -> None:
"""More verbose output with '-v -v'."""
result = pytester.runpytest_subprocess("-s", "-v", "-v")
result.stdout.fnmatch_lines_random(
[
"tpkg/test_the_test.py:*",
"*Operations to perform:*",
"*Apply all migrations:*",
"*PASSED*",
]
)
result.stderr.fnmatch_lines(
[
"*Creating test database for alias*",
"*Destroying test database for alias 'default'*",
]
)
def test_more_verbose_with_vv_and_reusedb(self, pytester: pytest.Pytester) -> None:
"""More verbose output with '-v -v', and --create-db."""
result = pytester.runpytest_subprocess("-s", "-v", "-v", "--create-db")
result.stdout.fnmatch_lines(["tpkg/test_the_test.py:*", "*PASSED*"])
result.stderr.fnmatch_lines(["*Creating test database for alias*"])
assert "*Destroying test database for alias 'default' ('*')...*" not in result.stderr.str()
@pytest.mark.django_db
@pytest.mark.parametrize("site_name", ["site1", "site2"])
def test_clear_site_cache(site_name: str, rf, monkeypatch: pytest.MonkeyPatch) -> None:
request = rf.get("/")
monkeypatch.setattr(request, "get_host", lambda: "foo.com")
Site.objects.create(domain="foo.com", name=site_name)
assert Site.objects.get_current(request=request).name == site_name
@pytest.mark.django_db
@pytest.mark.parametrize("site_name", ["site1", "site2"])
def test_clear_site_cache_check_site_cache_size(site_name: str, settings) -> None:
assert len(site_models.SITE_CACHE) == 0
site = Site.objects.create(domain="foo.com", name=site_name)
settings.SITE_ID = site.id
assert Site.objects.get_current() == site
assert len(site_models.SITE_CACHE) == 1
@pytest.mark.django_project(
project_root="django_project_root",
create_manage_py=True,
extra_settings="""
TEST_RUNNER = 'pytest_django.runner.TestRunner'
""",
)
def test_manage_test_runner(django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
import pytest
@pytest.mark.django_db
def test_inner_testrunner():
pass
"""
)
result = django_pytester.run(*[sys.executable, "django_project_root/manage.py", "test"])
assert "1 passed" in "\n".join(result.outlines)
@pytest.mark.django_project(
project_root="django_project_root",
create_manage_py=True,
)
def test_manage_test_runner_without(django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
import pytest
@pytest.mark.django_db
def test_inner_testrunner():
pass
"""
)
result = django_pytester.run(*[sys.executable, "django_project_root/manage.py", "test"])
assert "Found 0 test(s)." in "\n".join(result.outlines)
| TestrunnerVerbosity |
python | Pylons__pyramid | tests/test_paster.py | {
"start": 5906,
"end": 6131
} | class ____:
def __init__(self):
self.registry = dummy_registry
def make_dummyapp(global_conf, **settings):
app = DummyApp()
app.settings = settings
app.global_conf = global_conf
return app
| DummyApp |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datacatalog.py | {
"start": 17201,
"end": 18495
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogHook")
def test_assert_valid_hook_call(self, mock_hook) -> None:
with pytest.warns(AirflowProviderDeprecationWarning):
task = CloudDataCatalogDeleteEntryOperator(
task_id="task_id",
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
task.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_entry.assert_called_once_with(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestCloudDataCatalogDeleteEntryOperator |
python | huggingface__transformers | src/transformers/models/deepseek_vl/processing_deepseek_vl.py | {
"start": 1619,
"end": 7355
} | class ____(ProcessorMixin):
r"""
Constructs a DeepseekVL processor which wraps a DeepseekVL Image Processor and a Llama tokenizer into a single processor.
[`DeepseekVLProcessor`] offers all the functionalities of [`DeepseekVLImageProcessor`] and [`LlamaTokenizerFast`]. See the
[`~DeepseekVLProcessor.__call__`] and [`~DeepseekVLProcessor.decode`] for more information.
Args:
image_processor ([`DeepseekVLImageProcessor`]):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`]):
The tokenizer is a required input.
chat_template (`str`, *optional*):
A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
num_image_tokens (`int`, *optional*, defaults to 576):
The number of special image tokens used as placeholders for visual content in text sequences.
"""
def __init__(
self,
image_processor,
tokenizer,
chat_template=None,
num_image_tokens=576,
):
self.image_token = tokenizer.image_token
self.num_image_tokens = num_image_tokens
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
images: Optional[ImageInput] = None,
**kwargs: Unpack[DeepseekVLProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
DeepseekVLImageProcessor's [`~DeepseekVLImageProcessor.__call__`] if `images` is not `None`. Please refer to the doctsring
of the above two methods for more information.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
DeepseekVLProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs
)
if text is None and images is None:
raise ValueError("You must specify either text or images.")
if text is not None:
if isinstance(text, str):
text = [text]
elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
prompt_strings = []
one_img_tokens = self.image_token * self.num_image_tokens
for prompt in text:
prompt = prompt.replace(self.image_token, one_img_tokens)
prompt_strings.append(prompt)
data = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
# process images if pixel_values are provided
if images is not None:
data["pixel_values"] = self.image_processor(images, **output_kwargs["images_kwargs"])["pixel_values"]
return BatchFeature(data=data)
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
__all__ = ["DeepseekVLProcessor"]
| DeepseekVLProcessor |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0040_remove_old_jsonfields.py | {
"start": 120,
"end": 538
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0039_migrate_config_data"),
]
operations = [
migrations.RemoveField(
model_name="build",
name="_config",
),
migrations.RenameField(
model_name="build",
old_name="_config_json",
new_name="_config",
),
]
| Migration |
python | marshmallow-code__apispec | src/apispec/core.py | {
"start": 843,
"end": 15372
} | class ____:
"""Stores OpenAPI components
Components are top-level fields in OAS v2.
They became sub-fields of "components" top-level field in OAS v3.
"""
def __init__(
self,
plugins: Sequence[BasePlugin],
openapi_version: Version,
) -> None:
self._plugins = plugins
self.openapi_version = openapi_version
self.schemas: dict[str, dict] = {}
self.responses: dict[str, dict] = {}
self.parameters: dict[str, dict] = {}
self.headers: dict[str, dict] = {}
self.examples: dict[str, dict] = {}
self.security_schemes: dict[str, dict] = {}
self.schemas_lazy: dict[str, dict] = {}
self.responses_lazy: dict[str, dict] = {}
self.parameters_lazy: dict[str, dict] = {}
self.headers_lazy: dict[str, dict] = {}
self.examples_lazy: dict[str, dict] = {}
self._subsections = {
"schema": self.schemas,
"response": self.responses,
"parameter": self.parameters,
"header": self.headers,
"example": self.examples,
"security_scheme": self.security_schemes,
}
self._subsections_lazy = {
"schema": self.schemas_lazy,
"response": self.responses_lazy,
"parameter": self.parameters_lazy,
"header": self.headers_lazy,
"example": self.examples_lazy,
}
def to_dict(self) -> dict[str, dict]:
return {
COMPONENT_SUBSECTIONS[self.openapi_version.major][k]: v
for k, v in self._subsections.items()
if v != {}
}
def _register_component(
self,
obj_type: str,
component_id: str,
component: dict,
*,
lazy: bool = False,
) -> None:
subsection = (self._subsections if lazy is False else self._subsections_lazy)[
obj_type
]
subsection[component_id] = component
def _do_register_lazy_component(
self,
obj_type: str,
component_id: str,
) -> None:
component_buffer = self._subsections_lazy[obj_type]
# If component was lazy registered, register it for real
if component_id in component_buffer:
self._subsections[obj_type][component_id] = component_buffer.pop(
component_id
)
def get_ref(
self,
obj_type: str,
obj_or_component_id: dict | str,
) -> dict:
"""Return object or reference
If obj is a dict, it is assumed to be a complete description and it is returned as is.
Otherwise, it is assumed to be a reference name as string and the corresponding $ref
string is returned.
:param str subsection: "schema", "parameter", "response" or "security_scheme"
:param dict|str obj: object in dict form or as ref_id string
"""
if isinstance(obj_or_component_id, dict):
return obj_or_component_id
# Register the component if it was lazy registered
self._do_register_lazy_component(obj_type, obj_or_component_id)
return build_reference(
obj_type, self.openapi_version.major, obj_or_component_id
)
def schema(
self,
component_id: str,
component: dict | None = None,
*,
lazy: bool = False,
**kwargs: typing.Any,
) -> Components:
"""Add a new schema to the spec.
:param str component_id: identifier by which schema may be referenced
:param dict component: schema definition
:param bool lazy: register component only when referenced in the spec
:param kwargs: plugin-specific arguments
.. note::
If you are using `apispec.ext.marshmallow`, you can pass fields' metadata as
additional keyword arguments.
For example, to add ``enum`` and ``description`` to your field: ::
status = fields.String(
required=True,
metadata={
"description": "Status (open or closed)",
"enum": ["open", "closed"],
},
)
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject
"""
if component_id in self.schemas:
raise DuplicateComponentNameError(
f'Another schema with name "{component_id}" is already registered.'
)
ret = deepcopy(component) or {}
# Execute all helpers from plugins
for plugin in self._plugins:
try:
ret.update(plugin.schema_helper(component_id, ret, **kwargs) or {})
except PluginMethodNotImplementedError:
continue
self._resolve_refs_in_schema(ret)
self._register_component("schema", component_id, ret, lazy=lazy)
return self
def response(
self,
component_id: str,
component: dict | None = None,
*,
lazy: bool = False,
**kwargs: typing.Any,
) -> Components:
"""Add a response which can be referenced.
:param str component_id: ref_id to use as reference
:param dict component: response fields
:param bool lazy: register component only when referenced in the spec
:param kwargs: plugin-specific arguments
"""
if component_id in self.responses:
raise DuplicateComponentNameError(
f'Another response with name "{component_id}" is already registered.'
)
ret = deepcopy(component) or {}
# Execute all helpers from plugins
for plugin in self._plugins:
try:
ret.update(plugin.response_helper(ret, **kwargs) or {})
except PluginMethodNotImplementedError:
continue
self._resolve_refs_in_response(ret)
self._register_component("response", component_id, ret, lazy=lazy)
return self
def parameter(
self,
component_id: str,
location: str,
component: dict | None = None,
*,
lazy: bool = False,
**kwargs: typing.Any,
) -> Components:
"""Add a parameter which can be referenced.
:param str component_id: identifier by which parameter may be referenced
:param str location: location of the parameter
:param dict component: parameter fields
:param bool lazy: register component only when referenced in the spec
:param kwargs: plugin-specific arguments
"""
if component_id in self.parameters:
raise DuplicateComponentNameError(
f'Another parameter with name "{component_id}" is already registered.'
)
ret = deepcopy(component) or {}
ret.setdefault("name", component_id)
ret["in"] = location
# if "in" is set to "path", enforce required flag to True
if location == "path":
ret["required"] = True
# Execute all helpers from plugins
for plugin in self._plugins:
try:
ret.update(plugin.parameter_helper(ret, **kwargs) or {})
except PluginMethodNotImplementedError:
continue
self._resolve_refs_in_parameter_or_header(ret)
self._register_component("parameter", component_id, ret, lazy=lazy)
return self
def header(
self,
component_id: str,
component: dict,
*,
lazy: bool = False,
**kwargs: typing.Any,
) -> Components:
"""Add a header which can be referenced.
:param str component_id: identifier by which header may be referenced
:param dict component: header fields
:param bool lazy: register component only when referenced in the spec
:param kwargs: plugin-specific arguments
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#headerObject
"""
ret = deepcopy(component) or {}
if component_id in self.headers:
raise DuplicateComponentNameError(
f'Another header with name "{component_id}" is already registered.'
)
# Execute all helpers from plugins
for plugin in self._plugins:
try:
ret.update(plugin.header_helper(ret, **kwargs) or {})
except PluginMethodNotImplementedError:
continue
self._resolve_refs_in_parameter_or_header(ret)
self._register_component("header", component_id, ret, lazy=lazy)
return self
def example(
self, component_id: str, component: dict, *, lazy: bool = False
) -> Components:
"""Add an example which can be referenced
:param str component_id: identifier by which example may be referenced
:param dict component: example fields
:param bool lazy: register component only when referenced in the spec
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.1.md#exampleObject
"""
if component_id in self.examples:
raise DuplicateComponentNameError(
f'Another example with name "{component_id}" is already registered.'
)
self._register_component("example", component_id, component, lazy=lazy)
return self
def security_scheme(self, component_id: str, component: dict) -> Components:
"""Add a security scheme which can be referenced.
:param str component_id: component_id to use as reference
:param dict component: security scheme fields
"""
if component_id in self.security_schemes:
raise DuplicateComponentNameError(
f'Another security scheme with name "{component_id}" is already registered.'
)
self._register_component("security_scheme", component_id, component)
return self
def _resolve_schema(self, obj) -> None:
"""Replace schema reference as string with a $ref if needed
Also resolve references in the schema
"""
if "schema" in obj:
obj["schema"] = self.get_ref("schema", obj["schema"])
self._resolve_refs_in_schema(obj["schema"])
def _resolve_examples(self, obj) -> None:
"""Replace example reference as string with a $ref"""
for name, example in obj.get("examples", {}).items():
obj["examples"][name] = self.get_ref("example", example)
def _resolve_refs_in_schema(self, schema: dict) -> None:
if "properties" in schema:
for key in schema["properties"]:
schema["properties"][key] = self.get_ref(
"schema", schema["properties"][key]
)
self._resolve_refs_in_schema(schema["properties"][key])
if "items" in schema:
schema["items"] = self.get_ref("schema", schema["items"])
self._resolve_refs_in_schema(schema["items"])
for key in ("allOf", "oneOf", "anyOf"):
if key in schema:
schema[key] = [self.get_ref("schema", s) for s in schema[key]]
for sch in schema[key]:
self._resolve_refs_in_schema(sch)
if "not" in schema:
schema["not"] = self.get_ref("schema", schema["not"])
self._resolve_refs_in_schema(schema["not"])
def _resolve_refs_in_parameter_or_header(self, parameter_or_header) -> None:
self._resolve_schema(parameter_or_header)
self._resolve_examples(parameter_or_header)
# parameter content is OpenAPI v3+
for media_type in parameter_or_header.get("content", {}).values():
self._resolve_schema(media_type)
def _resolve_refs_in_request_body(self, request_body) -> None:
# requestBody is OpenAPI v3+
for media_type in request_body["content"].values():
self._resolve_schema(media_type)
self._resolve_examples(media_type)
def _resolve_refs_in_response(self, response) -> None:
if self.openapi_version.major < 3:
self._resolve_schema(response)
else:
for media_type in response.get("content", {}).values():
self._resolve_schema(media_type)
self._resolve_examples(media_type)
for name, header in response.get("headers", {}).items():
response["headers"][name] = self.get_ref("header", header)
self._resolve_refs_in_parameter_or_header(response["headers"][name])
# TODO: Resolve link refs when Components supports links
def _resolve_refs_in_operation(self, operation) -> None:
if "parameters" in operation:
parameters = []
for parameter in operation["parameters"]:
parameter = self.get_ref("parameter", parameter)
self._resolve_refs_in_parameter_or_header(parameter)
parameters.append(parameter)
operation["parameters"] = parameters
if "callbacks" in operation:
for callback in operation["callbacks"].values():
if isinstance(callback, dict):
for path in callback.values():
self.resolve_refs_in_path(path)
if "requestBody" in operation:
self._resolve_refs_in_request_body(operation["requestBody"])
if "responses" in operation:
responses = {}
for code, response in operation["responses"].items():
response = self.get_ref("response", response)
self._resolve_refs_in_response(response)
responses[code] = response
operation["responses"] = responses
def resolve_refs_in_path(self, path) -> None:
if "parameters" in path:
parameters = []
for parameter in path["parameters"]:
parameter = self.get_ref("parameter", parameter)
self._resolve_refs_in_parameter_or_header(parameter)
parameters.append(parameter)
path["parameters"] = parameters
for method in (
"get",
"put",
"post",
"delete",
"options",
"head",
"patch",
"trace",
):
if method in path:
self._resolve_refs_in_operation(path[method])
| Components |
python | pypa__warehouse | tests/unit/organizations/test_tasks.py | {
"start": 1154,
"end": 3496
} | class ____:
def test_update_invitation_status(
self, db_request, user_service, organization_service
):
organization = OrganizationFactory.create()
organization.record_event = pretend.call_recorder(lambda *a, **kw: None)
user = UserFactory.create()
user.record_event = pretend.call_recorder(lambda *a, **kw: None)
invite = OrganizationInvitationFactory(user=user, organization=organization)
token_service = pretend.stub(loads=pretend.raiser(TokenExpired))
db_request.find_service = pretend.call_recorder(lambda *a, **kw: token_service)
update_organization_invitation_status(db_request)
assert db_request.find_service.calls == [
pretend.call(ITokenService, name="email")
]
assert invite.invite_status == OrganizationInvitationStatus.Expired
assert user.record_event.calls == [
pretend.call(
tag=EventTag.Account.OrganizationRoleExpireInvite,
request=db_request,
additional={"organization_name": invite.organization.name},
)
]
assert organization.record_event.calls == [
pretend.call(
tag=EventTag.Organization.OrganizationRoleExpireInvite,
request=db_request,
additional={"target_user_id": str(invite.user.id)},
)
]
def test_no_updates(self, db_request, user_service, organization_service):
organization = OrganizationFactory.create()
organization.record_event = pretend.call_recorder(lambda *a, **kw: None)
user = UserFactory.create()
user.record_event = pretend.call_recorder(lambda *a, **kw: None)
invite = OrganizationInvitationFactory(user=user, organization=organization)
token_service = pretend.stub(loads=lambda token: {})
db_request.find_service = pretend.call_recorder(lambda *a, **kw: token_service)
update_organization_invitation_status(db_request)
assert db_request.find_service.calls == [
pretend.call(ITokenService, name="email")
]
assert invite.invite_status == OrganizationInvitationStatus.Pending
assert user.record_event.calls == []
assert organization.record_event.calls == []
| TestUpdateInvitationStatus |
python | mwaskom__seaborn | tests/test_base.py | {
"start": 49072,
"end": 54572
} | class ____:
def test_unique_dashes(self):
n = 24
dashes = unique_dashes(n)
assert len(dashes) == n
assert len(set(dashes)) == n
assert dashes[0] == ""
for spec in dashes[1:]:
assert isinstance(spec, tuple)
assert not len(spec) % 2
def test_unique_markers(self):
n = 24
markers = unique_markers(n)
assert len(markers) == n
assert len(set(markers)) == n
for m in markers:
assert mpl.markers.MarkerStyle(m).is_filled()
def test_variable_type(self):
s = pd.Series([1., 2., 3.])
assert variable_type(s) == "numeric"
assert variable_type(s.astype(int)) == "numeric"
assert variable_type(s.astype(object)) == "numeric"
assert variable_type(s.to_numpy()) == "numeric"
assert variable_type(s.to_list()) == "numeric"
s = pd.Series([1, 2, 3, np.nan], dtype=object)
assert variable_type(s) == "numeric"
s = pd.Series([np.nan, np.nan])
assert variable_type(s) == "numeric"
s = pd.Series([pd.NA, pd.NA])
assert variable_type(s) == "numeric"
s = pd.Series([1, 2, pd.NA], dtype="Int64")
assert variable_type(s) == "numeric"
s = pd.Series(["1", "2", "3"])
assert variable_type(s) == "categorical"
assert variable_type(s.to_numpy()) == "categorical"
assert variable_type(s.to_list()) == "categorical"
# This should arguably be datmetime, but we don't currently handle it correctly
# Test is mainly asserting that this doesn't fail on the boolean check.
s = pd.timedelta_range(1, periods=3, freq="D").to_series()
assert variable_type(s) == "categorical"
s = pd.Series([True, False, False])
assert variable_type(s) == "numeric"
assert variable_type(s, boolean_type="categorical") == "categorical"
s_cat = s.astype("category")
assert variable_type(s_cat, boolean_type="categorical") == "categorical"
assert variable_type(s_cat, boolean_type="numeric") == "categorical"
s = pd.Series([pd.Timestamp(1), pd.Timestamp(2)])
assert variable_type(s) == "datetime"
assert variable_type(s.astype(object)) == "datetime"
assert variable_type(s.to_numpy()) == "datetime"
assert variable_type(s.to_list()) == "datetime"
def test_infer_orient(self):
nums = pd.Series(np.arange(6))
cats = pd.Series(["a", "b"] * 3)
dates = pd.date_range("1999-09-22", "2006-05-14", 6)
assert infer_orient(cats, nums) == "x"
assert infer_orient(nums, cats) == "y"
assert infer_orient(cats, dates, require_numeric=False) == "x"
assert infer_orient(dates, cats, require_numeric=False) == "y"
assert infer_orient(nums, None) == "y"
with pytest.warns(UserWarning, match="Vertical .+ `x`"):
assert infer_orient(nums, None, "v") == "y"
assert infer_orient(None, nums) == "x"
with pytest.warns(UserWarning, match="Horizontal .+ `y`"):
assert infer_orient(None, nums, "h") == "x"
infer_orient(cats, None, require_numeric=False) == "y"
with pytest.raises(TypeError, match="Horizontal .+ `x`"):
infer_orient(cats, None)
infer_orient(cats, None, require_numeric=False) == "x"
with pytest.raises(TypeError, match="Vertical .+ `y`"):
infer_orient(None, cats)
assert infer_orient(nums, nums, "vert") == "x"
assert infer_orient(nums, nums, "hori") == "y"
assert infer_orient(cats, cats, "h", require_numeric=False) == "y"
assert infer_orient(cats, cats, "v", require_numeric=False) == "x"
assert infer_orient(cats, cats, require_numeric=False) == "x"
with pytest.raises(TypeError, match="Vertical .+ `y`"):
infer_orient(cats, cats, "x")
with pytest.raises(TypeError, match="Horizontal .+ `x`"):
infer_orient(cats, cats, "y")
with pytest.raises(TypeError, match="Neither"):
infer_orient(cats, cats)
with pytest.raises(ValueError, match="`orient` must start with"):
infer_orient(cats, nums, orient="bad value")
def test_categorical_order(self):
x = ["a", "c", "c", "b", "a", "d"]
y = [3, 2, 5, 1, 4]
order = ["a", "b", "c", "d"]
out = categorical_order(x)
assert out == ["a", "c", "b", "d"]
out = categorical_order(x, order)
assert out == order
out = categorical_order(x, ["b", "a"])
assert out == ["b", "a"]
out = categorical_order(np.array(x))
assert out == ["a", "c", "b", "d"]
out = categorical_order(pd.Series(x))
assert out == ["a", "c", "b", "d"]
out = categorical_order(y)
assert out == [1, 2, 3, 4, 5]
out = categorical_order(np.array(y))
assert out == [1, 2, 3, 4, 5]
out = categorical_order(pd.Series(y))
assert out == [1, 2, 3, 4, 5]
x = pd.Categorical(x, order)
out = categorical_order(x)
assert out == list(x.categories)
x = pd.Series(x)
out = categorical_order(x)
assert out == list(x.cat.categories)
out = categorical_order(x, ["b", "a"])
assert out == ["b", "a"]
x = ["a", np.nan, "c", "c", "b", "a", "d"]
out = categorical_order(x)
assert out == ["a", "c", "b", "d"]
| TestCoreFunc |
python | pytorch__pytorch | torch/nested/_internal/nested_tensor.py | {
"start": 1556,
"end": 14741
} | class ____(torch.Tensor):
_values: torch.Tensor # type: ignore[assignment]
_offsets: torch.Tensor
_lengths: Optional[torch.Tensor]
# NOTE [ Nested ints for ragged sizes and strides ]
#
# Jagged layout tensors are tensors that represent a n-dim tensor with a
# ragged dimension, but are backed by an (n-1)-dim tensor underneath, e.g.,
# a jagged tensor with outer shape [B, x, D] is represented internally by a
# tensor with shape [sum(x), D] where we introduce what we call a nested int
# denoted as "x" here (but sometimes denoted with "*" to
# represent the ragged dimension, and sum(x) represents the dim of the inner
# tensor or equivalently the sum of all the sizes of the constituent
# tensors' varying lengths.
#
# We also use nested ints to represent the strides of this tensor.
# For example, a jagged tensor with shape [B, x, D] can be strided in two
# ways: [xD, D, 1] and [x, 1, sum(x)], where xD represents x multiplied by D
_size: tuple[int, ...]
_strides: tuple[int, ...]
# Indicates that the nth dimension is ragged
_ragged_idx: int
_metadata_cache: Dict[str, Any]
@staticmethod
def __new__(
cls,
values,
offsets,
*,
lengths=None,
**kwargs,
):
ks = DispatchKeySet(DispatchKey.NestedTensor)
ks = ks.add(DispatchKey.AutogradNestedTensor)
# Only support jagged for now.
assert offsets is not None
assert offsets.ndim == 1
assert not isinstance(values, NestedTensor)
assert values.device == offsets.device
# Query cache for the symint associated with offsets or lengths
# (create a new one if needed).
ragged_source = offsets if lengths is None else lengths
ragged_size = get_tensor_symint(ragged_source, coeff=1)
_ragged_idx = kwargs.get("_ragged_idx", 1)
B = offsets.shape[0] - 1
if lengths is not None:
assert B == lengths.shape[0]
# subtract 1 to convert to values dim space
r = _ragged_idx - 1
_size = (B, *values.shape[:r], ragged_size, *values.shape[r + 1 :])
stride = values.stride()
_strides = (ragged_size * stride[r], *stride)
r = torch.Tensor._make_wrapper_subclass(
cls,
_size,
_strides,
0,
torch.contiguous_format,
values.dtype,
torch.jagged,
values.device,
False,
kwargs.get("requires_grad", False),
"sizes",
False,
True, # dispatch_layout
ks,
# don't try to calculate storage based on non-zero size
storage_size=values.untyped_storage().size(),
)
r._ragged_idx = _ragged_idx
r._size = _size
r._strides = _strides
return r
def __init__(self, values, offsets, *, lengths=None, **kwargs) -> None:
super().__init__()
self._values = values
self._offsets = offsets
self._lengths = lengths
# holds properties that are computed lazily
self._metadata_cache = kwargs.get("_metadata_cache") or {}
# collapsed ragged dim must always be dynamic
torch._dynamo.maybe_mark_dynamic(self, self._ragged_idx)
torch._dynamo.maybe_mark_dynamic(self._values, self._ragged_idx - 1)
# min / max sequence length should be dynamic if present
max_seqlen_tensor = self._metadata_cache.get("max_seqlen", None)
if max_seqlen_tensor is not None:
torch._dynamo.mark_dynamic(max_seqlen_tensor, 0)
min_seqlen_tensor = self._metadata_cache.get("min_seqlen", None)
if min_seqlen_tensor is not None:
torch._dynamo.mark_dynamic(min_seqlen_tensor, 0)
def values(self):
# dispatch to get proper view relationship
return torch._nested_get_values(self) # type: ignore[attr-defined]
def offsets(self):
return self._offsets
def lengths(self):
return self._lengths
# Private accessor functions for min / max sequence length. They're
# purposefully not @properties because those don't work with PT2 (yet).
# These compute / cache if not present.
# TODO: Revisit this when @properties are better supported by PT2. I think the ideal
# state would be to have public @properties for min / max sequence length that compile
# (including setters).
def _get_max_seqlen(self):
max_seqlen_tensor = self._max_seqlen_tensor
if max_seqlen_tensor is None:
# compute & cache
max_val = _get_sdpa_extreme_seqlen(
torch.max,
self._offsets.diff() if self._lengths is None else self._lengths,
)
max_seqlen_tensor = _store_val_in_tensor(max_val)
self._metadata_cache["max_seqlen"] = max_seqlen_tensor
return _load_val_from_tensor(max_seqlen_tensor)
def _get_min_seqlen(self):
min_seqlen_tensor = self._min_seqlen_tensor
if min_seqlen_tensor is None:
# compute & cache
min_val = _get_sdpa_extreme_seqlen(
torch.min,
self._offsets.diff() if self._lengths is None else self._lengths,
)
min_seqlen_tensor = _store_val_in_tensor(min_val)
self._metadata_cache["min_seqlen"] = min_seqlen_tensor
return _load_val_from_tensor(min_seqlen_tensor)
# Private accessors used for treating min / max seqlen as inner tensors for
# flatten / unflatten. These must be properties to work with the traceable wrapper
# subclass logic. These do not compute / cache if not present.
@property
def _max_seqlen_tensor(self) -> Optional[torch.Tensor]:
return self._metadata_cache.get("max_seqlen", None)
@_max_seqlen_tensor.setter
def _max_seqlen_tensor(self, val: Optional[torch.Tensor]) -> None:
self._metadata_cache["max_seqlen"] = val
@property
def _min_seqlen_tensor(self) -> Optional[torch.Tensor]:
return self._metadata_cache.get("min_seqlen", None)
@_min_seqlen_tensor.setter
def _min_seqlen_tensor(self, val: Optional[torch.Tensor]) -> None:
self._metadata_cache["min_seqlen"] = val
# These are old private @property accessors that are kept around for internal BC
# reasons. TODO: Remove these!
@property
def _max_seqlen(self):
return self._get_max_seqlen()
@property
def _min_seqlen(self):
return self._get_min_seqlen()
# Convenience accessors that return a min / max seqlen if one is present and do NOT
# compute / cache them if they're not.
@property
def _maybe_max_seqlen(self) -> Optional[int]:
mt = self._max_seqlen_tensor
return None if mt is None else _load_val_from_tensor(mt)
@property
def _maybe_min_seqlen(self) -> Optional[int]:
mt = self._min_seqlen_tensor
return None if mt is None else _load_val_from_tensor(mt)
def _is_contiguous_or_false(self):
if self.lengths() is not None:
return False
from torch._prims_common import is_contiguous_for_memory_format_or_false
return is_contiguous_for_memory_format_or_false(
self._values, memory_format=torch.contiguous_format
)
def __repr__(self) -> str: # type: ignore[override]
# We should implement this in torch/_tensor_str.py instead
grad_fn_str = (
f", requires_grad={self.requires_grad}" if self.requires_grad else ""
)
if self.grad_fn:
grad_fn_str = f", grad_fn={self.grad_fn}"
return f"NestedTensor(size={self._size}, offsets={self._offsets}{grad_fn_str}, contiguous={self._is_contiguous_or_false()})"
# TODO: Remove this in favor of the default tensor subclass serialization logic.
# We don't do this today because of https://github.com/pytorch/pytorch/issues/125622.
def __reduce_ex__(self, proto):
state = torch._utils._get_obj_state(self)
# Cached PyCapsules for sizes / strides are not serializable.
# See Note [Tensor Subclass custom size/stride caching strategy]
self._clear_non_serializable_cached_data()
# SymNodes are not serializable
assert "_size" in state and "_strides" in state
state = dict(state)
del state["_size"]
del state["_strides"]
func = _rebuild_njt
constructor_kwargs = {
"values": self._values,
"offsets": self._offsets,
"lengths": self._lengths,
"_ragged_idx": self._ragged_idx,
"_metadata_cache": self._metadata_cache,
"requires_grad": self.requires_grad,
}
args = (constructor_kwargs,)
return (torch._tensor._rebuild_from_type_v2, (func, type(self), args, state))
def __tensor_flatten__(self):
ctx = {
"requires_grad": self.requires_grad,
"ragged_idx": self._ragged_idx,
}
inner_tensors = ["_values", "_offsets"]
if self._lengths is not None:
inner_tensors.append("_lengths")
if self._min_seqlen_tensor is not None:
inner_tensors.append("_min_seqlen_tensor")
if self._max_seqlen_tensor is not None:
inner_tensors.append("_max_seqlen_tensor")
return inner_tensors, ctx
@staticmethod
def __tensor_unflatten__(inner_tensors: Dict, meta, outer_size, outer_stride):
from torch._subclasses.fake_tensor import FakeTensor
# inner tensors: _values, _offsets, [_lengths], [_min_seqlen], [_max_seqlen]
assert len(inner_tensors) >= 2 and len(inner_tensors) <= 5
values = inner_tensors["_values"]
offsets = inner_tensors["_offsets"]
lengths = inner_tensors.get("_lengths", None)
min_seqlen_tensor = inner_tensors.get("_min_seqlen_tensor", None)
max_seqlen_tensor = inner_tensors.get("_max_seqlen_tensor", None)
metadata_cache = {}
if min_seqlen_tensor is not None:
metadata_cache["min_seqlen"] = min_seqlen_tensor
if max_seqlen_tensor is not None:
metadata_cache["max_seqlen"] = max_seqlen_tensor
ragged_idx = meta["ragged_idx"]
# Alternatively, we could make it the caller's responsibility to
# cache it. But this heuristic seems simple enough.
ragged_source = offsets if lengths is None else lengths
if isinstance(ragged_source, FakeTensor):
ragged_size = outer_size[ragged_idx]
ragged_source.nested_int_memo = ragged_size
return NestedTensor(
values,
offsets=offsets,
lengths=lengths,
requires_grad=meta["requires_grad"],
_ragged_idx=ragged_idx,
_metadata_cache=metadata_cache,
)
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None): # type: ignore[override]
# If you're wondering why there's a nested tensor with one of its
# size = -1, see note: [NJT outer_size in AOTDispatcher]
kwargs = {} if kwargs is None else kwargs
# Lazy import to avoid circular dependency
from .ops import lookup_jagged
fn = lookup_jagged(func, *args, **kwargs)
if fn is not None:
return fn(*args, **kwargs)
# Poor man's redispatch for composite ops. This becomes relevant under inference
# mode, where disabling autograd key dispatch prevents decomposition.
all_dks = (
# We want to handle both the cases where NestedTensor overrides the
# composite implicit autograd kernel, and the case where it doesn't.
# Prioritize calling into NestedTensor's kernel if it exists.
torch._C.DispatchKey.CompositeImplicitAutogradNestedTensor,
torch._C.DispatchKey.CompositeImplicitAutograd,
)
for dk in all_dks:
if torch._C._dispatch_has_kernel_for_dispatch_key(func.name(), dk):
with torch.overrides.enable_reentrant_dispatch():
return func._op_dk(dk, *args, **kwargs)
raise NotImplementedError(func)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
from torch.fx.experimental.proxy_tensor import maybe_enable_thunkify
from .ops import jagged_torch_function
# This should be removed after
# https://github.com/pytorch/pytorch/pull/125941/ lands
with maybe_enable_thunkify():
try:
return jagged_torch_function(func, *args, **kwargs)
except NotImplementedError:
pass
with torch._C.DisableTorchFunctionSubclass():
return func(*args, **kwargs)
# NB: These fake view autograd.Functions are superseded by real view ops. Don't use them!
# TODO: Remove ViewBufferFromNested, ViewNestedFromBuffer, and buffer_from_jagged once the
# internal BC period has passed.
# Not actually a view!
| NestedTensor |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/models/secrets.py | {
"start": 2644,
"end": 2891
} | class ____(SecretStore):
def _fetch_secret(self, name: str) -> str:
try:
return os.environ[name]
except KeyError:
raise SecretNotFoundError(f"The environment variable {name} is not set.")
| EnvVarSecretStore |
python | coleifer__peewee | tests/models.py | {
"start": 162717,
"end": 164925
} | class ____(ModelTestCase):
requires = [Task]
def setUp(self):
super(TestMultiSelfJoin, self).setUp()
with self.database.atomic():
p_dev = Task.create(title='dev', type=Task.PROJECT)
p_p = Task.create(title='peewee', project=p_dev, type=Task.PROJECT)
p_h = Task.create(title='huey', project=p_dev, type=Task.PROJECT)
heading_data = (
('peewee-1', p_p, 2),
('peewee-2', p_p, 0),
('huey-1', p_h, 1),
('huey-2', p_h, 1))
for title, proj, n_subtasks in heading_data:
t = Task.create(title=title, project=proj, type=Task.HEADING)
for i in range(n_subtasks):
Task.create(title='%s-%s' % (title, i + 1), project=proj,
heading=t, type=Task.HEADING)
def test_multi_self_join(self):
Project = Task.alias()
Heading = Task.alias()
query = (Task
.select(Task, Project, Heading)
.join(Heading, JOIN.LEFT_OUTER,
on=(Task.heading == Heading.id).alias('heading'))
.switch(Task)
.join(Project, JOIN.LEFT_OUTER,
on=(Task.project == Project.id).alias('project'))
.order_by(Task.id))
with self.assertQueryCount(1):
accum = []
for task in query:
h_title = task.heading.title if task.heading else None
p_title = task.project.title if task.project else None
accum.append((task.title, h_title, p_title))
self.assertEqual(accum, [
# title - heading - project
('dev', None, None),
('peewee', None, 'dev'),
('huey', None, 'dev'),
('peewee-1', None, 'peewee'),
('peewee-1-1', 'peewee-1', 'peewee'),
('peewee-1-2', 'peewee-1', 'peewee'),
('peewee-2', None, 'peewee'),
('huey-1', None, 'huey'),
('huey-1-1', 'huey-1', 'huey'),
('huey-2', None, 'huey'),
('huey-2-1', 'huey-2', 'huey'),
])
| TestMultiSelfJoin |
python | arrow-py__arrow | arrow/locales.py | {
"start": 143130,
"end": 144679
} | class ____(Locale):
names = ["hy", "hy-am"]
past = "{0} առաջ"
future = "{0}ից"
and_word = "Եվ" # Yev
timeframes = {
"now": "հիմա",
"second": "վայրկյան",
"seconds": "{0} վայրկյան",
"minute": "րոպե",
"minutes": "{0} րոպե",
"hour": "ժամ",
"hours": "{0} ժամ",
"day": "օր",
"days": "{0} օր",
"month": "ամիս",
"months": "{0} ամիս",
"year": "տարին",
"years": "{0} տարին",
"week": "շաբաթ",
"weeks": "{0} շաբաթ",
}
meridians = {
"am": "Ամ",
"pm": "պ.մ.",
"AM": "Ամ",
"PM": "պ.մ.",
}
month_names = [
"",
"հունվար",
"փետրվար",
"մարտ",
"ապրիլ",
"մայիս",
"հունիս",
"հուլիս",
"օգոստոս",
"սեպտեմբեր",
"հոկտեմբեր",
"նոյեմբեր",
"դեկտեմբեր",
]
month_abbreviations = [
"",
"հունվար",
"փետրվար",
"մարտ",
"ապրիլ",
"մայիս",
"հունիս",
"հուլիս",
"օգոստոս",
"սեպտեմբեր",
"հոկտեմբեր",
"նոյեմբեր",
"դեկտեմբեր",
]
day_names = [
"",
"երկուշաբթի",
"երեքշաբթի",
"չորեքշաբթի",
"հինգշաբթի",
"ուրբաթ",
"շաբաթ",
"կիրակի",
]
day_abbreviations = [
"",
"երկ.",
"երեք.",
"չորեք.",
"հինգ.",
"ուրբ.",
"շաբ.",
"կիր.",
]
| ArmenianLocale |
python | python-pillow__Pillow | src/PIL/ImImagePlugin.py | {
"start": 2992,
"end": 11567
} | class ____(ImageFile.ImageFile):
format = "IM"
format_description = "IFUNC Image Memory"
_close_exclusive_fp_after_loading = False
def _open(self) -> None:
# Quick rejection: if there's not an LF among the first
# 100 bytes, this is (probably) not a text header.
if b"\n" not in self.fp.read(100):
msg = "not an IM file"
raise SyntaxError(msg)
self.fp.seek(0)
n = 0
# Default values
self.info[MODE] = "L"
self.info[SIZE] = (512, 512)
self.info[FRAMES] = 1
self.rawmode = "L"
while True:
s = self.fp.read(1)
# Some versions of IFUNC uses \n\r instead of \r\n...
if s == b"\r":
continue
if not s or s == b"\0" or s == b"\x1a":
break
# FIXME: this may read whole file if not a text file
s = s + self.fp.readline()
if len(s) > 100:
msg = "not an IM file"
raise SyntaxError(msg)
if s.endswith(b"\r\n"):
s = s[:-2]
elif s.endswith(b"\n"):
s = s[:-1]
try:
m = split.match(s)
except re.error as e:
msg = "not an IM file"
raise SyntaxError(msg) from e
if m:
k, v = m.group(1, 2)
# Don't know if this is the correct encoding,
# but a decent guess (I guess)
k = k.decode("latin-1", "replace")
v = v.decode("latin-1", "replace")
# Convert value as appropriate
if k in [FRAMES, SCALE, SIZE]:
v = v.replace("*", ",")
v = tuple(map(number, v.split(",")))
if len(v) == 1:
v = v[0]
elif k == MODE and v in OPEN:
v, self.rawmode = OPEN[v]
# Add to dictionary. Note that COMMENT tags are
# combined into a list of strings.
if k == COMMENT:
if k in self.info:
self.info[k].append(v)
else:
self.info[k] = [v]
else:
self.info[k] = v
if k in TAGS:
n += 1
else:
msg = f"Syntax error in IM header: {s.decode('ascii', 'replace')}"
raise SyntaxError(msg)
if not n:
msg = "Not an IM file"
raise SyntaxError(msg)
# Basic attributes
self._size = self.info[SIZE]
self._mode = self.info[MODE]
# Skip forward to start of image data
while s and not s.startswith(b"\x1a"):
s = self.fp.read(1)
if not s:
msg = "File truncated"
raise SyntaxError(msg)
if LUT in self.info:
# convert lookup table to palette or lut attribute
palette = self.fp.read(768)
greyscale = 1 # greyscale palette
linear = 1 # linear greyscale palette
for i in range(256):
if palette[i] == palette[i + 256] == palette[i + 512]:
if palette[i] != i:
linear = 0
else:
greyscale = 0
if self.mode in ["L", "LA", "P", "PA"]:
if greyscale:
if not linear:
self.lut = list(palette[:256])
else:
if self.mode in ["L", "P"]:
self._mode = self.rawmode = "P"
elif self.mode in ["LA", "PA"]:
self._mode = "PA"
self.rawmode = "PA;L"
self.palette = ImagePalette.raw("RGB;L", palette)
elif self.mode == "RGB":
if not greyscale or not linear:
self.lut = list(palette)
self.frame = 0
self.__offset = offs = self.fp.tell()
self._fp = self.fp # FIXME: hack
if self.rawmode.startswith("F;"):
# ifunc95 formats
try:
# use bit decoder (if necessary)
bits = int(self.rawmode[2:])
if bits not in [8, 16, 32]:
self.tile = [
ImageFile._Tile(
"bit", (0, 0) + self.size, offs, (bits, 8, 3, 0, -1)
)
]
return
except ValueError:
pass
if self.rawmode in ["RGB;T", "RYB;T"]:
# Old LabEye/3PC files. Would be very surprised if anyone
# ever stumbled upon such a file ;-)
size = self.size[0] * self.size[1]
self.tile = [
ImageFile._Tile("raw", (0, 0) + self.size, offs, ("G", 0, -1)),
ImageFile._Tile("raw", (0, 0) + self.size, offs + size, ("R", 0, -1)),
ImageFile._Tile(
"raw", (0, 0) + self.size, offs + 2 * size, ("B", 0, -1)
),
]
else:
# LabEye/IFUNC files
self.tile = [
ImageFile._Tile("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))
]
@property
def n_frames(self) -> int:
return self.info[FRAMES]
@property
def is_animated(self) -> bool:
return self.info[FRAMES] > 1
def seek(self, frame: int) -> None:
if not self._seek_check(frame):
return
if isinstance(self._fp, DeferredError):
raise self._fp.ex
self.frame = frame
if self.mode == "1":
bits = 1
else:
bits = 8 * len(self.mode)
size = ((self.size[0] * bits + 7) // 8) * self.size[1]
offs = self.__offset + frame * size
self.fp = self._fp
self.tile = [
ImageFile._Tile("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))
]
def tell(self) -> int:
return self.frame
#
# --------------------------------------------------------------------
# Save IM files
SAVE = {
# mode: (im type, raw mode)
"1": ("0 1", "1"),
"L": ("Greyscale", "L"),
"LA": ("LA", "LA;L"),
"P": ("Greyscale", "P"),
"PA": ("LA", "PA;L"),
"I": ("L 32S", "I;32S"),
"I;16": ("L 16", "I;16"),
"I;16L": ("L 16L", "I;16L"),
"I;16B": ("L 16B", "I;16B"),
"F": ("L 32F", "F;32F"),
"RGB": ("RGB", "RGB;L"),
"RGBA": ("RGBA", "RGBA;L"),
"RGBX": ("RGBX", "RGBX;L"),
"CMYK": ("CMYK", "CMYK;L"),
"YCbCr": ("YCC", "YCbCr;L"),
}
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
try:
image_type, rawmode = SAVE[im.mode]
except KeyError as e:
msg = f"Cannot save {im.mode} images as IM"
raise ValueError(msg) from e
frames = im.encoderinfo.get("frames", 1)
fp.write(f"Image type: {image_type} image\r\n".encode("ascii"))
if filename:
# Each line must be 100 characters or less,
# or: SyntaxError("not an IM file")
# 8 characters are used for "Name: " and "\r\n"
# Keep just the filename, ditch the potentially overlong path
if isinstance(filename, bytes):
filename = filename.decode("ascii")
name, ext = os.path.splitext(os.path.basename(filename))
name = "".join([name[: 92 - len(ext)], ext])
fp.write(f"Name: {name}\r\n".encode("ascii"))
fp.write(f"Image size (x*y): {im.size[0]}*{im.size[1]}\r\n".encode("ascii"))
fp.write(f"File size (no of images): {frames}\r\n".encode("ascii"))
if im.mode in ["P", "PA"]:
fp.write(b"Lut: 1\r\n")
fp.write(b"\000" * (511 - fp.tell()) + b"\032")
if im.mode in ["P", "PA"]:
im_palette = im.im.getpalette("RGB", "RGB;L")
colors = len(im_palette) // 3
palette = b""
for i in range(3):
palette += im_palette[colors * i : colors * (i + 1)]
palette += b"\x00" * (256 - colors)
fp.write(palette) # 768 bytes
ImageFile._save(
im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))]
)
#
# --------------------------------------------------------------------
# Registry
Image.register_open(ImImageFile.format, ImImageFile)
Image.register_save(ImImageFile.format, _save)
Image.register_extension(ImImageFile.format, ".im")
| ImImageFile |
python | scikit-learn__scikit-learn | sklearn/ensemble/_base.py | {
"start": 6026,
"end": 10604
} | class ____(
MetaEstimatorMixin, _BaseComposition, metaclass=ABCMeta
):
"""Base class for heterogeneous ensemble of learners.
Parameters
----------
estimators : list of (str, estimator) tuples
The ensemble of estimators to use in the ensemble. Each element of the
list is defined as a tuple of string (i.e. name of the estimator) and
an estimator instance. An estimator can be set to `'drop'` using
`set_params`.
Attributes
----------
estimators_ : list of estimators
The elements of the estimators parameter, having been fitted on the
training data. If an estimator has been set to `'drop'`, it will not
appear in `estimators_`.
"""
@property
def named_estimators(self):
"""Dictionary to access any fitted sub-estimators by name.
Returns
-------
:class:`~sklearn.utils.Bunch`
"""
return Bunch(**dict(self.estimators))
@abstractmethod
def __init__(self, estimators):
self.estimators = estimators
def _validate_estimators(self):
if len(self.estimators) == 0 or not all(
isinstance(item, (tuple, list)) and isinstance(item[0], str)
for item in self.estimators
):
raise ValueError(
"Invalid 'estimators' attribute, 'estimators' should be a "
"non-empty list of (string, estimator) tuples."
)
names, estimators = zip(*self.estimators)
# defined by MetaEstimatorMixin
self._validate_names(names)
has_estimator = any(est != "drop" for est in estimators)
if not has_estimator:
raise ValueError(
"All estimators are dropped. At least one is required "
"to be an estimator."
)
is_estimator_type = is_classifier if is_classifier(self) else is_regressor
for est in estimators:
if est != "drop" and not is_estimator_type(est):
raise ValueError(
"The estimator {} should be a {}.".format(
est.__class__.__name__, is_estimator_type.__name__[3:]
)
)
return names, estimators
def set_params(self, **params):
"""
Set the parameters of an estimator from the ensemble.
Valid parameter keys can be listed with `get_params()`. Note that you
can directly set the parameters of the estimators contained in
`estimators`.
Parameters
----------
**params : keyword arguments
Specific parameters using e.g.
`set_params(parameter_name=new_value)`. In addition, to setting the
parameters of the estimator, the individual estimator of the
estimators can also be set, or can be removed by setting them to
'drop'.
Returns
-------
self : object
Estimator instance.
"""
super()._set_params("estimators", **params)
return self
def get_params(self, deep=True):
"""
Get the parameters of an estimator from the ensemble.
Returns the parameters given in the constructor as well as the
estimators contained within the `estimators` parameter.
Parameters
----------
deep : bool, default=True
Setting it to True gets the various estimators and the parameters
of the estimators as well.
Returns
-------
params : dict
Parameter and estimator names mapped to their values or parameter
names mapped to their values.
"""
return super()._get_params("estimators", deep=deep)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
try:
tags.input_tags.allow_nan = all(
get_tags(est[1]).input_tags.allow_nan if est[1] != "drop" else True
for est in self.estimators
)
tags.input_tags.sparse = all(
get_tags(est[1]).input_tags.sparse if est[1] != "drop" else True
for est in self.estimators
)
except Exception:
# If `estimators` does not comply with our API (list of tuples) then it will
# fail. In this case, we assume that `allow_nan` and `sparse` are False but
# the parameter validation will raise an error during `fit`.
pass # pragma: no cover
return tags
| _BaseHeterogeneousEnsemble |
python | spack__spack | lib/spack/spack/llnl/util/filesystem.py | {
"start": 35003,
"end": 46504
} | class ____(RuntimeError):
def __init__(self, inner_exception, outer_exception):
self.inner_exception = inner_exception
self.outer_exception = outer_exception
@contextmanager
@system_path_filter
def replace_directory_transaction(directory_name):
"""Temporarily renames a directory in the same parent dir. If the operations
executed within the context manager don't raise an exception, the renamed directory
is deleted. If there is an exception, the move is undone.
Args:
directory_name (path): absolute path of the directory name
Returns:
temporary directory where ``directory_name`` has been moved
"""
# Check the input is indeed a directory with absolute path.
# Raise before anything is done to avoid moving the wrong directory
directory_name = os.path.abspath(directory_name)
assert os.path.isdir(directory_name), "Not a directory: " + directory_name
# Note: directory_name is normalized here, meaning the trailing slash is dropped,
# so dirname is the directory's parent not the directory itself.
tmpdir = tempfile.mkdtemp(dir=os.path.dirname(directory_name), prefix=".backup")
# We have to jump through hoops to support Windows, since
# os.rename(directory_name, tmpdir) errors there.
backup_dir = os.path.join(tmpdir, "backup")
os.rename(directory_name, backup_dir)
tty.debug("Directory moved [src={0}, dest={1}]".format(directory_name, backup_dir))
try:
yield backup_dir
except (Exception, KeyboardInterrupt, SystemExit) as inner_exception:
# Try to recover the original directory, if this fails, raise a
# composite exception.
try:
# Delete what was there, before copying back the original content
if os.path.exists(directory_name):
shutil.rmtree(directory_name)
os.rename(backup_dir, directory_name)
except Exception as outer_exception:
raise CouldNotRestoreDirectoryBackup(inner_exception, outer_exception)
tty.debug("Directory recovered [{0}]".format(directory_name))
raise
else:
# Otherwise delete the temporary directory
shutil.rmtree(tmpdir, ignore_errors=True)
tty.debug("Temporary directory deleted [{0}]".format(tmpdir))
@system_path_filter
def hash_directory(directory, ignore=[]):
"""Hashes recursively the content of a directory.
Args:
directory (path): path to a directory to be hashed
Returns:
hash of the directory content
"""
assert os.path.isdir(directory), '"directory" must be a directory!'
md5_hash = hashlib.md5()
# Adapted from https://stackoverflow.com/a/3431835/771663
for root, dirs, files in os.walk(directory):
for name in sorted(files):
filename = os.path.join(root, name)
if filename not in ignore:
# TODO: if caching big files becomes an issue, convert this to
# TODO: read in chunks. Currently it's used only for testing
# TODO: purposes.
with open(filename, "rb") as f:
md5_hash.update(f.read())
return md5_hash.hexdigest()
@contextmanager
@system_path_filter
def write_tmp_and_move(filename: str, *, encoding: Optional[str] = None):
"""Write to a temporary file, then move into place."""
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
tmp = os.path.join(dirname, ".%s.tmp" % basename)
with open(tmp, "w", encoding=encoding) as f:
yield f
shutil.move(tmp, filename)
@system_path_filter
def touch(path):
"""Creates an empty file at the specified path."""
if sys.platform == "win32":
perms = os.O_WRONLY | os.O_CREAT
else:
perms = os.O_WRONLY | os.O_CREAT | os.O_NONBLOCK | os.O_NOCTTY
fd = None
try:
fd = os.open(path, perms)
os.utime(path, None)
finally:
if fd is not None:
os.close(fd)
@system_path_filter
def touchp(path):
"""Like ``touch``, but creates any parent directories needed for the file."""
mkdirp(os.path.dirname(path))
touch(path)
@system_path_filter
def force_symlink(src: str, dest: str) -> None:
"""Create a symlink at ``dest`` pointing to ``src``. Similar to ``ln -sf``."""
try:
symlink(src, dest)
except OSError:
os.remove(dest)
symlink(src, dest)
@system_path_filter
def join_path(prefix, *args) -> str:
"""Alias for :func:`os.path.join`"""
path = str(prefix)
for elt in args:
path = os.path.join(path, str(elt))
return path
@system_path_filter
def ancestor(dir, n=1):
"""Get the nth ancestor of a directory."""
parent = os.path.abspath(dir)
for i in range(n):
parent = os.path.dirname(parent)
return parent
@system_path_filter
def get_single_file(directory):
fnames = os.listdir(directory)
if len(fnames) != 1:
raise ValueError("Expected exactly 1 file, got {0}".format(str(len(fnames))))
return fnames[0]
@system_path_filter
def windows_sfn(path: os.PathLike):
"""Returns 8.3 Filename (SFN) representation of
path
8.3 Filenames (SFN or short filename) is a file
naming convention used prior to Win95 that Windows
still (and will continue to) support. This convention
caps filenames at 8 characters, and most importantly
does not allow for spaces in addition to other specifications.
The scheme is generally the same as a normal Windows
file scheme, but all spaces are removed and the filename
is capped at 6 characters. The remaining characters are
replaced with ~N where N is the number file in a directory
that a given file represents i.e. Program Files and Program Files (x86)
would be PROGRA~1 and PROGRA~2 respectively.
Further, all file/directory names are all caps (although modern Windows
is case insensitive in practice).
Conversion is accomplished by fileapi.h GetShortPathNameW
Returns paths in 8.3 Filename form
Note: this method is a no-op on Linux
Args:
path: Path to be transformed into SFN (8.3 filename) format
"""
# This should not be run-able on linux/macos
if sys.platform != "win32":
return path
path = str(path)
import ctypes
k32 = ctypes.WinDLL("kernel32", use_last_error=True)
# Method with null values returns size of short path name
sz = k32.GetShortPathNameW(path, None, 0)
# stub Windows types TCHAR[LENGTH]
TCHAR_arr = ctypes.c_wchar * sz
ret_str = TCHAR_arr()
k32.GetShortPathNameW(path, ctypes.byref(ret_str), sz)
return ret_str.value
@contextmanager
def temp_cwd():
tmp_dir = tempfile.mkdtemp()
try:
with working_dir(tmp_dir):
yield tmp_dir
finally:
kwargs = {}
if sys.platform == "win32":
kwargs["ignore_errors"] = False
kwargs["onerror"] = readonly_file_handler(ignore_errors=True)
shutil.rmtree(tmp_dir, **kwargs)
@system_path_filter
def can_access(file_name):
"""True if the current process has read and write access to the file."""
return os.access(file_name, os.R_OK | os.W_OK)
@system_path_filter
def traverse_tree(
source_root: str,
dest_root: str,
rel_path: str = "",
*,
order: str = "pre",
ignore: Optional[Callable[[str], bool]] = None,
follow_nonexisting: bool = True,
follow_links: bool = False,
):
"""Traverse two filesystem trees simultaneously.
Walks the LinkTree directory in pre or post order. Yields each
file in the source directory with a matching path from the dest
directory, along with whether the file is a directory.
e.g., for this tree::
root/
a/
file1
file2
b/
file3
When called on dest, this yields::
("root", "dest")
("root/a", "dest/a")
("root/a/file1", "dest/a/file1")
("root/a/file2", "dest/a/file2")
("root/b", "dest/b")
("root/b/file3", "dest/b/file3")
Keyword Arguments:
order (str): Whether to do pre- or post-order traversal. Accepted
values are ``"pre"`` and ``"post"``
ignore (typing.Callable): function indicating which files to ignore. This will also
ignore symlinks if they point to an ignored file (regardless of whether the symlink
is explicitly ignored); note this only supports one layer of indirection (i.e. if
you have x -> y -> z, and z is ignored but x/y are not, then y would be ignored
but not x). To avoid this, make sure the ignore function also ignores the symlink
paths too.
follow_nonexisting (bool): Whether to descend into directories in
``src`` that do not exit in ``dest``. Default is True
follow_links (bool): Whether to descend into symlinks in ``src``
"""
if order not in ("pre", "post"):
raise ValueError("Order must be 'pre' or 'post'.")
# List of relative paths to ignore under the src root.
ignore = ignore or (lambda filename: False)
# Don't descend into ignored directories
if ignore(rel_path):
return
source_path = os.path.join(source_root, rel_path)
dest_path = os.path.join(dest_root, rel_path)
# preorder yields directories before children
if order == "pre":
yield (source_path, dest_path)
for f in os.listdir(source_path):
source_child = os.path.join(source_path, f)
dest_child = os.path.join(dest_path, f)
rel_child = os.path.join(rel_path, f)
# If the source path is a link and the link's source is ignored, then ignore the link too,
# but only do this if the ignore is defined.
if ignore is not None:
if islink(source_child) and not follow_links:
target = readlink(source_child)
all_parents = accumulate(target.split(os.sep), lambda x, y: os.path.join(x, y))
if any(map(ignore, all_parents)):
tty.warn(
f"Skipping {source_path} because the source or a part of the source's "
f"path is included in the ignores."
)
continue
# Treat as a directory
# TODO: for symlinks, os.path.isdir looks for the link target. If the
# target is relative to the link, then that may not resolve properly
# relative to our cwd - see resolve_link_target_relative_to_the_link
if os.path.isdir(source_child) and (follow_links or not islink(source_child)):
# When follow_nonexisting isn't set, don't descend into dirs
# in source that do not exist in dest
if follow_nonexisting or os.path.exists(dest_child):
tuples = traverse_tree(
source_root,
dest_root,
rel_child,
order=order,
ignore=ignore,
follow_nonexisting=follow_nonexisting,
follow_links=follow_links,
)
for t in tuples:
yield t
# Treat as a file.
elif not ignore(os.path.join(rel_path, f)):
yield (source_child, dest_child)
if order == "post":
yield (source_path, dest_path)
| CouldNotRestoreDirectoryBackup |
python | vyperlang__vyper | vyper/codegen/external_call.py | {
"start": 702,
"end": 9385
} | class ____:
value: IRnode
gas: IRnode
skip_contract_check: bool
default_return_value: IRnode
def _pack_arguments(fn_type, args, context):
# abi encoding just treats all args as a big tuple
args_tuple_t = TupleT([x.typ for x in args])
args_as_tuple = IRnode.from_list(["multi"] + [x for x in args], typ=args_tuple_t)
args_abi_t = args_tuple_t.abi_type
# sanity typecheck - make sure the arguments can be assigned
dst_tuple_t = TupleT(fn_type.argument_types[: len(args)])
check_assign(dummy_node_for_type(dst_tuple_t), args_as_tuple)
if fn_type.return_type is not None:
return_abi_t = calculate_type_for_external_return(fn_type.return_type).abi_type
# we use the same buffer for args and returndata,
# so allocate enough space here for the returndata too.
buflen = max(args_abi_t.size_bound(), return_abi_t.size_bound())
else:
buflen = args_abi_t.size_bound()
buflen += 32 # padding for the method id
buf_t = get_type_for_exact_size(buflen)
buf = context.new_internal_variable(buf_t)
args_ofst = add_ofst(buf, 28)
args_len = args_abi_t.size_bound() + 4
abi_signature = fn_type.name + dst_tuple_t.abi_type.selector_name()
# layout:
# 32 bytes | args
# 0x..00<method_id_4bytes> | args
# the reason for the left padding is just so the alignment is easier.
# XXX: we could align to buf (and also keep code size small) by using
# (mstore buf (shl signature.method_id 224))
pack_args = ["seq"]
pack_args.append(["mstore", buf, util.method_id_int(abi_signature)])
if len(args) != 0:
encode_buf = add_ofst(buf, 32)
encode_buflen = buflen - 32
pack_args.append(abi_encode(encode_buf, args_as_tuple, context, bufsz=encode_buflen))
return buf, pack_args, args_ofst, args_len
def _unpack_returndata(buf, fn_type, call_kwargs, contract_address, context, expr):
return_t = fn_type.return_type
if return_t is None:
return ["pass"], 0, 0
wrapped_return_t = calculate_type_for_external_return(return_t)
abi_return_t = wrapped_return_t.abi_type
min_return_size = abi_return_t.static_size()
max_return_size = abi_return_t.size_bound()
assert 0 < min_return_size <= max_return_size
ret_ofst = buf
ret_len = max_return_size
encoding = Encoding.ABI
assert buf.location == MEMORY
buf = copy.copy(buf)
buf.typ = wrapped_return_t
buf.encoding = encoding
buf.annotation = f"{expr.node_source_code} returndata buffer"
unpacker = ["seq"]
assert isinstance(wrapped_return_t, TupleT)
# unpack strictly
if not needs_clamp(wrapped_return_t, encoding):
# revert when returndatasize is not in bounds
# NOTE: there is an optimization here: when needs_clamp is True,
# make_setter (implicitly) checks returndatasize during abi
# decoding.
# since make_setter is not called in this branch, we need to check
# returndatasize here, but we avoid a redundant check by only doing
# the returndatasize check inside of this branch (and not in the
# `needs_clamp==True` branch).
# in the future, this check could be moved outside of the branch, and
# instead rely on the optimizer to optimize out the redundant check,
# it would need the optimizer to do algebraic reductions (along the
# lines of `a>b and b>c and a>c` reduced to `a>b and b>c`).
# another thing we could do instead once we have the machinery is to
# simply always use make_setter instead of having this assertion, and
# rely on memory analyser to optimize out the memory movement.
assertion = IRnode.from_list(
["assert", ["ge", "returndatasize", min_return_size]],
error_msg="returndatasize too small",
)
unpacker.append(assertion)
return_buf = buf
else:
return_buf = context.new_internal_variable(wrapped_return_t)
# note: make_setter does ABI decoding and clamps
payload_bound = IRnode.from_list(
["select", ["lt", ret_len, "returndatasize"], ret_len, "returndatasize"]
)
with payload_bound.cache_when_complex("payload_bound") as (b1, payload_bound):
unpacker.append(
b1.resolve(make_setter(return_buf, buf, hi=add_ofst(buf, payload_bound)))
)
if call_kwargs.default_return_value is not None:
# if returndatasize == 0:
# copy return override to buf
# else:
# do the other stuff
override_value = wrap_value_for_external_return(call_kwargs.default_return_value)
stomp_return_buffer = ["seq"]
if not call_kwargs.skip_contract_check:
stomp_return_buffer.append(_extcodesize_check(contract_address))
stomp_return_buffer.append(make_setter(return_buf, override_value))
unpacker = ["if", ["eq", "returndatasize", 0], stomp_return_buffer, unpacker]
unpacker = ["seq", unpacker, return_buf]
return unpacker, ret_ofst, ret_len
def _parse_kwargs(call_expr, context):
from vyper.codegen.expr import Expr # TODO rethink this circular import
def _bool(x):
assert x.value in (0, 1), "type checker missed this"
return bool(x.value)
# note: codegen for kwarg values in AST order
call_kwargs = {kw.arg: Expr(kw.value, context).ir_node for kw in call_expr.keywords}
ret = _CallKwargs(
value=unwrap_location(call_kwargs.pop("value", IRnode(0))),
gas=unwrap_location(call_kwargs.pop("gas", IRnode("gas"))),
skip_contract_check=_bool(call_kwargs.pop("skip_contract_check", IRnode(0))),
default_return_value=call_kwargs.pop("default_return_value", None),
)
if len(call_kwargs) != 0: # pragma: nocover
raise TypeCheckFailure(f"Unexpected keyword arguments: {call_kwargs}")
return ret
def _extcodesize_check(address):
return IRnode.from_list(["assert", ["extcodesize", address]], error_msg="extcodesize is zero")
def _external_call_helper(contract_address, args_ir, call_kwargs, call_expr, context):
fn_type = call_expr.func._metadata["type"]
# sanity check
assert fn_type.n_positional_args <= len(args_ir) <= fn_type.n_total_args
ret = ["seq"]
# this is a sanity check to prevent double evaluation of the external call
# in the codegen pipeline. if the external call gets doubly evaluated,
# a duplicate label exception will get thrown during assembly.
ret.append(eval_once_check(_freshname(call_expr.node_source_code)))
buf, arg_packer, args_ofst, args_len = _pack_arguments(fn_type, args_ir, context)
ret_unpacker, ret_ofst, ret_len = _unpack_returndata(
buf, fn_type, call_kwargs, contract_address, context, call_expr
)
ret += arg_packer
if fn_type.return_type is None and not call_kwargs.skip_contract_check:
# if we do not expect return data, check that a contract exists at the
# target address. we must perform this check BEFORE the call because
# the contract might selfdestruct. on the other hand we can omit this
# when we _do_ expect return data because we later check
# `returndatasize` (that check works even if the contract
# selfdestructs).
ret.append(_extcodesize_check(contract_address))
gas = call_kwargs.gas
value = call_kwargs.value
use_staticcall = fn_type.mutability in (StateMutability.VIEW, StateMutability.PURE)
if context.is_constant():
assert use_staticcall, "typechecker missed this"
if use_staticcall:
call_op = ["staticcall", gas, contract_address, args_ofst, args_len, buf, ret_len]
else:
call_op = ["call", gas, contract_address, value, args_ofst, args_len, buf, ret_len]
ret.append(check_external_call(call_op))
return_t = fn_type.return_type
if return_t is not None:
ret.append(ret_unpacker)
return IRnode.from_list(ret, typ=return_t, location=MEMORY)
def ir_for_external_call(call_expr, context):
from vyper.codegen.expr import Expr # TODO rethink this circular import
contract_address = Expr.parse_value_expr(call_expr.func.value, context)
assert isinstance(contract_address.typ, InterfaceT)
args_ir = [Expr(x, context).ir_node for x in call_expr.args]
call_kwargs = _parse_kwargs(call_expr, context)
with contract_address.cache_when_complex("external_contract") as (b1, contract_address):
return b1.resolve(
_external_call_helper(contract_address, args_ir, call_kwargs, call_expr, context)
)
| _CallKwargs |
python | spyder-ide__spyder | spyder/utils/snippets/nodes.py | {
"start": 961,
"end": 1059
} | class ____:
SIMPLE = 'simple'
IF = 'if'
IF_ELSE = 'if_else'
ELSE = 'else'
| FormatKind |
python | coleifer__peewee | tests/fields.py | {
"start": 27728,
"end": 27819
} | class ____(TestModel):
key = BareField()
value = BareField(adapt=int, null=True)
| Bare |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/transfers/azure_fileshare_to_gcs.py | {
"start": 1534,
"end": 8248
} | class ____(BaseOperator):
"""
Sync an Azure FileShare directory with a Google Cloud Storage destination path.
Does not include subdirectories. May be filtered by prefix.
:param share_name: The Azure FileShare share where to find the objects. (templated)
:param directory_name: (Deprecated) Path to Azure FileShare directory which content is to be transferred.
Defaults to root directory (templated)
:param directory_path: (Optional) Path to Azure FileShare directory which content is to be transferred.
Defaults to root directory. Use this instead of ``directory_name``. (templated)
:param prefix: Prefix string which filters objects whose name begin with
such prefix. (templated)
:param azure_fileshare_conn_id: The source WASB connection
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param dest_gcs: The destination Google Cloud Storage bucket and prefix
where you want to store the files. (templated)
:param replace: Whether you want to replace existing destination files
or not.
:param gzip: Option to compress file for upload
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
Note that ``share_name``, ``directory_path``, ``prefix``, and ``dest_gcs`` are
templated, so you can use variables in them if you wish.
"""
template_fields: Sequence[str] = (
"share_name",
"directory_name",
"directory_path",
"prefix",
"dest_gcs",
)
def __init__(
self,
*,
share_name: str,
dest_gcs: str,
directory_name: str | None = None,
directory_path: str | None = None,
prefix: str = "",
azure_fileshare_conn_id: str = "azure_fileshare_default",
gcp_conn_id: str = "google_cloud_default",
replace: bool = False,
gzip: bool = False,
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.share_name = share_name
self.directory_path = directory_path
self.directory_name = directory_name
if self.directory_path is None and self.directory_name is not None:
self.directory_path = self.directory_name
warnings.warn(
"Use 'directory_path' instead of 'directory_name'.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.prefix = prefix
self.azure_fileshare_conn_id = azure_fileshare_conn_id
self.gcp_conn_id = gcp_conn_id
self.dest_gcs = dest_gcs
self.replace = replace
self.gzip = gzip
self.google_impersonation_chain = google_impersonation_chain
def _check_inputs(self) -> None:
if self.dest_gcs and not gcs_object_is_directory(self.dest_gcs):
self.log.info(
"Destination Google Cloud Storage path is not a valid "
'"directory", define a path that ends with a slash "/" or '
"leave it empty for the root of the bucket."
)
raise AirflowException(
'The destination Google Cloud Storage path must end with a slash "/" or be empty.'
)
def execute(self, context: Context):
self._check_inputs()
azure_fileshare_hook = AzureFileShareHook(
share_name=self.share_name,
azure_fileshare_conn_id=self.azure_fileshare_conn_id,
directory_path=self.directory_path,
)
files = azure_fileshare_hook.list_files()
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
dest_gcs_bucket, dest_gcs_object_prefix = _parse_gcs_url(self.dest_gcs)
if not self.replace:
# if we are not replacing -> list all files in the GCS bucket
# and only keep those files which are present in
# S3 and not in Google Cloud Storage
existing_files_prefixed = gcs_hook.list(dest_gcs_bucket, prefix=dest_gcs_object_prefix)
existing_files = []
# Remove the object prefix itself, an empty directory was found
if dest_gcs_object_prefix in existing_files_prefixed:
existing_files_prefixed.remove(dest_gcs_object_prefix)
# Remove the object prefix from all object string paths
for file in existing_files_prefixed:
if file.startswith(dest_gcs_object_prefix):
existing_files.append(file[len(dest_gcs_object_prefix) :])
else:
existing_files.append(file)
files = list(set(files) - set(existing_files))
if files:
self.log.info("%s files are going to be synced.", len(files))
if self.directory_path is None:
raise RuntimeError("The directory_name must be set!.")
for file in files:
azure_fileshare_hook = AzureFileShareHook(
share_name=self.share_name,
azure_fileshare_conn_id=self.azure_fileshare_conn_id,
directory_path=self.directory_path,
file_path=file,
)
with NamedTemporaryFile() as temp_file:
azure_fileshare_hook.get_file_to_stream(stream=temp_file)
temp_file.flush()
# There will always be a '/' before file because it is
# enforced at instantiation time
dest_gcs_object = dest_gcs_object_prefix + file
gcs_hook.upload(dest_gcs_bucket, dest_gcs_object, temp_file.name, gzip=self.gzip)
self.log.info("All done, uploaded %d files to Google Cloud Storage.", len(files))
else:
self.log.info("There are no new files to sync. Have a nice day!")
self.log.info("In sync, no files needed to be uploaded to Google Cloud Storage")
return files
| AzureFileShareToGCSOperator |
python | pypa__pip | tests/unit/test_logging.py | {
"start": 3821,
"end": 6475
} | class ____:
def _make_log_record(self) -> logging.LogRecord:
attrs = {
"msg": "my error",
}
record = logging.makeLogRecord(attrs)
return record
def test_broken_pipe_in_stderr_flush(self) -> None:
"""
Test sys.stderr.flush() raising BrokenPipeError.
This error should _not_ trigger an error in the logging framework.
"""
record = self._make_log_record()
with redirect_stderr(StringIO()) as stderr:
console = PipConsole(file=stderr, no_color=True, soft_wrap=True)
handler = RichPipStreamHandler(console)
with patch("sys.stderr.flush") as mock_flush:
mock_flush.side_effect = BrokenPipeError()
# The emit() call raises no exception.
handler.emit(record)
err_text = stderr.getvalue()
assert err_text.startswith("my error")
# Check that the logging framework tried to log the exception.
assert "Logging error" in err_text
assert "BrokenPipeError" in err_text
assert "Message: 'my error'" in err_text
def test_broken_pipe_in_stdout_write(self) -> None:
"""
Test sys.stdout.write() raising BrokenPipeError.
This error _should_ trigger an error in the logging framework.
"""
record = self._make_log_record()
with redirect_stdout(StringIO()) as stdout:
console = PipConsole(file=stdout, no_color=True, soft_wrap=True)
handler = RichPipStreamHandler(console)
with patch("sys.stdout.write") as mock_write:
mock_write.side_effect = BrokenPipeError()
with pytest.raises(BrokenStdoutLoggingError):
handler.emit(record)
def test_broken_pipe_in_stdout_flush(self) -> None:
"""
Test sys.stdout.flush() raising BrokenPipeError.
This error _should_ trigger an error in the logging framework.
"""
record = self._make_log_record()
with redirect_stdout(StringIO()) as stdout:
console = PipConsole(file=stdout, no_color=True, soft_wrap=True)
handler = RichPipStreamHandler(console)
with patch("sys.stdout.flush") as mock_flush:
mock_flush.side_effect = BrokenPipeError()
with pytest.raises(BrokenStdoutLoggingError):
handler.emit(record)
output = stdout.getvalue()
# Sanity check that the log record was written, since flush() happens
# after write().
assert output.startswith("my error")
| TestColorizedStreamHandler |
python | pandas-dev__pandas | asv_bench/benchmarks/strings.py | {
"start": 7521,
"end": 8076
} | class ____:
def setup(self):
self.series_arr = np.array([str(i) * 10 for i in range(10**5)], dtype=object)
self.series_arr_nan = np.concatenate([self.series_arr, np.array([NA] * 1000)])
def time_string_array_construction(self):
StringArray(self.series_arr, dtype=StringDtype())
def time_string_array_with_nan_construction(self):
StringArray(self.series_arr_nan, dtype=StringDtype())
def peakmem_stringarray_construction(self):
StringArray(self.series_arr, dtype=StringDtype())
| StringArrayConstruction |
python | huggingface__transformers | tests/models/speecht5/test_modeling_speecht5.py | {
"start": 69127,
"end": 70411
} | class ____(unittest.TestCase):
@cached_property
def default_processor(self):
return SpeechT5Processor.from_pretrained("microsoft/speecht5_vc")
def _load_datasamples(self, num_samples):
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id")[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_generation_librispeech(self):
model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc")
model.to(torch_device)
processor = self.default_processor
input_speech = self._load_datasamples(1)
input_values = processor(audio=input_speech, return_tensors="pt").input_values.to(torch_device)
speaker_embeddings = torch.zeros((1, 512), device=torch_device)
generated_speech = model.generate_speech(input_values, speaker_embeddings=speaker_embeddings)
self.assertEqual(generated_speech.shape[1], model.config.num_mel_bins)
self.assertGreaterEqual(generated_speech.shape[0], 300)
self.assertLessEqual(generated_speech.shape[0], 310)
| SpeechT5ForSpeechToSpeechIntegrationTests |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 18707,
"end": 19348
} | class ____(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ("items", "ctx")
items: list[Expr]
ctx: str
def as_const(self, eval_ctx: EvalContext | None = None) -> tuple[t.Any, ...]:
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self) -> bool:
for item in self.items:
if not item.can_assign():
return False
return True
| Tuple |
python | huggingface__transformers | tests/models/biogpt/test_modeling_biogpt.py | {
"start": 17193,
"end": 19095
} | class ____(unittest.TestCase):
@slow
def test_inference_lm_head_model(self):
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
output = model(input_ids)[0]
vocab_size = 42384
expected_shape = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_biogpt_generation_beam_search(self):
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.to(torch_device)
torch.manual_seed(0)
tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
output_ids = model.generate(
**tokenized,
min_length=100,
max_length=1024,
num_beams=5,
early_stopping=True,
)
output_str = tokenizer.decode(output_ids[0])
EXPECTED_OUTPUT_STR = (
"</s>"
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths. "
"</s>"
)
self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| BioGptModelIntegrationTest |
python | GoogleCloudPlatform__python-docs-samples | dialogflow-cx/streaming_detect_intent_infinite.py | {
"start": 2776,
"end": 8661
} | class ____:
"""Audio Input / Output"""
def __init__(
self,
rate: int,
chunk_size: int,
) -> None:
self._rate = rate
self.chunk_size = chunk_size
self._buff = asyncio.Queue()
self.closed = False
self.start_time = None # only set when first audio received
self.audio_input = []
self._audio_interface = pyaudio.PyAudio()
self._input_audio_stream = None
self._output_audio_stream = None
# Get default input device info
try:
input_device_info = self._audio_interface.get_default_input_device_info()
self.input_device_name = input_device_info["name"]
logger.info(f"Using input device: {self.input_device_name}")
except IOError:
logger.error("Could not get default input device info. Exiting.")
sys.exit(1)
# Get default output device info
try:
output_device_info = self._audio_interface.get_default_output_device_info()
self.output_device_name = output_device_info["name"]
logger.info(f"Using output device: {self.output_device_name}")
except IOError:
logger.error("Could not get default output device info. Exiting.")
sys.exit(1)
# setup input audio stream
try:
self._input_audio_stream = self._audio_interface.open(
format=pyaudio.paInt16,
channels=1,
rate=self._rate,
input=True,
frames_per_buffer=self.chunk_size,
stream_callback=self._fill_buffer,
)
except OSError as e:
logger.error(f"Could not open input stream: {e}. Exiting.")
sys.exit(1)
# setup output audio stream
try:
self._output_audio_stream = self._audio_interface.open(
format=pyaudio.paInt16,
channels=1,
rate=self._rate,
output=True,
frames_per_buffer=self.chunk_size,
)
self._output_audio_stream.stop_stream()
except OSError as e:
logger.error(f"Could not open output stream: {e}. Exiting.")
sys.exit(1)
def __enter__(self) -> "AudioIO":
"""Opens the stream."""
self.closed = False
return self
def __exit__(self, *args: any) -> None:
"""Closes the stream and releases resources."""
self.closed = True
if self._input_audio_stream:
self._input_audio_stream.stop_stream()
self._input_audio_stream.close()
self._input_audio_stream = None
if self._output_audio_stream:
self._output_audio_stream.stop_stream()
self._output_audio_stream.close()
self._output_audio_stream = None
# Signal the generator to terminate
self._buff.put_nowait(None)
self._audio_interface.terminate()
def _fill_buffer(
self, in_data: bytes, frame_count: int, time_info: dict, status_flags: int
) -> tuple[None, int]:
"""Continuously collect data from the audio stream, into the buffer."""
# Capture the true start time when the first chunk is received
if self.start_time is None:
self.start_time = get_current_time()
# only capture microphone input when output audio stream is stopped
if self._output_audio_stream and self._output_audio_stream.is_stopped():
self._buff.put_nowait(in_data)
self.audio_input.append(in_data)
return None, pyaudio.paContinue
async def generator(self) -> AsyncGenerator[bytes, None]:
"""Stream Audio from microphone to API and to local buffer."""
while not self.closed:
try:
chunk = await asyncio.wait_for(self._buff.get(), timeout=1)
if chunk is None:
logger.debug("[generator] Received None chunk, ending stream")
return
data = [chunk]
while True:
try:
chunk = self._buff.get_nowait()
if chunk is None:
logger.debug(
"[generator] Received None chunk (nowait), ending stream"
)
return
data.append(chunk)
except asyncio.QueueEmpty:
break
combined_data = b"".join(data)
yield combined_data
except asyncio.TimeoutError:
logger.debug(
"[generator] No audio chunk received within timeout, continuing..."
)
continue
def play_audio(self, audio_data: bytes) -> None:
"""Plays audio from the given bytes data, removing WAV header if needed."""
# Remove WAV header if present
if audio_data.startswith(b"RIFF"):
try:
# Attempt to unpack the WAV header to determine header size.
header_size = struct.calcsize("<4sI4s4sIHHIIHH4sI")
header = struct.unpack("<4sI4s4sIHHIIHH4sI", audio_data[:header_size])
logger.debug(f"WAV header detected: {header}")
audio_data = audio_data[header_size:] # Remove the header
except struct.error as e:
logger.error(f"Error unpacking WAV header: {e}")
# If header parsing fails, play the original data; may not be a valid WAV
# Play the raw PCM audio
try:
self._output_audio_stream.start_stream()
self._output_audio_stream.write(audio_data)
finally:
self._output_audio_stream.stop_stream()
| AudioIO |
python | pytorch__pytorch | test/distributed/test_collective_utils.py | {
"start": 5871,
"end": 7162
} | class ____(TestCase):
def setUp(self):
super().setUp()
if not c10d.is_initialized():
self.rank = 0
self.world_size = 4096
store = FakeStore()
c10d.init_process_group(
backend="fake",
world_size=self.world_size,
rank=self.rank,
store=store,
)
def tearDown(self):
c10d.destroy_process_group()
def test_summarize_ranks(self):
mesh_dim_names = ("pp", "dp", "tp")
mesh = init_device_mesh("cpu", (8, 64, 8), mesh_dim_names=mesh_dim_names)
ranks_lists = {name: mesh[name].mesh.tolist() for name in mesh_dim_names}
summaries = {
name: _summarize_ranks(ranks_lists[name]) for name in mesh_dim_names
}
self.assertEqual(summaries["pp"], "0:4096:512")
self.assertEqual(summaries["dp"], "0:512:8")
self.assertEqual(summaries["tp"], "0:8")
self.assertEqual(
_summarize_ranks([1, 2, 3, 6, 7, 8, 10, 12, 14, 16]),
"1:4,6:9,10:18:2",
)
self.assertEqual(
_summarize_ranks([1]),
"1",
)
instantiate_parametrized_tests(TestCollectiveUtils)
if __name__ == "__main__":
run_tests()
| TestUtils |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/winapi.py | {
"start": 7600,
"end": 10708
} | class ____(ctypes.Structure):
_fields_ = [("NextEntryOffset", ctypes.wintypes.DWORD),
("Action", ctypes.wintypes.DWORD),
("FileNameLength", ctypes.wintypes.DWORD),
#("FileName", (ctypes.wintypes.WCHAR * 1))]
("FileName", (ctypes.c_char * 1))]
LPFNI = ctypes.POINTER(FILE_NOTIFY_INFORMATION)
# We don't need to recalculate these flags every time a call is made to
# the win32 API functions.
WATCHDOG_FILE_FLAGS = FILE_FLAG_BACKUP_SEMANTICS
WATCHDOG_FILE_SHARE_FLAGS = reduce(
lambda x, y: x | y, [
FILE_SHARE_READ,
FILE_SHARE_WRITE,
FILE_SHARE_DELETE,
])
WATCHDOG_FILE_NOTIFY_FLAGS = reduce(
lambda x, y: x | y, [
FILE_NOTIFY_CHANGE_FILE_NAME,
FILE_NOTIFY_CHANGE_DIR_NAME,
FILE_NOTIFY_CHANGE_ATTRIBUTES,
FILE_NOTIFY_CHANGE_SIZE,
FILE_NOTIFY_CHANGE_LAST_WRITE,
FILE_NOTIFY_CHANGE_SECURITY,
FILE_NOTIFY_CHANGE_LAST_ACCESS,
FILE_NOTIFY_CHANGE_CREATION,
])
BUFFER_SIZE = 2048
def _parse_event_buffer(readBuffer, nBytes):
results = []
while nBytes > 0:
fni = ctypes.cast(readBuffer, LPFNI)[0]
ptr = ctypes.addressof(fni) + FILE_NOTIFY_INFORMATION.FileName.offset
#filename = ctypes.wstring_at(ptr, fni.FileNameLength)
filename = ctypes.string_at(ptr, fni.FileNameLength)
results.append((fni.Action, filename.decode('utf-16')))
numToSkip = fni.NextEntryOffset
if numToSkip <= 0:
break
readBuffer = readBuffer[numToSkip:]
nBytes -= numToSkip # numToSkip is long. nBytes should be long too.
return results
def get_directory_handle(path):
"""Returns a Windows handle to the specified directory path."""
return CreateFileW(path, FILE_LIST_DIRECTORY, WATCHDOG_FILE_SHARE_FLAGS,
None, OPEN_EXISTING, WATCHDOG_FILE_FLAGS, None)
def close_directory_handle(handle):
try:
CancelIoEx(handle, None) # force ReadDirectoryChangesW to return
CloseHandle(handle) # close directory handle
except WindowsError:
try:
CloseHandle(handle) # close directory handle
except:
return
def read_directory_changes(handle, recursive):
"""Read changes to the directory using the specified directory handle.
http://timgolden.me.uk/pywin32-docs/win32file__ReadDirectoryChangesW_meth.html
"""
event_buffer = ctypes.create_string_buffer(BUFFER_SIZE)
nbytes = ctypes.wintypes.DWORD()
try:
ReadDirectoryChangesW(handle, ctypes.byref(event_buffer),
len(event_buffer), recursive,
WATCHDOG_FILE_NOTIFY_FLAGS,
ctypes.byref(nbytes), None, None)
except WindowsError as e:
if e.winerror == ERROR_OPERATION_ABORTED:
return [], 0
raise e
# Python 2/3 compat
try:
int_class = long
except NameError:
int_class = int
return event_buffer.raw, int_class(nbytes.value)
| FILE_NOTIFY_INFORMATION |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.