language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
huggingface__transformers
|
src/transformers/models/sam/processing_sam.py
|
{
"start": 1572,
"end": 11069
}
|
class ____(ProcessorMixin):
    r"""
    Constructs a SAM processor which wraps a SAM image processor and a 2D points & Bounding boxes processor into a
    single processor.
    [`SamProcessor`] offers all the functionalities of [`SamImageProcessor`]. See the docstring of
    [`~SamImageProcessor.__call__`] for more information.
    Args:
        image_processor (`SamImageProcessor`):
            An instance of [`SamImageProcessor`]. The image processor is a required input.
    """
    def __init__(self, image_processor):
        super().__init__(image_processor)
        # Length (in pixels) of the longest image edge after resizing; all prompt
        # coordinates are rescaled into this target space.
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        This method uses [`SamImageProcessor.__call__`] method to prepare image(s) for the model. It also prepares 2D
        points and bounding boxes for the model if they are provided.
        """
        output_kwargs = self._merge_kwargs(
            SamProcessorKwargs,
            tokenizer_init_kwargs={},
            **kwargs,
        )
        # Prompt-related kwargs are consumed here, not by the image processor,
        # so remove them before forwarding the rest.
        input_points = output_kwargs["images_kwargs"].pop("input_points", None)
        input_labels = output_kwargs["images_kwargs"].pop("input_labels", None)
        input_boxes = output_kwargs["images_kwargs"].pop("input_boxes", None)
        # NOTE(review): if "point_pad_value" is missing from the merged kwargs this
        # is None, which _pad_points_and_labels would add to a float array —
        # presumably the kwargs defaults always supply it; confirm upstream.
        point_pad_value = output_kwargs["images_kwargs"].pop("point_pad_value", None)
        encoding_image_processor = self.image_processor(
            images,
            **output_kwargs["images_kwargs"],
        )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        # Framework tensors expose .numpy(); the coordinate math below is numpy-based.
        if hasattr(original_sizes, "numpy"):
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=output_kwargs["images_kwargs"].get("return_tensors"),
            point_pad_value=point_pad_value,
        )
        return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
        point_pad_value=-10,
    ):
        """
        Rescale point/box prompts from original-image coordinates into the resized
        (`self.target_size`) space, pad ragged point groups, and attach the results
        to the encoding (as torch tensors when `return_tensors == "pt"`).
        """
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                # Count mismatch: assume every point group belongs to the first
                # image's original size.
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(
                        input_points, input_labels, point_pad_value
                    )
            input_points = np.array(input_points)
        if input_labels is not None:
            input_labels = np.array(input_labels)
        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels, point_pad_value):
        r"""
        The method pads the 2D points and labels to the maximum number of points in the batch.
        """
        expected_nb_points = max(point.shape[0] for point in input_points)
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                # Pad missing rows with the sentinel value so every group ends up
                # with the same number of points.
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + point_pad_value], axis=0
                )
                # NOTE(review): only ONE pad label is appended here even when more
                # than one point row was padded above — verify groups can only
                # differ by a single point, otherwise labels fall out of sync.
                input_labels[i] = np.append(input_labels[i], [point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(
        self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False
    ) -> np.ndarray:
        """
        Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        # Copy first so the caller's array is never mutated.
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            # A (x1, y1, x2, y2) box is scaled as two corner points.
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        r"""
        Check and preprocesses the 2D points, labels and bounding boxes. It checks if the input is valid and if they
        are, it converts the coordinates of the points and bounding boxes. If a user passes directly a `torch.Tensor`,
        it is converted to a `numpy.ndarray` and then to a `list`.
        """
        if input_points is not None:
            # Framework tensors expose .numpy(); normalize to nested lists first.
            if hasattr(input_points, "numpy"):
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            # Boxes must be nested three deep: batch -> boxes per image -> 4 coords.
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        # Image-processor inputs plus the size bookkeeping keys produced above.
        image_processor_input_names = self.image_processor.model_input_names
        return list(image_processor_input_names + ["original_sizes", "reshaped_input_sizes"])
    def post_process_masks(self, *args, **kwargs):
        # Thin delegation to the image processor's mask post-processing.
        return self.image_processor.post_process_masks(*args, **kwargs)
__all__ = ["SamProcessor"]
|
SamProcessor
|
python
|
ray-project__ray
|
rllib/models/tf/layers/skip_connection.py
|
{
"start": 252,
"end": 1653
}
|
class ____(tf.keras.layers.Layer if tf else object):
    """Skip connection layer.

    Adds the original input to the output (regular residual layer) OR uses
    input as hidden state input to a given fan_in_layer.
    """

    def __init__(self, layer: Any, fan_in_layer: Optional[Any] = None, **kwargs):
        """Initializes a SkipConnection keras layer object.

        Args:
            layer (tf.keras.layers.Layer): Any layer processing inputs.
            fan_in_layer (Optional[tf.keras.layers.Layer]): An optional
                layer taking two inputs: The original input and the output
                of `layer`.
        """
        if log_once("skip_connection"):
            deprecation_warning(
                old="rllib.models.tf.layers.SkipConnection",
            )
        super().__init__(**kwargs)
        self._layer = layer
        self._fan_in_layer = fan_in_layer

    def call(self, inputs: TensorType, **kwargs) -> TensorType:
        wrapped_out = self._layer(inputs, **kwargs)
        if self._fan_in_layer is not None:
            # Fan-in (e.g. GRU): `inputs` acts as the state that is combined
            # with the wrapped layer's output.
            return self._fan_in_layer((inputs, wrapped_out))
        # Plain residual connection: add the skip path back onto the output.
        return wrapped_out + inputs
|
SkipConnection
|
python
|
ray-project__ray
|
python/ray/serve/_private/test_utils.py
|
{
"start": 4482,
"end": 4545
}
|
class ____:
    """Minimal stand-in mimicking a Ray remote function's interface."""

    def remote(self):
        """No-op placeholder for the real remote invocation."""
        return None
|
FakeRemoteFunction
|
python
|
google__pytype
|
pytype/abstract/_function_base.py
|
{
"start": 10140,
"end": 15792
}
|
class ____(Generic[_SomeFunction], _base.BaseValue):
  """A function type which has had an argument bound into it."""
  # The wrapped function whose first positional parameter has been bound.
  underlying: _SomeFunction
  def __init__(
      self, callself: "cfg.Variable", underlying: _SomeFunction
  ) -> None:
    super().__init__(underlying.name, underlying.ctx)
    self.cls = _classes.FunctionPyTDClass(self, self.ctx)
    self._callself = callself
    self.underlying = underlying
    self.is_attribute_of_class = False
    self.is_class_builder = False
    # If the function belongs to `ParameterizedClass`, we will annotate the
    # `self` when do argument matching
    self._self_annot = None
    inst = abstract_utils.get_atomic_value(
        self._callself, default=self.ctx.convert.unsolvable
    )
    if self.underlying.should_set_self_annot():
      self._self_annot = self._get_self_annot(inst)
    # Cache the bound instance's type-parameter alias map (if it has one) so
    # calls can resolve aliased type parameters.
    if isinstance(inst, _instance_base.SimpleValue):
      self.alias_map = inst.instance_type_parameters.aliases
    elif isinstance(inst, _typing.TypeParameterInstance):
      self.alias_map = inst.instance.instance_type_parameters.aliases
    else:
      self.alias_map = None
  def _get_self_annot(self, callself: "cfg.Variable") -> _base.BaseValue:
    """Computes the annotation used for `self`/`cls` during argument matching."""
    if isinstance(self.underlying, SignedFunction):
      self_type = self.underlying.get_self_type_param()
    else:
      self_type = None
    if not self_type:
      return abstract_utils.get_generic_type(callself)
    if "classmethod" in self.underlying.decorators:
      # A classmethod receives the class object, i.e. type[self_type].
      return _classes.ParameterizedClass(
          self.ctx.convert.type_type, {abstract_utils.T: self_type}, self.ctx
      )
    else:
      return self_type
  def argcount(self, node: "cfg.CFGNode") -> int:
    return self.underlying.argcount(node) - 1  # account for self
  @property
  def signature(self) -> function.Signature:
    # The bound signature hides the already-bound first parameter.
    return self.underlying.signature.drop_first_parameter()  # pytype: disable=attribute-error
  @property
  def callself(self) -> "cfg.Variable":
    # The variable this function was bound to (i.e. `self`/`cls`).
    return self._callself
  def call(
      self,
      node: "cfg.CFGNode",
      func: "cfg.Binding",
      args: function.Args,
      alias_map: "datatypes.UnionFind | None" = None,
  ) -> "tuple[cfg.CFGNode, cfg.Variable]":
    """Calls the underlying function with `callself` prepended to the args."""
    if self.name.endswith(".__init__"):
      self.ctx.callself_stack.append(self._callself)
    # The "self" parameter is automatically added to the list of arguments, but
    # only if the function actually takes any arguments.
    if self.argcount(node) >= 0:
      args = args.replace(posargs=(self._callself,) + args.posargs)
    try:
      if self._self_annot:
        should_set_self_annot = True
      else:
        # If a function is recursively calling itself and has set the `self`
        # annotation for the previous call, we want to clear it for this one.
        should_set_self_annot = (
            isinstance(self.underlying, SignedFunction)
            and self.underlying.has_self_annot
        )
      if should_set_self_annot:
        context = self.underlying.set_self_annot(self._self_annot)  # pytype: disable=attribute-error
      else:
        context = contextlib.nullcontext()
      with context:
        node, ret = self.underlying.call(
            node, func, args, alias_map=self.alias_map
        )
    except error_types.InvalidParameters as e:
      if self._callself and self._callself.bindings:
        if "." in e.name:
          # match_args will try to prepend the parent's name to the error name.
          # Overwrite it with _callself instead, which may be more exact.
          _, _, e.name = e.name.rpartition(".")
        e.name = f"{self._callself.data[0].name}.{e.name}"
      raise
    finally:
      if self.name.endswith(".__init__"):
        self.ctx.callself_stack.pop()
    return node, ret
  def get_positional_names(self) -> Sequence[str]:
    return self.underlying.get_positional_names()  # pytype: disable=attribute-error
  def has_varargs(self) -> bool:
    return self.underlying.has_varargs()
  def has_kwargs(self) -> bool:
    return self.underlying.has_kwargs()
  @property
  def is_abstract(self) -> bool:
    return self.underlying.is_abstract
  @is_abstract.setter
  def is_abstract(self, value: bool) -> None:
    self.underlying.is_abstract = value
  @property
  def is_classmethod(self) -> bool:
    return self.underlying.is_classmethod
  def repr_names(
      self, callself_repr: "Callable[[cfg.Variable], str] | None" = None
  ) -> Sequence[str]:
    """Names to use in the bound function's string representation.
    This function can return multiple names because there may be multiple
    bindings in callself.
    Args:
      callself_repr: Optionally, a repr function for callself.
    Returns:
      A non-empty iterable of string names.
    """
    callself_repr = callself_repr or (lambda v: v.name)
    if self._callself and self._callself.bindings:
      callself_names = [callself_repr(v) for v in self._callself.data]
    else:
      callself_names = ["<class>"]
    # We don't need to recursively call repr_names() because we replace the
    # parent name with the callself.
    underlying = self.underlying.name
    if underlying.count(".") > 0:
      underlying = underlying.split(".", 1)[-1]
    return [callself + "." + underlying for callself in callself_names]
  def __repr__(self) -> str:
    return self.repr_names()[0] + "(...)"
  def get_special_attribute(
      self, node: "cfg.CFGNode", name: str, valself: "cfg.Variable"
  ) -> "cfg.Variable | None":
    # Implements the `__self__`/`__func__` attributes of bound methods.
    if name == "__self__":
      return self.callself
    elif name == "__func__":
      return self.underlying.to_variable(node)
    return super().get_special_attribute(node, name, valself)
|
BoundFunction
|
python
|
airbytehq__airbyte
|
airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/config.py
|
{
"start": 10867,
"end": 11375
}
|
class ____(BaseConfig):
    # Acceptance-test configuration for the "future state" check. No class
    # docstring on purpose: pydantic would surface it as the schema description.
    # Path to a state file whose cursor values lie in the far future.
    future_state_path: Optional[str] = Field(description="Path to a state file with values in far future")
    # NOTE(review): pydantic copies non-hashable Field defaults per instance, so
    # `default=[]` here is not the shared-mutable-default pitfall — confirm the
    # installed pydantic version preserves that behavior.
    missing_streams: List[EmptyStreamConfiguration] = Field(default=[], description="List of missing streams with valid bypass reasons.")
    # Optional reason for skipping this check entirely.
    bypass_reason: Optional[str]
    cursor_format: Optional[FutureStateCursorFormatConfiguration] = Field(
        default_factory=FutureStateCursorFormatConfiguration,
        description=("Expected cursor format"),
    )
|
FutureStateConfig
|
python
|
catalyst-team__catalyst
|
catalyst/contrib/losses/triplet.py
|
{
"start": 7090,
"end": 9015
}
|
class ____(nn.Module):
    """TripletPairwiseEmbeddingLoss – proof of concept criterion.

    Still work in progress.
    @TODO: Docs. Contribution is welcome.
    """

    def __init__(self, margin: float = 0.3, reduction: str = "mean"):
        """
        Args:
            margin: margin parameter
            reduction: criterion reduction type
        """
        super().__init__()
        self.margin = margin
        self.reduction = reduction or "none"

    def forward(self, embeddings_pred, embeddings_true):
        """
        Work in progress.

        Args:
            embeddings_pred: predicted embeddings
                with shape [batch_size, embedding_size]
            embeddings_true: true embeddings
                with shape [batch_size, embedding_size]

        Returns:
            torch.Tensor: loss
        """
        device = embeddings_pred.device
        batch_size = embeddings_pred.shape[0]
        # Dense similarity between every predicted and every true embedding:
        # [batch_size, embedding_size] x [batch_size, embedding_size]
        # -> [batch_size, batch_size]
        similarity = torch.einsum("se,ae->sa", embeddings_pred, embeddings_true)
        row_idx = torch.arange(batch_size, device=device)
        # Push the diagonal far down so a sample never selects itself as its
        # own hardest negative.
        masked_similarity = similarity + torch.diag(
            torch.full([batch_size], -(10 ** 9), device=device)
        )
        # TODO argsort, take k worst
        hardest_negative_ids = masked_similarity.argmax(dim=-1)
        negative_similarities = similarity[row_idx, hardest_negative_ids]
        positive_similarities = similarity[row_idx, row_idx]
        # Hinge on (margin - positive + hardest negative).
        loss = torch.relu(self.margin - positive_similarities + negative_similarities)
        if self.reduction == "sum":
            return torch.sum(loss)
        if self.reduction == "mean":
            return torch.sum(loss) / batch_size
        return loss
|
TripletPairwiseEmbeddingLoss
|
python
|
huggingface__transformers
|
src/transformers/models/bert/modeling_bert.py
|
{
"start": 53698,
"end": 55825
}
|
class ____(BertPreTrainedModel):
    # BERT encoder with a per-token linear classification head.
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Token-level classification only needs per-token hidden states, so the
        # pooling layer is skipped.
        self.bert = BertModel(config, add_pooling_layer=False)
        # Fall back to the generic hidden dropout when no classifier-specific
        # dropout is configured.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            return_dict=True,
            **kwargs,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        # Map every hidden state to num_labels scores.
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            # Flatten all dimensions but the label one for the token-wise CE loss.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@auto_docstring
|
BertForTokenClassification
|
python
|
viewflow__viewflow
|
viewflow/workflow/flow/views/actions.py
|
{
"start": 4203,
"end": 5731
}
|
class ____(mixins.ProcessViewTemplateNames, generic.DetailView):
    """Confirmation view that cancels a workflow process.

    GET renders the ``process_cancel.html`` confirmation page; POST with a
    ``_cancel_process`` parameter cancels the process (other POSTs re-render
    the confirmation page).
    """

    context_object_name = "process"
    flow_class = None
    pk_url_kwarg = "process_pk"
    template_filename = "process_cancel.html"

    def get_queryset(self):
        """Flow processes."""
        return self.flow_class.process_class._default_manager.all()

    @cached_property
    def active_activations(self):
        """Activations for every task of the process that is still in progress."""
        return [
            task.flow_task.activation_class(task)
            for task in self.object.task_set.exclude(
                status__in=[STATUS.DONE, STATUS.CANCELED, STATUS.REVIVED]
            )
        ]

    def post(self, request, *args, **kwargs):
        """Cancel active tasks and the process."""
        self.object = self.get_object()
        if self.object.status in [PROCESS.DONE, PROCESS.CANCELED]:
            # BUG FIX: these messages used f-string placeholders without the
            # ``f`` prefix, so the literal text "{self.object.pk}" was shown to
            # users. Interpolate via str.format so the string stays a single
            # translatable template.
            messages.add_message(
                self.request,
                messages.ERROR,
                _("Process #{pk} can not be canceled.").format(pk=self.object.pk),
                fail_silently=True,
            )
            return HttpResponseRedirect("../")
        elif "_cancel_process" in request.POST:
            self.object.flow_class.instance.cancel(self.object)
            messages.add_message(
                self.request,
                messages.SUCCESS,
                _("Process #{pk} has been canceled.").format(pk=self.object.pk),
                fail_silently=True,
            )
            return HttpResponseRedirect("../")
        else:
            return self.get(request, *args, **kwargs)
|
CancelProcessView
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-shared/dagster_shared/serdes/serdes.py
|
{
"start": 2968,
"end": 3950
}
|
class ____(Mapping[_K, _V]):
    """Wrapper class for non-scalar key mappings, used to performantly type check when serializing
    without impacting the performance of serializing the more common scalar key dicts.

    May be replaceable with a different clever scheme.
    """

    def __init__(self, mapping: "Mapping[_K, _V] | None" = None) -> None:
        # FIX: the previous signature used a mutable default (``mapping={}``)
        # shared across all instances. The wrapper never mutates it, so behavior
        # is unchanged, but default to a fresh empty dict to avoid the classic
        # shared-mutable-default pitfall.
        self.mapping: Mapping[_K, _V] = {} if mapping is None else mapping

    def __setitem__(self, key: _K, item: _V):
        # Read-only by contract: fail loudly on any mutation attempt.
        raise NotImplementedError("SerializableNonScalarKeyMapping is immutable")

    def __getitem__(self, item: _K) -> _V:
        return self.mapping[item]

    def __len__(self) -> int:
        return len(self.mapping)

    def __iter__(self) -> Iterator[_K]:
        return iter(self.mapping)
###################################################################################################
# Whitelisting
###################################################################################################
|
SerializableNonScalarKeyMapping
|
python
|
nedbat__coveragepy
|
coverage/templite.py
|
{
"start": 540,
"end": 644
}
|
class ____(ValueError):
    """Raised when a template has a syntax error."""
|
TempliteSyntaxError
|
python
|
tornadoweb__tornado
|
tornado/httpclient.py
|
{
"start": 24608,
"end": 28784
}
|
class ____:
    """HTTP Response object.

    Attributes:

    * ``request``: HTTPRequest object
    * ``code``: numeric HTTP status code, e.g. 200 or 404
    * ``reason``: human-readable reason phrase describing the status code
    * ``headers``: `tornado.httputil.HTTPHeaders` object
    * ``effective_url``: final location of the resource after following any
      redirects
    * ``buffer``: ``cStringIO`` object for response body
    * ``body``: response body as bytes (created on demand from ``self.buffer``)
    * ``error``: Exception object, if any
    * ``request_time``: seconds from request start to finish. Includes all
      network operations from DNS resolution to receiving the last byte of
      data. Does not include time spent in the queue (due to the
      ``max_clients`` option). If redirects were followed, only includes
      the final request.
    * ``start_time``: Time at which the HTTP operation started, based on
      `time.time` (not the monotonic clock used by `.IOLoop.time`). May
      be ``None`` if the request timed out while in the queue.
    * ``time_info``: dictionary of diagnostic timing information from the
      request. Available data are subject to change, but currently uses timings
      available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
      plus ``queue``, which is the delay (if any) introduced by waiting for
      a slot under `AsyncHTTPClient`'s ``max_clients`` setting.

    .. versionadded:: 5.1
       Added the ``start_time`` attribute.

    .. versionchanged:: 5.1
       The ``request_time`` attribute previously included time spent in the queue
       for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time
       is excluded in both implementations. ``request_time`` is now more accurate for
       ``curl_httpclient`` because it uses a monotonic clock when available.
    """

    # I'm not sure why these don't get type-inferred from the references in __init__.
    error = None  # type: Optional[BaseException]
    _error_is_response_code = False
    request = None  # type: HTTPRequest

    def __init__(
        self,
        request: HTTPRequest,
        code: int,
        headers: Optional[httputil.HTTPHeaders] = None,
        buffer: Optional[BytesIO] = None,
        effective_url: Optional[str] = None,
        error: Optional[BaseException] = None,
        request_time: Optional[float] = None,
        time_info: Optional[Dict[str, float]] = None,
        reason: Optional[str] = None,
        start_time: Optional[float] = None,
    ) -> None:
        # Unwrap the proxy object used internally to apply request defaults.
        self.request = request.request if isinstance(request, _RequestProxy) else request
        self.code = code
        self.reason = reason or httputil.responses.get(code, "Unknown")
        self.headers = headers if headers is not None else httputil.HTTPHeaders()
        self.buffer = buffer
        self._body = None  # type: Optional[bytes]
        self.effective_url = request.url if effective_url is None else effective_url
        self._error_is_response_code = False
        if error is not None:
            self.error = error
        elif not (200 <= self.code < 300):
            # Synthesize an HTTPError for non-2xx status codes.
            self._error_is_response_code = True
            self.error = HTTPError(self.code, message=self.reason, response=self)
        else:
            self.error = None
        self.start_time = start_time
        self.request_time = request_time
        self.time_info = time_info or {}

    @property
    def body(self) -> bytes:
        # Materialize and cache the buffer contents on first access.
        if self.buffer is None:
            return b""
        if self._body is None:
            self._body = self.buffer.getvalue()
        return self._body

    def rethrow(self) -> None:
        """If there was an error on the request, raise an `HTTPError`."""
        if self.error:
            raise self.error

    def __repr__(self) -> str:
        args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items()))
        return f"{self.__class__.__name__}({args})"
|
HTTPResponse
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/util/test_hex.py
|
{
"start": 2045,
"end": 2751
}
|
class ____:
    """Checks ``cartesian_to_axial`` hex-coordinate conversion at default aspect."""

    def test_default_aspect_pointytop(self) -> None:
        xs = np.array([0, -2, 2, -1.5, -1.5, 1.5, 1.5])
        ys = np.array([0, 0, 0, 1.5, -1.5, 1.5, -1.5])
        q, r = buh.cartesian_to_axial(xs, ys, 1, "pointytop")
        expected = [(0, 0), (-1, 0), (1, 0), (0, -1), (-1, 1), (1, -1), (0, 1)]
        assert list(zip(q, r)) == expected

    def test_default_aspect_flattop(self) -> None:
        xs = np.array([0, 0, 0, 1.5, -1.5, 1.5, -1.5])
        ys = np.array([0, -2, 2, -1.5, -1.5, 1.5, 1.5])
        q, r = buh.cartesian_to_axial(xs, ys, 1, "flattop")
        expected = [(0, 0), (0, 1), (0, -1), (1, 0), (-1, 1), (1, -1), (-1, 0)]
        assert list(zip(q, r)) == expected
|
Test_cartesian_to_axial
|
python
|
spack__spack
|
lib/spack/spack/test/cray_manifest.py
|
{
"start": 2218,
"end": 15417
}
|
class ____:
    """Builder for a compiler entry of a Cray manifest JSON document."""

    def __init__(self, *, name, version, arch=None, executables=None, prefix=None):
        self.name = name
        self.version = version
        # Fall back to placeholder arch/executables when not provided.
        self.arch = arch or JsonArchEntry("anyplatform", "anyos", "anytarget")
        self.executables = executables or {"cc": "cc", "cxx": "cxx", "fc": "fc"}
        self.prefix = prefix

    def compiler_json(self):
        """Full record as it appears under the manifest's "compilers" key."""
        entry = {
            "name": self.name,
            "version": self.version,
            "arch": self.arch.compiler_json(),
            "executables": self.executables,
        }
        # See https://github.com/spack/spack/pull/40061
        if self.prefix is not None:
            entry["prefix"] = self.prefix
        return entry

    def spec_json(self):
        """The compiler spec only lists the name/version, not
        arch/executables.
        """
        return {"name": self.name, "version": self.version}
@pytest.fixture
def _common_arch(test_platform):
    """Arch entry matching the test platform, with a generic target family."""
    generic = spack.vendor.archspec.cpu.TARGETS[test_platform.default].family
    return JsonArchEntry(platform=test_platform.name, os="redhat6", target=generic.name)
@pytest.fixture
def _common_compiler(_common_arch):
    """A gcc compiler entry shared by most of the tests below."""
    return JsonCompilerEntry(
        name="gcc",
        version="10.2.0.2112",
        arch=_common_arch,
        executables={
            "cc": "/path/to/compiler/cc",
            "cxx": "/path/to/compiler/cxx",
            "fc": "/path/to/compiler/fc",
        },
    )
@pytest.fixture
def _other_compiler(_common_arch):
    """A second (clang) compiler entry, for multi-compiler manifests."""
    return JsonCompilerEntry(
        name="clang",
        version="3.0.0",
        arch=_common_arch,
        executables={
            "cc": "/path/to/compiler/clang",
            "cxx": "/path/to/compiler/clang++",
            "fc": "/path/to/compiler/flang",
        },
    )
@pytest.fixture
def _raw_json_x(_common_arch):
    """Hand-written JSON for one spec entry, used as the expected structure."""
    return {
        "name": "packagex",
        "hash": "hash-of-x",
        "prefix": "/path/to/packagex-install/",
        "version": "1.0",
        "arch": _common_arch.spec_json(),
        "compiler": {"name": "gcc", "version": "10.2.0.2112"},
        "dependencies": {"packagey": {"hash": "hash-of-y", "type": ["link"]}},
        "parameters": {"precision": ["double", "float"]},
    }
def test_manifest_compatibility(_common_arch, _common_compiler, _raw_json_x):
    """Make sure that JsonSpecEntry outputs the expected JSON structure
    by comparing it with JSON parsed from an example string. This
    ensures that the testing objects like JsonSpecEntry produce the
    same JSON structure as the expected file format.
    """
    # packagey is built first so packagex can reference it as a link dependency.
    y = JsonSpecEntry(
        name="packagey",
        hash="hash-of-y",
        prefix="/path/to/packagey-install/",
        version="1.0",
        arch=_common_arch.spec_json(),
        compiler=_common_compiler.spec_json(),
        dependencies={},
        parameters={},
    )
    x = JsonSpecEntry(
        name="packagex",
        hash="hash-of-x",
        prefix="/path/to/packagex-install/",
        version="1.0",
        arch=_common_arch.spec_json(),
        compiler=_common_compiler.spec_json(),
        dependencies=dict([y.as_dependency(deptypes=["link"])]),
        parameters={"precision": ["double", "float"]},
    )
    x_from_entry = x.to_dict()
    assert x_from_entry == _raw_json_x
def test_compiler_from_entry(mock_executable):
    """Tests that we can detect a compiler from a valid entry in the Cray manifest"""
    # Fake compiler executables that simply report a version string.
    cc = mock_executable("gcc", output="echo 7.5.0")
    cxx = mock_executable("g++", output="echo 7.5.0")
    fc = mock_executable("gfortran", output="echo 7.5.0")
    compiler = compiler_from_entry(
        JsonCompilerEntry(
            name="gcc",
            version="7.5.0",
            arch=JsonArchEntry(platform="linux", os="centos8", target="x86_64"),
            prefix=str(cc.parent),
            executables={"cc": "gcc", "cxx": "g++", "fc": "gfortran"},
        ).compiler_json(),
        manifest_path="/example/file",
    )
    assert compiler.satisfies("gcc@7.5.0 target=x86_64 os=centos8")
    assert compiler.extra_attributes["compilers"]["c"] == str(cc)
    assert compiler.extra_attributes["compilers"]["cxx"] == str(cxx)
    assert compiler.extra_attributes["compilers"]["fortran"] == str(fc)
@pytest.fixture
def generate_openmpi_entries(_common_arch, _common_compiler):
    """Generate two example JSON entries that refer to an OpenMPI
    installation and a hwloc dependency.
    """
    # The hashes need to be padded with 'a' at the end to align with 8-byte
    # boundaries (for base-32 decoding)
    hwloc = JsonSpecEntry(
        name="hwloc",
        hash="hwlocfakehashaaa",
        prefix="/path/to/hwloc-install/",
        version="2.0.3",
        arch=_common_arch.spec_json(),
        compiler=_common_compiler.spec_json(),
        dependencies={},
        parameters={},
    )
    # This includes a variant which is guaranteed not to appear in the
    # OpenMPI package: we need to make sure we can use such package
    # descriptions.
    openmpi = JsonSpecEntry(
        name="openmpi",
        hash="openmpifakehasha",
        prefix="/path/to/openmpi-install/",
        version="4.1.0",
        arch=_common_arch.spec_json(),
        compiler=_common_compiler.spec_json(),
        dependencies=dict([hwloc.as_dependency(deptypes=["link"])]),
        parameters={"internal-hwloc": False, "fabrics": ["psm"], "missing_variant": True},
    )
    return list(x.to_dict() for x in [openmpi, hwloc])
def test_generate_specs_from_manifest(generate_openmpi_entries):
    """Given JSON entries, check that we can form a set of Specs
    including dependency references.
    """
    specs = entries_to_specs(generate_openmpi_entries)
    # Unpacking asserts exactly one openmpi spec was produced.
    (openmpi_spec,) = list(x for x in specs.values() if x.name == "openmpi")
    assert openmpi_spec["hwloc"]
def test_translate_cray_platform_to_linux(monkeypatch, _common_compiler):
    """Manifests might list specs on newer Cray platforms as being "cray",
    but Spack identifies such platforms as "linux". Make sure we
    automatically transform these entries.
    """
    test_linux_platform = spack.platforms.test.Test("linux")
    # Pretend the host platform is linux regardless of where the tests run.
    def the_host_is_linux():
        return test_linux_platform
    monkeypatch.setattr(spack.platforms, "host", the_host_is_linux)
    cray_arch = JsonArchEntry(platform="cray", os="rhel8", target="x86_64")
    spec_json = JsonSpecEntry(
        name="mpich",
        hash="craympichfakehashaaa",
        prefix="/path/to/cray-mpich/",
        version="1.0.0",
        arch=cray_arch.spec_json(),
        compiler=_common_compiler.spec_json(),
        dependencies={},
        parameters={},
    ).to_dict()
    (spec,) = entries_to_specs([spec_json]).values()
    assert spec.architecture.platform == "linux"
@pytest.mark.parametrize(
    "name_in_manifest,expected_name",
    [("nvidia", "nvhpc"), ("rocm", "llvm-amdgpu"), ("clang", "llvm")],
)
def test_translated_compiler_name(name_in_manifest, expected_name):
    """Cray manifest compiler names are translated to Spack package names."""
    assert cray_manifest.translated_compiler_name(name_in_manifest) == expected_name
def test_failed_translate_compiler_name(_common_arch):
    """An unknown compiler name raises, both directly and via spec entries."""
    unknown_compiler = JsonCompilerEntry(name="unknown", version="1.0")
    with pytest.raises(spack.compilers.config.UnknownCompilerError):
        compiler_from_entry(unknown_compiler.compiler_json(), manifest_path="/example/file")
    spec_json = JsonSpecEntry(
        name="packagey",
        hash="hash-of-y",
        prefix="/path/to/packagey-install/",
        version="1.0",
        arch=_common_arch.spec_json(),
        compiler=unknown_compiler.spec_json(),
        dependencies={},
        parameters={},
    ).to_dict()
    with pytest.raises(spack.compilers.config.UnknownCompilerError):
        entries_to_specs([spec_json])
@pytest.fixture
def manifest_content(generate_openmpi_entries, _common_compiler, _other_compiler):
    """A full manifest document: metadata header plus specs and compilers."""
    return {
        "_meta": {
            "file-type": "cray-pe-json",
            "system-type": "EX",
            "schema-version": "1.3",
            "cpe-version": "22.06",
        },
        "specs": generate_openmpi_entries,
        "compilers": [_common_compiler.compiler_json(), _other_compiler.compiler_json()],
    }
def test_read_cray_manifest(temporary_store, manifest_file):
"""Check that (a) we can read the cray manifest and add it to the Spack
Database and (b) we can concretize specs based on that.
"""
cray_manifest.read(str(manifest_file), True)
query_specs = temporary_store.db.query("openmpi")
assert any(x.dag_hash() == "openmpifakehasha" for x in query_specs)
concretized_spec = spack.concretize.concretize_one("depends-on-openmpi ^/openmpifakehasha")
assert concretized_spec["hwloc"].dag_hash() == "hwlocfakehashaaa"
def test_read_cray_manifest_add_compiler_failure(temporary_store, manifest_file, monkeypatch):
"""Tests the Cray manifest can be read even if some compilers cannot be added."""
def _mock(entry, *, manifest_path):
if entry["name"] == "clang":
raise RuntimeError("cannot determine the compiler")
return spack.spec.Spec(f"{entry['name']}@{entry['version']}")
monkeypatch.setattr(cray_manifest, "compiler_from_entry", _mock)
cray_manifest.read(str(manifest_file), True)
query_specs = spack.store.STORE.db.query("openmpi")
assert any(x.dag_hash() == "openmpifakehasha" for x in query_specs)
def test_read_cray_manifest_twice_no_duplicates(
mutable_config, temporary_store, manifest_file, monkeypatch, tmp_path: pathlib.Path
):
def _mock(entry, *, manifest_path):
return spack.spec.Spec(f"{entry['name']}@{entry['version']}", external_path=str(tmp_path))
monkeypatch.setattr(cray_manifest, "compiler_from_entry", _mock)
# Read the manifest twice
cray_manifest.read(str(manifest_file), True)
cray_manifest.read(str(manifest_file), True)
config_data = mutable_config.get("packages")["gcc"]
assert "externals" in config_data
specs = [spack.spec.Spec(x["spec"]) for x in config_data["externals"]]
assert len(specs) == len(set(specs))
assert len([c for c in specs if c.satisfies("gcc@10.2.0.2112")]) == 1
def test_read_old_manifest_v1_2(tmp_path: pathlib.Path, temporary_store):
"""Test reading a file using the older format ('version' instead of 'schema-version')."""
manifest = tmp_path / "manifest_dir" / "test.json"
manifest.parent.mkdir(parents=True)
manifest.write_text(
"""\
{
"_meta": {
"file-type": "cray-pe-json",
"system-type": "EX",
"version": "1.3"
},
"specs": []
}
"""
)
cray_manifest.read(str(manifest), True)
def test_convert_validation_error(
tmp_path: pathlib.Path, mutable_config, mock_packages, temporary_store
):
manifest_dir = tmp_path / "manifest_dir"
manifest_dir.mkdir()
# Does not parse as valid JSON
invalid_json_path = manifest_dir / "invalid-json.json"
with open(invalid_json_path, "w", encoding="utf-8") as f:
f.write(
"""\
{
"""
)
with pytest.raises(cray_manifest.ManifestValidationError) as e:
cray_manifest.read(invalid_json_path, True)
str(e)
# Valid JSON, but does not conform to schema (schema-version is not a string
# of length > 0)
invalid_schema_path = manifest_dir / "invalid-schema.json"
with open(invalid_schema_path, "w", encoding="utf-8") as f:
f.write(
"""\
{
"_meta": {
"file-type": "cray-pe-json",
"system-type": "EX",
"schema-version": ""
},
"specs": []
}
"""
)
with pytest.raises(cray_manifest.ManifestValidationError) as e:
cray_manifest.read(invalid_schema_path, True)
@pytest.fixture
def manifest_file(tmp_path: pathlib.Path, manifest_content):
"""Create a manifest file in a directory. Used by 'spack external'."""
filename = tmp_path / "external-db.json"
with open(filename, "w", encoding="utf-8") as db_file:
json.dump(manifest_content, db_file)
return filename
def test_find_external_nonempty_default_manifest_dir(
temporary_store, mutable_mock_repo, monkeypatch, manifest_file
):
"""The user runs 'spack external find'; the default manifest directory
contains a manifest file. Ensure that the specs are read.
"""
monkeypatch.setenv("PATH", "")
monkeypatch.setattr(spack.cray_manifest, "default_path", str(manifest_file.parent))
spack.cmd.external._collect_and_consume_cray_manifest_files(ignore_default_dir=False)
specs = temporary_store.db.query("hwloc")
assert any(x.dag_hash() == "hwlocfakehashaaa" for x in specs)
def test_reusable_externals_cray_manifest(temporary_store, manifest_file):
"""The concretizer should be able to reuse specs imported from a manifest without a
externals config entry in packages.yaml"""
cray_manifest.read(path=str(manifest_file), apply_updates=True)
# Get any imported spec
spec = temporary_store.db.query_local()[0]
# Reusable if imported locally
assert spack.solver.reuse._is_reusable(spec, packages={}, local=True)
# If cray manifest entries end up in a build cache somehow, they are not reusable
assert not spack.solver.reuse._is_reusable(spec, packages={}, local=False)
|
JsonCompilerEntry
|
python
|
django__django
|
django/db/models/lookups.py
|
{
"start": 21058,
"end": 22430
}
|
class ____(BuiltinLookup):
param_pattern = "%%%s%%"
prepare_rhs = False
def get_rhs_op(self, connection, rhs):
# Assume we are in startswith. We need to produce SQL like:
# col LIKE %s, ['thevalue%']
# For python values we can (and should) do that directly in Python,
# but if the value is for example reference to other column, then
# we need to add the % pattern match to the lookup by something like
# col LIKE othercol || '%%'
# So, for Python values we don't need any special pattern, but for
# SQL reference values or SQL transformations we need the correct
# pattern added.
if hasattr(self.rhs, "as_sql") or self.bilateral_transforms:
pattern = connection.pattern_ops[self.lookup_name].format(
connection.pattern_esc
)
return pattern.format(rhs)
else:
return super().get_rhs_op(connection, rhs)
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if self.rhs_is_direct_value() and params and not self.bilateral_transforms:
params = (
self.param_pattern % connection.ops.prep_for_like_query(params[0]),
*params[1:],
)
return rhs, params
@Field.register_lookup
|
PatternLookup
|
python
|
apache__airflow
|
providers/sftp/tests/unit/sftp/operators/test_sftp.py
|
{
"start": 2085,
"end": 27958
}
|
class ____:
def setup_method(self):
hook = SSHHook(ssh_conn_id="ssh_default")
hook.no_host_key_check = True
self.hook = hook
sftp_hook = SFTPHook(ssh_conn_id="ssh_default")
sftp_hook.no_host_key_check = True
self.sftp_hook = sftp_hook
self.test_dir = "/tmp"
self.test_local_dir = "/tmp/tmp2"
self.test_remote_dir = "/tmp/tmp1"
self.test_local_filename = "test_local_file"
self.test_remote_filename = "test_remote_file"
self.test_remote_file_content = (
b"This is remote file content \n which is also multiline "
b"another line here \n this is last line. EOF"
)
self.test_local_filepath = f"{self.test_dir}/{self.test_local_filename}"
# Local Filepath with Intermediate Directory
self.test_local_filepath_int_dir = f"{self.test_local_dir}/{self.test_local_filename}"
self.test_remote_filepath = f"{self.test_dir}/{self.test_remote_filename}"
# Remote Filepath with Intermediate Directory
self.test_remote_filepath_int_dir = f"{self.test_remote_dir}/{self.test_remote_filename}"
def teardown_method(self):
if os.path.exists(self.test_local_filepath):
os.remove(self.test_local_filepath)
if os.path.exists(self.test_local_filepath_int_dir):
os.remove(self.test_local_filepath_int_dir)
if os.path.exists(self.test_local_dir):
os.rmdir(self.test_local_dir)
if os.path.exists(self.test_remote_filepath):
os.remove(self.test_remote_filepath)
if os.path.exists(self.test_remote_filepath_int_dir):
os.remove(self.test_remote_filepath_int_dir)
if os.path.exists(self.test_remote_dir):
os.rmdir(self.test_remote_dir)
def test_default_args(self):
operator = SFTPOperator(
task_id="test_default_args",
remote_filepath="/tmp/remote_file",
)
assert operator.operation == SFTPOperation.PUT
assert operator.confirm is True
assert operator.create_intermediate_dirs is False
assert operator.concurrency == 1
assert operator.prefetch is True
assert operator.local_filepath is None
assert operator.sftp_hook is None
assert operator.ssh_conn_id is None
assert operator.remote_host is None
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Pickle support is removed in Airflow 3")
@conf_vars({("core", "enable_xcom_pickling"): "True"})
def test_pickle_file_transfer_put(self, dag_maker):
test_local_file_content = (
b"This is local file content \n which is multiline "
b"continuing....with other character\nanother line here \n this is last line"
)
# create a test file locally
with open(self.test_local_filepath, "wb") as file:
file.write(test_local_file_content)
with dag_maker(dag_id="unit_tests_sftp_op_pickle_file_transfer_put", start_date=DEFAULT_DATE):
SFTPOperator( # Put test file to remote.
task_id="put_test_task",
sftp_hook=self.sftp_hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
create_intermediate_dirs=True,
)
SSHOperator( # Check the remote file content.
task_id="check_file_task",
ssh_hook=self.hook,
command=f"cat {self.test_remote_filepath}",
do_xcom_push=True,
)
tis = {ti.task_id: ti for ti in dag_maker.create_dagrun().task_instances}
tis["put_test_task"].run()
tis["check_file_task"].run()
pulled = tis["check_file_task"].xcom_pull(task_ids="check_file_task", key="return_value")
assert pulled.strip() == test_local_file_content
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Pickle support is removed in Airflow 3")
@conf_vars({("core", "enable_xcom_pickling"): "True"})
def test_file_transfer_no_intermediate_dir_error_put(self, create_task_instance_of_operator):
test_local_file_content = (
b"This is local file content \n which is multiline "
b"continuing....with other character\nanother line here \n this is last line"
)
# create a test file locally
with open(self.test_local_filepath, "wb") as file:
file.write(test_local_file_content)
# Try to put test file to remote. This should raise an error with
# "No such file" as the directory does not exist.
ti2 = create_task_instance_of_operator(
SFTPOperator,
dag_id="unit_tests_sftp_op_file_transfer_no_intermediate_dir_error_put",
task_id="test_sftp",
sftp_hook=self.sftp_hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath_int_dir,
operation=SFTPOperation.PUT,
create_intermediate_dirs=False,
)
with (
pytest.raises(AirflowException) as ctx,
):
ti2.run()
assert "No such file" in str(ctx.value)
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Pickle support is removed in Airflow 3")
@conf_vars({("core", "enable_xcom_pickling"): "True"})
def test_file_transfer_with_intermediate_dir_put(self, dag_maker):
test_local_file_content = (
b"This is local file content \n which is multiline "
b"continuing....with other character\nanother line here \n this is last line"
)
# create a test file locally
with open(self.test_local_filepath, "wb") as file:
file.write(test_local_file_content)
with dag_maker(dag_id="unit_tests_sftp_op_file_transfer_with_intermediate_dir_put"):
SFTPOperator( # Put test file to remote.
task_id="test_sftp",
sftp_hook=self.sftp_hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath_int_dir,
operation=SFTPOperation.PUT,
create_intermediate_dirs=True,
)
SSHOperator( # Check the remote file content.
task_id="test_check_file",
ssh_hook=self.hook,
command=f"cat {self.test_remote_filepath_int_dir}",
do_xcom_push=True,
)
dagrun = dag_maker.create_dagrun(logical_date=timezone.utcnow())
tis = {ti.task_id: ti for ti in dagrun.task_instances}
tis["test_sftp"].run()
tis["test_check_file"].run()
pulled = tis["test_check_file"].xcom_pull(task_ids="test_check_file", key="return_value")
assert pulled.strip() == test_local_file_content
@conf_vars({("core", "enable_xcom_pickling"): "False"})
def test_json_file_transfer_put(self, dag_maker):
test_local_file_content = (
b"This is local file content \n which is multiline "
b"continuing....with other character\nanother line here \n this is last line"
)
# create a test file locally
with open(self.test_local_filepath, "wb") as file:
file.write(test_local_file_content)
with dag_maker(dag_id="unit_tests_sftp_op_json_file_transfer_put"):
SFTPOperator( # Put test file to remote.
task_id="put_test_task",
sftp_hook=self.sftp_hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
)
SSHOperator( # Check the remote file content.
task_id="check_file_task",
ssh_hook=self.hook,
command=f"cat {self.test_remote_filepath}",
do_xcom_push=True,
)
dagrun = dag_maker.create_dagrun(logical_date=timezone.utcnow())
tis = {ti.task_id: ti for ti in dagrun.task_instances}
tis["put_test_task"].run()
tis["check_file_task"].run()
pulled = tis["check_file_task"].xcom_pull(task_ids="check_file_task", key="return_value")
assert pulled.strip() == b64encode(test_local_file_content).decode("utf-8")
@pytest.fixture
def create_remote_file_and_cleanup(self):
with open(self.test_remote_filepath, "wb") as file:
file.write(self.test_remote_file_content)
yield
os.remove(self.test_remote_filepath)
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Pickle support is removed in Airflow 3")
@conf_vars({("core", "enable_xcom_pickling"): "True"})
def test_pickle_file_transfer_get(self, dag_maker, create_remote_file_and_cleanup):
with dag_maker(dag_id="unit_tests_sftp_op_pickle_file_transfer_get"):
SFTPOperator( # Get remote file to local.
task_id="test_sftp",
sftp_hook=self.sftp_hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.GET,
)
for ti in dag_maker.create_dagrun(logical_date=timezone.utcnow()).task_instances:
ti.run()
# Test the received content.
with open(self.test_local_filepath, "rb") as file:
content_received = file.read()
assert content_received == self.test_remote_file_content
@conf_vars({("core", "enable_xcom_pickling"): "False"})
def test_json_file_transfer_get(self, dag_maker, create_remote_file_and_cleanup):
with dag_maker(dag_id="unit_tests_sftp_op_json_file_transfer_get"):
SFTPOperator( # Get remote file to local.
task_id="test_sftp",
sftp_hook=self.sftp_hook,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.GET,
)
for ti in dag_maker.create_dagrun(logical_date=timezone.utcnow()).task_instances:
ti.run()
# Test the received content.
content_received = None
with open(self.test_local_filepath, "rb") as file:
content_received = file.read()
assert content_received == self.test_remote_file_content
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Pickle support is removed in Airflow 3")
@conf_vars({("core", "enable_xcom_pickling"): "True"})
def test_file_transfer_no_intermediate_dir_error_get(self, dag_maker, create_remote_file_and_cleanup):
with dag_maker(dag_id="unit_tests_sftp_op_file_transfer_no_intermediate_dir_error_get"):
SFTPOperator( # Try to GET test file from remote.
task_id="test_sftp",
sftp_hook=self.sftp_hook,
local_filepath=self.test_local_filepath_int_dir,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.GET,
)
for ti in dag_maker.create_dagrun(logical_date=timezone.utcnow()).task_instances:
# This should raise an error with "No such file" as the directory
# does not exist.
with (
pytest.raises(AirflowException) as ctx,
):
ti.run()
assert "No such file" in str(ctx.value)
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Pickle support is removed in Airflow 3")
@conf_vars({("core", "enable_xcom_pickling"): "True"})
def test_file_transfer_with_intermediate_dir_error_get(self, dag_maker, create_remote_file_and_cleanup):
with dag_maker(dag_id="unit_tests_sftp_op_file_transfer_with_intermediate_dir_error_get"):
SFTPOperator( # Get remote file to local.
task_id="test_sftp",
sftp_hook=self.sftp_hook,
local_filepath=self.test_local_filepath_int_dir,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.GET,
create_intermediate_dirs=True,
)
for ti in dag_maker.create_dagrun(logical_date=timezone.utcnow()).task_instances:
ti.run()
# Test the received content.
content_received = None
with open(self.test_local_filepath_int_dir, "rb") as file:
content_received = file.read()
assert content_received == self.test_remote_file_content
@mock.patch.dict("os.environ", {"AIRFLOW_CONN_" + TEST_CONN_ID.upper(): "ssh://test_id@localhost"})
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.retrieve_directory")
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.retrieve_file")
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.create_directory")
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.store_file")
def test_arg_checking(self, mock_store_file, mock_create_dir, mock_retrieve_file, mock_retrieve_dir):
dag = DAG(
dag_id="unit_tests_sftp_op_arg_checking",
schedule=None,
default_args={"start_date": DEFAULT_DATE},
)
# Exception should be raised if ssh_conn_id is not provided
task_0 = SFTPOperator(
task_id="test_sftp_0",
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
dag=dag,
)
with pytest.raises(AirflowException, match="Cannot operate without sftp_hook or ssh_conn_id."):
task_0.execute(None)
# use ssh_conn_id to create SSHHook
task_1 = SFTPOperator(
task_id="test_sftp_1",
ssh_conn_id=TEST_CONN_ID,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
dag=dag,
)
with contextlib.suppress(Exception):
task_1.execute(None)
assert task_1.sftp_hook.ssh_conn_id == TEST_CONN_ID
task_2 = SFTPOperator(
task_id="test_sftp_2",
ssh_conn_id=TEST_CONN_ID,
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
dag=dag,
)
with contextlib.suppress(Exception):
task_2.execute(None)
assert task_2.sftp_hook.ssh_conn_id == TEST_CONN_ID
task_3 = SFTPOperator(
task_id="test_sftp_3",
ssh_conn_id=TEST_CONN_ID,
remote_host="remotehost",
local_filepath=self.test_local_filepath,
remote_filepath=self.test_remote_filepath,
operation=SFTPOperation.PUT,
dag=dag,
)
with contextlib.suppress(Exception):
task_3.execute(None)
assert task_3.sftp_hook.remote_host == "remotehost"
def test_unequal_local_remote_file_paths(self):
with pytest.raises(ValueError, match="1 paths in local_filepath != 2 paths in remote_filepath"):
SFTPOperator(
task_id="test_sftp_unequal_paths",
local_filepath="/tmp/test",
remote_filepath=["/tmp/test1", "/tmp/test2"],
).execute(None)
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.retrieve_file")
def test_str_filepaths_get(self, mock_get):
local_filepath = "/tmp/test"
remote_filepath = "/tmp/remotetest"
SFTPOperator(
task_id="test_str_to_list",
sftp_hook=self.sftp_hook,
local_filepath=local_filepath,
remote_filepath=remote_filepath,
operation=SFTPOperation.GET,
).execute(None)
assert mock_get.call_count == 1
args, _ = mock_get.call_args_list[0]
assert args == (remote_filepath, local_filepath)
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.retrieve_file")
def test_multiple_paths_get(self, mock_get):
local_filepath = ["/tmp/ltest1", "/tmp/ltest2"]
remote_filepath = ["/tmp/rtest1", "/tmp/rtest2"]
sftp_op = SFTPOperator(
task_id="test_multiple_paths_get",
sftp_hook=self.sftp_hook,
local_filepath=local_filepath,
remote_filepath=remote_filepath,
operation=SFTPOperation.GET,
)
sftp_op.execute(None)
assert mock_get.call_count == 2
args0, _ = mock_get.call_args_list[0]
args1, _ = mock_get.call_args_list[1]
assert args0 == (remote_filepath[0], local_filepath[0])
assert args1 == (remote_filepath[1], local_filepath[1])
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.retrieve_directory")
def test_str_dirpaths_get(self, mock_get):
local_dirpath = "/tmp_local"
remote_dirpath = "/tmp"
SFTPOperator(
task_id="test_str_to_list",
sftp_hook=self.sftp_hook,
local_filepath=local_dirpath,
remote_filepath=remote_dirpath,
operation=SFTPOperation.GET,
).execute(None)
assert mock_get.call_count == 1
args, _ = mock_get.call_args_list[0]
assert args == (remote_dirpath, local_dirpath)
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.retrieve_directory_concurrently")
def test_str_dirpaths_get_concurrently(self, mock_get):
local_dirpath = "/tmp_local"
remote_dirpath = "/tmp"
SFTPOperator(
task_id="test_str_to_list",
sftp_hook=self.sftp_hook,
local_filepath=local_dirpath,
remote_filepath=remote_dirpath,
operation=SFTPOperation.GET,
concurrency=2,
).execute(None)
assert mock_get.call_count == 1
assert mock_get.call_args == mock.call(remote_dirpath, local_dirpath, workers=2, prefetch=True)
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.store_file")
def test_str_filepaths_put(self, mock_get):
local_filepath = "/tmp/test"
remote_filepath = "/tmp/remotetest"
SFTPOperator(
task_id="test_str_to_list",
sftp_hook=self.sftp_hook,
local_filepath=local_filepath,
remote_filepath=remote_filepath,
operation=SFTPOperation.PUT,
).execute(None)
assert mock_get.call_count == 1
args, _ = mock_get.call_args_list[0]
assert args == (remote_filepath, local_filepath)
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.store_file")
def test_multiple_paths_put(self, mock_put):
local_filepath = ["/tmp/ltest1", "/tmp/ltest2"]
remote_filepath = ["/tmp/rtest1", "/tmp/rtest2"]
sftp_op = SFTPOperator(
task_id="test_multiple_paths_get",
sftp_hook=self.sftp_hook,
local_filepath=local_filepath,
remote_filepath=remote_filepath,
operation=SFTPOperation.PUT,
)
sftp_op.execute(None)
assert mock_put.call_count == 2
args0, _ = mock_put.call_args_list[0]
args1, _ = mock_put.call_args_list[1]
assert args0 == (remote_filepath[0], local_filepath[0])
assert args1 == (remote_filepath[1], local_filepath[1])
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.store_directory")
def test_str_dirpaths_put(self, mock_get):
local_dirpath = "/tmp"
remote_dirpath = "/tmp_remote"
SFTPOperator(
task_id="test_str_dirpaths_put",
sftp_hook=self.sftp_hook,
local_filepath=local_dirpath,
remote_filepath=remote_dirpath,
operation=SFTPOperation.PUT,
).execute(None)
assert mock_get.call_count == 1
args, _ = mock_get.call_args_list[0]
assert args == (remote_dirpath, local_dirpath)
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.store_directory_concurrently")
def test_str_dirpaths_put_concurrently(self, mock_get):
local_dirpath = "/tmp"
remote_dirpath = "/tmp_remote"
SFTPOperator(
task_id="test_str_dirpaths_put",
sftp_hook=self.sftp_hook,
local_filepath=local_dirpath,
remote_filepath=remote_dirpath,
operation=SFTPOperation.PUT,
concurrency=2,
).execute(None)
assert mock_get.call_count == 1
args, _ = mock_get.call_args_list[0]
assert args == (remote_dirpath, local_dirpath)
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.retrieve_file")
def test_return_str_when_local_filepath_was_str(self, mock_get):
local_filepath = "/tmp/ltest1"
remote_filepath = "/tmp/rtest1"
sftp_op = SFTPOperator(
task_id="test_returns_str",
sftp_hook=self.sftp_hook,
local_filepath=local_filepath,
remote_filepath=remote_filepath,
operation=SFTPOperation.GET,
)
return_value = sftp_op.execute(None)
assert isinstance(return_value, str)
assert return_value == local_filepath
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.delete_file")
def test_str_filepaths_delete(self, mock_delete):
remote_filepath = "/tmp/test"
SFTPOperator(
task_id="test_str_filepaths_delete",
sftp_hook=self.sftp_hook,
remote_filepath=remote_filepath,
operation=SFTPOperation.DELETE,
).execute(None)
assert mock_delete.call_count == 1
args, _ = mock_delete.call_args_list[0]
assert args == (remote_filepath,)
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.delete_file")
def test_multiple_filepaths_delete(self, mock_delete):
remote_filepath = ["/tmp/rtest1", "/tmp/rtest2"]
SFTPOperator(
task_id="test_multiple_filepaths_delete",
sftp_hook=self.sftp_hook,
remote_filepath=remote_filepath,
operation=SFTPOperation.DELETE,
).execute(None)
assert mock_delete.call_count == 2
args0, _ = mock_delete.call_args_list[0]
args1, _ = mock_delete.call_args_list[1]
assert args0 == (remote_filepath[0],)
assert args1 == (remote_filepath[1],)
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.delete_directory")
def test_str_dirpaths_delete(self, mock_delete):
remote_filepath = "/tmp"
SFTPOperator(
task_id="test_str_dirpaths_delete",
sftp_hook=self.sftp_hook,
remote_filepath=remote_filepath,
operation=SFTPOperation.DELETE,
).execute(None)
assert mock_delete.call_count == 1
args, _ = mock_delete.call_args_list[0]
assert args == (remote_filepath,)
@mock.patch("airflow.providers.sftp.operators.sftp.SFTPHook.delete_file")
def test_local_filepath_exists_error_delete(self, mock_delete):
local_filepath = "/tmp"
remote_filepath = "/tmp_remote"
with pytest.raises(ValueError, match="local_filepath should not be provided for delete operation"):
SFTPOperator(
task_id="test_local_filepath_exists_error_delete",
sftp_hook=self.sftp_hook,
local_filepath=local_filepath,
remote_filepath=remote_filepath,
operation=SFTPOperation.DELETE,
).execute(None)
@pytest.mark.parametrize(
("operation", "expected"),
TEST_GET_PUT_PARAMS,
)
@mock.patch("airflow.providers.ssh.hooks.ssh.SSHHook.get_conn", spec=paramiko.SSHClient)
@mock.patch("airflow.providers.ssh.hooks.ssh.SSHHook.get_connection", spec=Connection)
def test_extract_ssh_conn_id(self, get_connection, get_conn, operation, expected):
get_connection.return_value = Connection(
conn_id="sftp_conn_id",
conn_type="sftp",
host="remotehost",
port=22,
)
dag_id = "sftp_dag"
task_id = "sftp_task"
task = SFTPOperator(
task_id=task_id,
ssh_conn_id="sftp_conn_id",
dag=DAG(dag_id, schedule=None),
start_date=timezone.utcnow(),
local_filepath="/path/local",
remote_filepath="/path/remote",
operation=operation,
)
lineage = task.get_openlineage_facets_on_start()
assert lineage.inputs == expected[0]
assert lineage.outputs == expected[1]
@pytest.mark.parametrize(
("operation", "expected"),
TEST_GET_PUT_PARAMS,
)
@mock.patch("airflow.providers.ssh.hooks.ssh.SSHHook.get_conn", spec=paramiko.SSHClient)
@mock.patch("airflow.providers.ssh.hooks.ssh.SSHHook.get_connection", spec=Connection)
def test_extract_sftp_hook(self, get_connection, get_conn, operation, expected):
get_connection.return_value = Connection(
conn_id="sftp_conn_id",
conn_type="sftp",
host="remotehost",
port=22,
)
dag_id = "sftp_dag"
task_id = "sftp_task"
task = SFTPOperator(
task_id=task_id,
sftp_hook=SFTPHook(ssh_conn_id="sftp_conn_id"),
dag=DAG(dag_id, schedule=None),
start_date=timezone.utcnow(),
local_filepath="/path/local",
remote_filepath="/path/remote",
operation=operation,
)
lineage = task.get_openlineage_facets_on_start()
assert lineage.inputs == expected[0]
assert lineage.outputs == expected[1]
|
TestSFTPOperator
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 974446,
"end": 974842
}
|
class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("SponsorsActivity", graphql_name="node")
"""The item at the end of the edge."""
|
SponsorsActivityEdge
|
python
|
pypa__hatch
|
tests/cli/version/test_version.py
|
{
"start": 174,
"end": 11485
}
|
class ____:
def test_random_directory(self, hatch, temp_dir, helpers):
with temp_dir.as_cwd():
result = hatch("version")
assert result.exit_code == 1, result.output
assert result.output == helpers.dedent(
"""
No project detected
"""
)
def test_configured_project(self, hatch, temp_dir, helpers, config_file):
project = "foo"
config_file.model.mode = "project"
config_file.model.project = project
config_file.model.projects = {project: str(temp_dir)}
config_file.save()
with temp_dir.as_cwd():
result = hatch("version")
assert result.exit_code == 1, result.output
assert result.output == helpers.dedent(
"""
Project foo (not a project)
"""
)
def test_other_backend_show(hatch, temp_dir, helpers):
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
(path / "src" / "my_app" / "__init__.py").write_text('__version__ = "9000.42"')
project = Project(path)
config = dict(project.raw_config)
config["build-system"]["requires"] = ["flit-core"]
config["build-system"]["build-backend"] = "flit_core.buildapi"
del config["project"]["license"]
project.save_config(config)
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
Creating environment: hatch-build
Checking dependencies
Syncing dependencies
Inspecting build dependencies
9000.42
"""
)
def test_other_backend_set(hatch, temp_dir, helpers):
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
project = Project(path)
config = dict(project.raw_config)
config["build-system"]["requires"] = ["flit-core"]
config["build-system"]["build-backend"] = "flit_core.buildapi"
del config["project"]["license"]
project.save_config(config)
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version", "1.0.0")
assert result.exit_code == 1, result.output
assert result.output == helpers.dedent(
"""
The version can only be set when Hatchling is the build backend
"""
)
def test_incompatible_environment(hatch, temp_dir, helpers, build_env_config):
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
project = Project(path)
config = dict(project.raw_config)
config["build-system"]["requires"].append("foo")
project.save_config(config)
helpers.update_project_environment(project, "hatch-build", {"python": "9000", **build_env_config})
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version")
assert result.exit_code == 1, result.output
assert result.output == helpers.dedent(
"""
Environment `hatch-build` is incompatible: cannot locate Python: 9000
"""
)
@pytest.mark.usefixtures("mock_backend_process")
def test_show_dynamic(hatch, helpers, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
hatch("new", project_name)
path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
Creating environment: hatch-build
Checking dependencies
Syncing dependencies
Inspecting build dependencies
0.0.1
"""
)
@pytest.mark.usefixtures("mock_backend_process")
def test_plugin_dependencies_unmet(hatch, helpers, temp_dir, mock_plugin_installation):
project_name = "My.App"
with temp_dir.as_cwd():
hatch("new", project_name)
path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
dependency = os.urandom(16).hex()
(path / DEFAULT_CONFIG_FILE).write_text(
helpers.dedent(
f"""
[env]
requires = ["{dependency}"]
"""
)
)
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
Syncing environment plugin requirements
Creating environment: hatch-build
Checking dependencies
Syncing dependencies
Inspecting build dependencies
0.0.1
"""
)
helpers.assert_plugin_installation(mock_plugin_installation, [dependency])
@pytest.mark.requires_internet
@pytest.mark.usefixtures("mock_backend_process")
def test_no_compatibility_check_if_exists(hatch, helpers, temp_dir, mocker):
project_name = "My.App"
with temp_dir.as_cwd():
hatch("new", project_name)
project_path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
project = Project(project_path)
config = dict(project.raw_config)
config["build-system"]["requires"].append("binary")
project.save_config(config)
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
Creating environment: hatch-build
Checking dependencies
Syncing dependencies
Inspecting build dependencies
0.0.1
"""
)
mocker.patch("hatch.env.virtual.VirtualEnvironment.check_compatibility", side_effect=Exception("incompatible"))
with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
Inspecting build dependencies
0.0.1
"""
)
@pytest.mark.usefixtures("mock_backend_process")
def test_set_dynamic(hatch, helpers, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
hatch("new", project_name)
path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version", "minor,rc")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
Creating environment: hatch-build
Checking dependencies
Syncing dependencies
Inspecting build dependencies
Old: 0.0.1
New: 0.1.0rc0
"""
)
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
Inspecting build dependencies
0.1.0rc0
"""
)
@pytest.mark.usefixtures("mock_backend_process")
def test_set_dynamic_downgrade(hatch, helpers, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
hatch("new", project_name)
path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
(path / "src" / "my_app" / "__about__.py").write_text('__version__ = "21.1.2"')
# This one fails, because it's a downgrade without --force
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version", "21.1.0", catch_exceptions=True)
assert result.exit_code == 1, result.output
assert str(result.exception) == "Version `21.1.0` is not higher than the original version `21.1.2`"
# Try again, this time with --force
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version", "--force", "21.1.0")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
Inspecting build dependencies
Old: 21.1.2
New: 21.1.0
"""
)
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version")
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
"""
Inspecting build dependencies
21.1.0
"""
)
def test_show_static(hatch, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
hatch("new", project_name)
path = temp_dir / "my-app"
project = Project(path)
config = dict(project.raw_config)
config["project"]["version"] = "1.2.3"
config["project"]["dynamic"].remove("version")
config["tool"]["hatch"]["metadata"] = {"hooks": {"foo": {}}}
project.save_config(config)
with path.as_cwd():
result = hatch("version")
assert result.exit_code == 0, result.output
assert result.output == "1.2.3\n"
def test_set_static(hatch, helpers, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
hatch("new", project_name)
path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
project = Project(path)
config = dict(project.raw_config)
config["project"]["version"] = "1.2.3"
config["project"]["dynamic"].remove("version")
project.save_config(config)
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("version", "minor,rc")
assert result.exit_code == 1, result.output
assert result.output == helpers.dedent(
"""
Cannot set version when it is statically defined by the `project.version` field
"""
)
@pytest.mark.usefixtures("mock_backend_process")
def test_verbose_output_to_stderr(hatch, temp_dir):
"""Test that verbose output (command display and status messages) goes to stderr, not stdout."""
project_name = "My.App"
with temp_dir.as_cwd():
hatch("new", project_name)
path = temp_dir / "my-app"
data_path = temp_dir / "data"
data_path.mkdir()
# Run with verbose flag (-v) and separate stderr from stdout
with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
result = hatch("-v", "version")
assert result.exit_code == 0, result.output
# The actual version should be in stdout
assert result.stdout == "0.0.1\n"
# Verbose output should be in stderr
assert "Inspecting build dependencies" in result.stderr
assert "cmd [1] | python -u -m hatchling version" in result.stderr
# These should NOT be in stdout
assert "Inspecting build dependencies" not in result.stdout
assert "cmd [1]" not in result.stdout
|
TestNoProject
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/core/property/test_primitive.py
|
{
"start": 13590,
"end": 15148
}
|
class ____:
def test_valid(self) -> None:
prop = bcpp.String()
assert prop.is_valid("")
assert prop.is_valid("6")
def test_invalid(self) -> None:
prop = bcpp.String()
assert not prop.is_valid(None)
assert not prop.is_valid(False)
assert not prop.is_valid(True)
assert not prop.is_valid(0)
assert not prop.is_valid(1)
assert not prop.is_valid(0.0)
assert not prop.is_valid(1.0)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid(b"")
assert not prop.is_valid(b"some")
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
def test_has_ref(self) -> None:
prop = bcpp.String()
assert not prop.has_ref
def test_str(self) -> None:
prop = bcpp.String()
assert str(prop) == "String"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpp, ALL)
|
Test_String
|
python
|
Netflix__metaflow
|
metaflow/plugins/cards/card_modules/basic.py
|
{
"start": 2990,
"end": 3205
}
|
class ____(MetaflowCardComponent):
type = "subtitle"
def __init__(self, text=None):
self._text = text
def render(self):
return dict(type=self.type, text=str(self._text))
|
SubTitleComponent
|
python
|
PrefectHQ__prefect
|
tests/server/orchestration/api/test_flow_run_states.py
|
{
"start": 99,
"end": 961
}
|
class ____:
async def test_read_flow_run_state(self, flow_run, client, session):
# create a flow run state to read
result = await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run.id,
state=schemas.states.Running(),
)
await session.commit()
# make sure we can read the state
flow_run_state_id = result.state.id
response = await client.get(f"/flow_run_states/{flow_run_state_id}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["id"] == str(flow_run_state_id)
async def test_read_flow_run_state_returns_404_if_does_not_exist(self, client):
response = await client.get(f"/flow_run_states/{uuid4()}")
assert response.status_code == status.HTTP_404_NOT_FOUND
|
TestReadFlowRunStateById
|
python
|
apache__airflow
|
providers/opensearch/tests/unit/opensearch/log/test_os_json_formatter.py
|
{
"start": 1031,
"end": 3305
}
|
class ____:
JSON_FIELDS = ["asctime", "filename", "lineno", "levelname", "message", "exc_text"]
EXTRA_FIELDS = {
"dag_id": "dag1",
"task_id": "task1",
"execution_date": "2023-11-17",
"try_number": "1",
"log_id": "Some_log_id",
}
@pytest.fixture
def os_json_formatter(self):
return OpensearchJSONFormatter()
@pytest.fixture
def log_record(self):
return logging.LogRecord(
name="test",
level=logging.INFO,
pathname="test_file.txt",
lineno=1,
msg="Test message",
args=(),
exc_info=None,
)
def test_format_log_record(self, os_json_formatter, log_record):
"""Test the log record formatting."""
os_json_formatter.json_fields = self.JSON_FIELDS
formatted = os_json_formatter.format(log_record)
data = json.loads(formatted)
assert all(key in self.JSON_FIELDS for key in data.keys())
assert data["filename"] == "test_file.txt"
assert data["lineno"] == 1
assert data["levelname"] == "INFO"
assert data["message"] == "Test message"
def test_formattime_in_iso8601_format(self, os_json_formatter, log_record):
os_json_formatter.json_fields = ["asctime"]
iso8601_format = os_json_formatter.formatTime(log_record)
try:
pendulum.parse(iso8601_format, strict=True)
except ValueError:
raise Exception("Time is not in ISO8601 format")
def test_extra_fields(self, os_json_formatter, log_record):
os_json_formatter.json_fields = self.JSON_FIELDS
os_json_formatter.extras = self.EXTRA_FIELDS
formatted = os_json_formatter.format(log_record)
data = json.loads(formatted)
assert all((key in self.JSON_FIELDS or key in self.EXTRA_FIELDS) for key in data.keys())
assert data["filename"] == "test_file.txt"
assert data["lineno"] == 1
assert data["levelname"] == "INFO"
assert data["dag_id"] == "dag1"
assert data["task_id"] == "task1"
assert data["execution_date"] == "2023-11-17"
assert data["try_number"] == "1"
assert data["log_id"] == "Some_log_id"
|
TestOpensearchJSONFormatter
|
python
|
numba__numba
|
numba/cuda/tests/cudapy/test_complex.py
|
{
"start": 2459,
"end": 4588
}
|
class ____(CUDATestCase):
def basic_values(self):
reals = [-0.0, +0.0, 1, -1, +1.5, -3.5,
float('-inf'), float('+inf'), float('nan')]
return [complex(x, y) for x, y in itertools.product(reals, reals)]
def more_values(self):
reals = [0.0, +0.0, 1, -1, -math.pi, +math.pi,
float('-inf'), float('+inf'), float('nan')]
return [complex(x, y) for x, y in itertools.product(reals, reals)]
def non_nan_values(self):
reals = [-0.0, +0.0, 1, -1, -math.pi, +math.pi,
float('inf'), float('-inf')]
return [complex(x, y) for x, y in itertools.product(reals, reals)]
def run_func(self, pyfunc, sigs, values, ulps=1, ignore_sign_on_zero=False):
for sig in sigs:
if isinstance(sig, types.Type):
sig = sig,
if isinstance(sig, tuple):
# Assume return type is the type of first argument
sig = sig[0](*sig)
prec = ('single'
if sig.args[0] in (types.float32, types.complex64)
else 'double')
cudafunc = compile_scalar_func(pyfunc, sig.args, sig.return_type)
ok_values = []
expected_list = []
for args in values:
if not isinstance(args, (list, tuple)):
args = args,
try:
expected_list.append(pyfunc(*args))
ok_values.append(args)
except ValueError as e:
self.assertIn("math domain error", str(e))
continue
got_list = cudafunc(ok_values)
for got, expected, args in zip(got_list, expected_list, ok_values):
msg = 'for input %r with prec %r' % (args, prec)
self.assertPreciseEqual(got, expected, prec=prec,
ulps=ulps,
ignore_sign_on_zero=ignore_sign_on_zero,
msg=msg)
run_unary = run_func
run_binary = run_func
|
BaseComplexTest
|
python
|
plotly__plotly.py
|
plotly/graph_objs/surface/contours/z/_project.py
|
{
"start": 233,
"end": 5178
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "surface.contours.z"
_path_str = "surface.contours.z.project"
_valid_props = {"x", "y", "z"}
@property
def x(self):
"""
Determines whether or not these contour lines are projected on
the x plane. If `highlight` is set to True (the default), the
projected lines are shown on hover. If `show` is set to True,
the projected lines are shown in permanence.
The 'x' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Determines whether or not these contour lines are projected on
the y plane. If `highlight` is set to True (the default), the
projected lines are shown on hover. If `show` is set to True,
the projected lines are shown in permanence.
The 'y' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def z(self):
"""
Determines whether or not these contour lines are projected on
the z plane. If `highlight` is set to True (the default), the
projected lines are shown on hover. If `show` is set to True,
the projected lines are shown in permanence.
The 'z' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def _prop_descriptions(self):
return """\
x
Determines whether or not these contour lines are
projected on the x plane. If `highlight` is set to True
(the default), the projected lines are shown on hover.
If `show` is set to True, the projected lines are shown
in permanence.
y
Determines whether or not these contour lines are
projected on the y plane. If `highlight` is set to True
(the default), the projected lines are shown on hover.
If `show` is set to True, the projected lines are shown
in permanence.
z
Determines whether or not these contour lines are
projected on the z plane. If `highlight` is set to True
(the default), the projected lines are shown on hover.
If `show` is set to True, the projected lines are shown
in permanence.
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Project object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.surface.contours.z.Project`
x
Determines whether or not these contour lines are
projected on the x plane. If `highlight` is set to True
(the default), the projected lines are shown on hover.
If `show` is set to True, the projected lines are shown
in permanence.
y
Determines whether or not these contour lines are
projected on the y plane. If `highlight` is set to True
(the default), the projected lines are shown on hover.
If `show` is set to True, the projected lines are shown
in permanence.
z
Determines whether or not these contour lines are
projected on the z plane. If `highlight` is set to True
(the default), the projected lines are shown on hover.
If `show` is set to True, the projected lines are shown
in permanence.
Returns
-------
Project
"""
super().__init__("project")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.surface.contours.z.Project
constructor must be a dict or
an instance of :class:`plotly.graph_objs.surface.contours.z.Project`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._set_property("z", arg, z)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Project
|
python
|
has2k1__plotnine
|
plotnine/geoms/geom_density_2d.py
|
{
"start": 77,
"end": 494
}
|
class ____(geom_path):
"""
2D density estimate
{usage}
This is a 2d version of [](`~plotnine.geoms.geom_density`).
Parameters
----------
{common_parameters}
See Also
--------
plotnine.stat_density_2d : The default `stat` for this `geom`.
"""
DEFAULT_PARAMS = {
"stat": "density_2d",
"position": "identity",
"na_rm": False,
}
|
geom_density_2d
|
python
|
huggingface__transformers
|
src/transformers/modeling_outputs.py
|
{
"start": 10833,
"end": 14126
}
|
class ____(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) after further processing
through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
the classification token after processing through a linear layer and a tanh activation function. The linear
layer weights are trained from the next sentence prediction (classification) objective during pretraining.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
past_key_values: Optional[Cache] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
|
BaseModelOutputWithPoolingAndCrossAttentions
|
python
|
viewflow__viewflow
|
viewflow/workflow/nodes/handle.py
|
{
"start": 1461,
"end": 2808
}
|
class ____(mixins.NextNodeMixin, Node):
"""
Task to be executed outside of the flow.
"""
task_type = "FUNCTION"
activation_class = HandleActivation
shape = {
"width": 150,
"height": 100,
"text-align": "middle",
"svg": """
<rect class="task" width="150" height="100" rx="5" ry="5"/>
<path class="task-marker"
d="m 15,20 c 10,-6 -8,-8 3,-15 l -9,0 c -11,7 7,9 -3,15 z
m -7,-12 l 5,0 m -4.5,3 l 4.5,0 m -3,3 l 5,0 m -4,3 l 5,0"/>
""",
}
bpmn_element = "scriptTask"
def __init__(self, func=None, undo_func=None, **kwargs):
super().__init__(**kwargs)
self._func = func
self._undo_func = undo_func
def _create_wrapper_function(self, origin_func, task):
def func(**kwargs):
with self.flow_class.lock(task.process.pk):
task.refresh_from_db()
activation = self.activation_class(task)
result = activation.run(origin_func, **kwargs)
activation.execute()
return result
return func
def run(self, task, **kwargs):
func = this.resolve(self.flow_class.instance, self._func)
wrapper = self._create_wrapper_function(func, task)
return wrapper(**kwargs)
|
Handle
|
python
|
great-expectations__great_expectations
|
great_expectations/exceptions/exceptions.py
|
{
"start": 5201,
"end": 5358
}
|
class ____(DataContextError):
def __init__(self, message) -> None:
self.message = message
super().__init__(self.message)
|
InvalidConfigError
|
python
|
kamyu104__LeetCode-Solutions
|
Python/create-binary-tree-from-descriptions.py
|
{
"start": 36,
"end": 611
}
|
class ____(object):
def createBinaryTree(self, descriptions):
"""
:type descriptions: List[List[int]]
:rtype: Optional[TreeNode]
"""
nodes = {}
children = set()
for p, c, l in descriptions:
parent = nodes.setdefault(p, TreeNode(p))
child = nodes.setdefault(c, TreeNode(c))
if l:
parent.left = child
else:
parent.right = child
children.add(c)
return nodes[next(p for p in nodes.iterkeys() if p not in children)]
|
Solution
|
python
|
django__django
|
tests/signals/models.py
|
{
"start": 536,
"end": 701
}
|
class ____(models.Model):
name = models.CharField(max_length=20)
authors = models.ManyToManyField(Author)
def __str__(self):
return self.name
|
Book
|
python
|
airbytehq__airbyte
|
airbyte-integrations/bases/connector-acceptance-test/unit_tests/test_config.py
|
{
"start": 198,
"end": 9374
}
|
class ____:
@pytest.mark.parametrize(
"raw_config, expected_output_config, expected_error",
[
pytest.param(
{"connector_image": "foo", "tests": {"spec": [{"spec_path": "my-spec-path"}]}},
config.Config(
connector_image="foo",
acceptance_tests=config.AcceptanceTestConfigurations(
spec=config.GenericTestConfig(tests=[config.SpecTestConfig(spec_path="my-spec-path")])
),
),
does_not_raise(),
id="Legacy config should be parsed without error.",
),
pytest.param(
{"connector_image": "foo", "acceptance_tests": {}, "test_strictness_level": "extra-low"},
None,
pytest.raises(ValidationError),
id="Invalid test mode: ValidationError",
),
pytest.param(
{"connector_image": "foo", "acceptance_tests": {}, "test_strictness_level": "low"},
config.Config(
connector_image="foo",
test_strictness_level=config.Config.TestStrictnessLevel.low,
acceptance_tests=config.AcceptanceTestConfigurations(),
),
does_not_raise(),
id="Valid test mode: low",
),
pytest.param(
{"connector_image": "foo", "acceptance_tests": {}, "test_strictness_level": "high"},
config.Config(
connector_image="foo",
test_strictness_level=config.Config.TestStrictnessLevel.high,
acceptance_tests=config.AcceptanceTestConfigurations(),
),
does_not_raise(),
id="Valid test mode: high",
),
pytest.param(
{
"connector_image": "foo",
"acceptance_tests": {
"spec": {"bypass_reason": "My good reason to bypass"},
},
},
config.Config(
connector_image="foo",
acceptance_tests=config.AcceptanceTestConfigurations(
spec=config.GenericTestConfig(bypass_reason="My good reason to bypass")
),
),
does_not_raise(),
id="A test can only have a bypass reason.",
),
pytest.param(
{
"connector_image": "foo",
"acceptance_tests": {
"spec": {"bypass_reason": "My good reason to bypass"},
},
},
config.Config(
connector_image="foo",
acceptance_tests=config.AcceptanceTestConfigurations(
spec=config.GenericTestConfig(bypass_reason="My good reason to bypass")
),
),
does_not_raise(),
id="A test can only have a test configuration.",
),
pytest.param(
{
"connector_image": "foo",
"acceptance_tests": {
"spec": {"tests": [{"spec_path": "my-spec-path"}], "bypass_reason": "I'm not bypassing"},
},
},
None,
pytest.raises(ValidationError),
id="A test can't have a bypass reason and a test configuration.",
),
],
)
def test_config_parsing(self, raw_config, expected_output_config, expected_error):
with expected_error:
parsed_config = config.Config.parse_obj(raw_config)
assert parsed_config == expected_output_config
@pytest.mark.parametrize(
"legacy_config, expected_parsed_config",
[
pytest.param(
{
"connector_image": "airbyte/source-pokeapi",
"tests": {
"connection": [
{"config_path": "integration_tests/config.json", "status": "succeed"},
{"config_path": "integration_tests/bad_config.json", "status": "failed"},
],
"discovery": [{"config_path": "integration_tests/config.json"}],
"basic_read": [
{
"config_path": "integration_tests/config.json",
"configured_catalog_path": "integration_tests/configured_catalog.json",
}
],
},
},
config.Config(
connector_image="airbyte/source-pokeapi",
test_strictness_level=config.Config.TestStrictnessLevel.low,
acceptance_tests=config.AcceptanceTestConfigurations(
connection=config.GenericTestConfig(
tests=[
config.ConnectionTestConfig(
config_path="integration_tests/config.json", status=config.ConnectionTestConfig.Status.Succeed
),
config.ConnectionTestConfig(
config_path="integration_tests/bad_config.json", status=config.ConnectionTestConfig.Status.Failed
),
]
),
discovery=config.GenericTestConfig(tests=[config.DiscoveryTestConfig(config_path="integration_tests/config.json")]),
basic_read=config.GenericTestConfig(
tests=[
config.BasicReadTestConfig(
config_path="integration_tests/config.json",
configured_catalog_path="integration_tests/configured_catalog.json",
)
]
),
),
),
id="A legacy raw config is parsed into a new config structure without error.",
),
pytest.param(
{
"connector_image": "airbyte/source-pokeapi",
"test_strictness_level": "high",
"tests": {
"connection": [
{"config_path": "integration_tests/config.json", "status": "succeed"},
{"config_path": "integration_tests/bad_config.json", "status": "failed"},
],
"discovery": [{"config_path": "integration_tests/config.json"}],
"basic_read": [
{
"config_path": "integration_tests/config.json",
"configured_catalog_path": "integration_tests/configured_catalog.json",
}
],
},
},
config.Config(
connector_image="airbyte/source-pokeapi",
test_strictness_level=config.Config.TestStrictnessLevel.high,
acceptance_tests=config.AcceptanceTestConfigurations(
connection=config.GenericTestConfig(
tests=[
config.ConnectionTestConfig(
config_path="integration_tests/config.json", status=config.ConnectionTestConfig.Status.Succeed
),
config.ConnectionTestConfig(
config_path="integration_tests/bad_config.json", status=config.ConnectionTestConfig.Status.Failed
),
]
),
discovery=config.GenericTestConfig(tests=[config.DiscoveryTestConfig(config_path="integration_tests/config.json")]),
basic_read=config.GenericTestConfig(
tests=[
config.BasicReadTestConfig(
config_path="integration_tests/config.json",
configured_catalog_path="integration_tests/configured_catalog.json",
)
]
),
),
),
id="A legacy raw config, with a test_strictness_level defined, is parsed into a new config structure without error.",
),
],
)
def test_legacy_config_migration(self, legacy_config, expected_parsed_config):
assert config.Config.is_legacy(legacy_config)
assert config.Config.parse_obj(legacy_config) == expected_parsed_config
|
TestConfig
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_grpc/types.py
|
{
"start": 11366,
"end": 13775
}
|
class ____(
NamedTuple(
"_ListRepositoriesResponse",
[
("repository_symbols", Sequence[LoadableRepositorySymbol]),
("executable_path", Optional[str]),
("repository_code_pointer_dict", Mapping[str, CodePointer]),
("entry_point", Optional[Sequence[str]]),
("container_image", Optional[str]),
("container_context", Optional[Mapping[str, Any]]),
("dagster_library_versions", Optional[Mapping[str, str]]),
("defs_state_info", Optional[DefsStateInfo]),
],
)
):
def __new__(
cls,
repository_symbols: Sequence[LoadableRepositorySymbol],
executable_path: Optional[str] = None,
repository_code_pointer_dict: Optional[Mapping[str, CodePointer]] = None,
entry_point: Optional[Sequence[str]] = None,
container_image: Optional[str] = None,
container_context: Optional[Mapping] = None,
dagster_library_versions: Optional[Mapping[str, str]] = None,
defs_state_info: Optional[DefsStateInfo] = None,
):
return super().__new__(
cls,
repository_symbols=check.sequence_param(
repository_symbols, "repository_symbols", of_type=LoadableRepositorySymbol
),
executable_path=check.opt_str_param(executable_path, "executable_path"),
repository_code_pointer_dict=check.opt_mapping_param(
repository_code_pointer_dict,
"repository_code_pointer_dict",
key_type=str,
value_type=CodePointer,
),
entry_point=(
check.sequence_param(entry_point, "entry_point", of_type=str)
if entry_point is not None
else None
),
container_image=check.opt_str_param(container_image, "container_image"),
container_context=(
check.dict_param(container_context, "container_context")
if container_context is not None
else None
),
dagster_library_versions=check.opt_nullable_mapping_param(
dagster_library_versions, "dagster_library_versions"
),
defs_state_info=check.opt_inst_param(defs_state_info, "defs_state_info", DefsStateInfo),
)
@whitelist_for_serdes
|
ListRepositoriesResponse
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-sub-islands.py
|
{
"start": 33,
"end": 780
}
|
class ____(object):
def countSubIslands(self, grid1, grid2):
"""
:type grid1: List[List[int]]
:type grid2: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def dfs(grid1, grid2, i, j):
if not (0 <= i < len(grid2) and
0 <= j < len(grid2[0]) and
grid2[i][j] == 1):
return 1
grid2[i][j] = 0
result = grid1[i][j]
for di, dj in directions:
result &= dfs(grid1, grid2, i+di, j+dj)
return result
return sum(dfs(grid1, grid2, i, j) for i in xrange(len(grid2)) for j in xrange(len(grid2[0])) if grid2[i][j])
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-kvdb/destination_kvdb/client.py
|
{
"start": 143,
"end": 2729
}
|
class ____:
base_url = "https://kvdb.io"
PAGE_SIZE = 1000
def __init__(self, bucket_id: str, secret_key: str = None):
self.secret_key = secret_key
self.bucket_id = bucket_id
def write(self, key: str, value: Mapping[str, Any]):
return self.batch_write([(key, value)])
def batch_write(self, keys_and_values: List[Tuple[str, Mapping[str, Any]]]):
"""
https://kvdb.io/docs/api/#execute-transaction
"""
request_body = {"txn": [{"set": key, "value": value} for key, value in keys_and_values]}
return self._request("POST", json=request_body)
def list_keys(self, list_values: bool = False, prefix: str = None) -> Iterable[Union[str, List]]:
"""
https://kvdb.io/docs/api/#list-keys
"""
# TODO handle rate limiting
pagination_complete = False
offset = 0
while not pagination_complete:
response = self._request(
"GET",
params={
"limit": self.PAGE_SIZE,
"skip": offset,
"format": "json",
"prefix": prefix or "",
"values": "true" if list_values else "false",
},
endpoint="/", # the "list" endpoint doesn't work without adding a trailing slash to the URL
)
response_json = response.json()
yield from response_json
pagination_complete = len(response_json) < self.PAGE_SIZE
offset += self.PAGE_SIZE
def delete(self, key: Union[str, List[str]]):
"""
https://kvdb.io/docs/api/#execute-transaction
"""
key_list = key if isinstance(key, List) else [key]
request_body = {"txn": [{"delete": k} for k in key_list]}
return self._request("POST", json=request_body)
def _get_base_url(self) -> str:
return f"{self.base_url}/{self.bucket_id}"
def _get_auth_headers(self) -> Mapping[str, Any]:
return {"Authorization": f"Bearer {self.secret_key}"} if self.secret_key else {}
def _request(
self, http_method: str, endpoint: str = None, params: Mapping[str, Any] = None, json: Mapping[str, Any] = None
) -> requests.Response:
url = self._get_base_url() + (endpoint or "")
headers = {"Accept": "application/json", **self._get_auth_headers()}
response = requests.request(method=http_method, params=params, url=url, headers=headers, json=json)
response.raise_for_status()
return response
|
KvDbClient
|
python
|
sphinx-doc__sphinx
|
sphinx/builders/linkcheck.py
|
{
"start": 12082,
"end": 26528
}
|
class ____(Thread):
"""A worker class for checking the availability of hyperlinks."""
def __init__(
self,
config: Config,
rqueue: Queue[CheckResult],
wqueue: Queue[CheckRequest],
rate_limits: dict[str, RateLimit],
) -> None:
self.rate_limits = rate_limits
self.rqueue = rqueue
self.wqueue = wqueue
self.anchors_ignore: list[re.Pattern[str]] = list(
map(re.compile, config.linkcheck_anchors_ignore)
)
self.anchors_ignore_for_url: list[re.Pattern[str]] = list(
map(re.compile, config.linkcheck_anchors_ignore_for_url)
)
self.documents_exclude: list[re.Pattern[str]] = list(
map(re.compile, config.linkcheck_exclude_documents)
)
self.ignore_case: Sequence[re.Pattern[str]] = tuple(
map(re.compile, config.linkcheck_case_insensitive_urls)
)
self.auth = [
(re.compile(pattern), auth_info)
for pattern, auth_info in config.linkcheck_auth
]
self.timeout: int | float | None = config.linkcheck_timeout
self.request_headers: dict[str, dict[str, str]] = (
config.linkcheck_request_headers
)
self.check_anchors: bool = config.linkcheck_anchors
self.allowed_redirects: dict[re.Pattern[str], re.Pattern[str]]
self.allowed_redirects = config.linkcheck_allowed_redirects
self.retries: int = config.linkcheck_retries
self.rate_limit_timeout = config.linkcheck_rate_limit_timeout
self._allow_unauthorized = config.linkcheck_allow_unauthorized
self._timeout_status: Literal[_Status.BROKEN, _Status.TIMEOUT]
if config.linkcheck_report_timeouts_as_broken:
self._timeout_status = _Status.BROKEN
else:
self._timeout_status = _Status.TIMEOUT
self.user_agent = config.user_agent
self.tls_verify = config.tls_verify
self.tls_cacerts = config.tls_cacerts
self._session = requests._Session(
_ignored_redirects=tuple(map(re.compile, config.linkcheck_ignore))
)
super().__init__(daemon=True)
def run(self) -> None:
while True:
next_check, hyperlink = self.wqueue.get()
if hyperlink is None:
# An empty hyperlink is a signal to shutdown the worker; cleanup resources here
self._session.close()
break
uri, docname, _docpath, lineno = hyperlink
if uri is None:
break
netloc = urlsplit(uri).netloc
with contextlib.suppress(KeyError):
# Refresh rate limit.
# When there are many links in the queue, workers are all stuck waiting
# for responses, but the builder keeps queuing. Links in the queue may
# have been queued before rate limits were discovered.
next_check = self.rate_limits[netloc].next_check
if next_check > time.time():
# Sleep before putting message back in the queue to avoid
# waking up other threads.
time.sleep(QUEUE_POLL_SECS)
self.wqueue.put(CheckRequest(next_check, hyperlink), False)
self.wqueue.task_done()
continue
status, info, code = self._check(docname, uri, hyperlink)
if status == _Status.RATE_LIMITED:
logger.info(
darkgray('-rate limited- ') + uri + darkgray(' | sleeping...') # NoQA: G003
)
else:
self.rqueue.put(CheckResult(uri, docname, lineno, status, info, code))
self.wqueue.task_done()
def _check(self, docname: str, uri: str, hyperlink: Hyperlink) -> _URIProperties:
# check for various conditions without bothering the network
for doc_matcher in self.documents_exclude:
if doc_matcher.match(docname):
info = (
f'{docname} matched {doc_matcher.pattern} from '
'linkcheck_exclude_documents'
)
return _Status.IGNORED, info, 0
if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'tel:')):
return _Status.UNCHECKED, '', 0
if not uri.startswith(('http:', 'https:')):
if uri_re.match(uri):
# Non-supported URI schemes (ex. ftp)
return _Status.UNCHECKED, '', 0
if (hyperlink.docpath.parent / uri).exists():
return _Status.WORKING, '', 0
return _Status.BROKEN, '', 0
# need to actually check the URI
status: _Status
status, info, code = _Status.UNKNOWN, '', 0
for _ in range(self.retries):
status, info, code = self._check_uri(uri, hyperlink)
if status != _Status.BROKEN:
break
return status, info, code
def _retrieval_methods(
self,
check_anchors: bool,
anchor: str,
) -> Iterator[tuple[Callable[..., Response], dict[str, bool]]]:
if not check_anchors or not anchor:
yield self._session.head, {'allow_redirects': True}
yield self._session.get, {'stream': True}
def _check_uri(self, uri: str, hyperlink: Hyperlink) -> _URIProperties:
req_url, delimiter, anchor = uri.partition('#')
if delimiter and anchor:
for rex in self.anchors_ignore:
if rex.match(anchor):
anchor = ''
break
else:
for rex in self.anchors_ignore_for_url:
if rex.match(req_url):
anchor = ''
break
anchor = unquote(anchor)
# handle non-ASCII URIs
try:
req_url.encode('ascii')
except UnicodeError:
req_url = encode_uri(req_url)
# Get auth info, if any
for pattern, auth_info in self.auth: # NoQA: B007 (false positive)
if pattern.match(uri):
break
else:
auth_info = None
# update request headers for the URL
headers = _get_request_headers(uri, self.request_headers)
# Linkcheck HTTP request logic:
#
# - Attempt HTTP HEAD before HTTP GET unless page content is required.
# - Follow server-issued HTTP redirects.
# - Respect server-issued HTTP 429 back-offs.
error_message = ''
status_code = -1
response_url = retry_after = ''
for retrieval_method, kwargs in self._retrieval_methods(
self.check_anchors, anchor
):
try:
with retrieval_method(
url=req_url,
auth=auth_info,
headers=headers,
timeout=self.timeout,
**kwargs,
_user_agent=self.user_agent,
_tls_info=(self.tls_verify, self.tls_cacerts),
) as response:
if anchor and self.check_anchors and response.ok:
try:
found = contains_anchor(response, anchor)
except UnicodeDecodeError:
return (
_Status.IGNORED,
'unable to decode response content',
0,
)
if not found:
return (
_Status.BROKEN,
__("Anchor '%s' not found") % quote(anchor),
0,
)
# Copy data we need from the (closed) response
status_code = response.status_code
redirect_status_code = (
response.history[-1].status_code if response.history else None
)
retry_after = response.headers.get('Retry-After', '')
response_url = f'{response.url}'
response.raise_for_status()
del response
break
except RequestTimeout as err:
return self._timeout_status, str(err), 0
except SSLError as err:
# SSL failure; report that the link is broken.
return _Status.BROKEN, str(err), 0
except (ConnectionError, TooManyRedirects) as err:
# Servers drop the connection on HEAD requests, causing
# ConnectionError.
error_message = str(err)
continue
except requests._IgnoredRedirection as err:
# A redirection to an ignored URI was attempted; report it appropriately
return (
_Status.IGNORED,
f'ignored redirect: {err.destination}',
err.status_code,
)
except HTTPError as err:
error_message = str(err)
# Unauthorized: the client did not provide required credentials
if status_code == 401:
if self._allow_unauthorized:
return _Status.WORKING, 'unauthorized', 0
else:
return _Status.BROKEN, 'unauthorized', 0
# Rate limiting; back-off if allowed, or report failure otherwise
if status_code == 429:
if next_check := self.limit_rate(response_url, retry_after):
self.wqueue.put(CheckRequest(next_check, hyperlink), False)
return _Status.RATE_LIMITED, '', 0
return _Status.BROKEN, error_message, 0
# Don't claim success/failure during server-side outages
if status_code == 503:
return _Status.IGNORED, 'service unavailable', 0
# For most HTTP failures, continue attempting alternate retrieval methods
continue
except Exception as err:
# Unhandled exception (intermittent or permanent); report that
# the link is broken.
return _Status.BROKEN, str(err), 0
else:
# All available retrieval methods have been exhausted; report
# that the link is broken.
return _Status.BROKEN, error_message, 0
# Success; clear rate limits for the origin
netloc = urlsplit(req_url).netloc
self.rate_limits.pop(netloc, None)
# Check if URL should be normalised case-insensitively
ignore_case = any(pat.match(req_url) for pat in self.ignore_case)
normalised_req_url = self._normalise_url(req_url, ignore_case=ignore_case)
normalised_response_url = self._normalise_url(
response_url, ignore_case=ignore_case
)
if (
normalised_response_url == normalised_req_url
or _allowed_redirect(req_url, response_url, self.allowed_redirects)
): # fmt: skip
return _Status.WORKING, '', 0
elif redirect_status_code is not None:
return _Status.REDIRECTED, response_url, redirect_status_code
else:
return _Status.REDIRECTED, response_url, 0
def limit_rate(self, response_url: str, retry_after: str | None) -> float | None:
delay = DEFAULT_DELAY
next_check = None
if retry_after:
try:
# Integer: time to wait before next attempt.
delay = float(retry_after)
except ValueError:
try:
# An HTTP-date: time of next attempt.
next_check = rfc1123_to_epoch(retry_after)
except (ValueError, TypeError):
# TypeError: Invalid date format.
# ValueError: Invalid date, e.g. Oct 52th.
pass
else:
delay = next_check - time.time()
else:
next_check = time.time() + delay
netloc = urlsplit(response_url).netloc
if next_check is None:
max_delay = self.rate_limit_timeout
try:
rate_limit = self.rate_limits[netloc]
except KeyError:
delay = DEFAULT_DELAY
else:
last_wait_time = rate_limit.delay
delay = 2.0 * last_wait_time
if delay > max_delay > last_wait_time:
delay = max_delay
if delay > max_delay:
return None
next_check = time.time() + delay
self.rate_limits[netloc] = RateLimit(delay, next_check)
return next_check
@staticmethod
def _normalise_url(url: str, *, ignore_case: bool) -> str:
normalised_url = url.rstrip('/')
if not ignore_case:
return normalised_url
# URI fragments are case-sensitive
url_part, sep, fragment = normalised_url.partition('#')
if sep:
return f'{url_part.casefold()}#{fragment}'
return url_part.casefold()
def _get_request_headers(
uri: str,
request_headers: dict[str, dict[str, str]],
) -> dict[str, str]:
url = urlsplit(uri)
candidates = (
f'{url.scheme}://{url.netloc}',
f'{url.scheme}://{url.netloc}/',
uri,
'*',
)
for u in candidates:
if u in request_headers:
return {**DEFAULT_REQUEST_HEADERS, **request_headers[u]}
return {}
def contains_anchor(response: Response, anchor: str) -> bool:
"""Determine if an anchor is contained within an HTTP response."""
parser = AnchorCheckParser(anchor)
# Read file in chunks. If we find a matching anchor, we break
# the loop early in hopes not to have to download the whole thing.
for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
if isinstance(chunk, bytes):
# requests failed to decode, manually try to decode it
chunk = chunk.decode()
parser.feed(chunk)
if parser.found:
break
parser.close()
return parser.found
|
HyperlinkAvailabilityCheckWorker
|
python
|
facebook__pyre-check
|
client/language_server/protocol.py
|
{
"start": 11926,
"end": 12181
}
|
class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
uri: str
language_id: str
version: int
text: str
def document_uri(self) -> DocumentUri:
return DocumentUri.parse(self.uri)
@dataclasses.dataclass(frozen=True)
|
TextDocumentItem
|
python
|
openai__openai-python
|
src/openai/types/containers/file_create_params.py
|
{
"start": 230,
"end": 412
}
|
class ____(TypedDict, total=False):
file: FileTypes
"""The File object (not file name) to be uploaded."""
file_id: str
"""Name of the file to create."""
|
FileCreateParams
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/methodOverride6.py
|
{
"start": 4664,
"end": 4834
}
|
class ____:
@overload
def m1(self, x: int) -> int: ...
@overload
def m1(self, x: str) -> str: ...
def m1(self, x: int | str) -> int | str: ...
|
Parent5
|
python
|
kamyu104__LeetCode-Solutions
|
Python/check-if-a-string-contains-all-binary-codes-of-size-k.py
|
{
"start": 309,
"end": 787
}
|
class ____(object):
def hasAllCodes(self, s, k):
"""
:type s: str
:type k: int
:rtype: bool
"""
lookup = set()
base = 2**k
if base > len(s):
return False
num = 0
for i in xrange(len(s)):
num = (num << 1) + (s[i] == '1')
if i >= k-1:
lookup.add(num)
num -= (s[i-k+1] == '1') * (base//2)
return len(lookup) == base
|
Solution2
|
python
|
RaRe-Technologies__gensim
|
gensim/models/doc2vec.py
|
{
"start": 52959,
"end": 54756
}
|
class ____:
def __init__(self, source):
"""Iterate over a file that contains documents:
one line = :class:`~gensim.models.doc2vec.TaggedDocument` object.
Words are expected to be already preprocessed and separated by whitespace. Document tags are constructed
automatically from the document line number (each document gets a unique integer tag).
Parameters
----------
source : string or a file-like object
Path to the file on disk, or an already-open file object (must support `seek(0)`).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>> from gensim.models.doc2vec import TaggedLineDocument
>>>
>>> for document in TaggedLineDocument(datapath("head500.noblanks.cor")):
... pass
"""
self.source = source
def __iter__(self):
"""Iterate through the lines in the source.
Yields
------
:class:`~gensim.models.doc2vec.TaggedDocument`
Document from `source` specified in the constructor.
"""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for item_no, line in enumerate(self.source):
yield TaggedDocument(utils.to_unicode(line).split(), [item_no])
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.open(self.source, 'rb') as fin:
for item_no, line in enumerate(fin):
yield TaggedDocument(utils.to_unicode(line).split(), [item_no])
|
TaggedLineDocument
|
python
|
ray-project__ray
|
python/ray/tests/test_autoscaling_policy.py
|
{
"start": 1514,
"end": 2045
}
|
class ____:
def __init__(
self,
duration: float,
bundles: List[Dict[str, float]],
strategy: int,
start_callback: Callable[[None], None] = None,
done_callback: Callable[[None], None] = None,
):
self.duration = duration
self.bundles = bundles
self.strategy = strategy
self.start_callback = start_callback
self.done_callback = done_callback
self.start_time = None
self.end_time = None
self.node = None
|
PlacementGroup
|
python
|
apache__airflow
|
providers/yandex/src/airflow/providers/yandex/links/yq.py
|
{
"start": 1115,
"end": 1545
}
|
class ____(BaseOperatorLink):
"""Web link to query in Yandex Query UI."""
name = "Yandex Query"
def get_link(self, operator: BaseOperator, *, ti_key: TaskInstanceKey):
return XCom.get_value(key=XCOM_WEBLINK_KEY, ti_key=ti_key) or "https://yq.cloud.yandex.ru"
@staticmethod
def persist(context: Context, web_link: str) -> None:
context["ti"].xcom_push(key=XCOM_WEBLINK_KEY, value=web_link)
|
YQLink
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-opendal/llama_index/readers/opendal/gcs/base.py
|
{
"start": 318,
"end": 2075
}
|
class ____(BaseReader):
"""General reader for any Gcs file or directory."""
def __init__(
self,
bucket: str,
path: str = "/",
endpoint: str = "",
credentials: str = "",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
) -> None:
"""
Initialize Gcs container, along with credentials if needed.
If key is not set, the entire bucket (filtered by prefix) is parsed.
Args:
bucket (str): the name of your gcs bucket
path (str): the path of the data. If none is provided,
this loader will iterate through the entire bucket. If path is endswith `/`, this loader will iterate through the entire dir. Otherwise, this loeader will load the file.
endpoint Optional[str]: the endpoint of the azblob service.
credentials (Optional[str]): provide credential string for GCS OAuth2 directly.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
super().__init__()
self.path = path
self.file_extractor = file_extractor
# opendal service related config.
self.options = {
"bucket": bucket,
"endpoint": endpoint,
"credentials": credentials,
}
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
loader = OpendalReader(
scheme="gcs",
path=self.path,
file_extractor=self.file_extractor,
**self.options,
)
return loader.load_data()
|
OpendalGcsReader
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_accounts.py
|
{
"start": 2104,
"end": 3331
}
|
class ____(TestCase):
@HttpMocker()
def test_full_refresh(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_create_accounts_request().with_limit(100).build(),
_create_response().with_record(record=_create_record()).build(),
)
source = get_source(config=_CONFIG, state=_NO_STATE)
actual_messages = read(source, config=_CONFIG, catalog=_create_catalog())
assert len(actual_messages.records) == 1
@HttpMocker()
def test_pagination(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_create_accounts_request().with_limit(100).build(),
_create_response().with_record(record=_create_record().with_id("last_record_id_from_first_page")).with_pagination().build(),
)
http_mocker.get(
_create_accounts_request().with_limit(100).with_starting_after("last_record_id_from_first_page").build(),
_create_response().with_record(record=_create_record()).build(),
)
source = get_source(config=_CONFIG, state=_NO_STATE)
actual_messages = read(source, config=_CONFIG, catalog=_create_catalog())
assert len(actual_messages.records) == 2
|
AccountsTest
|
python
|
gevent__gevent
|
src/gevent/tests/test__core_fork.py
|
{
"start": 644,
"end": 2738
}
|
class ____(unittest.TestCase):
def test(self):
self.assertEqual(hub.threadpool.size, 0)
# Use a thread to make us multi-threaded
hub.threadpool.apply(lambda: None)
self.assertEqual(hub.threadpool.size, 1)
# Not all platforms use fork by default, so we want to force it,
# where possible. The test is still useful even if we can't
# fork though.
try:
fork_ctx = multiprocessing.get_context('fork')
except (AttributeError, ValueError):
# ValueError if fork isn't supported.
# AttributeError on Python 2, which doesn't have get_context
fork_ctx = multiprocessing
# If the Queue is global, q.get() hangs on Windows; must pass as
# an argument.
q = fork_ctx.Queue()
p = fork_ctx.Process(target=in_child, args=(q,))
p.start()
p.join()
p_val = q.get()
self.assertIsNone(
newpid,
"The fork watcher ran in the parent for some reason."
)
self.assertIsNotNone(
p_val,
"The child process returned nothing, meaning the fork watcher didn't run in the child."
)
self.assertNotEqual(p_val, pid)
assert p_val != pid
if __name__ == '__main__':
# Must call for Windows to fork properly; the fork can't be in the top-level
multiprocessing.freeze_support()
# fork watchers weren't firing in multi-threading processes.
# This test is designed to prove that they are.
# However, it fails on Windows: The fork watcher never runs!
# This makes perfect sense: on Windows, our patches to os.fork()
# that call gevent.hub.reinit() don't get used; os.fork doesn't
# exist and multiprocessing.Process uses the windows-specific _subprocess.CreateProcess()
# to create a whole new process that has no relation to the current process;
# that process then calls multiprocessing.forking.main() to do its work.
# Since no state is shared, a fork watcher cannot exist in that process.
unittest.main()
|
Test
|
python
|
pdm-project__pdm
|
src/pdm/resolver/base.py
|
{
"start": 493,
"end": 887
}
|
class ____(t.NamedTuple):
"""The resolution result."""
packages: t.Iterable[Package]
"""The list of pinned packages with dependencies."""
collected_groups: set[str]
"""The list of collected groups."""
@property
def candidates(self) -> dict[str, Candidate]:
return {entry.candidate.identify(): entry.candidate for entry in self.packages}
@dataclass
|
Resolution
|
python
|
pytorch__pytorch
|
torchgen/model.py
|
{
"start": 99081,
"end": 103577
}
|
class ____:
base: str
inplace: bool
dunder_method: bool
# Note [Overload Ambiguity With Functional Variants]
# A handful of operators have both a "mutable" and a "functional" variant.
# (native_batch_norm is a good example, although this isn't the case today).
# For those operators, the mutable and functional variant take in the same set of
# arguments, but have different alias annotations.
# this makes it ambiguous when you try to resolve an OverloadPacket into an overload,
# given a set of input arguments.
#
# So instead of making the "functional" variant in this case a real overload, e.g:
# native_batch_norm (mutable variant)
# native_batch_norm.functional (functional variant)
# we make it a new base operator,
# native_batch_norm_functional (functional variant)
#
# In an ideal world, we would probably invert this so the operators were:
# native_batch_norm.mutable (mutable variant)
# native_batch_norm (functional variant)
#
# Doing that is BC-breaking though, so we're stuck with the above modeling.
functional_overload: bool = False
# NB: We don't officially support namespace in FunctionSchema, we treat this prefix
# as part of the base operator name, for __str__() to consume.
# The canonical input (from the rest of the infra) will not contain namespace, but
# we have a usecase in ExecuTorch where we want to support BaseOperatorName with namespace.
namespace: str | None = None
@staticmethod
def parse(op: str) -> BaseOperatorName:
assert op != ""
assert not op.endswith("_out"), (
"_out suffix is reserved and not permitted for operator names; "
"did you mean to specify an out overload name instead?"
)
# Extract namespace out. Base operator name may or may not contain namespace.
# E.g., aten::__lshift__ is a valid base operator name, __lshift__ is also valid.
# We want to split the namespace out from the base operator name.
match = re.match(r"^(?:(.*)::)?(.*)$", op)
namespace = match.group(1) if match else ""
op_without_ns = match.group(2) if match else op
m = re.match(r"^__([^_]+)__$", op_without_ns)
if m is not None:
dunder_method = True
base = m.group(1)
if any(base == f"i{n}" for n in AUGMENTED_ASSIGNMENT_NAMES):
inplace = True
base = base[1:]
else:
inplace = False
# temporary, this is not intrinsically true but
# has been historically true for dunder methods
# we support (but, if we ever got, say, __int__, this would
# be wrong!)
assert base[0] != "i"
else:
dunder_method = False
base = op_without_ns
if base[-1] == "_":
inplace = True
base = base[:-1]
else:
inplace = False
# See Note [Overload Ambiguity With Functional Variants]
functional_suffix = "_functional"
if base.endswith(functional_suffix):
functional_overload = True
base = base[: -len(functional_suffix)]
# This seems complicated and unnecessary, so banning dunder methods
# for now on ops that have a functional + mutable variant (like native_batch_norm).
assert not dunder_method and not inplace
else:
functional_overload = False
r = BaseOperatorName(
base=base,
inplace=inplace,
dunder_method=dunder_method,
functional_overload=functional_overload,
namespace=namespace,
)
assert str(r) == op, f"{str(r)} != {op}"
return r
def __str__(self) -> str:
namespace_prefix = f"{self.namespace}::" if self.namespace else ""
if self.dunder_method:
i = "i" if self.inplace else ""
return f"{namespace_prefix}__{i}{self.base}__"
else:
i = (
"_"
if self.inplace
else "_functional"
if self.functional_overload
else ""
)
return f"{namespace_prefix}{self.base}{i}"
# Operator name is the base operator name along with the (typically not
# user visible) overload string.
@dataclass(frozen=True)
|
BaseOperatorName
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/integrations/tableau/customize-tableau-asset-defs.py
|
{
"start": 715,
"end": 1673
}
|
class ____(DagsterTableauTranslator):
def get_asset_spec(self, data: TableauTranslatorData) -> dg.AssetSpec:
# We create the default asset spec using super()
default_spec = super().get_asset_spec(data)
# We customize the metadata and asset key prefix for all assets, including sheets,
# and we customize the team owner tag only for sheets.
return default_spec.replace_attributes(
key=default_spec.key.with_prefix("prefix"),
metadata={**default_spec.metadata, "custom": "metadata"},
owners=(
["team:my_team"]
if data.content_type == TableauContentType.SHEET
else ...
),
)
tableau_specs = load_tableau_asset_specs(
tableau_workspace,
dagster_tableau_translator=MyCustomTableauTranslator(),
)
defs = dg.Definitions(assets=[*tableau_specs], resources={"tableau": tableau_workspace})
|
MyCustomTableauTranslator
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/build_systems/sourceware.py
|
{
"start": 198,
"end": 768
}
|
class ____(PackageBase):
sourceware_mirror_path: Optional[str] = None
base_mirrors = [
"https://sourceware.org/pub/",
"https://mirrors.kernel.org/sourceware/",
"https://ftp.gwdg.de/pub/linux/sources.redhat.com/",
]
@property
def urls(self):
if self.sourceware_mirror_path is None:
raise AttributeError(f"{self.__class__.__name__}: `sourceware_mirror_path` missing")
return [
join_url(m, self.sourceware_mirror_path, resolve_href=True) for m in self.base_mirrors
]
|
SourcewarePackage
|
python
|
getsentry__sentry
|
tests/sentry/api/test_base.py
|
{
"start": 14017,
"end": 17050
}
|
class ____(APITestCase):
@mock.patch("rest_framework.views.APIView.handle_exception", return_value=Response(status=500))
def test_handle_exception_when_super_returns_response(
self, mock_super_handle_exception: MagicMock
):
mock_endpoint = DummyErroringEndpoint.as_view(error=Exception("nope"))
response = mock_endpoint(self.make_request(method="GET"))
# The endpoint should pass along the response generated by `APIView.handle_exception_with_details`
assert response == mock_super_handle_exception.return_value
@mock.patch("rest_framework.views.APIView.handle_exception", new=reraise)
@mock.patch("sentry.api.base.capture_exception", return_value="1231201211212012")
def test_handle_exception_when_super_reraises(
self,
mock_capture_exception: MagicMock,
):
handler_context = {"api_request_URL": "http://dogs.are.great/"}
scope = Scope()
tags = {"maisey": "silly", "charlie": "goofy"}
for tag, value in tags.items():
scope.set_tag(tag, value)
cases = [
# The first half of each tuple is what's passed to `handle_exception`, and the second
# half is what we expect in the scope passed to `capture_exception`
(None, None, {}, {}),
(handler_context, None, {"Request Handler Data": handler_context}, {}),
(None, scope, {}, tags),
(
handler_context,
scope,
{"Request Handler Data": handler_context},
tags,
),
]
for handler_context_arg, scope_arg, expected_scope_contexts, expected_scope_tags in cases:
handler_error = Exception("nope")
mock_endpoint = DummyErroringEndpoint.as_view(
error=handler_error,
handler_context_arg=handler_context_arg,
scope_arg=scope_arg,
)
with mock.patch("sys.stderr.write") as mock_stderr_write:
response = mock_endpoint(self.make_request(method="GET"))
assert response.status_code == 500
assert response.data == {
"detail": "Internal Error",
"errorId": "1231201211212012",
}
assert response.exception is True
(((s,), _),) = mock_stderr_write.call_args_list
assert s.splitlines()[-1] == "Exception: nope"
capture_exception_handler_context_arg = mock_capture_exception.call_args.args[0]
capture_exception_scope_kwarg = mock_capture_exception.call_args.kwargs.get("scope")
assert capture_exception_handler_context_arg == handler_error
assert isinstance(capture_exception_scope_kwarg, Scope)
assert capture_exception_scope_kwarg._contexts == expected_scope_contexts
assert capture_exception_scope_kwarg._tags == expected_scope_tags
|
EndpointHandleExceptionTest
|
python
|
streamlit__streamlit
|
lib/streamlit/components/v2/component_path_utils.py
|
{
"start": 1180,
"end": 8706
}
|
class ____:
"""Utility class for component path operations and security validation."""
@staticmethod
def has_glob_characters(path: str) -> bool:
"""Check if a path contains glob pattern characters.
Parameters
----------
path : str
The path to check
Returns
-------
bool
True if the path contains glob characters
"""
return any(char in path for char in ["*", "?", "[", "]"])
@staticmethod
def validate_path_security(path: str) -> None:
"""Validate that a path doesn't contain security vulnerabilities.
Parameters
----------
path : str
The path to validate
Raises
------
StreamlitComponentRegistryError
If the path contains security vulnerabilities like path traversal attempts
"""
ComponentPathUtils._assert_relative_no_traversal(path, label="component paths")
@staticmethod
def resolve_glob_pattern(pattern: str, package_root: Path) -> Path:
"""Resolve a glob pattern to a single file path with security checks.
Parameters
----------
pattern : str
The glob pattern to resolve
package_root : Path
The package root directory for security validation
Returns
-------
Path
The resolved file path
Raises
------
StreamlitComponentRegistryError
If zero or more than one file matches the pattern, or if security
checks fail (path traversal attempts)
"""
# Ensure pattern is relative and doesn't contain path traversal attempts
ComponentPathUtils._assert_relative_no_traversal(pattern, label="glob patterns")
# Use glob from the package root so subdirectory patterns are handled correctly
matching_files = list(package_root.glob(pattern))
# Ensure all matched files are within package_root (security check)
validated_files = []
for file_path in matching_files:
try:
# Resolve to absolute path and check if it's within package_root
resolved_path = file_path.resolve()
package_root_resolved = package_root.resolve()
# Check if the resolved path is within the package root using
# pathlib's relative path check to avoid prefix-matching issues
if not resolved_path.is_relative_to(package_root_resolved):
_LOGGER.warning(
"Skipping file outside package root: %s", resolved_path
)
continue
validated_files.append(resolved_path)
except (OSError, ValueError) as e:
_LOGGER.warning("Failed to resolve path %s: %s", file_path, e)
continue
# Ensure exactly one file matches
if len(validated_files) == 0:
raise StreamlitComponentRegistryError(
f"No files found matching pattern '{pattern}' in package root {package_root}"
)
if len(validated_files) > 1:
file_list = ", ".join(str(f) for f in validated_files)
raise StreamlitComponentRegistryError(
f"Multiple files found matching pattern '{pattern}': {file_list}. "
"Exactly one file must match the pattern."
)
return Path(validated_files[0])
@staticmethod
def _assert_relative_no_traversal(path: str, *, label: str) -> None:
"""Raise if ``path`` is absolute or contains ``..`` segments.
Parameters
----------
path : str
Path string to validate.
label : str
Human-readable label used in error messages (e.g., "component paths").
"""
# Absolute path checks (POSIX, Windows drive-letter, UNC)
is_windows_drive_abs = (
len(path) >= 3
and path[0].isalpha()
and path[1] == ":"
and path[2] in ("/", "\\")
)
is_unc_abs = path.startswith("\\\\")
# Consider rooted backslash paths "\\dir" as absolute on Windows-like inputs
is_rooted_backslash = path.startswith("\\") and not is_unc_abs
if (
os.path.isabs(path)
or is_windows_drive_abs
or is_unc_abs
or is_rooted_backslash
):
raise StreamlitComponentRegistryError(
f"Absolute paths are not allowed in {label}: {path}"
)
# Segment-based traversal detection to avoid false positives (e.g. "file..js")
normalized = path.replace("\\", "/")
segments = [seg for seg in normalized.split("/") if seg != ""]
if any(seg == ".." for seg in segments):
raise StreamlitComponentRegistryError(
f"Path traversal attempts are not allowed in {label}: {path}"
)
@staticmethod
def ensure_within_root(abs_path: Path, root: Path, *, kind: str) -> None:
"""Ensure that abs_path is within root; raise if not.
Parameters
----------
abs_path : Path
Absolute file path
root : Path
Root directory path
kind : str
Human-readable descriptor for error messages (e.g., "js" or "css")
Raises
------
StreamlitComponentRegistryError
If the path cannot be resolved or if the resolved path does not
reside within ``root`` after following symlinks.
"""
try:
resolved = abs_path.resolve()
root_resolved = root.resolve()
except Exception as e:
raise StreamlitComponentRegistryError(
f"Failed to resolve {kind} path '{abs_path}': {e}"
) from e
# Use Path.is_relative_to to avoid insecure prefix-based checks
if not resolved.is_relative_to(root_resolved):
raise StreamlitComponentRegistryError(
f"{kind} path '{abs_path}' is outside the declared asset_dir '{root}'."
)
@staticmethod
def looks_like_inline_content(value: str) -> bool:
r"""Heuristic to detect inline JS/CSS content strings.
Treat a string as a file path ONLY if it looks path-like:
- Does not contain newlines
- Contains glob characters (*, ?, [, ])
- Starts with ./, /, or \
- Contains a path separator ("/" or "\\")
- Or ends with a common asset extension like .js, .mjs, .cjs, or .css
Otherwise, treat it as inline content.
Parameters
----------
value : str
The string to classify as inline content or a file path.
Returns
-------
bool
True if ``value`` looks like inline content; False if it looks like a
file path.
"""
s = value.strip()
# If the value contains newlines, it's definitely inline content
if "\n" in s or "\r" in s:
return True
# Glob patterns indicate path-like
if ComponentPathUtils.has_glob_characters(s):
return False
# Obvious path prefixes
if s.startswith(("./", "/", "\\")):
return False
# Any path separator
if "/" in s or "\\" in s:
return False
return not (s.lower().endswith((".js", ".css", ".mjs", ".cjs")))
|
ComponentPathUtils
|
python
|
numpy__numpy
|
numpy/_core/tests/test_umath.py
|
{
"start": 53884,
"end": 55876
}
|
class ____(_FilterInvalids):
# Need test for intermediate precisions
def test_logaddexp2_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_)
def test_logaddexp2_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, 0)))
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
def test_reduce(self):
assert_equal(np.logaddexp2.identity, -np.inf)
assert_equal(np.logaddexp2.reduce([]), -np.inf)
assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf)
assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0)
|
TestLogAddExp2
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/sql.py
|
{
"start": 6878,
"end": 7827
}
|
class ____(PostgresBase, RegexLexer):
"""
Handle the extra syntax in Pl/pgSQL language.
.. versionadded:: 1.5
"""
name = 'PL/pgSQL'
aliases = ['plpgsql']
mimetypes = ['text/x-plpgsql']
flags = re.IGNORECASE
tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens))
# extend the keywords list
for i, pattern in enumerate(tokens['root']):
if pattern[1] == Keyword:
tokens['root'][i] = (
words(KEYWORDS + PLPGSQL_KEYWORDS, suffix=r'\b'),
Keyword)
del i
break
else:
assert 0, "SQL keywords not found"
# Add specific PL/pgSQL rules (before the SQL ones)
tokens['root'][:0] = [
(r'\%[a-z]\w*\b', Name.Builtin), # actually, a datatype
(r':=', Operator),
(r'\<\<[a-z]\w*\>\>', Name.Label),
(r'\#[a-z]\w*\b', Keyword.Pseudo), # #variable_conflict
]
|
PlPgsqlLexer
|
python
|
neetcode-gh__leetcode
|
python/1020-number-of-enclaves.py
|
{
"start": 0,
"end": 762
}
|
class ____:
def numEnclaves(self, grid: List[List[int]]) -> int:
ROWS, COLS = len(grid), len(grid[0])
def dfs(grid, row, col):
if 0 <= row < ROWS and 0 <= col < COLS:
if grid[row][col] == 1:
grid[row][col] = 0
dfs(grid, row + 1, col)
dfs(grid, row - 1, col)
dfs(grid, row, col + 1)
dfs(grid, row, col - 1)
for row in range(ROWS):
dfs(grid, row, 0)
dfs(grid, row, COLS - 1)
for col in range(COLS):
dfs(grid, 0, col)
dfs(grid, ROWS - 1, col)
return sum(grid[row][col] == 1 for row in range(ROWS) for col in range(COLS))
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/monitored_session_test.py
|
{
"start": 24358,
"end": 26673
}
|
class ____(test.TestCase):
"""_WrappedSession tests."""
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(sess.graph, wrapped_sess.graph)
self.assertEqual(sess.sess_str, wrapped_sess.sess_str)
@test_util.run_deprecated_v1
def test_should_stop_on_close(self):
with self.cached_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertFalse(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_uses_check_stop(self):
with self.cached_session() as sess:
wrapped_sess = StopAtNSession(sess, 3)
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_delegates_to_wrapped_session(self):
with self.cached_session() as sess:
wrapped_sess0 = StopAtNSession(sess, 4)
wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertTrue(wrapped_sess1.should_stop())
@test_util.run_deprecated_v1
def test_close_twice(self):
with self.cached_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
while not coord.should_stop():
time.sleep(0.001)
|
WrappedSessionTest
|
python
|
doocs__leetcode
|
solution/1900-1999/1981.Minimize the Difference Between Target and Chosen Elements/Solution.py
|
{
"start": 0,
"end": 233
}
|
class ____:
def minimizeTheDifference(self, mat: List[List[int]], target: int) -> int:
f = {0}
for row in mat:
f = set(a + b for a in f for b in row)
return min(abs(v - target) for v in f)
|
Solution
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_xy.py
|
{
"start": 9579,
"end": 9751
}
|
class ____(scale_y_continuous):
"""
Continuous y position reverse transformed scale
"""
trans: TransUser = "reverse"
@dataclass(kw_only=True)
|
scale_y_reverse
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/control_flow_ops_test.py
|
{
"start": 6713,
"end": 7956
}
|
class ____(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testTupleDependencies(self):
counter = variable_scope.get_variable(
"my_counter", shape=[], initializer=init_ops.zeros_initializer())
increment_counter = state_ops.assign_add(counter, 1)
const_with_dep = control_flow_ops.with_dependencies(
(increment_counter, constant_op.constant(42)), constant_op.constant(7))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(counter))
self.assertEqual(7, self.evaluate(const_with_dep))
self.assertEqual(1, self.evaluate(counter))
@test_util.run_deprecated_v1
def testListDependencies(self):
counter = variable_scope.get_variable(
"my_counter", shape=[], initializer=init_ops.zeros_initializer())
increment_counter = state_ops.assign_add(counter, 1)
const_with_dep = control_flow_ops.with_dependencies(
[increment_counter, constant_op.constant(42)], constant_op.constant(7))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(counter))
self.assertEqual(7, self.evaluate(const_with_dep))
self.assertEqual(1, self.evaluate(counter))
|
WithDependenciesTestCase
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/tasks.py
|
{
"start": 292166,
"end": 294025
}
|
class ____(Response):
"""
Response of tasks.stopped endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "stopped"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(StoppedResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
|
StoppedResponse
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_R.py
|
{
"start": 3700,
"end": 5193
}
|
class ____(Benchmark):
r"""
Ratkowsky02 objective function.
This class defines the Ratkowsky 2 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Ratkowsky02}}(x) = \sum_{m=1}^{9}(a_m - x[0] / (1 + exp(x[1]
- b_m x[2]))^2
where
.. math::
\begin{cases}
a=[8.93, 10.8, 18.59, 22.33, 39.35, 56.11, 61.73, 64.62, 67.08]\\
b=[9., 14., 21., 28., 42., 57., 63., 70., 79.]\\
\end{cases}
Here :math:`x_1 \in [1, 100]`, :math:`x_2 \in [0.1, 5]` and
:math:`x_3 \in [0.01, 0.5]`
*Global optimum*: :math:`f(x) = 8.0565229338` for
:math:`x = [7.2462237576e1, 2.6180768402, 6.7359200066e-2]`
.. [1] https://www.itl.nist.gov/div898/strd/nls/data/ratkowsky2.shtml
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([10, 0.5, 0.01],
[200, 5., 0.5]))
self.global_optimum = [[7.2462237576e1, 2.6180768402, 6.7359200066e-2]]
self.fglob = 8.0565229338
self.a = asarray([8.93, 10.8, 18.59, 22.33, 39.35, 56.11, 61.73, 64.62,
67.08])
self.b = asarray([9., 14., 21., 28., 42., 57., 63., 70., 79.])
def fun(self, x, *args):
self.nfev += 1
vec = x[0] / (1 + exp(x[1] - x[2] * self.b))
return sum((self.a - vec) ** 2)
|
Ratkowsky02
|
python
|
django__django
|
tests/generic_views/views.py
|
{
"start": 7029,
"end": 7260
}
|
class ____:
model = BookSigning
date_field = "event_date"
# use the same templates as for books
def get_template_names(self):
return ["generic_views/book%s.html" % self.template_name_suffix]
|
BookSigningConfig
|
python
|
pytorch__pytorch
|
torch/_inductor/fx_passes/split_cat.py
|
{
"start": 17792,
"end": 22820
}
|
class ____(CallFunction):
"""
Matches a call to torch.split if it is in a normalized form. Ensures that all users of
splits are unique getitems.
"""
def __init__(self, arg, sizes, func=torch.split) -> None:
# using KeywordArg("dim") for `dim` checks they all match
super().__init__(func, arg, sizes, _users=MULTIPLE, dim=KeywordArg("dim"))
def _match(self, node: torch.fx.Node, ctx: MatchContext):
m = super()._match(node, ctx)
if not m:
return m
split_sections = node.args[1]
if not isinstance(split_sections, (list, tuple)):
return FailedMatch("split not normalized")
# check users are all unique getitems
seen_idxs = OrderedSet[int]()
for user in node.users:
if not CallFunction(operator.getitem, Arg(), Arg()).match(user):
# This should ideally never happen. Split user should always be a getitem
return FailedMatch(f"user of split not a getitem: {user}")
if not isinstance(user.args[1], int):
return FailedMatch("only integer getitems are handled")
if user.args[1] in seen_idxs:
return FailedMatch(f"duplicate getitem {user.args[1]}")
if user.args[-1] < 0: # type: ignore[operator]
# This shouldn't ideally happen as dynamo normalizes indexes to positive
return FailedMatch("negative index")
seen_idxs.add(user.args[1])
return m
@register_graph_pattern(
TorchSplit(
CallFunction(
operator.getitem,
TorchSplit(
KeywordArg("first_split_input"),
KeywordArg("first_split_sections"),
),
Ignored(),
),
KeywordArg("next_split_sections"),
),
pass_dict=construct_pattern_matcher_pass("merge_splits_pass"),
)
def merge_splits(
match: Match,
first_split_input: torch.fx.Node,
first_split_sections: list[int],
next_split_sections: list[int],
# Note: dim is implicitly passed by TorchSplit, as it internally uses a pattern with dim
dim: int,
):
node = match.output_node()
# it is possible that the split has no users,
# we check the corner case and skip the pattern
if len(node.users.keys()) == 0:
return
graph = match.graph
first_split = node.args[0].args[0] # type: ignore[union-attr]
next_split_index = node.args[0].args[1] # type: ignore[union-attr]
new_split_sections = list(first_split_sections)
new_split_sections[next_split_index : next_split_index + 1] = next_split_sections # type: ignore[operator, misc]
first_split_dim = _get_dim(first_split)
to_remove = []
with graph.inserting_before(first_split): # type: ignore[arg-type]
# Add the new split node
new_split = graph.call_function(
torch.split,
args=(first_split_input, new_split_sections),
kwargs={"dim": first_split_dim},
)
if is_node_meta_valid(first_split_input):
new_split.meta["example_value"] = torch.split(
first_split_input.meta["example_value"],
new_split_sections,
dim=first_split_dim,
)
first_split_num_to_user = {
user.args[1]: user
for user in first_split.users # type: ignore[union-attr]
}
new_split_num = 0
for split_num in range(len(first_split_sections)):
if split_num not in first_split_num_to_user:
new_split_num += 1
continue
old_getitem = first_split_num_to_user[split_num]
if split_num != next_split_index:
old_getitem.update_arg(0, new_split)
old_getitem.update_arg(1, new_split_num)
new_split_num += 1
else:
next_split_num_to_user = {user.args[1]: user for user in node.users}
# It is not necessary all getitems from the split node are used.
for next_split_num in range(len(next_split_sections)):
with graph.inserting_after(new_split):
new_getitem = graph.call_function(
operator.getitem, args=(new_split, new_split_num)
)
new_split_num += 1
if next_split_num not in next_split_num_to_user:
continue
next_getitem = next_split_num_to_user[next_split_num]
new_getitem.meta.update(next_getitem.meta)
next_getitem.replace_all_uses_with(new_getitem)
to_remove.append(next_getitem)
to_remove.append(node)
to_remove.append(old_getitem)
to_remove.append(first_split) # type: ignore[arg-type]
for node in to_remove:
graph.erase_node(node)
counters[backend]["merge_splits_pass"] += 1
|
TorchSplit
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/op_def_util_test.py
|
{
"start": 1249,
"end": 5808
}
|
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters([
("any", "Foo", "Foo"),
("any", 12, 12),
("any", {2: 3}, {2: 3}),
("string", "Foo", "Foo"),
("string", b"Foo", b"Foo"),
("int", 12, 12),
("int", 12.3, 12),
("float", 12, 12.0),
("float", 12.3, 12.3),
("bool", True, True),
("shape", tensor_shape.TensorShape([3]), tensor_shape.TensorShape([3])),
("shape", [3], tensor_shape.TensorShape([3])),
("type", dtypes.int32, dtypes.int32),
("type", np.int32, dtypes.int32),
("type", "int32", dtypes.int32),
("tensor", tensor_pb2.TensorProto(dtype=types_pb2.DataType.DT_FLOAT),
tensor_pb2.TensorProto(dtype=types_pb2.DataType.DT_FLOAT)),
("tensor", "dtype: DT_FLOAT",
tensor_pb2.TensorProto(dtype=types_pb2.DataType.DT_FLOAT)),
("list(any)", [1, "foo", 7.3, dtypes.int32],
[1, "foo", 7.3, dtypes.int32]),
("list(any)", (1, "foo"), [1, "foo"]),
("list(string)", ["foo", "bar"], ["foo", "bar"]),
("list(string)", ("foo", "bar"), ["foo", "bar"]),
("list(string)", iter("abcd"), ["a", "b", "c", "d"]),
("list(int)", (1, 2.3), [1, 2]),
("list(float)", (1, 2.3), [1.0, 2.3]),
("list(bool)", [True, False], [True, False]),
("list(type)", [dtypes.int32, dtypes.bool], [dtypes.int32, dtypes.bool]),
("list(shape)", [tensor_shape.TensorShape([3]), [4, 5]],
[tensor_shape.TensorShape([3]), tensor_shape.TensorShape([4, 5])]),
("list(tensor)",
[tensor_pb2.TensorProto(dtype=types_pb2.DataType.DT_FLOAT),
"dtype: DT_INT32"],
[tensor_pb2.TensorProto(dtype=types_pb2.DataType.DT_FLOAT),
tensor_pb2.TensorProto(dtype=types_pb2.DataType.DT_INT32)]),
]) # pyformat: disable
def testConvert(self, attr_type, value, expected):
result = _op_def_util.ConvertPyObjectToAttributeType(value, attr_type)
# Check that we get the expected value(s).
self.assertEqual(expected, result)
# Check that we get the expected type(s).
self.assertEqual(type(expected), type(result))
if isinstance(result, list):
for expected_item, result_item in zip(expected, result):
self.assertEqual(type(expected_item), type(result_item))
@parameterized.parameters([
("string", 12),
("int", "foo"),
("float", "foo"),
("bool", 1),
("dtype", None),
("shape", 12.0),
("tensor", [1, 2, 3]),
("list(any)", 12),
("list(int)", [1, "two"]),
("list(string)", [1, "two"]),
("tensor", "string that is not a text-formatted TensorProto"),
])
def testConvertError(self, attr_type, value):
with self.assertRaisesRegex(TypeError, "Failed to convert value"):
_op_def_util.ConvertPyObjectToAttributeType(value, attr_type)
# Test AttrValueToPyObject(). Note: this test also exercises the code in
# DataTypeToPyObject() and TensorShapeToPyObject(), since those are used
# when the AttrValue contains a DataType or TensorShape.
@parameterized.parameters([
("s: 'foo'", "foo"),
("i: 5", 5),
("f: 8", 8.0),
("b: True", True),
("type: DT_INT32", dtypes.int32),
("shape { dim: [{size: 3}, {size: 4}] }",
tensor_shape.TensorShape([3, 4])),
("list { }", []),
("list { s: [] }", []),
("list { s: ['a', 'b', 'c'] }", ["a", "b", "c"]),
("list { i: [1, 2, 3] }", [1, 2, 3]),
("list { f: [2.0, 4.0] }", [2.0, 4.0]),
]) # pyformat: disable
def testAttrValueToPyObject(self, pbtxt, expected):
proto = attr_value_pb2.AttrValue()
text_format.Parse(pbtxt, proto)
result = _op_def_util.SerializedAttrValueToPyObject(
proto.SerializeToString())
self.assertEqual(expected, result)
@parameterized.parameters([
"", # Empty value (oneof not set)
"tensor {}", # 'TensorProto' not supported (yet).
"func {}", # 'func' not supported.
"placeholder: ''", # 'placeholder' not supported.
"list { tensor [{}] }", # 'TensorProto' not supported (yet).
"list { func [{}] }", # 'func' not supported.
]) # pyformat: disable
def testAttrValueToPyObjectError(self, pbtxt):
proto = attr_value_pb2.AttrValue()
text_format.Parse(pbtxt, proto)
with self.assertRaises((TypeError, ValueError)):
_op_def_util.SerializedAttrValueToPyObject(proto.SerializeToString())
if __name__ == "__main__":
googletest.main()
|
OpDefUtilTest
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/index.py
|
{
"start": 775,
"end": 2041
}
|
class ____(Domain):
"""Index domain."""
name = 'index'
label = 'index'
@property
def entries(self) -> dict[str, list[tuple[str, str, str, str, str | None]]]:
return self.data.setdefault('entries', {})
def clear_doc(self, docname: str) -> None:
self.entries.pop(docname, None)
def merge_domaindata(self, docnames: Set[str], otherdata: dict[str, Any]) -> None:
for docname in docnames:
self.entries[docname] = otherdata['entries'][docname]
def process_doc(self, env: BuildEnvironment, docname: str, document: Node) -> None:
"""Process a document after it is read by the environment."""
entries = self.entries.setdefault(env.current_document.docname, [])
for node in list(document.findall(addnodes.index)):
node_entries = node['entries']
try:
for entry_type, value, _target_id, _main, _category_key in node_entries:
split_index_msg(entry_type, value)
except ValueError as exc:
logger.warning(str(exc), location=node, type='index')
node.parent.remove(node)
else:
for entry in node_entries:
entries.append(entry)
|
IndexDomain
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_moe/modeling_qwen3_moe.py
|
{
"start": 27492,
"end": 31948
}
|
class ____(Qwen3MoePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Qwen3MoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_experts
self.num_experts_per_tok = config.num_experts_per_tok
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> MoeCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Qwen3MoeForCausalLM
>>> model = Qwen3MoeForCausalLM.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")
>>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-MoE-15B-A2B")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: MoeModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_router_logits=output_router_logits,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
|
Qwen3MoeForCausalLM
|
python
|
huggingface__transformers
|
examples/pytorch/text-generation/run_generation.py
|
{
"start": 8792,
"end": 17060
}
|
class ____(GenerationMixin):
__slots__ = ("_optimized", "_default")
def __init__(self, optimized, default):
self._optimized = optimized
self._default = default
def __call__(self, *args, **kwargs):
if kwargs["past_key_values"] is None and self._default.config.use_cache:
kwargs["past_key_values"] = generate_past_key_values(self._default, kwargs["input_ids"].shape[0], 0)
kwargs.pop("position_ids", None)
for k in list(kwargs.keys()):
if kwargs[k] is None or isinstance(kwargs[k], bool):
kwargs.pop(k)
outputs = self._optimized(**kwargs)
lm_logits = outputs[0]
past_key_values = outputs[1]
fixed_output = CausalLMOutputWithPast(
loss=None,
logits=lm_logits,
past_key_values=past_key_values,
hidden_states=None,
attentions=None,
)
return fixed_output
def __getattr__(self, item):
return getattr(self._default, item)
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, inputs_embeds=None, use_cache=None, **kwargs
):
return self._default.prepare_inputs_for_generation(
input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, **kwargs
)
def _reorder_cache(
self, past_key_values: tuple[tuple[torch.Tensor]], beam_idx: torch.Tensor
) -> tuple[tuple[torch.Tensor]]:
"""
This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
[`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
beam_idx at every generation step.
"""
return self._default._reorder_cache(past_key_values, beam_idx)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped")
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
)
parser.add_argument(
"--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2"
)
parser.add_argument("--k", type=int, default=0)
parser.add_argument("--p", type=float, default=0.9)
parser.add_argument("--prefix", type=str, default="", help="Text added prior to input.")
parser.add_argument("--padding_text", type=str, default="", help="Deprecated, the use of `--prefix` is preferred.")
parser.add_argument("--xlm_language", type=str, default="", help="Optional language when used with the XLM model.")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--use_cpu",
action="store_true",
help="Whether or not to use cpu. If set to False, we will use gpu/npu or mps device if available",
)
parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument("--jit", action="store_true", help="Whether or not to use jit trace to accelerate inference")
args = parser.parse_args()
# Initialize the distributed state.
distributed_state = PartialState(cpu=args.use_cpu)
logger.warning(f"device: {distributed_state.device}, 16-bits inference: {args.fp16}")
if args.seed is not None:
set_seed(args.seed)
# Initialize the model and tokenizer
try:
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
except KeyError:
raise KeyError("the model {} you specified is not supported. You are welcome to add it and open a PR :)")
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
model = model_class.from_pretrained(args.model_name_or_path)
# Set the model to the right device
model.to(distributed_state.device)
if args.fp16:
model.half()
max_seq_length = getattr(model.config, "max_position_embeddings", 0)
args.length = adjust_length_to_model(args.length, max_sequence_length=max_seq_length)
logger.info(args)
prompt_text = args.prompt if args.prompt else input("Model prompt >>> ")
# Different models need different input formatting and/or extra arguments
requires_preprocessing = args.model_type in PREPROCESSING_FUNCTIONS
if requires_preprocessing:
prepare_input = PREPROCESSING_FUNCTIONS.get(args.model_type)
preprocessed_prompt_text = prepare_input(args, model, tokenizer, prompt_text)
tokenizer_kwargs = {}
encoded_prompt = tokenizer.encode(
preprocessed_prompt_text, add_special_tokens=False, return_tensors="pt", **tokenizer_kwargs
)
else:
prefix = args.prefix if args.prefix else args.padding_text
encoded_prompt = tokenizer.encode(prefix + prompt_text, add_special_tokens=False, return_tensors="pt")
encoded_prompt = encoded_prompt.to(distributed_state.device)
if encoded_prompt.size()[-1] == 0:
input_ids = None
else:
input_ids = encoded_prompt
if args.jit:
jit_input_texts = ["enable jit"]
jit_inputs = prepare_jit_inputs(jit_input_texts, model, tokenizer)
torch._C._jit_set_texpr_fuser_enabled(False)
model.config.return_dict = False
if hasattr(model, "forward"):
sig = inspect.signature(model.forward)
else:
sig = inspect.signature(model.__call__)
jit_inputs = tuple(jit_inputs[key] for key in sig.parameters if jit_inputs.get(key, None) is not None)
traced_model = torch.jit.trace(model, jit_inputs, strict=False)
traced_model = torch.jit.freeze(traced_model.eval())
traced_model(*jit_inputs)
traced_model(*jit_inputs)
model = _ModelFallbackWrapper(traced_model, model)
output_sequences = model.generate(
input_ids=input_ids,
max_length=args.length + len(encoded_prompt[0]),
temperature=args.temperature,
top_k=args.k,
top_p=args.p,
repetition_penalty=args.repetition_penalty,
do_sample=True,
num_return_sequences=args.num_return_sequences,
)
# Remove the batch dimension when returning multiple sequences
if len(output_sequences.shape) > 2:
output_sequences.squeeze_()
generated_sequences = []
for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===")
generated_sequence = generated_sequence.tolist()
# Decode text
text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
# Remove all text after the stop token
text = text[: text.find(args.stop_token) if args.stop_token else None]
# Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
total_sequence = (
prompt_text + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :]
)
generated_sequences.append(total_sequence)
print(total_sequence)
return generated_sequences
if __name__ == "__main__":
main()
|
_ModelFallbackWrapper
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/worksheet/test_write_cell.py
|
{
"start": 336,
"end": 2060
}
|
class ____(unittest.TestCase):
    """
    Test the Worksheet _write_cell() method.
    """

    def setUp(self):
        # Redirect the worksheet's XML output into an in-memory buffer so
        # each test can inspect exactly what _write_cell() emitted.
        self.fh = StringIO()
        self.worksheet = Worksheet()
        self.worksheet._set_filehandle(self.fh)

    def test_write_cell_number(self):
        """Test the _write_cell() method for numbers."""
        Number = namedtuple("Number", "number, format")
        self.worksheet._write_cell(0, 0, Number(1, None))

        expected = """<c r="A1"><v>1</v></c>"""
        self.assertEqual(expected, self.fh.getvalue())

    def test_write_cell_string(self):
        """Test the _write_cell() method for strings."""
        String = namedtuple("String", "string, format")
        self.worksheet._write_cell(3, 1, String(0, None))

        expected = """<c r="B4" t="s"><v>0</v></c>"""
        self.assertEqual(expected, self.fh.getvalue())

    def test_write_cell_formula01(self):
        """Test the _write_cell() method for formulas."""
        Formula = namedtuple("Formula", "formula, format, value")
        self.worksheet._write_cell(1, 2, Formula("A3+A5", None, 0))

        expected = """<c r="C2"><f>A3+A5</f><v>0</v></c>"""
        self.assertEqual(expected, self.fh.getvalue())

    def test_write_cell_formula02(self):
        """Test the _write_cell() method for formulas."""
        Formula = namedtuple("Formula", "formula, format, value")
        self.worksheet._write_cell(1, 2, Formula("A3+A5", None, 7))

        expected = """<c r="C2"><f>A3+A5</f><v>7</v></c>"""
        self.assertEqual(expected, self.fh.getvalue())
|
TestWriteCell
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/commands/integration/cloud/digitalocean.py
|
{
"start": 286,
"end": 739
}
|
class ____(CloudProvider):
    """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
    def __init__(self, args: IntegrationConfig) -> None:
        super().__init__(args)
        # This provider never provisions resources itself; it always relies on
        # a user-supplied static configuration file.
        self.uses_config = True
    def setup(self) -> None:
        """Setup the cloud resource before delegation and register a cleanup callback."""
        super().setup()
        # Load the static config; presumably this is a no-op when no config
        # file was supplied — confirm against CloudProvider._use_static_config.
        self._use_static_config()
|
DigitalOceanCloudProvider
|
python
|
doocs__leetcode
|
solution/0000-0099/0053.Maximum Subarray/Solution2.py
|
{
"start": 0,
"end": 845
}
|
class ____:
def maxSubArray(self, nums: List[int]) -> int:
def crossMaxSub(nums, left, mid, right):
lsum = rsum = 0
lmx = rmx = -inf
for i in range(mid, left - 1, -1):
lsum += nums[i]
lmx = max(lmx, lsum)
for i in range(mid + 1, right + 1):
rsum += nums[i]
rmx = max(rmx, rsum)
return lmx + rmx
def maxSub(nums, left, right):
if left == right:
return nums[left]
mid = (left + right) >> 1
lsum = maxSub(nums, left, mid)
rsum = maxSub(nums, mid + 1, right)
csum = crossMaxSub(nums, left, mid, right)
return max(lsum, rsum, csum)
left, right = 0, len(nums) - 1
return maxSub(nums, left, right)
|
Solution
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/memberAccess24.py
|
{
"start": 449,
"end": 499
}
|
class ____(UnknownX):
    # Type-checker sample: the base class is unresolved ("Unknown"), so member
    # access through this class exercises descriptor handling on an unknown base.
    # NOTE(review): `Desc` is presumably a descriptor class defined earlier in
    # this sample file — confirm against the full source.
    y: Desc
|
DerivesFromUnknown
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/figure.py
|
{
"start": 91566,
"end": 143754
}
|
class ____(FigureBase):
"""
The top level container for all the plot elements.
See `matplotlib.figure` for an index of class methods.
Attributes
----------
patch
The `.Rectangle` instance representing the figure background patch.
suppressComposite
For multiple images, the figure will make composite images
depending on the renderer option_image_nocomposite function. If
*suppressComposite* is a boolean, this will override the renderer.
"""
# we want to cache the fonts and mathtext at a global level so that when
# multiple figures are created we can reuse them. This helps with a bug on
# windows where the creation of too many figures leads to too many open
# file handles and improves the performance of parsing mathtext. However,
# these global caches are not thread safe. The solution here is to let the
# Figure acquire a shared lock at the start of the draw, and release it when it
# is done. This allows multiple renderers to share the cached fonts and
# parsed text, but only one figure can draw at a time and so the font cache
# and mathtext cache are used by only one renderer at a time.
_render_lock = threading.RLock()
def __str__(self):
return "Figure(%gx%g)" % tuple(self.bbox.size)
def __repr__(self):
return "<{clsname} size {h:g}x{w:g} with {naxes} Axes>".format(
clsname=self.__class__.__name__,
h=self.bbox.size[0], w=self.bbox.size[1],
naxes=len(self.axes),
)
def __init__(self,
figsize=None,
dpi=None,
*,
facecolor=None,
edgecolor=None,
linewidth=0.0,
frameon=None,
subplotpars=None, # rc figure.subplot.*
tight_layout=None, # rc figure.autolayout
constrained_layout=None, # rc figure.constrained_layout.use
layout=None,
**kwargs
):
"""
Parameters
----------
figsize : (float, float) or (float, float, str), default: :rc:`figure.figsize`
The figure dimensions. This can be
- a tuple ``(width, height, unit)``, where *unit* is one of "in" (inch),
"cm" (centimenter), "px" (pixel).
- a tuple ``(width, height)``, which is interpreted in inches, i.e. as
``(width, height, "in")``.
dpi : float, default: :rc:`figure.dpi`
Dots per inch.
facecolor : default: :rc:`figure.facecolor`
The figure patch facecolor.
edgecolor : default: :rc:`figure.edgecolor`
The figure patch edge color.
linewidth : float
The linewidth of the frame (i.e. the edge linewidth of the figure
patch).
frameon : bool, default: :rc:`figure.frameon`
If ``False``, suppress drawing the figure background patch.
subplotpars : `~matplotlib.gridspec.SubplotParams`
Subplot parameters. If not given, the default subplot
parameters :rc:`figure.subplot.*` are used.
tight_layout : bool or dict, default: :rc:`figure.autolayout`
Whether to use the tight layout mechanism. See `.set_tight_layout`.
.. admonition:: Discouraged
The use of this parameter is discouraged. Please use
``layout='tight'`` instead for the common case of
``tight_layout=True`` and use `.set_tight_layout` otherwise.
constrained_layout : bool, default: :rc:`figure.constrained_layout.use`
This is equal to ``layout='constrained'``.
.. admonition:: Discouraged
The use of this parameter is discouraged. Please use
``layout='constrained'`` instead.
layout : {'constrained', 'compressed', 'tight', 'none', `.LayoutEngine`, \
None}, default: None
The layout mechanism for positioning of plot elements to avoid
overlapping Axes decorations (labels, ticks, etc). Note that
layout managers can have significant performance penalties.
- 'constrained': The constrained layout solver adjusts Axes sizes
to avoid overlapping Axes decorations. Can handle complex plot
layouts and colorbars, and is thus recommended.
See :ref:`constrainedlayout_guide` for examples.
- 'compressed': uses the same algorithm as 'constrained', but
removes extra space between fixed-aspect-ratio Axes. Best for
simple grids of Axes.
- 'tight': Use the tight layout mechanism. This is a relatively
simple algorithm that adjusts the subplot parameters so that
decorations do not overlap.
See :ref:`tight_layout_guide` for examples.
- 'none': Do not use a layout engine.
- A `.LayoutEngine` instance. Builtin layout classes are
`.ConstrainedLayoutEngine` and `.TightLayoutEngine`, more easily
accessible by 'constrained' and 'tight'. Passing an instance
allows third parties to provide their own layout engine.
If not given, fall back to using the parameters *tight_layout* and
*constrained_layout*, including their config defaults
:rc:`figure.autolayout` and :rc:`figure.constrained_layout.use`.
Other Parameters
----------------
**kwargs : `.Figure` properties, optional
%(Figure:kwdoc)s
"""
super().__init__(**kwargs)
self._root_figure = self
self._layout_engine = None
if layout is not None:
if (tight_layout is not None):
_api.warn_external(
"The Figure parameters 'layout' and 'tight_layout' cannot "
"be used together. Please use 'layout' only.")
if (constrained_layout is not None):
_api.warn_external(
"The Figure parameters 'layout' and 'constrained_layout' "
"cannot be used together. Please use 'layout' only.")
self.set_layout_engine(layout=layout)
elif tight_layout is not None:
if constrained_layout is not None:
_api.warn_external(
"The Figure parameters 'tight_layout' and "
"'constrained_layout' cannot be used together. Please use "
"'layout' parameter")
self.set_layout_engine(layout='tight')
if isinstance(tight_layout, dict):
self.get_layout_engine().set(**tight_layout)
elif constrained_layout is not None:
if isinstance(constrained_layout, dict):
self.set_layout_engine(layout='constrained')
self.get_layout_engine().set(**constrained_layout)
elif constrained_layout:
self.set_layout_engine(layout='constrained')
else:
# everything is None, so use default:
self.set_layout_engine(layout=layout)
# Callbacks traditionally associated with the canvas (and exposed with
# a proxy property), but that actually need to be on the figure for
# pickling.
self._canvas_callbacks = cbook.CallbackRegistry(
signals=FigureCanvasBase.events)
connect = self._canvas_callbacks._connect_picklable
self._mouse_key_ids = [
connect('key_press_event', backend_bases._key_handler),
connect('key_release_event', backend_bases._key_handler),
connect('key_release_event', backend_bases._key_handler),
connect('button_press_event', backend_bases._mouse_handler),
connect('button_release_event', backend_bases._mouse_handler),
connect('scroll_event', backend_bases._mouse_handler),
connect('motion_notify_event', backend_bases._mouse_handler),
]
self._button_pick_id = connect('button_press_event', self.pick)
self._scroll_pick_id = connect('scroll_event', self.pick)
figsize = mpl._val_or_rc(figsize, 'figure.figsize')
dpi = mpl._val_or_rc(dpi, 'figure.dpi')
facecolor = mpl._val_or_rc(facecolor, 'figure.facecolor')
edgecolor = mpl._val_or_rc(edgecolor, 'figure.edgecolor')
frameon = mpl._val_or_rc(frameon, 'figure.frameon')
figsize = _parse_figsize(figsize, dpi)
if not np.isfinite(figsize).all() or (np.array(figsize) < 0).any():
raise ValueError('figure size must be positive finite not '
f'{figsize}')
self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)
self.dpi_scale_trans = Affine2D().scale(dpi)
# do not use property as it will trigger
self._dpi = dpi
self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)
self.figbbox = self.bbox
self.transFigure = BboxTransformTo(self.bbox)
self.transSubfigure = self.transFigure
self.patch = Rectangle(
xy=(0, 0), width=1, height=1, visible=frameon,
facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth,
# Don't let the figure patch influence bbox calculation.
in_layout=False)
self._set_artist_props(self.patch)
self.patch.set_antialiased(False)
self._set_base_canvas()
if subplotpars is None:
subplotpars = SubplotParams()
self.subplotpars = subplotpars
self._axstack = _AxesStack() # track all figure Axes and current Axes
self.clear()
def pick(self, mouseevent):
if not self.canvas.widgetlock.locked():
super().pick(mouseevent)
def _check_layout_engines_compat(self, old, new):
"""
Helper for set_layout engine
If the figure has used the old engine and added a colorbar then the
value of colorbar_gridspec must be the same on the new engine.
"""
if old is None or new is None:
return True
if old.colorbar_gridspec == new.colorbar_gridspec:
return True
# colorbar layout different, so check if any colorbars are on the
# figure...
for ax in self.axes:
if hasattr(ax, '_colorbar'):
# colorbars list themselves as a colorbar.
return False
return True
def set_layout_engine(self, layout=None, **kwargs):
"""
Set the layout engine for this figure.
Parameters
----------
layout : {'constrained', 'compressed', 'tight', 'none', `.LayoutEngine`, None}
- 'constrained' will use `~.ConstrainedLayoutEngine`
- 'compressed' will also use `~.ConstrainedLayoutEngine`, but with
a correction that attempts to make a good layout for fixed-aspect
ratio Axes.
- 'tight' uses `~.TightLayoutEngine`
- 'none' removes layout engine.
If a `.LayoutEngine` instance, that instance will be used.
If `None`, the behavior is controlled by :rc:`figure.autolayout`
(which if `True` behaves as if 'tight' was passed) and
:rc:`figure.constrained_layout.use` (which if `True` behaves as if
'constrained' was passed). If both are `True`,
:rc:`figure.autolayout` takes priority.
Users and libraries can define their own layout engines and pass
the instance directly as well.
**kwargs
The keyword arguments are passed to the layout engine to set things
like padding and margin sizes. Only used if *layout* is a string.
"""
if layout is None:
if mpl.rcParams['figure.autolayout']:
layout = 'tight'
elif mpl.rcParams['figure.constrained_layout.use']:
layout = 'constrained'
else:
self._layout_engine = None
return
if layout == 'tight':
new_layout_engine = TightLayoutEngine(**kwargs)
elif layout == 'constrained':
new_layout_engine = ConstrainedLayoutEngine(**kwargs)
elif layout == 'compressed':
new_layout_engine = ConstrainedLayoutEngine(compress=True,
**kwargs)
elif layout == 'none':
if self._layout_engine is not None:
new_layout_engine = PlaceHolderLayoutEngine(
self._layout_engine.adjust_compatible,
self._layout_engine.colorbar_gridspec
)
else:
new_layout_engine = None
elif isinstance(layout, LayoutEngine):
new_layout_engine = layout
else:
raise ValueError(f"Invalid value for 'layout': {layout!r}")
if self._check_layout_engines_compat(self._layout_engine,
new_layout_engine):
self._layout_engine = new_layout_engine
else:
raise RuntimeError('Colorbar layout of new layout engine not '
'compatible with old engine, and a colorbar '
'has been created. Engine not changed.')
def get_layout_engine(self):
return self._layout_engine
# TODO: I'd like to dynamically add the _repr_html_ method
# to the figure in the right context, but then IPython doesn't
# use it, for some reason.
def _repr_html_(self):
# We can't use "isinstance" here, because then we'd end up importing
# webagg unconditionally.
if 'WebAgg' in type(self.canvas).__name__:
from matplotlib.backends import backend_webagg
return backend_webagg.ipython_inline_display(self)
def show(self, warn=True):
"""
If using a GUI backend with pyplot, display the figure window.
If the figure was not created using `~.pyplot.figure`, it will lack
a `~.backend_bases.FigureManagerBase`, and this method will raise an
AttributeError.
.. warning::
This does not manage an GUI event loop. Consequently, the figure
may only be shown briefly or not shown at all if you or your
environment are not managing an event loop.
Use cases for `.Figure.show` include running this from a GUI
application (where there is persistently an event loop running) or
from a shell, like IPython, that install an input hook to allow the
interactive shell to accept input while the figure is also being
shown and interactive. Some, but not all, GUI toolkits will
register an input hook on import. See :ref:`cp_integration` for
more details.
If you're in a shell without input hook integration or executing a
python script, you should use `matplotlib.pyplot.show` with
``block=True`` instead, which takes care of starting and running
the event loop for you.
Parameters
----------
warn : bool, default: True
If ``True`` and we are not running headless (i.e. on Linux with an
unset DISPLAY), issue warning when called on a non-GUI backend.
"""
if self.canvas.manager is None:
raise AttributeError(
"Figure.show works only for figures managed by pyplot, "
"normally created by pyplot.figure()")
try:
self.canvas.manager.show()
except NonGuiException as exc:
if warn:
_api.warn_external(str(exc))
@property
def axes(self):
"""
List of Axes in the Figure. You can access and modify the Axes in the
Figure through this list.
Do not modify the list itself. Instead, use `~Figure.add_axes`,
`~.Figure.add_subplot` or `~.Figure.delaxes` to add or remove an Axes.
Note: The `.Figure.axes` property and `~.Figure.get_axes` method are
equivalent.
"""
return self._axstack.as_list()
get_axes = axes.fget
@property
def number(self):
"""The figure id, used to identify figures in `.pyplot`."""
# Historically, pyplot dynamically added a number attribute to figure.
# However, this number must stay in sync with the figure manager.
# AFAICS overwriting the number attribute does not have the desired
# effect for pyplot. But there are some repos in GitHub that do change
# number. So let's take it slow and properly migrate away from writing.
#
# Making the dynamic attribute private and wrapping it in a property
# allows to maintain current behavior and deprecate write-access.
#
# When the deprecation expires, there's no need for duplicate state
# anymore and the private _number attribute can be replaced by
# `self.canvas.manager.num` if that exists and None otherwise.
if hasattr(self, '_number'):
return self._number
else:
raise AttributeError(
"'Figure' object has no attribute 'number'. In the future this"
"will change to returning 'None' instead.")
@number.setter
def number(self, num):
_api.warn_deprecated(
"3.10",
message="Changing 'Figure.number' is deprecated since %(since)s and "
"will raise an error starting %(removal)s")
self._number = num
def _get_renderer(self):
if hasattr(self.canvas, 'get_renderer'):
return self.canvas.get_renderer()
else:
return _get_renderer(self)
def _get_dpi(self):
return self._dpi
def _set_dpi(self, dpi, forward=True):
"""
Parameters
----------
dpi : float
forward : bool
Passed on to `~.Figure.set_size_inches`
"""
if dpi == self._dpi:
# We don't want to cause undue events in backends.
return
self._dpi = dpi
self.dpi_scale_trans.clear().scale(dpi)
w, h = self.get_size_inches()
self.set_size_inches(w, h, forward=forward)
dpi = property(_get_dpi, _set_dpi, doc="The resolution in dots per inch.")
def get_tight_layout(self):
"""Return whether `.Figure.tight_layout` is called when drawing."""
return isinstance(self.get_layout_engine(), TightLayoutEngine)
@_api.deprecated("3.6", alternative="set_layout_engine",
pending=True)
def set_tight_layout(self, tight):
"""
Set whether and how `.Figure.tight_layout` is called when drawing.
Parameters
----------
tight : bool or dict with keys "pad", "w_pad", "h_pad", "rect" or None
If a bool, sets whether to call `.Figure.tight_layout` upon drawing.
If ``None``, use :rc:`figure.autolayout` instead.
If a dict, pass it as kwargs to `.Figure.tight_layout`, overriding the
default paddings.
"""
tight = mpl._val_or_rc(tight, 'figure.autolayout')
_tight = 'tight' if bool(tight) else 'none'
_tight_parameters = tight if isinstance(tight, dict) else {}
self.set_layout_engine(_tight, **_tight_parameters)
self.stale = True
def get_constrained_layout(self):
"""
Return whether constrained layout is being used.
See :ref:`constrainedlayout_guide`.
"""
return isinstance(self.get_layout_engine(), ConstrainedLayoutEngine)
@_api.deprecated("3.6", alternative="set_layout_engine('constrained')",
pending=True)
def set_constrained_layout(self, constrained):
"""
Set whether ``constrained_layout`` is used upon drawing.
If None, :rc:`figure.constrained_layout.use` value will be used.
When providing a dict containing the keys ``w_pad``, ``h_pad``
the default ``constrained_layout`` paddings will be
overridden. These pads are in inches and default to 3.0/72.0.
``w_pad`` is the width padding and ``h_pad`` is the height padding.
Parameters
----------
constrained : bool or dict or None
"""
constrained = mpl._val_or_rc(constrained, 'figure.constrained_layout.use')
_constrained = 'constrained' if bool(constrained) else 'none'
_parameters = constrained if isinstance(constrained, dict) else {}
self.set_layout_engine(_constrained, **_parameters)
self.stale = True
@_api.deprecated(
"3.6", alternative="figure.get_layout_engine().set()",
pending=True)
def set_constrained_layout_pads(self, **kwargs):
"""
Set padding for ``constrained_layout``.
Tip: The parameters can be passed from a dictionary by using
``fig.set_constrained_layout(**pad_dict)``.
See :ref:`constrainedlayout_guide`.
Parameters
----------
w_pad : float, default: :rc:`figure.constrained_layout.w_pad`
Width padding in inches. This is the pad around Axes
and is meant to make sure there is enough room for fonts to
look good. Defaults to 3 pts = 0.04167 inches
h_pad : float, default: :rc:`figure.constrained_layout.h_pad`
Height padding in inches. Defaults to 3 pts.
wspace : float, default: :rc:`figure.constrained_layout.wspace`
Width padding between subplots, expressed as a fraction of the
subplot width. The total padding ends up being w_pad + wspace.
hspace : float, default: :rc:`figure.constrained_layout.hspace`
Height padding between subplots, expressed as a fraction of the
subplot width. The total padding ends up being h_pad + hspace.
"""
if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine):
self.get_layout_engine().set(**kwargs)
@_api.deprecated("3.6", alternative="fig.get_layout_engine().get()",
pending=True)
def get_constrained_layout_pads(self, relative=False):
"""
Get padding for ``constrained_layout``.
Returns a list of ``w_pad, h_pad`` in inches and
``wspace`` and ``hspace`` as fractions of the subplot.
All values are None if ``constrained_layout`` is not used.
See :ref:`constrainedlayout_guide`.
Parameters
----------
relative : bool
If `True`, then convert from inches to figure relative.
"""
if not isinstance(self.get_layout_engine(), ConstrainedLayoutEngine):
return None, None, None, None
info = self.get_layout_engine().get()
w_pad = info['w_pad']
h_pad = info['h_pad']
wspace = info['wspace']
hspace = info['hspace']
if relative and (w_pad is not None or h_pad is not None):
renderer = self._get_renderer()
dpi = renderer.dpi
w_pad = w_pad * dpi / renderer.width
h_pad = h_pad * dpi / renderer.height
return w_pad, h_pad, wspace, hspace
def _set_base_canvas(self):
"""
Initialize self.canvas with a FigureCanvasBase instance.
This is used upon initialization of the Figure, but also
to reset the canvas when decoupling from pyplot.
"""
# check if we have changed the DPI due to hi-dpi screens
orig_dpi = getattr(self, '_original_dpi', self._dpi)
FigureCanvasBase(self) # Set self.canvas as a side-effect
# put it back to what it was
if orig_dpi != self._dpi:
self.dpi = orig_dpi
def set_canvas(self, canvas):
"""
Set the canvas that contains the figure
Parameters
----------
canvas : FigureCanvas
"""
self.canvas = canvas
@_docstring.interpd
def figimage(self, X, xo=0, yo=0, alpha=None, norm=None, cmap=None,
vmin=None, vmax=None, origin=None, resize=False, *,
colorizer=None, **kwargs):
"""
Add a non-resampled image to the figure.
The image is attached to the lower or upper left corner depending on
*origin*.
Parameters
----------
X
The image data. This is an array of one of the following shapes:
- (M, N): an image with scalar data. Color-mapping is controlled
by *cmap*, *norm*, *vmin*, and *vmax*.
- (M, N, 3): an image with RGB values (0-1 float or 0-255 int).
- (M, N, 4): an image with RGBA values (0-1 float or 0-255 int),
i.e. including transparency.
xo, yo : int
The *x*/*y* image offset in pixels.
alpha : None or float
The alpha blending value.
%(cmap_doc)s
This parameter is ignored if *X* is RGB(A).
%(norm_doc)s
This parameter is ignored if *X* is RGB(A).
%(vmin_vmax_doc)s
This parameter is ignored if *X* is RGB(A).
origin : {'upper', 'lower'}, default: :rc:`image.origin`
Indicates where the [0, 0] index of the array is in the upper left
or lower left corner of the Axes.
resize : bool
If *True*, resize the figure to match the given image size.
%(colorizer_doc)s
This parameter is ignored if *X* is RGB(A).
Returns
-------
`matplotlib.image.FigureImage`
Other Parameters
----------------
**kwargs
Additional kwargs are `.Artist` kwargs passed on to `.FigureImage`.
Notes
-----
figimage complements the Axes image (`~matplotlib.axes.Axes.imshow`)
which will be resampled to fit the current Axes. If you want
a resampled image to fill the entire figure, you can define an
`~matplotlib.axes.Axes` with extent [0, 0, 1, 1].
Examples
--------
::
f = plt.figure()
nx = int(f.get_figwidth() * f.dpi)
ny = int(f.get_figheight() * f.dpi)
data = np.random.random((ny, nx))
f.figimage(data)
plt.show()
"""
if resize:
dpi = self.get_dpi()
figsize = [x / dpi for x in (X.shape[1], X.shape[0])]
self.set_size_inches(figsize, forward=True)
im = mimage.FigureImage(self, cmap=cmap, norm=norm,
colorizer=colorizer,
offsetx=xo, offsety=yo,
origin=origin, **kwargs)
im.stale_callback = _stale_figure_callback
im.set_array(X)
im.set_alpha(alpha)
if norm is None:
im._check_exclusionary_keywords(colorizer, vmin=vmin, vmax=vmax)
im.set_clim(vmin, vmax)
self.images.append(im)
im._remove_method = self.images.remove
self.stale = True
return im
def set_size_inches(self, w, h=None, forward=True):
"""
Set the figure size in inches.
Call signatures::
fig.set_size_inches(w, h) # OR
fig.set_size_inches((w, h))
Parameters
----------
w : (float, float) or float
Width and height in inches (if height not specified as a separate
argument) or width.
h : float
Height in inches.
forward : bool, default: True
If ``True``, the canvas size is automatically updated, e.g.,
you can resize the figure window from the shell.
See Also
--------
matplotlib.figure.Figure.get_size_inches
matplotlib.figure.Figure.set_figwidth
matplotlib.figure.Figure.set_figheight
Notes
-----
To transform from pixels to inches divide by `Figure.dpi`.
"""
if h is None: # Got called with a single pair as argument.
w, h = w
size = np.array([w, h])
if not np.isfinite(size).all() or (size < 0).any():
raise ValueError(f'figure size must be positive finite not {size}')
self.bbox_inches.p1 = size
if forward:
manager = self.canvas.manager
if manager is not None:
manager.resize(*(size * self.dpi).astype(int))
self.stale = True
def get_size_inches(self):
"""
Return the current size of the figure in inches.
Returns
-------
ndarray
The size (width, height) of the figure in inches.
See Also
--------
matplotlib.figure.Figure.set_size_inches
matplotlib.figure.Figure.get_figwidth
matplotlib.figure.Figure.get_figheight
Notes
-----
The size in pixels can be obtained by multiplying with `Figure.dpi`.
"""
return np.array(self.bbox_inches.p1)
def get_figwidth(self):
"""Return the figure width in inches."""
return self.bbox_inches.width
def get_figheight(self):
"""Return the figure height in inches."""
return self.bbox_inches.height
def get_dpi(self):
"""Return the resolution in dots per inch as a float."""
return self.dpi
def set_dpi(self, val):
"""
Set the resolution of the figure in dots-per-inch.
Parameters
----------
val : float
"""
self.dpi = val
self.stale = True
def set_figwidth(self, val, forward=True):
"""
Set the width of the figure in inches.
Parameters
----------
val : float
forward : bool
See `set_size_inches`.
See Also
--------
matplotlib.figure.Figure.set_figheight
matplotlib.figure.Figure.set_size_inches
"""
self.set_size_inches(val, self.get_figheight(), forward=forward)
def set_figheight(self, val, forward=True):
"""
Set the height of the figure in inches.
Parameters
----------
val : float
forward : bool
See `set_size_inches`.
See Also
--------
matplotlib.figure.Figure.set_figwidth
matplotlib.figure.Figure.set_size_inches
"""
self.set_size_inches(self.get_figwidth(), val, forward=forward)
def clear(self, keep_observers=False):
# docstring inherited
super().clear(keep_observers=keep_observers)
# FigureBase.clear does not clear toolbars, as
# only Figure can have toolbars
toolbar = self.canvas.toolbar
if toolbar is not None:
toolbar.update()
@_finalize_rasterization
@allow_rasterization
def draw(self, renderer):
# docstring inherited
if not self.get_visible():
return
with self._render_lock:
artists = self._get_draw_artists(renderer)
try:
renderer.open_group('figure', gid=self.get_gid())
if self.axes and self.get_layout_engine() is not None:
try:
self.get_layout_engine().execute(self)
except ValueError:
pass
# ValueError can occur when resizing a window.
self.patch.draw(renderer)
mimage._draw_list_compositing_images(
renderer, self, artists, self.suppressComposite)
renderer.close_group('figure')
finally:
self.stale = False
DrawEvent("draw_event", self.canvas, renderer)._process()
def draw_without_rendering(self):
"""
Draw the figure with no output. Useful to get the final size of
artists that require a draw before their size is known (e.g. text).
"""
renderer = _get_renderer(self)
with renderer._draw_disabled():
self.draw(renderer)
def draw_artist(self, a):
"""
Draw `.Artist` *a* only.
"""
a.draw(self.canvas.get_renderer())
def __getstate__(self):
state = super().__getstate__()
# The canvas cannot currently be pickled, but this has the benefit
# of meaning that a figure can be detached from one canvas, and
# re-attached to another.
state.pop("canvas")
# discard any changes to the dpi due to pixel ratio changes
state["_dpi"] = state.get('_original_dpi', state['_dpi'])
# add version information to the state
state['__mpl_version__'] = mpl.__version__
# check whether the figure manager (if any) is registered with pyplot
from matplotlib import _pylab_helpers
if self.canvas.manager in _pylab_helpers.Gcf.figs.values():
state['_restore_to_pylab'] = True
return state
def __setstate__(self, state):
version = state.pop('__mpl_version__')
restore_to_pylab = state.pop('_restore_to_pylab', False)
if version != mpl.__version__:
_api.warn_external(
f"This figure was saved with matplotlib version {version} and "
f"loaded with {mpl.__version__} so may not function correctly."
)
self.__dict__ = state
# re-initialise some of the unstored state information
FigureCanvasBase(self) # Set self.canvas.
if restore_to_pylab:
# lazy import to avoid circularity
import matplotlib.pyplot as plt
import matplotlib._pylab_helpers as pylab_helpers
allnums = plt.get_fignums()
num = max(allnums) + 1 if allnums else 1
backend = plt._get_backend_mod()
mgr = backend.new_figure_manager_given_figure(num, self)
pylab_helpers.Gcf._set_new_active_manager(mgr)
plt.draw_if_interactive()
self.stale = True
def add_axobserver(self, func):
"""Whenever the Axes state change, ``func(self)`` will be called."""
# Connect a wrapper lambda and not func itself, to avoid it being
# weakref-collected.
self._axobservers.connect("_axes_change_event", lambda arg: func(arg))
def savefig(self, fname, *, transparent=None, **kwargs):
"""
Save the current figure as an image or vector graphic to a file.
Call signature::
savefig(fname, *, transparent=None, dpi='figure', format=None,
metadata=None, bbox_inches=None, pad_inches=0.1,
facecolor='auto', edgecolor='auto', backend=None,
**kwargs
)
The available output formats depend on the backend being used.
Parameters
----------
fname : str or path-like or binary file-like
A path, or a Python file-like object, or
possibly some backend-dependent object such as
`matplotlib.backends.backend_pdf.PdfPages`.
If *format* is set, it determines the output format, and the file
is saved as *fname*. Note that *fname* is used verbatim, and there
is no attempt to make the extension, if any, of *fname* match
*format*, and no extension is appended.
If *format* is not set, then the format is inferred from the
extension of *fname*, if there is one. If *format* is not
set and *fname* has no extension, then the file is saved with
:rc:`savefig.format` and the appropriate extension is appended to
*fname*.
Other Parameters
----------------
transparent : bool, default: :rc:`savefig.transparent`
If *True*, the Axes patches will all be transparent; the
Figure patch will also be transparent unless *facecolor*
and/or *edgecolor* are specified via kwargs.
If *False* has no effect and the color of the Axes and
Figure patches are unchanged (unless the Figure patch
is specified via the *facecolor* and/or *edgecolor* keyword
arguments in which case those colors are used).
The transparency of these patches will be restored to their
original values upon exit of this function.
This is useful, for example, for displaying
a plot on top of a colored background on a web page.
dpi : float or 'figure', default: :rc:`savefig.dpi`
The resolution in dots per inch. If 'figure', use the figure's
dpi value.
format : str
The file format, e.g. 'png', 'pdf', 'svg', ... The behavior when
this is unset is documented under *fname*.
metadata : dict, optional
Key/value pairs to store in the image metadata. The supported keys
and defaults depend on the image format and backend:
- 'png' with Agg backend: See the parameter ``metadata`` of
`~.FigureCanvasAgg.print_png`.
- 'pdf' with pdf backend: See the parameter ``metadata`` of
`~.backend_pdf.PdfPages`.
- 'svg' with svg backend: See the parameter ``metadata`` of
`~.FigureCanvasSVG.print_svg`.
- 'eps' and 'ps' with PS backend: Only 'Creator' is supported.
Not supported for 'pgf', 'raw', and 'rgba' as those formats do not support
embedding metadata.
Does not currently support 'jpg', 'tiff', or 'webp', but may include
embedding EXIF metadata in the future.
bbox_inches : str or `.Bbox`, default: :rc:`savefig.bbox`
Bounding box in inches: only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of the figure.
pad_inches : float or 'layout', default: :rc:`savefig.pad_inches`
Amount of padding in inches around the figure when bbox_inches is
'tight'. If 'layout' use the padding from the constrained or
compressed layout engine; ignored if one of those engines is not in
use.
facecolor : :mpltype:`color` or 'auto', default: :rc:`savefig.facecolor`
The facecolor of the figure. If 'auto', use the current figure
facecolor.
edgecolor : :mpltype:`color` or 'auto', default: :rc:`savefig.edgecolor`
The edgecolor of the figure. If 'auto', use the current figure
edgecolor.
backend : str, optional
Use a non-default backend to render the file, e.g. to render a
png file with the "cairo" backend rather than the default "agg",
or a pdf file with the "pgf" backend rather than the default
"pdf". Note that the default backend is normally sufficient. See
:ref:`the-builtin-backends` for a list of valid backends for each
file format. Custom backends can be referenced as "module://...".
orientation : {'landscape', 'portrait'}
Currently only supported by the postscript backend.
papertype : str
One of 'letter', 'legal', 'executive', 'ledger', 'a0' through
'a10', 'b0' through 'b10'. Only supported for postscript
output.
bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional
A list of extra artists that will be considered when the
tight bbox is calculated.
pil_kwargs : dict, optional
Additional keyword arguments that are passed to
`PIL.Image.Image.save` when saving the figure.
"""
kwargs.setdefault('dpi', mpl.rcParams['savefig.dpi'])
transparent = mpl._val_or_rc(transparent, 'savefig.transparent')
with ExitStack() as stack:
if transparent:
def _recursively_make_subfig_transparent(exit_stack, subfig):
exit_stack.enter_context(
subfig.patch._cm_set(
facecolor="none", edgecolor="none"))
for ax in subfig.axes:
exit_stack.enter_context(
ax.patch._cm_set(
facecolor="none", edgecolor="none"))
for sub_subfig in subfig.subfigs:
_recursively_make_subfig_transparent(
exit_stack, sub_subfig)
def _recursively_make_axes_transparent(exit_stack, ax):
exit_stack.enter_context(
ax.patch._cm_set(facecolor="none", edgecolor="none"))
for child_ax in ax.child_axes:
exit_stack.enter_context(
child_ax.patch._cm_set(
facecolor="none", edgecolor="none"))
for child_childax in ax.child_axes:
_recursively_make_axes_transparent(
exit_stack, child_childax)
kwargs.setdefault('facecolor', 'none')
kwargs.setdefault('edgecolor', 'none')
# set subfigure to appear transparent in printed image
for subfig in self.subfigs:
_recursively_make_subfig_transparent(stack, subfig)
# set Axes to be transparent
for ax in self.axes:
_recursively_make_axes_transparent(stack, ax)
self.canvas.print_figure(fname, **kwargs)
    def ginput(self, n=1, timeout=30, show_clicks=True,
               mouse_add=MouseButton.LEFT,
               mouse_pop=MouseButton.RIGHT,
               mouse_stop=MouseButton.MIDDLE):
        """
        Blocking call to interact with a figure.

        Wait until the user clicks *n* times on the figure, and return the
        coordinates of each click in a list.

        There are three possible interactions:

        - Add a point.
        - Remove the most recently added point.
        - Stop the interaction and return the points added so far.

        The actions are assigned to mouse buttons via the arguments
        *mouse_add*, *mouse_pop* and *mouse_stop*.

        Parameters
        ----------
        n : int, default: 1
            Number of mouse clicks to accumulate. If negative, accumulate
            clicks until the input is terminated manually.
        timeout : float, default: 30 seconds
            Number of seconds to wait before timing out. If zero or negative
            will never time out.
        show_clicks : bool, default: True
            If True, show a red cross at the location of each click.
        mouse_add : `.MouseButton` or None, default: `.MouseButton.LEFT`
            Mouse button used to add points.
        mouse_pop : `.MouseButton` or None, default: `.MouseButton.RIGHT`
            Mouse button used to remove the most recently added point.
        mouse_stop : `.MouseButton` or None, default: `.MouseButton.MIDDLE`
            Mouse button used to stop input.

        Returns
        -------
        list of tuples
            A list of the clicked (x, y) coordinates.

        Notes
        -----
        The keyboard can also be used to select points in case your mouse
        does not have one or more of the buttons.  The delete and backspace
        keys act like right-clicking (i.e., remove last point), the enter key
        terminates input and any other key (not already used by the window
        manager) selects a point.
        """
        clicks = []  # accumulated (xdata, ydata) pairs
        marks = []   # cross markers drawn for each click, parallel to *clicks*

        def handler(event):
            # Single dispatcher for both mouse and key events delivered by
            # the blocking input loop below.
            is_button = event.name == "button_press_event"
            is_key = event.name == "key_press_event"
            # Quit (even if not in infinite mode; this is consistent with
            # MATLAB and sometimes quite useful, but will require the user to
            # test how many points were actually returned before using data).
            if (is_button and event.button == mouse_stop
                    or is_key and event.key in ["escape", "enter"]):
                self.canvas.stop_event_loop()
            # Pop last click.
            elif (is_button and event.button == mouse_pop
                  or is_key and event.key in ["backspace", "delete"]):
                if clicks:
                    clicks.pop()
                    if show_clicks:
                        marks.pop().remove()
                        self.canvas.draw()
            # Add new click.
            elif (is_button and event.button == mouse_add
                  # On macOS/gtk, some keys return None.
                  or is_key and event.key is not None):
                # Only clicks inside an Axes have data coordinates to record.
                if event.inaxes:
                    clicks.append((event.xdata, event.ydata))
                    _log.info("input %i: %f, %f",
                              len(clicks), event.xdata, event.ydata)
                    if show_clicks:
                        line = mpl.lines.Line2D([event.xdata], [event.ydata],
                                                marker="+", color="r")
                        event.inaxes.add_line(line)
                        marks.append(line)
                        self.canvas.draw()
            # Stop once *n* points were collected (n <= 0 means "accumulate
            # until stopped manually").
            if len(clicks) == n and n > 0:
                self.canvas.stop_event_loop()

        _blocking_input.blocking_input_loop(
            self, ["button_press_event", "key_press_event"], timeout, handler)

        # Cleanup: remove the temporary cross markers before returning.
        for mark in marks:
            mark.remove()
        self.canvas.draw()

        return clicks
def waitforbuttonpress(self, timeout=-1):
"""
Blocking call to interact with the figure.
Wait for user input and return True if a key was pressed, False if a
mouse button was pressed and None if no input was given within
*timeout* seconds. Negative values deactivate *timeout*.
"""
event = None
def handler(ev):
nonlocal event
event = ev
self.canvas.stop_event_loop()
_blocking_input.blocking_input_loop(
self, ["button_press_event", "key_press_event"], timeout, handler)
return None if event is None else event.name == "key_press_event"
def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Adjust the padding between and around subplots.
To exclude an artist on the Axes from the bounding box calculation
that determines the subplot parameters (i.e. legend, or annotation),
set ``a.set_in_layout(False)`` for that artist.
Parameters
----------
pad : float, default: 1.08
Padding between the figure edge and the edges of subplots,
as a fraction of the font size.
h_pad, w_pad : float, default: *pad*
Padding (height/width) between edges of adjacent subplots,
as a fraction of the font size.
rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1)
A rectangle in normalized figure coordinates into which the whole
subplots area (including labels) will fit.
See Also
--------
.Figure.set_layout_engine
.pyplot.tight_layout
"""
# note that here we do not permanently set the figures engine to
# tight_layout but rather just perform the layout in place and remove
# any previous engines.
engine = TightLayoutEngine(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
try:
previous_engine = self.get_layout_engine()
self.set_layout_engine(engine)
engine.execute(self)
if previous_engine is not None and not isinstance(
previous_engine, (TightLayoutEngine, PlaceHolderLayoutEngine)
):
_api.warn_external('The figure layout has changed to tight')
finally:
self.set_layout_engine('none')
def figaspect(arg):
    """
    Calculate the width and height for a figure with a specified aspect ratio.

    While the height is taken from :rc:`figure.figsize`, the width is
    adjusted to match the desired aspect ratio. Additionally, it is ensured
    that the width is in the range [4., 16.] and the height is in the range
    [2., 16.]. If necessary, the default height is adjusted to ensure this.

    Parameters
    ----------
    arg : float or 2D array
        If a float, this defines the aspect ratio (i.e. the ratio height /
        width).
        In case of an array the aspect ratio is number of rows / number of
        columns, so that the array could be fitted in the figure undistorted.

    Returns
    -------
    size : (2,) array
        The width and height of the figure in inches.

    Notes
    -----
    If you want to create an Axes within the figure, that still preserves the
    aspect ratio, be sure to create it with equal width and height. See
    examples below.

    Thanks to Fernando Perez for this function.

    Examples
    --------
    Make a figure twice as tall as it is wide::

        w, h = figaspect(2.)
        fig = Figure(figsize=(w, h))
        ax = fig.add_axes((0.1, 0.1, 0.8, 0.8))
        ax.imshow(A, **kwargs)

    Make a figure with the proper aspect for an array::

        A = rand(5, 3)
        w, h = figaspect(A)
        fig = Figure(figsize=(w, h))
        ax = fig.add_axes((0.1, 0.1, 0.8, 0.8))
        ax.imshow(A, **kwargs)
    """
    # Aspect ratio (height / width): taken directly for scalar input, or
    # derived from the leading two dimensions for array input.
    if hasattr(arg, 'shape') and not np.isscalar(arg):
        n_rows, n_cols = arg.shape[:2]
        aspect = n_rows / n_cols
    else:
        aspect = arg

    # Hard limits (inches) respected when autoscaling. If John likes the
    # idea, they could become rc parameters; for now they're hardwired.
    size_lo = np.array((4.0, 2.0))    # min length for width/height
    size_hi = np.array((16.0, 16.0))  # max length for width/height

    # Start from the user's default figure height and derive the width so
    # the requested aspect ratio is preserved.
    height = mpl.rcParams['figure.figsize'][1]
    size = np.array((height / aspect, height))

    # Scale up if either dimension falls below its minimum ...
    size /= min(1.0, *(size / size_lo))
    # ... and scale down to avoid humongous windows.
    size /= max(1.0, *(size / size_hi))

    # For really funky aspect ratios, break the ratio but respect the
    # min/max dimensions (we don't want figures 10 feet tall!).
    return np.clip(size, size_lo, size_hi)
def _parse_figsize(figsize, dpi):
"""
Convert a figsize expression to (width, height) in inches.
Parameters
----------
figsize : (float, float) or (float, float, str)
This can be
- a tuple ``(width, height, unit)``, where *unit* is one of "in" (inch),
"cm" (centimenter), "px" (pixel).
- a tuple ``(width, height)``, which is interpreted in inches, i.e. as
``(width, height, "in")``.
dpi : float
The dots-per-inch; used for converting 'px' to 'in'.
"""
num_parts = len(figsize)
if num_parts == 2:
return figsize
elif num_parts == 3:
x, y, unit = figsize
if unit == 'in':
pass
elif unit == 'cm':
x /= 2.54
y /= 2.54
elif unit == 'px':
x /= dpi
y /= dpi
else:
raise ValueError(
f"Invalid unit {unit!r} in 'figsize'; "
"supported units are 'in', 'cm', 'px'"
)
return x, y
else:
raise ValueError(
"Invalid figsize format, expected (x, y) or (x, y, unit) but got "
f"{figsize!r}"
)
|
Figure
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/beta/beta_container_upload_block.py
|
{
"start": 201,
"end": 300
}
|
class ____(BaseModel):
file_id: str
type: Literal["container_upload"]
|
BetaContainerUploadBlock
|
python
|
huggingface__transformers
|
src/transformers/models/video_llama_3/modular_video_llama_3.py
|
{
"start": 46486,
"end": 46735
}
|
class ____(Qwen2VLProcessorKwargs):
_defaults = {
"text_kwargs": {
"padding": False,
"return_mm_token_type_ids": False,
},
"videos_kwargs": {"return_metadata": True},
}
|
VideoLlama3ProcessorKwargs
|
python
|
tiangolo__fastapi
|
fastapi/dependencies/models.py
|
{
"start": 521,
"end": 638
}
|
class ____:
security_scheme: SecurityBase
scopes: Optional[Sequence[str]] = None
@dataclass
|
SecurityRequirement
|
python
|
networkx__networkx
|
networkx/algorithms/tests/test_cluster.py
|
{
"start": 16297,
"end": 17600
}
|
class ____:
@classmethod
def setup_class(cls):
pytest.importorskip("numpy")
def test_empty(self):
G = nx.Graph()
with pytest.raises(ZeroDivisionError):
nx.average_clustering(G)
def test_average_clustering(self):
G = nx.cycle_graph(3)
G.add_edge(2, 3)
assert nx.average_clustering(G) == (1 + 1 + 1 / 3) / 4
assert nx.average_clustering(G, count_zeros=True) == (1 + 1 + 1 / 3) / 4
assert nx.average_clustering(G, count_zeros=False) == (1 + 1 + 1 / 3) / 3
assert nx.average_clustering(G, [1, 2, 3]) == (1 + 1 / 3) / 3
assert nx.average_clustering(G, [1, 2, 3], count_zeros=True) == (1 + 1 / 3) / 3
assert nx.average_clustering(G, [1, 2, 3], count_zeros=False) == (1 + 1 / 3) / 2
def test_average_clustering_signed(self):
G = nx.cycle_graph(3)
G.add_edge(2, 3)
G.add_edge(0, 1, weight=-1)
assert nx.average_clustering(G, weight="weight") == (-1 - 1 - 1 / 3) / 4
assert (
nx.average_clustering(G, weight="weight", count_zeros=True)
== (-1 - 1 - 1 / 3) / 4
)
assert (
nx.average_clustering(G, weight="weight", count_zeros=False)
== (-1 - 1 - 1 / 3) / 3
)
|
TestAverageClustering
|
python
|
bokeh__bokeh
|
src/bokeh/models/annotations/geometry.py
|
{
"start": 3348,
"end": 4694
}
|
class ____(Model):
""" Defines interaction handles for box-like annotations.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
all = Required(Instance(AreaVisuals)) # move, resize
move = Nullable(Instance(AreaVisuals))
resize = Nullable(Instance(AreaVisuals)) # sides, corners
sides = Nullable(Instance(AreaVisuals)) # left, right, top, bottom
corners = Nullable(Instance(AreaVisuals)) # top_left, top_right, bottom_left, bottom_right
left = Nullable(Instance(AreaVisuals))
right = Nullable(Instance(AreaVisuals))
top = Nullable(Instance(AreaVisuals))
bottom = Nullable(Instance(AreaVisuals))
top_left = Nullable(Instance(AreaVisuals))
top_right = Nullable(Instance(AreaVisuals))
bottom_left = Nullable(Instance(AreaVisuals))
bottom_right = Nullable(Instance(AreaVisuals))
DEFAULT_BOX_ANNOTATION_HANDLES = lambda: \
BoxInteractionHandles(
all=AreaVisuals(
fill_color="white",
fill_alpha=1.0,
line_color="black",
line_alpha=1.0,
hover_fill_color="lightgray",
hover_fill_alpha=1.0,
),
)
|
BoxInteractionHandles
|
python
|
walkccc__LeetCode
|
solutions/3498. Reverse Degree of a String/3498.py
|
{
"start": 0,
"end": 151
}
|
class ____:
def reverseDegree(self, s: str) -> int:
return sum((26 - (ord(c) - ord('a'))) * (i + 1)
for i, c in enumerate(s))
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/engine/data_adapter.py
|
{
"start": 27424,
"end": 31644
}
|
class ____(DataAdapter):
"""Adapter that handles python generators and iterators."""
@staticmethod
def can_handle(x, y=None):
return ((hasattr(x, "__next__") or hasattr(x, "next"))
and hasattr(x, "__iter__")
and not isinstance(x, data_utils.Sequence))
def __init__(self,
x,
y=None,
sample_weights=None,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
# Generators should never shuffle as exhausting the generator in order to
# shuffle the batches is inefficient.
kwargs.pop("shuffle", None)
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"python generator as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"python generator as input.")
super(GeneratorDataAdapter, self).__init__(x, y, **kwargs)
# Since we have to know the dtype of the python generator when we build the
# dataset, we have to look at a batch to infer the structure.
peek, x = self._peek_and_restore(x)
peek = self._standardize_batch(peek)
peek = _process_tensorlike(peek)
# Need to build the Model on concrete input shapes.
if model is not None and not model.built:
concrete_x, _, _ = unpack_x_y_sample_weight(peek)
model.distribute_strategy.run(
lambda x: model(x, training=False), args=(concrete_x,))
self._first_batch_size = int(nest.flatten(peek)[0].shape[0])
def _get_dynamic_shape(t):
shape = t.shape
# Unknown number of dimensions, `as_list` cannot be called.
if shape.rank is None:
return shape
return tensor_shape.TensorShape([None for _ in shape.as_list()])
output_shapes = nest.map_structure(_get_dynamic_shape, peek)
output_types = nest.map_structure(lambda t: t.dtype, peek)
# Note that dataset API takes a callable that creates a generator object,
# rather than generator itself, which is why we define a function here.
generator_fn = self._handle_multiprocessing(x, workers, use_multiprocessing,
max_queue_size)
def wrapped_generator():
for data in generator_fn():
yield self._standardize_batch(data)
dataset = dataset_ops.DatasetV2.from_generator(
wrapped_generator, output_types, output_shapes=output_shapes)
if workers == 1 and not use_multiprocessing:
dataset = dataset.prefetch(1)
self._dataset = dataset
def _standardize_batch(self, data):
"""Standardizes a batch output by a generator."""
# Removes `None`s.
x, y, sample_weight = unpack_x_y_sample_weight(data)
data = pack_x_y_sample_weight(x, y, sample_weight)
data = nest.list_to_tuple(data)
def _convert_dtype(t):
if (isinstance(t, np.ndarray) and issubclass(t.dtype.type, np.floating)):
return np.array(t, dtype=backend.floatx())
return t
data = nest.map_structure(_convert_dtype, data)
return data
@staticmethod
def _peek_and_restore(x):
peek = next(x)
return peek, itertools.chain([peek], x)
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
"""Create a callable, possibly including an Enqueuer."""
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
enqueuer = data_utils.GeneratorEnqueuer(
x, use_multiprocessing=use_multiprocessing)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return enqueuer.get()
else:
generator_fn = lambda: x
return generator_fn
def get_dataset(self):
return self._dataset
def get_size(self):
return None
def batch_size(self):
return None
def representative_batch_size(self):
return self._first_batch_size
def has_partial_batch(self):
return False
def partial_batch_size(self):
return
def should_recreate_iterator(self):
return False
|
GeneratorDataAdapter
|
python
|
h5py__h5py
|
h5py/tests/test_dataset.py
|
{
"start": 15692,
"end": 18422
}
|
class ____(BaseDataset):
"""
Feature: Datasets can be created by manually specifying chunks
"""
def test_create_chunks(self):
""" Create via chunks tuple """
dset = self.f.create_dataset(make_name(), shape=(100,), chunks=(10,))
self.assertEqual(dset.chunks, (10,))
def test_create_chunks_integer(self):
""" Create via chunks integer """
dset = self.f.create_dataset(make_name(), shape=(100,), chunks=10)
self.assertEqual(dset.chunks, (10,))
def test_chunks_mismatch(self):
""" Illegal chunk size raises ValueError """
with self.assertRaises(ValueError):
self.f.create_dataset(make_name(), shape=(100,), chunks=(200,))
def test_chunks_false(self):
""" Chunked format required for given storage options """
with self.assertRaises(ValueError):
self.f.create_dataset(make_name(), shape=(10,), maxshape=100, chunks=False)
def test_chunks_scalar(self):
""" Attempting to create chunked scalar dataset raises TypeError """
with self.assertRaises(TypeError):
self.f.create_dataset(make_name(), shape=(), chunks=(50,))
def test_auto_chunks(self):
""" Auto-chunking of datasets """
dset = self.f.create_dataset(make_name(), shape=(20, 100), chunks=True)
self.assertIsInstance(dset.chunks, tuple)
self.assertEqual(len(dset.chunks), 2)
def test_auto_chunks_abuse(self):
""" Auto-chunking with pathologically large element sizes """
dset = self.f.create_dataset(make_name(), shape=(3,), dtype='S100000000', chunks=True)
self.assertEqual(dset.chunks, (1,))
def test_scalar_assignment(self):
""" Test scalar assignment of chunked dataset """
dset = self.f.create_dataset(make_name(), shape=(3, 50, 50),
dtype=np.int32, chunks=(1, 50, 50))
# test assignment of selection smaller than chunk size
dset[1, :, 40] = 10
self.assertTrue(np.all(dset[1, :, 40] == 10))
# test assignment of selection equal to chunk size
dset[1] = 11
self.assertTrue(np.all(dset[1] == 11))
# test assignment of selection bigger than chunk size
dset[0:2] = 12
self.assertTrue(np.all(dset[0:2] == 12))
def test_auto_chunks_no_shape(self):
""" Auto-chunking of empty datasets not allowed"""
name = make_name()
with pytest.raises(TypeError, match='Empty') as err:
self.f.create_dataset(name, dtype='S100', chunks=True)
with pytest.raises(TypeError, match='Empty') as err:
self.f.create_dataset(name, dtype='S100', maxshape=20)
|
TestCreateChunked
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/sqltypes.py
|
{
"start": 122819,
"end": 122903
}
|
class ____(Unicode):
"""The SQL NCHAR type."""
__visit_name__ = "NCHAR"
|
NCHAR
|
python
|
spyder-ide__spyder
|
spyder/plugins/projects/api.py
|
{
"start": 6094,
"end": 6396
}
|
class ____(BaseProjectType):
ID = 'empty-project-type'
@staticmethod
def get_name():
return _("Empty project")
def create_project(self):
return True, ""
def open_project(self):
return True, ""
def close_project(self):
return True, ""
|
EmptyProject
|
python
|
matplotlib__matplotlib
|
galleries/examples/misc/demo_ribbon_box.py
|
{
"start": 1148,
"end": 2788
}
|
class ____(AxesImage):
zorder = 1
def __init__(self, ax, bbox, color, *, extent=(0, 1, 0, 1), **kwargs):
super().__init__(ax, extent=extent, **kwargs)
self._bbox = bbox
self._ribbonbox = RibbonBox(color)
self.set_transform(BboxTransformTo(bbox))
def draw(self, renderer):
stretch_factor = self._bbox.height / self._bbox.width
ny = int(stretch_factor*self._ribbonbox.nx)
if self.get_array() is None or self.get_array().shape[0] != ny:
arr = self._ribbonbox.get_stretched_image(stretch_factor)
self.set_array(arr)
super().draw(renderer)
def main():
fig, ax = plt.subplots()
years = np.arange(2004, 2009)
heights = [7900, 8100, 7900, 6900, 2800]
box_colors = [
(0.8, 0.2, 0.2),
(0.2, 0.8, 0.2),
(0.2, 0.2, 0.8),
(0.7, 0.5, 0.8),
(0.3, 0.8, 0.7),
]
for year, h, bc in zip(years, heights, box_colors):
bbox0 = Bbox.from_extents(year - 0.4, 0., year + 0.4, h)
bbox = TransformedBbox(bbox0, ax.transData)
ax.add_artist(RibbonBoxImage(ax, bbox, bc, interpolation="bicubic"))
ax.annotate(str(h), (year, h), va="bottom", ha="center")
ax.set_xlim(years[0] - 0.5, years[-1] + 0.5)
ax.set_ylim(0, 10000)
background_gradient = np.zeros((2, 2, 4))
background_gradient[:, :, :3] = [1, 1, 0]
background_gradient[:, :, 3] = [[0.1, 0.3], [0.3, 0.5]] # alpha channel
ax.imshow(background_gradient, interpolation="bicubic", zorder=0.1,
extent=(0, 1, 0, 1), transform=ax.transAxes)
plt.show()
main()
|
RibbonBoxImage
|
python
|
apache__airflow
|
airflow-core/tests/unit/models/test_variable.py
|
{
"start": 1336,
"end": 16447
}
|
class ____:
@pytest.fixture(autouse=True)
def setup_test_cases(self):
db.clear_db_variables()
SecretCache.reset()
with conf_vars({("secrets", "use_cache"): "true"}):
SecretCache.init()
with mock.patch("airflow.models.variable.mask_secret", autospec=True) as m:
self.mask_secret = m
yield
db.clear_db_variables()
@conf_vars({("core", "fernet_key"): "", ("core", "unit_test_mode"): "True"})
def test_variable_no_encryption(self, session):
"""
Test variables without encryption
"""
crypto.get_fernet.cache_clear()
Variable.set(key="key", value="value", session=session)
test_var = session.query(Variable).filter(Variable.key == "key").one()
assert not test_var.is_encrypted
assert test_var.val == "value"
# We always call mask_secret for variables, and let the SecretsMasker decide based on the name if it
# should mask anything. That logic is tested in test_secrets_masker.py
self.mask_secret.assert_called_once_with("value", "key")
@conf_vars({("core", "fernet_key"): Fernet.generate_key().decode()})
def test_variable_with_encryption(self, session):
"""
Test variables with encryption
"""
crypto.get_fernet.cache_clear()
Variable.set(key="key", value="value", session=session)
test_var = session.query(Variable).filter(Variable.key == "key").one()
assert test_var.is_encrypted
assert test_var.val == "value"
@pytest.mark.parametrize("test_value", ["value", ""])
def test_var_with_encryption_rotate_fernet_key(self, test_value, session):
"""
Tests rotating encrypted variables.
"""
key1 = Fernet.generate_key()
key2 = Fernet.generate_key()
with conf_vars({("core", "fernet_key"): key1.decode()}):
crypto.get_fernet.cache_clear()
Variable.set(key="key", value=test_value, session=session)
test_var = session.query(Variable).filter(Variable.key == "key").one()
assert test_var.is_encrypted
assert test_var.val == test_value
assert Fernet(key1).decrypt(test_var._val.encode()) == test_value.encode()
# Test decrypt of old value with new key
with conf_vars({("core", "fernet_key"): f"{key2.decode()},{key1.decode()}"}):
crypto.get_fernet.cache_clear()
assert test_var.val == test_value
# Test decrypt of new value with new key
test_var.rotate_fernet_key()
assert test_var.is_encrypted
assert test_var.val == test_value
assert Fernet(key2).decrypt(test_var._val.encode()) == test_value.encode()
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
assert Variable.get("tested_var_set_id") == "Monday morning breakfast"
def test_variable_set_with_env_variable(self, caplog, session):
caplog.set_level(logging.WARNING, logger=variable.log.name)
Variable.set(key="key", value="db-value", session=session)
with mock.patch.dict("os.environ", AIRFLOW_VAR_KEY="env-value"):
# setting value while shadowed by an env variable will generate a warning
Variable.set(key="key", value="new-db-value", session=session)
# value set above is not returned because the env variable value takes priority
assert Variable.get("key") == "env-value"
# invalidate the cache to re-evaluate value
SecretCache.invalidate_variable("key")
# now that env var is not here anymore, we see the value we set before.
assert Variable.get("key") == "new-db-value"
assert caplog.messages[0] == (
"The variable key is defined in the EnvironmentVariablesBackend secrets backend, "
"which takes precedence over reading from the database. The value in the database "
"will be updated, but to read it you have to delete the conflicting variable from "
"EnvironmentVariablesBackend"
)
def test_variable_set_update_existing(self, session):
Variable.set(key="test_key", value="initial_value", session=session)
initial_var = session.query(Variable).filter(Variable.key == "test_key").one()
initial_id = initial_var.id
# Need to expire session cache to fetch fresh data from db on next query
# Without this, SQLAlchemy will return the cached object with old values
# instead of querying the database again for the updated values
session.expire(initial_var)
Variable.set(key="test_key", value="updated_value", session=session)
updated_var = session.query(Variable).filter(Variable.key == "test_key").one()
# 1. The ID remains the same (no delete-insert)
assert updated_var.id == initial_id, "Variable ID should remain the same after update"
# 2. The value is updated to the new value
assert updated_var.val == "updated_value", "Variable value should be updated to the new value"
@mock.patch("airflow.models.variable.ensure_secrets_loaded")
def test_variable_set_with_extra_secret_backend(self, mock_ensure_secrets, caplog, session):
caplog.set_level(logging.WARNING, logger=variable.log.name)
mock_backend = mock.Mock()
mock_backend.get_variable.return_value = "secret_val"
mock_backend.__class__.__name__ = "MockSecretsBackend"
mock_ensure_secrets.return_value = [mock_backend, MetastoreBackend]
Variable.set(key="key", value="new-db-value", session=session)
assert Variable.get("key") == "secret_val"
assert caplog.messages[0] == (
"The variable key is defined in the MockSecretsBackend secrets backend, "
"which takes precedence over reading from the database. The value in the database "
"will be updated, but to read it you have to delete the conflicting variable from "
"MockSecretsBackend"
)
Variable.delete(key="key", session=session)
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set(key="tested_var_set_id", value=value, serialize_json=True)
assert value == Variable.get("tested_var_set_id", deserialize_json=True)
def test_variable_update(self, session):
Variable.set(key="test_key", value="value1", session=session)
assert Variable.get(key="test_key") == "value1"
Variable.update(key="test_key", value="value2", session=session)
assert Variable.get("test_key") == "value2"
def test_variable_update_fails_on_non_metastore_variable(self, session):
with mock.patch.dict("os.environ", AIRFLOW_VAR_KEY="env-value"):
with pytest.raises(AttributeError):
Variable.update(key="key", value="new-value", session=session)
def test_variable_update_preserves_description(self, session):
Variable.set(key="key", value="value", description="a test variable", session=session)
assert Variable.get("key") == "value"
Variable.update("key", "value2")
test_var = session.query(Variable).filter(Variable.key == "key").one()
assert test_var.val == "value2"
assert test_var.description == "a test variable"
def test_set_variable_sets_description(self, session):
Variable.set(key="key", value="value", description="a test variable", session=session)
test_var = session.query(Variable).filter(Variable.key == "key").one()
assert test_var.description == "a test variable"
assert test_var.val == "value"
@conf_vars({("core", "multi_team"): "True"})
def test_set_variable_sets_team(self, testing_team, session):
Variable.set(key="key", value="value", team_id=testing_team.id, session=session)
test_var = session.query(Variable).filter(Variable.key == "key").one()
assert test_var.team_id == testing_team.id
assert test_var.val == "value"
def test_set_variable_sets_team_multi_team_off(self, testing_team, session):
with pytest.raises(ValueError, match=r"Multi-team mode is not configured in the Airflow environment"):
Variable.set(key="key", value="value", team_id=testing_team.id, session=session)
def test_variable_set_existing_value_to_blank(self, session):
test_value = "Some value"
test_key = "test_key"
Variable.set(key=test_key, value=test_value, session=session)
Variable.set(key=test_key, value="", session=session)
assert Variable.get("test_key") == ""
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
assert default_value == Variable.get("thisIdDoesNotExist", default_var=default_value)
def test_get_non_existing_var_should_raise_key_error(self):
with pytest.raises(KeyError):
Variable.get("thisIdDoesNotExist")
def test_update_non_existing_var_should_raise_key_error(self, session):
with pytest.raises(KeyError):
Variable.update(key="thisIdDoesNotExist", value="value", session=session)
def test_get_non_existing_var_with_none_default_should_return_none(self):
assert Variable.get("thisIdDoesNotExist", default_var=None) is None
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
assert default_value == Variable.get(
"thisIdDoesNotExist", default_var=default_value, deserialize_json=True
)
def test_variable_setdefault_round_trip(self, session):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key=key, default=value)
assert value == Variable.get(key)
def test_variable_setdefault_round_trip_json(self, session):
key = "tested_var_setdefault_2_id"
value = {"city": "Paris", "Happiness": True}
Variable.setdefault(key=key, default=value, deserialize_json=True)
assert value == Variable.get(key, deserialize_json=True)
def test_variable_setdefault_existing_json(self, session):
key = "tested_var_setdefault_2_id"
value = {"city": "Paris", "Happiness": True}
Variable.set(key=key, value=value, serialize_json=True, session=session)
val = Variable.setdefault(key=key, default=value, deserialize_json=True)
# Check the returned value, and the stored value are handled correctly.
assert value == val
assert value == Variable.get(key, deserialize_json=True)
def test_variable_delete(self, session):
key = "tested_var_delete"
value = "to be deleted"
# No-op if the variable doesn't exist
Variable.delete(key=key, session=session)
with pytest.raises(KeyError):
Variable.get(key)
# Set the variable
Variable.set(key=key, value=value, session=session)
assert value == Variable.get(key)
# Delete the variable
Variable.delete(key=key, session=session)
with pytest.raises(KeyError):
Variable.get(key)
def test_masking_from_db(self, session):
"""Test secrets are masked when loaded directly from the DB"""
# Normally people will use `Variable.get`, but just in case, catch direct DB access too
try:
var = Variable(
key=f"password-{os.getpid()}",
val="s3cr3t",
)
session.add(var)
session.flush()
# Make sure we re-load it, not just get the cached object back
session.expunge(var)
self.mask_secret.reset_mock()
session.get(Variable, var.id)
assert self.mask_secret.mock_calls == [
# We should have called it _again_ when loading from the DB
mock.call("s3cr3t", var.key),
]
finally:
session.rollback()
@mock.patch("airflow.models.variable.ensure_secrets_loaded")
def test_caching_caches(self, mock_ensure_secrets: mock.Mock):
mock_backend = mock.Mock()
mock_backend.get_variable.return_value = "secret_val"
mock_backend.__class__.__name__ = "MockSecretsBackend"
mock_ensure_secrets.return_value = [mock_backend, MetastoreBackend]
key = "doesn't matter"
first = Variable.get(key)
second = Variable.get(key)
mock_backend.get_variable.assert_called_once() # second call was not made because of cache
assert first == second
def test_cache_invalidation_on_set(self, session):
with mock.patch.dict("os.environ", AIRFLOW_VAR_KEY="from_env"):
a = Variable.get("key") # value is saved in cache
with mock.patch.dict("os.environ", AIRFLOW_VAR_KEY="from_env_two"):
b = Variable.get("key") # value from cache is used
assert a == b
# setting a new value invalidates the cache
Variable.set(key="key", value="new_value", session=session)
c = Variable.get("key") # cache should not be used
assert c != b
def test_get_team_name(self, testing_team: Team, session: Session):
var = Variable(key="key", val="value", team_id=testing_team.id)
session.add(var)
session.flush()
assert Variable.get_team_name("key", session=session) == "testing"
def test_get_key_to_team_name_mapping(self, testing_team: Team, session: Session):
var1 = Variable(key="key1", val="value1", team_id=testing_team.id)
var2 = Variable(key="key2", val="value2")
session.add(var1)
session.add(var2)
session.flush()
assert Variable.get_key_to_team_name_mapping(["key1", "key2"], session=session) == {"key1": "testing"}
@pytest.mark.parametrize(
("variable_value", "deserialize_json", "expected_masked_values"),
[
("s3cr3t", False, ["s3cr3t"]),
('{"api_key": "s3cr3t"}', True, ["s3cr3t"]),
('{"api_key": "s3cr3t", "normal_key": "normal_value"}', True, ["s3cr3t"]),
('{"api_key": "s3cr3t", "another_secret": "123456"}', True, ["s3cr3t", "123456"]),
],
)
def test_masking_only_secret_values(variable_value, deserialize_json, expected_masked_values, session):
from airflow._shared.secrets_masker import _secrets_masker
SecretCache.reset()
try:
var = Variable(
key=f"password-{os.getpid()}",
val=variable_value,
)
session.add(var)
session.commit()
# Make sure we re-load it, not just get the cached object back
session.expunge(var)
_secrets_masker().patterns = set()
Variable.get(var.key, deserialize_json=deserialize_json)
for expected_masked_value in expected_masked_values:
assert expected_masked_value in _secrets_masker().patterns
finally:
db.clear_db_variables()
|
TestVariable
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/sensors.py
|
{
"start": 7680,
"end": 7812
}
|
class ____(graphene.ObjectType):
results = non_null_list(GrapheneSensor)
class Meta:
name = "Sensors"
|
GrapheneSensors
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_image09.py
|
{
"start": 315,
"end": 847
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image09.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red_64x20.png")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
ApeWorX__ape
|
src/ape/utils/os.py
|
{
"start": 4094,
"end": 11697
}
|
class ____:
"""
A context manager to manage injecting and removing paths from
a user's sys paths without permanently modifying it.
"""
def __init__(self, path: Path, exclude: Optional[list[Path]] = None):
self.temp_path = str(path)
self.exclude = [str(p) for p in exclude or []]
def __enter__(self):
for path in self.exclude:
if path in sys.path:
sys.path.remove(path)
else:
# Preventing trying to re-add during exit.
self.exclude.remove(path)
if self.temp_path not in sys.path:
sys.path.append(self.temp_path)
def __exit__(self, *exc):
if self.temp_path in sys.path:
sys.path.remove(self.temp_path)
for path in self.exclude:
if path not in sys.path:
sys.path.append(path)
def get_full_extension(path: Union[Path, str]) -> str:
"""
For a path like ``Path("Contract.t.sol")``,
returns ``.t.sol``, unlike the regular Path
property ``.suffix`` which returns ``.sol``.
Args:
path (Path | str): The path with an extension.
Returns:
str: The full suffix
"""
if not path:
return ""
path = Path(path)
if path.is_dir() or path.suffix == "":
return ""
name = path.name
parts = name.split(".")
if len(parts) > 2 and name.startswith("."):
return "." + ".".join(parts[2:])
elif len(parts) > 1:
return "." + ".".join(parts[1:])
return ""
@contextmanager
def create_tempdir(name: Optional[str] = None) -> Iterator[Path]:
"""
Create a temporary directory. Differs from ``TemporaryDirectory()``
context-call alone because it automatically resolves the path.
Args:
name (Optional[str]): Optional provide a name of the directory.
Else, defaults to root of ``tempfile.TemporaryDirectory()``
(resolved).
Returns:
Iterator[Path]: Context managing the temporary directory.
"""
with TemporaryDirectory(ignore_cleanup_errors=True) as temp_dir:
temp_path = Path(temp_dir).resolve()
if name:
path = temp_path / name
path.mkdir()
else:
path = temp_path
yield path
def run_in_tempdir(
fn: Callable[[Path], Any],
name: Optional[str] = None,
):
"""
Run the given function in a temporary directory with its path
resolved.
Args:
fn (Callable): A function that takes a path. It gets called
with the resolved path to the temporary directory.
name (Optional[str]): Optionally name the temporary directory.
Returns:
Any: The result of the function call.
"""
with create_tempdir(name=name) as temp_dir:
return fn(temp_dir)
def in_tempdir(path: Path) -> bool:
"""
Returns ``True`` when the given path is in a temporary directory.
Args:
path (Path): The path to check.
Returns:
bool
"""
temp_dir = os.path.normpath(f"{Path(gettempdir()).resolve()}")
normalized_path = os.path.normpath(path)
return normalized_path.startswith(temp_dir)
def path_match(path: Union[str, Path], *exclusions: str) -> bool:
"""
A better glob-matching function. For example:
>>> from pathlib import Path
>>> p = Path("test/to/.build/me/2/file.json")
>>> p.match("**/.build/**")
False
>>> from ape.utils.os import path_match
>>> path_match(p, "**/.build/**")
True
"""
path_str = str(path)
path_path = Path(path)
for excl in exclusions:
if fnmatch(path_str, excl):
return True
elif fnmatch(path_path.name, excl):
return True
else:
# If the exclusion is he full name of any of the parents
# (e.g. ".cache", it is a match).
for parent in path_path.parents:
if parent.name == excl:
return True
# Walk the path recursively.
relative_str = path_str.replace(str(parent), "").strip(os.path.sep)
if fnmatch(relative_str, excl):
return True
return False
def clean_path(path: Path) -> str:
"""
Replace the home directory with key ``$HOME`` and return
the path as a str. This is used for outputting paths
with less doxxing.
Args:
path (Path): The path to sanitize.
Returns:
str: A sanitized path-str.
"""
home = Path.home()
if path.is_relative_to(home):
return f"$HOME{os.path.sep}{path.relative_to(home)}"
return f"{path}"
def get_package_path(package_name: str) -> Path:
"""
Get the path to a package from site-packages.
Args:
package_name (str): The name of the package.
Returns:
Path
"""
try:
dist = distribution(package_name)
except PackageNotFoundError as err:
raise ValueError(f"Package '{package_name}' not found in site-packages.") from err
package_path = Path(str(dist.locate_file(""))) / package_name
if not package_path.exists():
raise ValueError(f"Package '{package_name}' not found in site-packages.")
return package_path
def extract_archive(archive_file: Path, destination: Optional[Path] = None):
"""
Extract an archive file. Supports ``.zip`` or ``.tar.gz``.
Args:
archive_file (Path): The file-path to the archive.
destination (Optional[Path]): Optionally provide a destination.
Defaults to the parent directory of the archive file.
"""
destination = destination or archive_file.parent
if archive_file.suffix == ".zip":
with zipfile.ZipFile(archive_file, "r") as zip_ref:
zip_members = zip_ref.namelist()
if top_level_dir := os.path.commonpath(zip_members):
for zip_member in zip_members:
# Modify the member name to remove the top-level directory.
member_path = Path(zip_member)
relative_path = (
member_path.relative_to(top_level_dir) if top_level_dir else member_path
)
target_path = destination / relative_path
if member_path.is_dir():
target_path.mkdir(parents=True, exist_ok=True)
else:
target_path.parent.mkdir(parents=True, exist_ok=True)
with zip_ref.open(member_path.as_posix()) as source:
target_path.write_bytes(source.read())
else:
zip_ref.extractall(f"{destination}")
elif archive_file.name.endswith(".tar.gz"):
with tarfile.open(archive_file, "r:gz") as tar_ref:
tar_members = tar_ref.getmembers()
if top_level_dir := os.path.commonpath([m.name for m in tar_members]):
for tar_member in tar_members:
# Modify the member name to remove the top-level directory.
tar_member.name = os.path.relpath(tar_member.name, top_level_dir)
tar_ref.extract(tar_member, path=destination)
else:
tar_ref.extractall(path=f"{destination}")
else:
raise ValueError(f"Unsupported zip format: '{archive_file.suffix}'.")
def _remove_readonly(func, path, excinfo):
"""
Error handler for shutil.rmtree that handles removing read-only files.
"""
os.chmod(path, stat.S_IWRITE)
func(path)
|
use_temp_sys_path
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_fsdp.py
|
{
"start": 40606,
"end": 41024
}
|
class ____(MultiThreadedTestCase):
@property
def world_size(self):
return DEVICE_COUNT
def setUp(self):
super().setUp()
self._spawn_threads()
def run_subtests(self, *args, **kwargs):
return run_subtests(self, *args, **kwargs)
def perThreadSetUp(self):
torch._dynamo.reset()
def perThreadTearDown(self):
torch._dynamo.reset()
|
FSDPTestMultiThread
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/memory/summary_buffer.py
|
{
"start": 536,
"end": 5678
}
|
class ____(BaseChatMemory, SummarizerMixin):
"""Buffer with summarizer for storing conversation memory.
Provides a running summary of the conversation together with the most recent
messages in the conversation under the constraint that the total number of
tokens in the conversation does not exceed a certain limit.
"""
max_token_limit: int = 2000
moving_summary_buffer: str = ""
memory_key: str = "history"
@property
def buffer(self) -> str | list[BaseMessage]:
"""String buffer of memory."""
return self.load_memory_variables({})[self.memory_key]
async def abuffer(self) -> str | list[BaseMessage]:
"""Async memory buffer."""
memory_variables = await self.aload_memory_variables({})
return memory_variables[self.memory_key]
@property
def memory_variables(self) -> list[str]:
"""Will always return list of memory variables."""
return [self.memory_key]
@override
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return history buffer."""
buffer = self.chat_memory.messages
if self.moving_summary_buffer != "":
first_messages: list[BaseMessage] = [
self.summary_message_cls(content=self.moving_summary_buffer),
]
buffer = first_messages + buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(
buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: final_buffer}
@override
async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Asynchronously return key-value pairs given the text input to the chain."""
buffer = await self.chat_memory.aget_messages()
if self.moving_summary_buffer != "":
first_messages: list[BaseMessage] = [
self.summary_message_cls(content=self.moving_summary_buffer),
]
buffer = first_messages + buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(
buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: final_buffer}
@pre_init
def validate_prompt_input_variables(cls, values: dict) -> dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
msg = (
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
raise ValueError(msg)
return values
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.prune()
async def asave_context(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> None:
"""Asynchronously save context from this conversation to buffer."""
await super().asave_context(inputs, outputs)
await self.aprune()
def prune(self) -> None:
"""Prune buffer if it exceeds max token limit."""
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
self.moving_summary_buffer = self.predict_new_summary(
pruned_memory,
self.moving_summary_buffer,
)
async def aprune(self) -> None:
"""Asynchronously prune buffer if it exceeds max token limit."""
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
self.moving_summary_buffer = await self.apredict_new_summary(
pruned_memory,
self.moving_summary_buffer,
)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.moving_summary_buffer = ""
async def aclear(self) -> None:
"""Asynchronously clear memory contents."""
await super().aclear()
self.moving_summary_buffer = ""
|
ConversationSummaryBufferMemory
|
python
|
RaRe-Technologies__gensim
|
gensim/utils.py
|
{
"start": 35871,
"end": 41126
}
|
class ____(SaveLoad):
"""Wrap `corpus` and return a slice of it."""
def __init__(self, corpus, slice_):
"""
Parameters
----------
corpus : iterable of iterable of (int, numeric)
Input corpus.
slice_ : slice or iterable
Slice for `corpus`.
Notes
-----
Negative slicing can only be used if the corpus is indexable, otherwise, the corpus will be iterated over.
Slice can also be a np.ndarray to support fancy indexing.
Calculating the size of a SlicedCorpus is expensive when using a slice as the corpus has
to be iterated over once. Using a list or np.ndarray does not have this drawback, but consumes more memory.
"""
self.corpus = corpus
self.slice_ = slice_
self.length = None
def __iter__(self):
if hasattr(self.corpus, 'index') and len(self.corpus.index) > 0:
return (self.corpus.docbyoffset(i) for i in self.corpus.index[self.slice_])
return itertools.islice(self.corpus, self.slice_.start, self.slice_.stop, self.slice_.step)
def __len__(self):
# check cached length, calculate if needed
if self.length is None:
if isinstance(self.slice_, (list, np.ndarray)):
self.length = len(self.slice_)
elif isinstance(self.slice_, slice):
(start, end, step) = self.slice_.indices(len(self.corpus.index))
diff = end - start
self.length = diff // step + (diff % step > 0)
else:
self.length = sum(1 for x in self)
return self.length
def safe_unichr(intval):
"""Create a unicode character from its integer value. In case `unichr` fails, render the character
as an escaped `\\U<8-byte hex value of intval>` string.
Parameters
----------
intval : int
Integer code of character
Returns
-------
string
Unicode string of character
"""
try:
return chr(intval)
except ValueError:
# ValueError: chr() arg not in range(0x10000) (narrow Python build)
s = "\\U%08x" % intval
# return UTF16 surrogate pair
return s.decode('unicode-escape')
def decode_htmlentities(text):
"""Decode all HTML entities in text that are encoded as hex, decimal or named entities.
Adapted from `python-twitter-ircbot/html_decode.py
<https://github.com/sku/python-twitter-ircbot/blob/321d94e0e40d0acc92f5bf57d126b57369da70de/html_decode.py>`_.
Parameters
----------
text : str
Input HTML.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.utils import decode_htmlentities
>>>
>>> u = u'E tu vivrai nel terrore - L'aldilà (1981)'
>>> print(decode_htmlentities(u).encode('UTF-8'))
E tu vivrai nel terrore - L'aldilà (1981)
>>> print(decode_htmlentities("l'eau"))
l'eau
>>> print(decode_htmlentities("foo < bar"))
foo < bar
"""
def substitute_entity(match):
try:
ent = match.group(3)
if match.group(1) == "#":
# decoding by number
if match.group(2) == '':
# number is in decimal
return safe_unichr(int(ent))
elif match.group(2) in ['x', 'X']:
# number is in hex
return safe_unichr(int(ent, 16))
else:
# they were using a name
cp = n2cp.get(ent)
if cp:
return safe_unichr(cp)
else:
return match.group()
except Exception:
# in case of errors, return original input
return match.group()
return RE_HTML_ENTITY.sub(substitute_entity, text)
def chunkize_serial(iterable, chunksize, as_numpy=False, dtype=np.float32):
"""Yield elements from `iterable` in "chunksize"-ed groups.
The last returned element may be smaller if the length of collection is not divisible by `chunksize`.
Parameters
----------
iterable : iterable of object
An iterable.
chunksize : int
Split iterable into chunks of this size.
as_numpy : bool, optional
Yield chunks as `np.ndarray` instead of lists.
Yields
------
list OR np.ndarray
"chunksize"-ed chunks of elements from `iterable`.
Examples
--------
.. sourcecode:: pycon
>>> print(list(grouper(range(10), 3)))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
"""
it = iter(iterable)
while True:
if as_numpy:
# convert each document to a 2d numpy array (~6x faster when transmitting
# chunk data over the wire, in Pyro)
wrapped_chunk = [[np.array(doc, dtype=dtype) for doc in itertools.islice(it, int(chunksize))]]
else:
wrapped_chunk = [list(itertools.islice(it, int(chunksize)))]
if not wrapped_chunk[0]:
break
# memory opt: wrap the chunk and then pop(), to avoid leaving behind a dangling reference
yield wrapped_chunk.pop()
grouper = chunkize_serial
|
SlicedCorpus
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/dotnet.py
|
{
"start": 21059,
"end": 21757
}
|
class ____(DelegatingLexer):
"""
Lexer for highlighting Visual Basic.net within ASP.NET pages.
"""
name = 'aspx-vb'
aliases = ['aspx-vb']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
return 0.15
# Very close to functional.OcamlLexer
|
VbNetAspxLexer
|
python
|
walkccc__LeetCode
|
solutions/1634. Add Two Polynomials Represented as Linked Lists/1634.py
|
{
"start": 183,
"end": 1125
}
|
class ____:
def addPoly(self, poly1: 'PolyNode', poly2: 'PolyNode') -> 'PolyNode':
dummy = PolyNode()
curr = dummy
p = poly1 # poly1's pointer
q = poly2 # poly2's pointer
while p and q:
if p.power > q.power:
curr.next = PolyNode(p.coefficient, p.power)
curr = curr.next
p = p.next
elif p.power < q.power:
curr.next = PolyNode(q.coefficient, q.power)
curr = curr.next
q = q.next
else: # p.power == q.power
sumCoefficient = p.coefficient + q.coefficient
if sumCoefficient != 0:
curr.next = PolyNode(sumCoefficient, p.power)
curr = curr.next
p = p.next
q = q.next
while p:
curr.next = PolyNode(p.coefficient, p.power)
curr = curr.next
p = p.next
while q:
curr.next = PolyNode(q.coefficient, q.power)
curr = curr.next
q = q.next
return dummy.next
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/1231. Divide Chocolate/1231.py
|
{
"start": 0,
"end": 611
}
|
class ____:
def maximizeSweetness(self, sweetness: list[int], k: int) -> int:
l = len(sweetness) // (k + 1)
r = sum(sweetness) // (k + 1)
def canEat(m: int) -> bool:
"""
Returns True if can eat m sweetness (the minimum sweetness of each piece).
"""
pieces = 0
summ = 0 # the running sum
for s in sweetness:
summ += s
if summ >= m:
pieces += 1
summ = 0
return pieces > k
while l < r:
m = (l + r) // 2
if canEat(m):
l = m + 1
else:
r = m
return l if canEat(l) else l - 1
|
Solution
|
python
|
eth-brownie__brownie
|
brownie/project/build.py
|
{
"start": 947,
"end": 7260
}
|
class ____:
"""Methods for accessing and manipulating a project's contract build data."""
def __init__(self, sources: Sources) -> None:
self._sources: Final = sources
self._contracts: Final[Dict[ContractName, ContractBuildJson]] = {}
self._interfaces: Final[Dict[ContractName, InterfaceBuildJson]] = {}
def _add_contract(
self,
build_json: ContractBuildJson,
alias: Optional[ContractName] = None,
) -> None:
contract_name = alias or build_json["contractName"]
if contract_name in self._contracts and build_json["type"] == "interface":
return
if build_json["sourcePath"].startswith("interface"):
# interfaces should generate artifact in /build/interfaces/ not /build/contracts/
return
self._contracts[contract_name] = build_json
if "pcMap" not in build_json:
# no pcMap means build artifact is for an interface
return
pc_map: Dict[int | str, ProgramCounter] = build_json["pcMap"] # type: ignore [assignment]
if "0" in pc_map:
build_json["pcMap"] = PCMap({Count(int(k)): pc_map[k] for k in pc_map})
self._generate_revert_map(pc_map, build_json["allSourcePaths"], build_json["language"])
def _add_interface(self, build_json: InterfaceBuildJson) -> None:
contract_name = build_json["contractName"]
self._interfaces[contract_name] = build_json
def _generate_revert_map(
self,
pcMap: Dict[int | str, ProgramCounter],
source_map: Dict[str, str],
language: Language,
) -> None:
# Adds a contract's dev revert strings to the revert map and it's pcMap
marker = "//" if language == "Solidity" else "#"
for pc, data in pcMap.items():
op = data["op"]
if op in ("REVERT", "INVALID") or "jump_revert" in data:
path = data.get("path")
if path is None:
continue
path_str = source_map[path]
if "dev" not in data:
if "fn" not in data or "first_revert" in data:
_revert_map[pc] = False
continue
try:
revert_str = self._sources.get(path_str)[data["offset"][1] :]
revert_str = revert_str[: revert_str.index("\n")]
revert_str = revert_str[revert_str.index(marker) + len(marker) :].strip()
if revert_str.startswith("dev:"):
data["dev"] = revert_str
except (KeyError, ValueError):
pass
msg = "" if op == "REVERT" else "invalid opcode"
revert = (
path_str,
data["offset"],
data.get("fn", "<None>"),
data.get("dev", msg),
self._sources,
)
# do not compare the final tuple item in case the same project was loaded twice
if pc not in _revert_map or (
_revert_map[pc] and revert[:-1] == _revert_map[pc][:-1]
):
_revert_map[pc] = revert
continue
_revert_map[pc] = False
def _remove_contract(self, contract_name: ContractName) -> None:
key = self._stem(contract_name)
if key in self._contracts:
del self._contracts[key]
def _remove_interface(self, contract_name: ContractName) -> None:
key = self._stem(contract_name)
if key in self._interfaces:
del self._interfaces[key]
def get(self, contract_name: ContractName) -> BuildJson:
"""Returns build data for the given contract name."""
key = self._stem(contract_name)
if key in self._contracts:
return self._contracts[key]
return self._interfaces[key]
def items(
self,
path: Optional[str] = None,
) -> List[Tuple[ContractName, BuildJson]]:
"""Provides an list of tuples as (key,value), similar to calling dict.items.
If a path is given, only contracts derived from that source file are returned."""
items = [*self._contracts.items(), *self._interfaces.items()]
if path is None:
return items
return [(k, v) for k, v in items if v.get("sourcePath") == path]
def contains(self, contract_name: ContractName) -> bool:
"""Checks if the contract name exists in the currently loaded build data."""
stem = self._stem(contract_name)
return stem in self._contracts or stem in self._interfaces
def get_dependents(self, contract_name: ContractName) -> List[ContractName]:
"""Returns a list of contract names that inherit from or link to the given
contract. Used by the compiler when determining which contracts to recompile
based on a changed source file."""
return [k for k, v in self._contracts.items() if contract_name in v.get("dependencies", [])]
def _stem(self, contract_name: ContractName) -> ContractName:
return contract_name.replace(".json", "") # type: ignore [return-value]
def _get_dev_revert(pc: int) -> Optional[str]:
# Given the program counter from a stack trace that caused a transaction
# to revert, returns the commented dev string (if any)
if pc not in _revert_map:
return None
revert = _revert_map[pc]
if revert is False:
return None
return revert[3]
def _get_error_source_from_pc(
pc: int, pad: int = 3
) -> Tuple[Optional[str], Optional[Tuple[int, int]], Optional[str], Optional[str]]:
# Given the program counter from a stack trace that caused a transaction
# to revert, returns the highlighted relevant source code and the method name.
if pc not in _revert_map or _revert_map[pc] is False:
return (None,) * 4
revert: Tuple[str, Offset, str, str, Sources] = _revert_map[pc] # type: ignore [assignment]
source = revert[4].get(revert[0]) # type: ignore [index]
highlight, linenos = highlight_source(source, revert[1], pad=pad)
return highlight, linenos, revert[0], revert[2] # type: ignore [index]
|
Build
|
python
|
sanic-org__sanic
|
sanic/signals.py
|
{
"start": 4114,
"end": 14827
}
|
class ____(BaseRouter):
"""A `BaseRouter` that is used to dispatch signals to handlers"""
def __init__(self) -> None:
super().__init__(
delimiter=".",
route_class=Signal,
group_class=SignalGroup,
stacking=True,
)
self.allow_fail_builtin = True
self.ctx.loop = None
@staticmethod
def format_event(event: Union[str, Enum]) -> str:
"""Ensure event strings in proper format
Args:
event (str): event string
Returns:
str: formatted event string
"""
if isinstance(event, Enum):
event = str(event.value)
if "." not in event:
event = GENERIC_SIGNAL_FORMAT % event
return event
def get( # type: ignore
self,
event: Union[str, Enum],
condition: Optional[dict[str, str]] = None,
):
"""Get the handlers for a signal
Args:
event (str): The event to get the handlers for
condition (Optional[Dict[str, str]], optional): A dictionary of conditions to match against the handlers. Defaults to `None`.
Returns:
Tuple[SignalGroup, List[SignalHandler], Dict[str, Any]]: A tuple of the `SignalGroup` that matched, a list of the handlers that matched, and a dictionary of the params that matched
Raises:
NotFound: If no handlers are found
""" # noqa: E501
event = self.format_event(event)
extra = condition or {}
try:
group, param_basket = self.find_route(
f".{event}",
self.DEFAULT_METHOD,
self,
{"__params__": {}, "__matches__": {}},
extra=extra,
)
except NotFound:
message = "Could not find signal %s"
terms: list[Union[str, Optional[dict[str, str]]]] = [event]
if extra:
message += " with %s"
terms.append(extra)
raise NotFound(message % tuple(terms))
# Regex routes evaluate and can extract params directly. They are set
# on param_basket["__params__"]
params = param_basket["__params__"]
if not params:
# If param_basket["__params__"] does not exist, we might have
# param_basket["__matches__"], which are indexed based matches
# on path segments. They should already be cast types.
params = {
param.name: param_basket["__matches__"][idx]
for idx, param in group.params.items()
}
return group, [route.handler for route in group], params
async def _dispatch(
self,
event: str,
context: Optional[dict[str, Any]] = None,
condition: Optional[dict[str, str]] = None,
fail_not_found: bool = True,
reverse: bool = False,
) -> Any:
event = self.format_event(event)
try:
group, handlers, params = self.get(event, condition=condition)
except NotFound as e:
is_reserved = event.split(".", 1)[0] in RESERVED_NAMESPACES
if fail_not_found and (not is_reserved or self.allow_fail_builtin):
raise e
else:
if self.ctx.app.debug and self.ctx.app.state.verbosity >= 1:
error_logger.warning(str(e))
return None
if context:
params.update(context)
params.pop("__trigger__", None)
signals = group.routes
if not reverse:
signals = signals[::-1]
try:
for signal in signals:
for waiter in signal.ctx.waiters:
if waiter.matches(event, condition):
waiter.future.set_result(dict(params))
for signal in signals:
requirements = signal.extra.requirements
if (
(condition is None and signal.ctx.exclusive is False)
or (condition is None and not requirements)
or (condition == requirements)
) and (signal.ctx.trigger or event == signal.ctx.definition):
maybe_coroutine = signal.handler(**params)
if isawaitable(maybe_coroutine):
retval = await maybe_coroutine
if retval:
return retval
elif maybe_coroutine:
return maybe_coroutine
return None
except Exception as e:
if self.ctx.app.debug and self.ctx.app.state.verbosity >= 1:
error_logger.exception(e)
if event != Event.SERVER_EXCEPTION_REPORT.value:
await self.dispatch(
Event.SERVER_EXCEPTION_REPORT.value,
context={"exception": e},
)
setattr(e, "__dispatched__", True)
raise e
async def dispatch(
self,
event: Union[str, Enum],
*,
context: Optional[dict[str, Any]] = None,
condition: Optional[dict[str, str]] = None,
fail_not_found: bool = True,
inline: bool = False,
reverse: bool = False,
) -> Union[asyncio.Task, Any]:
"""Dispatch a signal to all handlers that match the event
Args:
event (str): The event to dispatch
context (Optional[Dict[str, Any]], optional): A dictionary of context to pass to the handlers. Defaults to `None`.
condition (Optional[Dict[str, str]], optional): A dictionary of conditions to match against the handlers. Defaults to `None`.
fail_not_found (bool, optional): Whether to raise an exception if no handlers are found. Defaults to `True`.
inline (bool, optional): Whether to run the handlers inline. An inline run means it will return the value of the signal handler. When `False` (which is the default) the signal handler will run in a background task. Defaults to `False`.
reverse (bool, optional): Whether to run the handlers in reverse order. Defaults to `False`.
Returns:
Union[asyncio.Task, Any]: If `inline` is `True` then the return value of the signal handler will be returned. If `inline` is `False` then an `asyncio.Task` will be returned.
Raises:
RuntimeError: If the signal is dispatched outside of an event loop
""" # noqa: E501
event = self.format_event(event)
dispatch = self._dispatch(
event,
context=context,
condition=condition,
fail_not_found=fail_not_found and inline,
reverse=reverse,
)
logger.debug(f"Dispatching signal: {event}", extra={"verbosity": 1})
if inline:
return await dispatch
task = asyncio.get_running_loop().create_task(dispatch)
await asyncio.sleep(0)
return task
def get_waiter(
self,
event: Union[str, Enum],
condition: Optional[dict[str, Any]] = None,
exclusive: bool = True,
) -> Optional[SignalWaiter]:
event_definition = self.format_event(event)
name, trigger, _ = self._get_event_parts(event_definition)
signal = cast(Signal, self.name_index.get(name))
if not signal:
return None
if event_definition.endswith(".*") and not trigger:
trigger = "*"
return SignalWaiter(
signal=signal,
event_definition=event_definition,
trigger=trigger,
requirements=condition,
exclusive=bool(exclusive),
)
def _get_event_parts(self, event: str) -> tuple[str, str, str]:
parts = self._build_event_parts(event)
if parts[2].startswith("<"):
name = ".".join([*parts[:-1], "*"])
trigger = self._clean_trigger(parts[2])
else:
name = event
trigger = ""
if not trigger:
event = ".".join([*parts[:2], "<__trigger__>"])
return name, trigger, event
def add( # type: ignore
self,
handler: SignalHandler,
event: Union[str, Enum],
condition: Optional[dict[str, Any]] = None,
exclusive: bool = True,
*,
priority: int = 0,
) -> Signal:
event_definition = self.format_event(event)
name, trigger, event_string = self._get_event_parts(event_definition)
signal = super().add(
event_string,
handler,
name=name,
append=True,
priority=priority,
) # type: ignore
signal.ctx.exclusive = exclusive
signal.ctx.trigger = trigger
signal.ctx.definition = event_definition
signal.extra.requirements = condition
return cast(Signal, signal)
def finalize(self, do_compile: bool = True, do_optimize: bool = False):
"""Finalize the router and compile the routes
Args:
do_compile (bool, optional): Whether to compile the routes. Defaults to `True`.
do_optimize (bool, optional): Whether to optimize the routes. Defaults to `False`.
Returns:
SignalRouter: The router
Raises:
RuntimeError: If the router is finalized outside of an event loop
""" # noqa: E501
self.add(_blank, "sanic.__signal__.__init__")
try:
self.ctx.loop = asyncio.get_running_loop()
except RuntimeError:
raise RuntimeError("Cannot finalize signals outside of event loop")
for signal in self.routes:
signal.ctx.waiters = deque()
return super().finalize(do_compile=do_compile, do_optimize=do_optimize)
def _build_event_parts(self, event: str) -> tuple[str, str, str]:
    """Validate ``event`` and return its three delimiter-separated parts.

    Raises:
        InvalidSignal: If the event does not have exactly three parts, if
            either of the first two parts is dynamic, or if it declares a
            non-dynamic event inside a reserved namespace that is not in
            the reserved set.
    """
    parts = path_to_parts(event, self.delimiter)

    malformed = (
        len(parts) != 3
        or parts[0].startswith("<")
        or parts[1].startswith("<")
    )
    if malformed:
        raise InvalidSignal("Invalid signal event: %s" % event)

    namespace, _, action = parts
    if namespace in RESERVED_NAMESPACES:
        is_dynamic = action.startswith("<") and action.endswith(">")
        if event not in RESERVED_NAMESPACES[namespace] and not is_dynamic:
            raise InvalidSignal(
                "Cannot declare reserved signal event: %s" % event
            )

    return parts
def _clean_trigger(self, trigger: str) -> str:
    """Strip a dynamic segment down to its bare trigger name.

    Removes the surrounding angle brackets and anything after the first
    ``:`` (the parameter annotation), e.g. ``"<foo:str>"`` -> ``"foo"``.

    Args:
        trigger: A dynamic path segment of the form ``<name>`` or
            ``<name:annotation>``.

    Returns:
        The trigger name without brackets or annotation.
    """
    # Drop the surrounding "<" and ">".
    trigger = trigger[1:-1]
    # partition() tolerates any number of ":" characters, whereas the
    # previous two-way unpack of split(":") raised ValueError when the
    # annotation itself contained a colon.
    trigger, _, _ = trigger.partition(":")
    return trigger
|
SignalRouter
|
python
|
getsentry__sentry
|
src/sentry/deletions/defaults/sentry_app_installation_token.py
|
{
"start": 289,
"end": 787
}
|
class ____(ModelDeletionTask[SentryAppInstallationToken]):
    """Deletion task for SentryAppInstallationToken rows.

    Cascades deletion to the backing ApiToken and intentionally skips the
    "deletion in progress" bookkeeping step.
    """

    def get_child_relations(self, instance: SentryAppInstallationToken) -> list[BaseRelation]:
        # Local import, presumably to avoid a circular import at module load.
        from sentry.models.apitoken import ApiToken

        # The linked ApiToken is deleted via its dedicated deletion task.
        return [
            ModelRelation(ApiToken, {"id": instance.api_token_id}, task=ModelApiTokenDeletionTask),
        ]

    def mark_deletion_in_progress(
        self, instance_list: Sequence[SentryAppInstallationToken]
    ) -> None:
        # No-op: these rows need no in-progress marker before deletion.
        pass
|
SentryAppInstallationTokenDeletionTask
|
python
|
ray-project__ray
|
python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py
|
{
"start": 29127,
"end": 33343
}
|
class ____:
"""Comprehensive tests for the PrefixTree"""
def test_tree_structure_multiple_insertions(self, tree: PrefixTree) -> None:
    """Test tree structure after multiple insertions.

    Inserting "helloworld", "hellothere", "hellothomas" should split the
    tree at the shared prefixes "hello" and "th"; every node's text,
    parent, per-tenant access times, and child edges are checked below.
    """
    tree.add_tenants(["tenant_1", "tenant_2"], 0)
    tree.insert("helloworld", "tenant_1", 1)
    tree.insert("hellothere", "tenant_2", 2)
    tree.insert("hellothomas", "tenant_2", 3)
    # Access tree directly
    root: Node = tree.root

    # Test tree structure - validate each node
    # Root node: carries each tenant's most recent access time.
    assert root.text == ""
    assert root.parent is None
    assert root.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 3}
    assert set(root.edge_label_to_child.keys()) == {"h"}

    # Hello node: common "hello" prefix shared by all three inserts.
    hello_node: Node = root.edge_label_to_child["h"]
    assert hello_node.text == "hello"
    assert hello_node.parent.text == ""
    assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 3}
    assert set(hello_node.edge_label_to_child.keys()) == {"w", "t"}

    # World node: tenant_1's unique suffix.
    world_node: Node = hello_node.edge_label_to_child["w"]
    assert world_node.text == "world"
    assert world_node.parent.text == "hello"
    assert world_node.tenant_to_last_access_time == {"tenant_1": 1}
    assert set(world_node.edge_label_to_child.keys()) == set()

    # Th node: split point between "there" and "thomas" (tenant_2 only).
    th_node: Node = hello_node.edge_label_to_child["t"]
    assert th_node.text == "th"
    assert th_node.parent.text == "hello"
    assert th_node.tenant_to_last_access_time == {"tenant_2": 3}
    assert set(th_node.edge_label_to_child.keys()) == {"e", "o"}

    # Ere node: suffix of "hellothere" (timestamp 2).
    ere_node: Node = th_node.edge_label_to_child["e"]
    assert ere_node.text == "ere"
    assert ere_node.parent.text == "th"
    assert ere_node.tenant_to_last_access_time == {"tenant_2": 2}
    assert set(ere_node.edge_label_to_child.keys()) == set()

    # Omas node: suffix of "hellothomas" (timestamp 3).
    omas_node: Node = th_node.edge_label_to_child["o"]
    assert omas_node.text == "omas"
    assert omas_node.parent.text == "th"
    assert omas_node.tenant_to_last_access_time == {"tenant_2": 3}
    assert set(omas_node.edge_label_to_child.keys()) == set()
def test_multiple_evictions_maintains_lru_order(self, tree: PrefixTree) -> None:
    """Test multiple evictions maintain LRU order.

    After seeding the same three strings as the structure test, evicting
    one tenant must not disturb the other's LRU ordering, and successive
    evictions must remove oldest-timestamp nodes first.
    """
    tree.add_tenants(["tenant_1", "tenant_2"], 0)
    tree.insert("helloworld", "tenant_1", 1)
    tree.insert("hellothere", "tenant_2", 2)
    tree.insert("hellothomas", "tenant_2", 3)
    # Initial char counts: "helloworld" = 10, "hellothere"+"omas" split = 14.
    assert tree.tenant_to_char_count == {"tenant_1": 10, "tenant_2": 14}
    assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello", "world"]
    assert get_lru_texts_from_tree(tree, "tenant_2") == [
        "",
        "omas",
        "th",
        "hello",
        "ere",
    ]
    # Eviction 1 (tenant_1): min_remove_size=1. "hello" and "world" removed.
    evicted_1 = tree.evict_tenant_by_lru("tenant_1", 1)
    assert evicted_1 == 10
    assert tree.tenant_to_char_count == {"tenant_1": 0, "tenant_2": 14}
    assert get_lru_texts_from_tree(tree, "tenant_1") == [""]
    assert get_lru_texts_from_tree(tree, "tenant_2") == [
        "",
        "omas",
        "th",
        "hello",
        "ere",
    ]  # T2 unchanged
    # Eviction 2 (tenant_2): min_remove_size=1. "ere" is oldest timestamp, removed.
    evicted_2 = tree.evict_tenant_by_lru("tenant_2", 1)
    assert evicted_2 == 3  # "ere" is 3 chars
    assert tree.tenant_to_char_count == {"tenant_1": 0, "tenant_2": 11}  # 14 - 3
    assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "omas", "th", "hello"]
    # Eviction 3 (tenant_2): min_remove_size=1. "omas"(ts3), "th"(ts3), "hello"(ts3) removed.
    evicted_3 = tree.evict_tenant_by_lru("tenant_2", 1)
    assert evicted_3 == 11  # 4+2+5 chars
    assert tree.tenant_to_char_count == {"tenant_1": 0, "tenant_2": 0}
    assert get_lru_texts_from_tree(tree, "tenant_2") == [""]
@pytest.mark.asyncio
|
TestPrefixTreeComprehensive
|
python
|
getsentry__sentry
|
src/sentry/sentry_metrics/querying/units.py
|
{
"start": 3011,
"end": 3163
}
|
class ____(UnitMetadata):
    """
    Represents the unit metadata of a QueryExpression with no unit.

    Marker subclass: it adds no fields or behavior beyond the base
    UnitMetadata, so its type alone conveys "no unit".
    """

    pass
@dataclass(frozen=True)
|
WithNoUnit
|
python
|
huggingface__transformers
|
src/transformers/models/textnet/modeling_textnet.py
|
{
"start": 12706,
"end": 15087
}
|
class ____(TextNetPreTrainedModel, BackboneMixin):
    # TextNet has no attention layers, so no attentions can be returned.
    has_attentions = False

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.textnet = TextNetModel(config)
        self.num_features = config.hidden_sizes

        # initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> Union[tuple[tuple], BackboneOutput]:
        r"""
        Examples:

        ```python
        >>> import torch
        >>> import requests
        >>> from PIL import Image
        >>> from transformers import AutoImageProcessor, AutoBackbone

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> processor = AutoImageProcessor.from_pretrained("czczup/textnet-base")
        >>> model = AutoBackbone.from_pretrained("czczup/textnet-base")

        >>> inputs = processor(image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        # Hidden states are always requested internally: the per-stage
        # feature maps are selected from them below.
        outputs = self.textnet(pixel_values, output_hidden_states=True, return_dict=return_dict)

        hidden_states = outputs.hidden_states if return_dict else outputs[2]

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                # `hidden_states` already holds outputs[2] here (return_dict
                # is False), so reuse it instead of recomputing.
                output += (hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
__all__ = ["TextNetBackbone", "TextNetModel", "TextNetPreTrainedModel", "TextNetForImageClassification"]
|
TextNetBackbone
|
python
|
huggingface__transformers
|
src/transformers/models/bridgetower/modeling_bridgetower.py
|
{
"start": 2010,
"end": 3149
}
|
class ____(ModelOutput):
    r"""
    text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_size)`):
        Sequence of hidden-states at the text output of the last layer of the model.
    image_features (`torch.FloatTensor` of shape `(batch_size, image_sequence_length, hidden_size)`):
        Sequence of hidden-states at the image output of the last layer of the model.
    pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size x 2)`):
        Concatenation of last layer hidden-state of the first token of the text and image sequence (classification
        token), respectively, after further processing through layers used for auxiliary pretraining tasks.
    """

    text_features: Optional[torch.FloatTensor] = None
    image_features: Optional[torch.FloatTensor] = None
    pooler_output: Optional[torch.FloatTensor] = None
    # NOTE(review): presumably the standard ModelOutput extras (per-layer
    # hidden states / attention maps), populated only when requested —
    # confirm against the producing model's forward().
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of ['BridgeTowerForContrastiveLearning']
"""
)
|
BridgeTowerModelOutput
|
python
|
spack__spack
|
lib/spack/spack/bootstrap/core.py
|
{
"start": 2945,
"end": 4923
}
|
class ____:
    """Interface for "core" software bootstrappers"""

    config_scope_name = ""

    def __init__(self, conf: ConfigDictionary) -> None:
        self.conf = conf
        self.name = conf["name"]
        self.metadata_dir = spack.util.path.canonicalize_path(conf["metadata"])

        # A relative path in the configuration is resolved against the
        # metadata directory before being turned into a mirror fetch URL.
        url = conf["info"]["url"]
        if spack.util.url.is_path_instead_of_url(url) and not os.path.isabs(url):
            url = os.path.join(self.metadata_dir, url)
        self.url = spack.mirrors.mirror.Mirror(url).fetch_url

    @property
    def mirror_scope(self) -> spack.config.InternalConfigScope:
        """Mirror scope to be pushed onto the bootstrapping configuration when using
        this bootstrapper.
        """
        mirror_config = {"mirrors:": {self.name: self.url}}
        return spack.config.InternalConfigScope(self.config_scope_name, mirror_config)

    def try_import(self, module: str, abstract_spec_str: str) -> bool:
        """Try to import a Python module from a spec satisfying the abstract spec
        passed as argument.

        Args:
            module: Python module name to try importing
            abstract_spec_str: abstract spec that can provide the Python module

        Return:
            True if the Python module could be imported, False otherwise
        """
        # Base implementation: subclasses provide the real logic.
        return False

    def try_search_path(self, executables: Tuple[str], abstract_spec_str: str) -> bool:
        """Try to search some executables in the prefix of specs satisfying the abstract
        spec passed as argument.

        Args:
            executables: executables to be found
            abstract_spec_str: abstract spec that can provide the Python module

        Return:
            True if the executables are found, False otherwise
        """
        # Base implementation: subclasses provide the real logic.
        return False
@bootstrapper(bootstrapper_type="buildcache")
|
Bootstrapper
|
python
|
django__django
|
tests/admin_inlines/tests.py
|
{
"start": 55235,
"end": 59163
}
|
class ____(TestCase):
    """A user with only the "view" permission on Poll (but full permissions
    on Question) must see the Poll change view entirely read-only: no add
    view, no POSTs, no editable widgets, no delete/save controls."""

    @classmethod
    def setUpTestData(cls):
        # Staff user: view-only on Poll, all *question permissions.
        cls.user = User.objects.create_user(
            "testing", password="password", is_staff=True
        )
        cls.user.user_permissions.add(
            Permission.objects.get(
                codename="view_poll",
                content_type=ContentType.objects.get_for_model(Poll),
            )
        )
        cls.user.user_permissions.add(
            *Permission.objects.filter(
                codename__endswith="question",
                content_type=ContentType.objects.get_for_model(Question),
            ).values_list("pk", flat=True)
        )
        cls.poll = Poll.objects.create(name="Survey")
        cls.add_url = reverse("admin:admin_inlines_poll_add")
        cls.change_url = reverse("admin:admin_inlines_poll_change", args=(cls.poll.id,))

    def setUp(self):
        self.client.force_login(self.user)

    def test_add_url_not_allowed(self):
        """Both GET and POST to the add view are forbidden."""
        response = self.client.get(self.add_url)
        self.assertEqual(response.status_code, 403)
        response = self.client.post(self.add_url, {})
        self.assertEqual(response.status_code, 403)

    def test_post_to_change_url_not_allowed(self):
        """Submitting the change form is forbidden."""
        response = self.client.post(self.change_url, {})
        self.assertEqual(response.status_code, 403)

    def test_get_to_change_url_is_allowed(self):
        """Viewing the change page is allowed."""
        response = self.client.get(self.change_url)
        self.assertEqual(response.status_code, 200)

    def test_main_model_is_rendered_as_read_only(self):
        """The Poll name renders as read-only text, not an editable input."""
        response = self.client.get(self.change_url)
        self.assertContains(
            response, '<div class="readonly">%s</div>' % self.poll.name, html=True
        )
        # Renamed from `input` to avoid shadowing the builtin.
        editable_input = (
            '<input type="text" name="name" value="%s" class="vTextField" '
            'maxlength="40" required id="id_name">'
        )
        self.assertNotContains(response, editable_input % self.poll.name, html=True)

    def test_inlines_are_rendered_as_read_only(self):
        """Inline Question rows render as plain text with no form fields."""
        question = Question.objects.create(
            text="How will this be rendered?", poll=self.poll
        )
        response = self.client.get(self.change_url)
        self.assertContains(
            response, '<td class="field-text"><p>%s</p></td>' % question.text, html=True
        )
        self.assertNotContains(response, 'id="id_question_set-0-text"')
        self.assertNotContains(response, 'id="id_related_objs-0-DELETE"')

    def test_submit_line_shows_only_close_button(self):
        """The submit row offers only Close — no delete/save buttons."""
        response = self.client.get(self.change_url)
        self.assertContains(
            response,
            '<a role="button" href="/admin/admin_inlines/poll/" class="closelink">'
            "Close</a>",
            html=True,
        )
        delete_link = (
            '<a role="button" href="/admin/admin_inlines/poll/%s/delete/" '
            'class="deletelink">Delete</a>'
        )
        self.assertNotContains(response, delete_link % self.poll.id, html=True)
        self.assertNotContains(
            response,
            '<input type="submit" value="Save and add another" name="_addanother">',
        )
        self.assertNotContains(
            response,
            '<input type="submit" value="Save and continue editing" name="_continue">',
        )

    def test_inline_delete_buttons_are_not_shown(self):
        """Existing inline rows expose no DELETE checkbox."""
        Question.objects.create(text="How will this be rendered?", poll=self.poll)
        response = self.client.get(self.change_url)
        self.assertNotContains(
            response,
            '<input type="checkbox" name="question_set-0-DELETE" '
            'id="id_question_set-0-DELETE">',
            html=True,
        )

    def test_extra_inlines_are_not_shown(self):
        """No empty "extra" inline forms are rendered."""
        response = self.client.get(self.change_url)
        self.assertNotContains(response, 'id="id_question_set-0-text"')
@override_settings(ROOT_URLCONF="admin_inlines.urls")
|
TestReadOnlyChangeViewInlinePermissions
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.