language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/common/messaging/tests/unit/common/messaging/triggers/test_msg_queue.py | {
"start": 12355,
"end": 13766
} | class ____:
@pytest.mark.usefixtures("collect_queue_param_deprecation_warning")
@mock.patch(
MESSAGE_QUEUE_PROVIDERS_PATH,
new_callable=mock.PropertyMock,
)
def test_provider_integrations_with_queue(self, _):
trigger_by_queue = MessageQueueTrigger(queue="any queue")
assert trigger_by_queue is not None
@mock.patch(
MESSAGE_QUEUE_PROVIDERS_PATH,
new_callable=mock.PropertyMock,
)
def test_provider_integrations_with_scheme(self, _):
trigger_by_scheme = MessageQueueTrigger(scheme="any scheme")
assert trigger_by_scheme is not None
def test_provider_integrations_validation(self):
"""Test that either queue or scheme parameter must be provided."""
with pytest.raises(ValueError, match="Either `queue` or `scheme` parameter must be provided"):
MessageQueueTrigger()
def test_provider_integrations_deprecation_warning(self):
"""Test that a deprecation warning is raised when using the 'queue' parameter."""
with pytest.warns(
AirflowProviderDeprecationWarning,
match="The `queue` parameter is deprecated and will be removed in future versions. Use the `scheme` parameter instead and pass configuration as keyword arguments to `MessageQueueTrigger`.",
):
MessageQueueTrigger(queue="any queue")
| TestMessageQueueTriggerIntegration |
python | huggingface__transformers | src/transformers/loss/loss_rt_detr.py | {
"start": 5496,
"end": 21956
} | class ____(nn.Module):
"""
This class computes the losses for RTDetr. The process happens in two steps: 1) we compute hungarian assignment
between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth /
prediction (supervise class and box).
Args:
matcher (`DetrHungarianMatcher`):
Module able to compute a matching between targets and proposals.
weight_dict (`Dict`):
Dictionary relating each loss with its weights. These losses are configured in RTDetrConf as
`weight_loss_vfl`, `weight_loss_bbox`, `weight_loss_giou`
losses (`list[str]`):
List of all the losses to be applied. See `get_loss` for a list of all available losses.
alpha (`float`):
Parameter alpha used to compute the focal loss.
gamma (`float`):
Parameter gamma used to compute the focal loss.
eos_coef (`float`):
Relative classification weight applied to the no-object category.
num_classes (`int`):
Number of object categories, omitting the special no-object category.
"""
def __init__(self, config):
super().__init__()
self.matcher = RTDetrHungarianMatcher(config)
self.num_classes = config.num_labels
self.weight_dict = {
"loss_vfl": config.weight_loss_vfl,
"loss_bbox": config.weight_loss_bbox,
"loss_giou": config.weight_loss_giou,
}
self.losses = ["vfl", "boxes"]
self.eos_coef = config.eos_coefficient
empty_weight = torch.ones(config.num_labels + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
self.alpha = config.focal_loss_alpha
self.gamma = config.focal_loss_gamma
def loss_labels_vfl(self, outputs, targets, indices, num_boxes, log=True):
if "pred_boxes" not in outputs:
raise KeyError("No predicted boxes found in outputs")
if "logits" not in outputs:
raise KeyError("No predicted logits found in outputs")
idx = self._get_source_permutation_idx(indices)
src_boxes = outputs["pred_boxes"][idx]
target_boxes = torch.cat([_target["boxes"][i] for _target, (_, i) in zip(targets, indices)], dim=0)
ious, _ = box_iou(center_to_corners_format(src_boxes.detach()), center_to_corners_format(target_boxes))
ious = torch.diag(ious)
src_logits = outputs["logits"]
target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_original
target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1]
target_score_original = torch.zeros_like(target_classes, dtype=src_logits.dtype)
target_score_original[idx] = ious.to(target_score_original.dtype)
target_score = target_score_original.unsqueeze(-1) * target
pred_score = F.sigmoid(src_logits.detach())
weight = self.alpha * pred_score.pow(self.gamma) * (1 - target) + target_score
loss = F.binary_cross_entropy_with_logits(src_logits, target_score, weight=weight, reduction="none")
loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes
return {"loss_vfl": loss}
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes]
"""
if "logits" not in outputs:
raise KeyError("No logits were found in the outputs")
src_logits = outputs["logits"]
idx = self._get_source_permutation_idx(indices)
target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_original
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.class_weight)
losses = {"loss_ce": loss_ce}
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
"""
Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not
really a loss, it is intended for logging purposes only. It doesn't propagate gradients.
"""
logits = outputs["logits"]
device = logits.device
target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
losses = {"cardinality_error": card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""
Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must
contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in
format (center_x, center_y, w, h), normalized by the image size.
"""
if "pred_boxes" not in outputs:
raise KeyError("No predicted boxes found in outputs")
idx = self._get_source_permutation_idx(indices)
src_boxes = outputs["pred_boxes"][idx]
target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
losses = {}
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")
losses["loss_bbox"] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(
generalized_box_iou(center_to_corners_format(src_boxes), center_to_corners_format(target_boxes))
)
losses["loss_giou"] = loss_giou.sum() / num_boxes
return losses
def loss_masks(self, outputs, targets, indices, num_boxes):
"""
Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key
"masks" containing a tensor of dim [nb_target_boxes, h, w].
"""
if "pred_masks" not in outputs:
raise KeyError("No predicted masks found in outputs")
source_idx = self._get_source_permutation_idx(indices)
target_idx = self._get_target_permutation_idx(indices)
source_masks = outputs["pred_masks"]
source_masks = source_masks[source_idx]
masks = [t["masks"] for t in targets]
target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
target_masks = target_masks.to(source_masks)
target_masks = target_masks[target_idx]
# upsample predictions to the target size
source_masks = nn.functional.interpolate(
source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False
)
source_masks = source_masks[:, 0].flatten(1)
target_masks = target_masks.flatten(1)
target_masks = target_masks.view(source_masks.shape)
losses = {
"loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes),
"loss_dice": dice_loss(source_masks, target_masks, num_boxes),
}
return losses
def loss_labels_bce(self, outputs, targets, indices, num_boxes, log=True):
src_logits = outputs["logits"]
idx = self._get_source_permutation_idx(indices)
target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_original
target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1]
loss = F.binary_cross_entropy_with_logits(src_logits, target * 1.0, reduction="none")
loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes
return {"loss_bce": loss}
def _get_source_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)])
source_idx = torch.cat([source for (source, _) in indices])
return batch_idx, source_idx
def _get_target_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)])
target_idx = torch.cat([target for (_, target) in indices])
return batch_idx, target_idx
def loss_labels_focal(self, outputs, targets, indices, num_boxes, log=True):
if "logits" not in outputs:
raise KeyError("No logits found in outputs")
src_logits = outputs["logits"]
idx = self._get_source_permutation_idx(indices)
target_classes_original = torch.cat([_target["class_labels"][i] for _target, (_, i) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_original
target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[..., :-1]
loss = sigmoid_focal_loss(src_logits, target, self.alpha, self.gamma)
loss = loss.mean(1).sum() * src_logits.shape[1] / num_boxes
return {"loss_focal": loss}
def get_loss(self, loss, outputs, targets, indices, num_boxes):
loss_map = {
"labels": self.loss_labels,
"cardinality": self.loss_cardinality,
"boxes": self.loss_boxes,
"masks": self.loss_masks,
"bce": self.loss_labels_bce,
"focal": self.loss_labels_focal,
"vfl": self.loss_labels_vfl,
}
if loss not in loss_map:
raise ValueError(f"Loss {loss} not supported")
return loss_map[loss](outputs, targets, indices, num_boxes)
@staticmethod
def get_cdn_matched_indices(dn_meta, targets):
dn_positive_idx, dn_num_group = dn_meta["dn_positive_idx"], dn_meta["dn_num_group"]
num_gts = [len(t["class_labels"]) for t in targets]
device = targets[0]["class_labels"].device
dn_match_indices = []
for i, num_gt in enumerate(num_gts):
if num_gt > 0:
gt_idx = torch.arange(num_gt, dtype=torch.int64, device=device)
gt_idx = gt_idx.tile(dn_num_group)
assert len(dn_positive_idx[i]) == len(gt_idx)
dn_match_indices.append((dn_positive_idx[i], gt_idx))
else:
dn_match_indices.append(
(
torch.zeros(0, dtype=torch.int64, device=device),
torch.zeros(0, dtype=torch.int64, device=device),
)
)
return dn_match_indices
def forward(self, outputs, targets):
"""
This performs the loss computation.
Args:
outputs (`dict`, *optional*):
Dictionary of tensors, see the output specification of the model for the format.
targets (`list[dict]`, *optional*):
List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the
losses applied, see each loss' doc.
"""
outputs_without_aux = {k: v for k, v in outputs.items() if "auxiliary_outputs" not in k}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
# Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["class_labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
num_boxes = torch.clamp(num_boxes, min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
l_dict = self.get_loss(loss, outputs, targets, indices, num_boxes)
l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict}
losses.update(l_dict)
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if "auxiliary_outputs" in outputs:
for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]):
indices = self.matcher(auxiliary_outputs, targets)
for loss in self.losses:
if loss == "masks":
# Intermediate masks losses are too costly to compute, we ignore them.
continue
l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict}
l_dict = {k + f"_aux_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
# In case of cdn auxiliary losses. For rtdetr
if "dn_auxiliary_outputs" in outputs:
if "denoising_meta_values" not in outputs:
raise ValueError(
"The output must have the 'denoising_meta_values` key. Please, ensure that 'outputs' includes a 'denoising_meta_values' entry."
)
indices = self.get_cdn_matched_indices(outputs["denoising_meta_values"], targets)
num_boxes = num_boxes * outputs["denoising_meta_values"]["dn_num_group"]
for i, auxiliary_outputs in enumerate(outputs["dn_auxiliary_outputs"]):
# indices = self.matcher(auxiliary_outputs, targets)
for loss in self.losses:
if loss == "masks":
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes, **kwargs)
l_dict = {k: l_dict[k] * self.weight_dict[k] for k in l_dict if k in self.weight_dict}
l_dict = {k + f"_dn_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
def RTDetrForObjectDetectionLoss(
logits,
labels,
device,
pred_boxes,
config,
outputs_class=None,
outputs_coord=None,
enc_topk_logits=None,
enc_topk_bboxes=None,
denoising_meta_values=None,
**kwargs,
):
criterion = RTDetrLoss(config)
criterion.to(device)
# Second: compute the losses, based on outputs and labels
outputs_loss = {}
outputs_loss["logits"] = logits
outputs_loss["pred_boxes"] = pred_boxes
auxiliary_outputs = None
if config.auxiliary_loss:
if denoising_meta_values is not None:
dn_out_coord, outputs_coord = torch.split(outputs_coord, denoising_meta_values["dn_num_split"], dim=2)
dn_out_class, outputs_class = torch.split(outputs_class, denoising_meta_values["dn_num_split"], dim=2)
auxiliary_outputs = _set_aux_loss(outputs_class[:, :-1].transpose(0, 1), outputs_coord[:, :-1].transpose(0, 1))
outputs_loss["auxiliary_outputs"] = auxiliary_outputs
outputs_loss["auxiliary_outputs"].extend(_set_aux_loss([enc_topk_logits], [enc_topk_bboxes]))
if denoising_meta_values is not None:
outputs_loss["dn_auxiliary_outputs"] = _set_aux_loss(
dn_out_class.transpose(0, 1), dn_out_coord.transpose(0, 1)
)
outputs_loss["denoising_meta_values"] = denoising_meta_values
loss_dict = criterion(outputs_loss, labels)
loss = sum(loss_dict.values())
return loss, loss_dict, auxiliary_outputs
| RTDetrLoss |
python | huggingface__transformers | src/transformers/models/bridgetower/modeling_bridgetower.py | {
"start": 43284,
"end": 49901
} | class ____(BridgeTowerPreTrainedModel):
config: BridgeTowerTextConfig
input_modalities = ("text",)
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = BridgeTowerTextEmbeddings(config)
self.encoder = BridgeTowerTextEncoder(config)
self.pooler = BridgeTowerPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@can_return_tuple
@auto_docstring
# NOTE: bridgetower with its multimodality has a more complicated scheme making records harder
# for now we skip the copies from bert but stay close to the original
# copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if input_ids is not None:
device = input_ids.device
input_shape = input_ids.shape
else:
device = inputs_embeds.device
input_shape = inputs_embeds.shape[:-1]
seq_length = input_shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
attention_mask, encoder_attention_mask = self._create_attention_masks(
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
embedding_output=embedding_output,
encoder_hidden_states=encoder_hidden_states,
cache_position=cache_position,
past_key_values=past_key_values,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
position_ids=position_ids,
**kwargs,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertModel._create_attention_masks
def _create_attention_masks(
self,
attention_mask,
encoder_attention_mask,
embedding_output,
encoder_hidden_states,
cache_position,
past_key_values,
):
if self.config.is_decoder:
attention_mask = create_causal_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
)
else:
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
)
if encoder_attention_mask is not None:
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
return attention_mask, encoder_attention_mask
@auto_docstring(
custom_intro="""
The bare BridgeTower Model transformer outputting BridgeTowerModelOutput object without any specific head on
"""
)
| BridgeTowerTextModel |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/translator.py | {
"start": 4063,
"end": 5235
} | class ____:
"""Represents an Airbyte job, based on data as returned from the API."""
id: int
status: str
type: str
connection_id: str | None = None
start_time: datetime | None = None
last_updated_at: datetime | None = None
duration: str | None = None
bytes_synced: int | None = None
rows_synced: int | None = None
@classmethod
def from_job_details(
cls,
job_details: Mapping[str, Any],
) -> "AirbyteJob":
return cls(
id=job_details["jobId"],
status=job_details["status"],
type=job_details["jobType"],
connection_id=job_details.get("connectionId"),
start_time=datetime.fromisoformat(job_details["startTime"])
if "startTime" in job_details
else None,
last_updated_at=datetime.fromisoformat(job_details["lastUpdatedAt"])
if "lastUpdatedAt" in job_details
else None,
duration=job_details.get("duration"),
bytes_synced=job_details.get("bytesScanned"),
rows_synced=job_details.get("rowsSynced"),
)
@whitelist_for_serdes
@record
| AirbyteJob |
python | dask__distributed | distributed/comm/tcp.py | {
"start": 20408,
"end": 20790
} | class ____(BaseTCPConnector):
prefix = "tls://"
comm_class = TLS
encrypted = True
def _get_connect_args(self, **connection_args):
tls_args = {"ssl_options": _expect_tls_context(connection_args)}
if connection_args.get("server_hostname"):
tls_args["server_hostname"] = connection_args["server_hostname"]
return tls_args
| TLSConnector |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/tools.py | {
"start": 225,
"end": 1447
} | class ____(BaseTool):
"""Tool that is run when invalid tool name is encountered by agent."""
name: str = "invalid_tool"
"""Name of the tool."""
description: str = "Called when tool name is invalid. Suggests valid tool names."
"""Description of the tool."""
@override
def _run(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: CallbackManagerForToolRun | None = None,
) -> str:
"""Use the tool."""
available_tool_names_str = ", ".join(list(available_tool_names))
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
@override
async def _arun(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: AsyncCallbackManagerForToolRun | None = None,
) -> str:
"""Use the tool asynchronously."""
available_tool_names_str = ", ".join(list(available_tool_names))
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
__all__ = ["InvalidTool", "tool"]
| InvalidTool |
python | doocs__leetcode | solution/1300-1399/1392.Longest Happy Prefix/Solution2.py | {
"start": 0,
"end": 434
} | class ____:
def longestPrefix(self, s: str) -> str:
s += "#"
n = len(s)
next = [0] * n
next[0] = -1
i, j = 2, 0
while i < n:
if s[i - 1] == s[j]:
j += 1
next[i] = j
i += 1
elif j:
j = next[j]
else:
next[i] = 0
i += 1
return s[: next[-1]]
| Solution |
python | fsspec__filesystem_spec | fsspec/implementations/smb.py | {
"start": 13770,
"end": 15236
} | class ____:
"""writes to remote temporary file, move on commit"""
def __init__(self, path, temp, mode, port=445, block_size=-1, **kwargs):
self.path = path
self.temp = temp
self.mode = mode
self.block_size = block_size
self.kwargs = kwargs
self.smbfile = None
self._incontext = False
self.port = port
self._open()
def _open(self):
if self.smbfile is None or self.smbfile.closed:
self.smbfile = smbclient.open_file(
self.temp,
self.mode,
port=self.port,
buffering=self.block_size,
**self.kwargs,
)
def commit(self):
"""Move temp file to definitive on success."""
# TODO: use transaction support in SMB protocol
smbclient.replace(self.temp, self.path, port=self.port)
def discard(self):
"""Remove the temp file on failure."""
smbclient.remove(self.temp, port=self.port)
def __fspath__(self):
return self.path
def __iter__(self):
return self.smbfile.__iter__()
def __getattr__(self, item):
return getattr(self.smbfile, item)
def __enter__(self):
self._incontext = True
return self.smbfile.__enter__()
def __exit__(self, exc_type, exc_value, traceback):
self._incontext = False
self.smbfile.__exit__(exc_type, exc_value, traceback)
| SMBFileOpener |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/table.py | {
"start": 1806,
"end": 2155
} | class ____(graphene.ObjectType):
assetKey = graphene.NonNull(GrapheneAssetKey)
columnName = graphene.NonNull(graphene.String)
class Meta:
name = "TableColumnDep"
def __init__(self, column_dep: "TableColumnDep"):
super().__init__(assetKey=column_dep.asset_key, columnName=column_dep.column_name)
| GrapheneTableColumnDep |
python | astropy__astropy | astropy/table/tests/test_table.py | {
"start": 15821,
"end": 16563
} | class ____(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(
table_types.Column(name="b", data=[4, 5, 6, 7])
) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name="b", data=[4, 5])) # data too short
@pytest.mark.usefixtures("table_types")
| TestAddLength |
python | OmkarPathak__pygorithm | tests/test_pathing.py | {
"start": 465,
"end": 1612
} | class ____(TimedTestCase):
def find_path(self, my_graph, v1, v2):
return [ (0, 0), (0, 1), (0, 2), (1, 3), (2, 4), (3, 3), (3, 2), (3, 1), (3, 0) ]
def test_find_path_package_example(self):
# initialize the graph with nodes from (0, 0) to (4, 4)
# with weight corresponding to distance (orthogonal
# is 1, diagonal is sqrt(2))
my_graph = graph.WeightedUndirectedGraph()
my_graph.gridify(5, 1)
# make the graph more interesting by removing along the
# x=2 column except for (2,4)
my_graph.remove_edge((2, 0))
my_graph.remove_edge((2, 1))
my_graph.remove_edge((2, 2))
my_graph.remove_edge((2, 3))
# calculate a path
my_path = self.find_path(my_graph, (0, 0), (3, 0))
# check path:
self.assertIsNotNone(my_path)
total_weight = 0
for i in range(1, len(my_path)):
total_weight += my_graph.get_edge_weight(my_path[i - 1], my_path[i])
self.assertAlmostEqual(9.242640687119284, total_weight)
| SimplePathfindingTestCaseTimed |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/grant_types/test_client_credentials.py | {
"start": 264,
"end": 3281
} | class ____(TestCase):
def setUp(self):
mock_client = mock.MagicMock()
mock_client.user.return_value = 'mocked user'
self.request = Request('http://a.b/path')
self.request.grant_type = 'client_credentials'
self.request.client = mock_client
self.request.scopes = ('mocked', 'scopes')
self.mock_validator = mock.MagicMock()
self.auth = ClientCredentialsGrant(
request_validator=self.mock_validator)
def test_custom_auth_validators_unsupported(self):
authval1, authval2 = mock.Mock(), mock.Mock()
expected = ('ClientCredentialsGrant does not support authorization '
'validators. Use token validators instead.')
with self.assertRaises(ValueError) as caught:
ClientCredentialsGrant(self.mock_validator, pre_auth=[authval1])
self.assertEqual(caught.exception.args[0], expected)
with self.assertRaises(ValueError) as caught:
ClientCredentialsGrant(self.mock_validator, post_auth=[authval2])
self.assertEqual(caught.exception.args[0], expected)
with self.assertRaises(AttributeError):
self.auth.custom_validators.pre_auth.append(authval1)
with self.assertRaises(AttributeError):
self.auth.custom_validators.pre_auth.append(authval2)
def test_custom_token_validators(self):
tknval1, tknval2 = mock.Mock(), mock.Mock()
self.auth.custom_validators.pre_token.append(tknval1)
self.auth.custom_validators.post_token.append(tknval2)
bearer = BearerToken(self.mock_validator)
self.auth.create_token_response(self.request, bearer)
self.assertTrue(tknval1.called)
self.assertTrue(tknval2.called)
def test_create_token_response(self):
bearer = BearerToken(self.mock_validator)
headers, body, status_code = self.auth.create_token_response(
self.request, bearer)
token = json.loads(body)
self.assertEqual(self.mock_validator.save_token.call_count, 1)
self.assertIn('access_token', token)
self.assertIn('token_type', token)
self.assertIn('expires_in', token)
self.assertIn('Content-Type', headers)
self.assertEqual(headers['Content-Type'], 'application/json')
def test_error_response(self):
bearer = BearerToken(self.mock_validator)
self.mock_validator.authenticate_client.return_value = False
headers, body, status_code = self.auth.create_token_response(
self.request, bearer)
self.assertEqual(self.mock_validator.save_token.call_count, 0)
error_msg = json.loads(body)
self.assertIn('error', error_msg)
self.assertEqual(error_msg['error'], 'invalid_client')
self.assertIn('Content-Type', headers)
self.assertEqual(headers['Content-Type'], 'application/json')
def test_validate_token_response(self):
# wrong grant type, scope
pass
| ClientCredentialsGrantTest |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 10313,
"end": 10390
} | class ____(OpcodeWithArg):
_FLAGS = HAS_ARGUMENT
__slots__ = ()
| LIST_APPEND |
python | django__django | tests/gis_tests/geoapp/tests.py | {
"start": 10136,
"end": 25480
} | class ____(TestCase):
fixtures = ["initial"]
def test_disjoint_lookup(self):
"Testing the `disjoint` lookup type."
ptown = City.objects.get(name="Pueblo")
qs1 = City.objects.filter(point__disjoint=ptown.point)
self.assertEqual(7, qs1.count())
qs2 = State.objects.filter(poly__disjoint=ptown.point)
self.assertEqual(1, qs2.count())
self.assertEqual("Kansas", qs2[0].name)
def test_contains_contained_lookups(self):
"Testing the 'contained', 'contains', and 'bbcontains' lookup types."
# Getting Texas, yes we were a country -- once ;)
texas = Country.objects.get(name="Texas")
# Seeing what cities are in Texas, should get Houston and Dallas,
# and Oklahoma City because 'contained' only checks on the
# _bounding box_ of the Geometries.
if connection.features.supports_contained_lookup:
qs = City.objects.filter(point__contained=texas.mpoly)
self.assertEqual(3, qs.count())
cities = ["Houston", "Dallas", "Oklahoma City"]
for c in qs:
self.assertIn(c.name, cities)
# Pulling out some cities.
houston = City.objects.get(name="Houston")
wellington = City.objects.get(name="Wellington")
pueblo = City.objects.get(name="Pueblo")
okcity = City.objects.get(name="Oklahoma City")
lawrence = City.objects.get(name="Lawrence")
# Now testing contains on the countries using the points for
# Houston and Wellington.
tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry
nz = Country.objects.get(
mpoly__contains=wellington.point.hex
) # Query w/EWKBHEX
self.assertEqual("Texas", tx.name)
self.assertEqual("New Zealand", nz.name)
# Testing `contains` on the states using the point for Lawrence.
ks = State.objects.get(poly__contains=lawrence.point)
self.assertEqual("Kansas", ks.name)
# Pueblo and Oklahoma City (even though OK City is within the bounding
# box of Texas) are not contained in Texas or New Zealand.
self.assertEqual(
len(Country.objects.filter(mpoly__contains=pueblo.point)), 0
) # Query w/GEOSGeometry object
self.assertEqual(
len(Country.objects.filter(mpoly__contains=okcity.point.wkt)), 0
) # Query w/WKT
# OK City is contained w/in bounding box of Texas.
if connection.features.supports_bbcontains_lookup:
qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
self.assertEqual(1, len(qs))
self.assertEqual("Texas", qs[0].name)
@skipUnlessDBFeature("supports_crosses_lookup")
def test_crosses_lookup(self):
Track.objects.create(name="Line1", line=LineString([(-95, 29), (-60, 0)]))
self.assertEqual(
Track.objects.filter(
line__crosses=LineString([(-95, 0), (-60, 29)])
).count(),
1,
)
self.assertEqual(
Track.objects.filter(
line__crosses=LineString([(-95, 30), (0, 30)])
).count(),
0,
)
@skipUnlessDBFeature("supports_isvalid_lookup")
def test_isvalid_lookup(self):
invalid_geom = fromstr("POLYGON((0 0, 0 1, 1 1, 1 0, 1 1, 1 0, 0 0))")
State.objects.create(name="invalid", poly=invalid_geom)
qs = State.objects.all()
if connection.ops.oracle:
# Kansas has adjacent vertices with distance 6.99244813842e-12
# which is smaller than the default Oracle tolerance.
qs = qs.exclude(name="Kansas")
self.assertEqual(
State.objects.filter(name="Kansas", poly__isvalid=False).count(), 1
)
self.assertEqual(qs.filter(poly__isvalid=False).count(), 1)
self.assertEqual(qs.filter(poly__isvalid=True).count(), qs.count() - 1)
@skipUnlessGISLookup("left", "right")
def test_left_right_lookups(self):
"Testing the 'left' and 'right' lookup types."
# Left: A << B => true if xmax(A) < xmin(B)
# Right: A >> B => true if xmin(A) > xmax(B)
# See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in
# PostGIS source.
# Getting the borders for Colorado & Kansas
co_border = State.objects.get(name="Colorado").poly
ks_border = State.objects.get(name="Kansas").poly
# Note: Wellington has an 'X' value of 174, so it will not be
# considered to the left of CO.
# These cities should be strictly to the right of the CO border.
cities = [
"Houston",
"Dallas",
"Oklahoma City",
"Lawrence",
"Chicago",
"Wellington",
]
qs = City.objects.filter(point__right=co_border)
self.assertEqual(6, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# These cities should be strictly to the right of the KS border.
cities = ["Chicago", "Wellington"]
qs = City.objects.filter(point__right=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
# Note: Wellington has an 'X' value of 174, so it will not be
# considered
# to the left of CO.
vic = City.objects.get(point__left=co_border)
self.assertEqual("Victoria", vic.name)
cities = ["Pueblo", "Victoria"]
qs = City.objects.filter(point__left=ks_border)
self.assertEqual(2, len(qs))
for c in qs:
self.assertIn(c.name, cities)
@skipUnlessGISLookup("strictly_above", "strictly_below")
def test_strictly_above_below_lookups(self):
dallas = City.objects.get(name="Dallas")
self.assertQuerySetEqual(
City.objects.filter(point__strictly_above=dallas.point).order_by("name"),
["Chicago", "Lawrence", "Oklahoma City", "Pueblo", "Victoria"],
lambda b: b.name,
)
self.assertQuerySetEqual(
City.objects.filter(point__strictly_below=dallas.point).order_by("name"),
["Houston", "Wellington"],
lambda b: b.name,
)
def test_equals_lookups(self):
"Testing the 'same_as' and 'equals' lookup types."
pnt = fromstr("POINT (-95.363151 29.763374)", srid=4326)
c1 = City.objects.get(point=pnt)
c2 = City.objects.get(point__same_as=pnt)
c3 = City.objects.get(point__equals=pnt)
for c in [c1, c2, c3]:
self.assertEqual("Houston", c.name)
@skipUnlessDBFeature("supports_null_geometries")
def test_null_geometries(self):
"Testing NULL geometry support, and the `isnull` lookup type."
# Creating a state with a NULL boundary.
State.objects.create(name="Puerto Rico")
# Querying for both NULL and Non-NULL values.
nullqs = State.objects.filter(poly__isnull=True)
validqs = State.objects.filter(poly__isnull=False)
# Puerto Rico should be NULL (it's a commonwealth unincorporated
# territory)
self.assertEqual(1, len(nullqs))
self.assertEqual("Puerto Rico", nullqs[0].name)
# GeometryField=None is an alias for __isnull=True.
self.assertCountEqual(State.objects.filter(poly=None), nullqs)
self.assertCountEqual(State.objects.exclude(poly=None), validqs)
# The valid states should be Colorado & Kansas
self.assertEqual(2, len(validqs))
state_names = [s.name for s in validqs]
self.assertIn("Colorado", state_names)
self.assertIn("Kansas", state_names)
# Saving another commonwealth w/a NULL geometry.
nmi = State.objects.create(name="Northern Mariana Islands", poly=None)
self.assertIsNone(nmi.poly)
# Assigning a geometry and saving -- then UPDATE back to NULL.
nmi.poly = "POLYGON((0 0,1 0,1 1,1 0,0 0))"
nmi.save()
State.objects.filter(name="Northern Mariana Islands").update(poly=None)
self.assertIsNone(State.objects.get(name="Northern Mariana Islands").poly)
@skipUnlessDBFeature(
"supports_null_geometries", "supports_crosses_lookup", "supports_relate_lookup"
)
def test_null_geometries_excluded_in_lookups(self):
"""NULL features are excluded in spatial lookup functions."""
null = State.objects.create(name="NULL", poly=None)
queries = [
("equals", Point(1, 1)),
("disjoint", Point(1, 1)),
("touches", Point(1, 1)),
("crosses", LineString((0, 0), (1, 1), (5, 5))),
("within", Point(1, 1)),
("overlaps", LineString((0, 0), (1, 1), (5, 5))),
("contains", LineString((0, 0), (1, 1), (5, 5))),
("intersects", LineString((0, 0), (1, 1), (5, 5))),
("relate", (Point(1, 1), "T*T***FF*")),
("same_as", Point(1, 1)),
("exact", Point(1, 1)),
("coveredby", Point(1, 1)),
("covers", Point(1, 1)),
]
for lookup, geom in queries:
with self.subTest(lookup=lookup):
self.assertNotIn(
null, State.objects.filter(**{"poly__%s" % lookup: geom})
)
def test_wkt_string_in_lookup(self):
# Valid WKT strings don't emit error logs.
with self.assertNoLogs("django.contrib.gis", "ERROR"):
State.objects.filter(poly__intersects="LINESTRING(0 0, 1 1, 5 5)")
@skipUnlessGISLookup("coveredby")
def test_coveredby_lookup(self):
poly = Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))
state = State.objects.create(name="Test", poly=poly)
small_poly = Polygon(LinearRing((0, 0), (1, 4), (4, 4), (4, 1), (0, 0)))
qs = State.objects.filter(poly__coveredby=small_poly)
self.assertSequenceEqual(qs, [])
large_poly = Polygon(LinearRing((0, 0), (-1, 6), (6, 6), (6, -1), (0, 0)))
qs = State.objects.filter(poly__coveredby=large_poly)
self.assertSequenceEqual(qs, [state])
if not connection.ops.oracle:
# On Oracle, COVEREDBY doesn't match for EQUAL objects.
qs = State.objects.filter(poly__coveredby=poly)
self.assertSequenceEqual(qs, [state])
@skipUnlessGISLookup("covers")
def test_covers_lookup(self):
poly = Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))
state = State.objects.create(name="Test", poly=poly)
small_poly = Polygon(LinearRing((0, 0), (1, 4), (4, 4), (4, 1), (0, 0)))
qs = State.objects.filter(poly__covers=small_poly)
self.assertSequenceEqual(qs, [state])
large_poly = Polygon(LinearRing((-1, -1), (-1, 6), (6, 6), (6, -1), (-1, -1)))
qs = State.objects.filter(poly__covers=large_poly)
self.assertSequenceEqual(qs, [])
if not connection.ops.oracle:
# On Oracle, COVERS doesn't match for EQUAL objects.
qs = State.objects.filter(poly__covers=poly)
self.assertSequenceEqual(qs, [state])
@skipUnlessDBFeature("supports_relate_lookup")
def test_relate_lookup(self):
"Testing the 'relate' lookup type."
# To make things more interesting, we will have our Texas reference
# point in different SRIDs.
pnt1 = fromstr("POINT (649287.0363174 4177429.4494686)", srid=2847)
pnt2 = fromstr("POINT(-98.4919715741052 29.4333344025053)", srid=4326)
# Not passing in a geometry as first param raises a TypeError when
# initializing the QuerySet.
with self.assertRaises(ValueError):
Country.objects.filter(mpoly__relate=(23, "foo"))
# Making sure the right exception is raised for the given
# bad arguments.
for bad_args, e in [
((pnt1, 0), ValueError),
((pnt2, "T*T***FF*", 0), ValueError),
]:
qs = Country.objects.filter(mpoly__relate=bad_args)
with self.assertRaises(e):
qs.count()
contains_mask = "T*T***FF*"
within_mask = "T*F**F***"
intersects_mask = "T********"
# Relate works differently on Oracle.
if connection.ops.oracle:
contains_mask = "contains"
within_mask = "inside"
# TODO: This is not quite the same as the PostGIS mask above
intersects_mask = "overlapbdyintersect"
# Testing contains relation mask.
if connection.features.supports_transform:
self.assertEqual(
Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name,
"Texas",
)
self.assertEqual(
"Texas", Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name
)
# Testing within relation mask.
ks = State.objects.get(name="Kansas")
self.assertEqual(
"Lawrence",
# Remove ".filter(name="Lawrence")" once PostGIS 3.5.4 is released.
# https://lists.osgeo.org/pipermail/postgis-devel/2025-July/030581.html
City.objects.filter(name="Lawrence")
.get(point__relate=(ks.poly, within_mask))
.name,
)
# Testing intersection relation mask.
if not connection.ops.oracle:
if connection.features.supports_transform:
self.assertEqual(
Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name,
"Texas",
)
self.assertEqual(
"Texas", Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name
)
self.assertEqual(
"Lawrence",
City.objects.get(point__relate=(ks.poly, intersects_mask)).name,
)
# With a complex geometry expression
mask = "anyinteract" if connection.ops.oracle else within_mask
self.assertFalse(
City.objects.exclude(
point__relate=(functions.Union("point", "point"), mask)
)
)
def test_gis_lookups_with_complex_expressions(self):
multiple_arg_lookups = {
"dwithin",
"relate",
} # These lookups are tested elsewhere.
lookups = connection.ops.gis_operators.keys() - multiple_arg_lookups
self.assertTrue(lookups, "No lookups found")
for lookup in lookups:
with self.subTest(lookup):
City.objects.filter(
**{"point__" + lookup: functions.Union("point", "point")}
).exists()
def test_subquery_annotation(self):
multifields = MultiFields.objects.create(
city=City.objects.create(point=Point(1, 1)),
point=Point(2, 2),
poly=Polygon.from_bbox((0, 0, 2, 2)),
)
qs = MultiFields.objects.annotate(
city_point=Subquery(
City.objects.filter(
id=OuterRef("city"),
).values("point")
),
).filter(
city_point__within=F("poly"),
)
self.assertEqual(qs.get(), multifields)
| GeoLookupTest |
python | dagster-io__dagster | examples/docs_projects/project_dagster_modal_pipes/src/modal_project/transcribe.py | {
"start": 289,
"end": 6982
} | class ____(TypedDict):
text: str
start: float
end: float
logger = config.get_logger(__name__)
def coalesce_short_transcript_segments(
segments: list[Segment],
) -> list[Segment]:
minimum_transcript_len = 200 # About 2 sentences.
previous = None
long_enough_segments = []
for current in segments:
if previous is None:
previous = current
elif len(previous["text"]) < minimum_transcript_len:
previous = _merge_segments(left=previous, right=current)
else:
long_enough_segments.append(previous)
previous = current
if previous:
long_enough_segments.append(previous)
return long_enough_segments
def _merge_segments(left: Segment, right: Segment) -> Segment:
return {
"text": left["text"] + " " + right["text"],
"start": left["start"],
"end": right["end"],
}
# start_app
app_image = (
modal.Image.debian_slim(python_version="3.10")
.apt_install("git")
.pip_install(
"git+https://github.com/openai/whisper.git",
"dacite",
"jiwer",
"ffmpeg-python",
"gql[all]~=3.0.0a5",
"python-multipart~=0.0.9",
"pandas",
"loguru==0.6.0",
"torchaudio==2.1.0",
"python-dotenv",
)
.apt_install("ffmpeg")
.pip_install("ffmpeg-python")
)
app = modal.App(
"whisper-pod-transcriber",
image=app_image,
)
# end_app
# start_mount
cloud_bucket_mount = modal.CloudBucketMount(
"dagster-modal-demo",
bucket_endpoint_url=os.environ.get("CLOUDFLARE_R2_API"),
secret=modal.Secret.from_dict(
{
"AWS_ACCESS_KEY_ID": os.environ.get("CLOUDFLARE_R2_ACCESS_KEY_ID"),
"AWS_SECRET_ACCESS_KEY": os.environ.get("CLOUDFLARE_R2_SECRET_ACCESS_KEY"),
"AWS_REGION": "auto",
}
),
)
# end_mount
def split_silences(
path: str, min_segment_length: float = 30.0, min_silence_length: float = 1.0
) -> Iterator[tuple[float, float]]:
"""Split audio file into contiguous chunks using the ffmpeg `silencedetect` filter.
Retuns:
Generator of tuples (start, end) of each chunk in seconds.
"""
import re
import ffmpeg
silence_end_re = re.compile(
r" silence_end: (?P<end>[0-9]+(\.?[0-9]*)) \| silence_duration: (?P<dur>[0-9]+(\.?[0-9]*))"
)
metadata = ffmpeg.probe(path)
duration = float(metadata["format"]["duration"])
reader = (
ffmpeg.input(str(path))
.filter("silencedetect", n="-10dB", d=min_silence_length)
.output("pipe:", format="null")
.run_async(pipe_stderr=True)
)
cur_start = 0.0
num_segments = 0
while True:
line = reader.stderr.readline().decode("utf-8")
if not line:
break
match = silence_end_re.search(line)
if match:
silence_end, silence_dur = match.group("end"), match.group("dur")
split_at = float(silence_end) - (float(silence_dur) / 2)
if (split_at - cur_start) < min_segment_length:
continue
yield cur_start, split_at
cur_start = split_at
num_segments += 1
# silencedetect can place the silence end *after* the end of the full audio segment.
# Such segments definitions are negative length and invalid.
if duration > cur_start:
yield cur_start, duration
num_segments += 1
logger.info(f"Split {path} into {num_segments} segments")
# start_transcribe_segment
@app.function(
image=app_image,
cpu=2,
timeout=400,
volumes={
"/mount": cloud_bucket_mount,
},
)
def transcribe_segment(
start: float,
end: float,
audio_filepath: pathlib.Path,
model: config.ModelSpec,
):
import tempfile
import time
import ffmpeg
import torch
import whisper # type: ignore
t0 = time.time()
with tempfile.NamedTemporaryFile(suffix=".mp3") as f:
(
ffmpeg.input(str(audio_filepath))
.filter("atrim", start=start, end=end)
.output(f.name)
.overwrite_output()
.run(quiet=True)
)
use_gpu = torch.cuda.is_available()
device = "cuda" if use_gpu else "cpu"
model = whisper.load_model(model.name, device=device, download_root=config.MODEL_DIR)
result = model.transcribe(f.name, language="en", fp16=use_gpu) # type: ignore
logger.info(
f"Transcribed segment {start:.2f} to {end:.2f} ({end - start:.2f}s duration) in {time.time() - t0:.2f} seconds."
)
# Add back offsets.
for segment in result["segments"]:
segment["start"] += start
segment["end"] += start
return result
# end_transcribe_segment
@app.function(
image=app_image,
timeout=900,
volumes={
"/mount": cloud_bucket_mount,
},
)
def transcribe_episode(
audio_file: pathlib.Path,
result_path: pathlib.Path,
model: config.ModelSpec,
force: bool = False,
):
if not audio_file.exists():
raise Exception("Audio file not present on the file system")
if os.path.exists(result_path) and not force:
logger.info("Transcript already exists, skipping...")
return
# start_segment
segment_gen = split_silences(str(audio_file))
output_text = ""
output_segments = []
for result in transcribe_segment.starmap(
segment_gen, kwargs=dict(audio_filepath=audio_file, model=model)
):
output_text += result["text"]
output_segments += result["segments"]
# end_segment
result = {
"text": output_text,
"segments": output_segments,
"language": "en",
}
logger.info(f"Writing openai/whisper transcription to {result_path}")
with open(result_path, "w") as f:
json.dump(result, f, indent=4)
# start_main
@app.local_entrypoint()
def main():
from dagster_pipes import open_dagster_pipes
model = config.DEFAULT_MODEL
with open_dagster_pipes() as context:
audio_path = context.extras.get("audio_file_path")
if not audio_path:
raise Exception("Missing `audio_file_path` extras parameter")
audio_path = "/mount/" + audio_path
transcription_path = audio_path.replace(".mp3", ".json")
transcribe_episode.remote(
audio_file=Path(audio_path),
result_path=Path(transcription_path),
model=model,
)
context.report_asset_materialization(
metadata={
"audio_file": audio_path,
"transcription_file": transcription_path,
}
)
# end_main
| Segment |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/widgets/dialogs.py | {
"start": 719,
"end": 3380
} | class ____:
"""
Simple dialog window. This is the base for input dialogs, message dialogs
and confirmation dialogs.
Changing the title and body of the dialog is possible at runtime by
assigning to the `body` and `title` attributes of this class.
:param body: Child container object.
:param title: Text to be displayed in the heading of the dialog.
:param buttons: A list of `Button` widgets, displayed at the bottom.
"""
def __init__(
self,
body: AnyContainer,
title: AnyFormattedText = "",
buttons: Sequence[Button] | None = None,
modal: bool = True,
width: AnyDimension = None,
with_background: bool = False,
) -> None:
self.body = body
self.title = title
buttons = buttons or []
# When a button is selected, handle left/right key bindings.
buttons_kb = KeyBindings()
if len(buttons) > 1:
first_selected = has_focus(buttons[0])
last_selected = has_focus(buttons[-1])
buttons_kb.add("left", filter=~first_selected)(focus_previous)
buttons_kb.add("right", filter=~last_selected)(focus_next)
frame_body: AnyContainer
if buttons:
frame_body = HSplit(
[
# Add optional padding around the body.
Box(
body=DynamicContainer(lambda: self.body),
padding=D(preferred=1, max=1),
padding_bottom=0,
),
# The buttons.
Box(
body=VSplit(buttons, padding=1, key_bindings=buttons_kb),
height=D(min=1, max=3, preferred=3),
),
]
)
else:
frame_body = body
# Key bindings for whole dialog.
kb = KeyBindings()
kb.add("tab", filter=~has_completions)(focus_next)
kb.add("s-tab", filter=~has_completions)(focus_previous)
frame = Shadow(
body=Frame(
title=lambda: self.title,
body=frame_body,
style="class:dialog.body",
width=(None if with_background is None else width),
key_bindings=kb,
modal=modal,
)
)
self.container: Box | Shadow
if with_background:
self.container = Box(body=frame, style="class:dialog", width=width)
else:
self.container = frame
def __pt_container__(self) -> AnyContainer:
return self.container
| Dialog |
python | ansible__ansible | test/integration/targets/win_exec_wrapper/action_plugins/test_rc_1.py | {
"start": 229,
"end": 1101
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
super().run(tmp, task_vars)
del tmp
exec_command = self._connection.exec_command
def patched_exec_command(*args, **kwargs):
rc, stdout, stderr = exec_command(*args, **kwargs)
new_stdout = json.dumps({
"rc": rc,
"stdout": stdout.decode(),
"stderr": stderr.decode(),
"failed": False,
"changed": False,
}).encode()
return (0, new_stdout, b"")
try:
# This is done to capture the raw rc/stdio from the module exec
self._connection.exec_command = patched_exec_command
return self._execute_module(task_vars=task_vars)
finally:
self._connection.exec_command = exec_command
| ActionModule |
python | jazzband__django-simple-history | simple_history/tests/tests/test_models.py | {
"start": 36322,
"end": 41229
} | class ____(TestCase):
def assertRecordsMatch(self, record_a, record_b):
self.assertEqual(record_a, record_b)
self.assertEqual(record_a.question, record_b.question)
def setUp(self):
self.poll = Poll(question="what's up?", pub_date=today)
self.poll.save()
def test_get_prev_record(self):
self.poll.question = "ask questions?"
self.poll.save()
self.poll.question = "eh?"
self.poll.save()
self.poll.question = "one more?"
self.poll.save()
first_record = self.poll.history.filter(question="what's up?").get()
second_record = self.poll.history.filter(question="ask questions?").get()
third_record = self.poll.history.filter(question="eh?").get()
fourth_record = self.poll.history.filter(question="one more?").get()
with self.assertNumQueries(1):
self.assertRecordsMatch(second_record.prev_record, first_record)
with self.assertNumQueries(1):
self.assertRecordsMatch(third_record.prev_record, second_record)
with self.assertNumQueries(1):
self.assertRecordsMatch(fourth_record.prev_record, third_record)
def test_get_prev_record_none_if_only(self):
self.assertEqual(self.poll.history.count(), 1)
record = self.poll.history.get()
self.assertIsNone(record.prev_record)
def test_get_prev_record_none_if_earliest(self):
self.poll.question = "ask questions?"
self.poll.save()
first_record = self.poll.history.filter(question="what's up?").get()
self.assertIsNone(first_record.prev_record)
def test_get_prev_record_with_custom_manager_name(self):
instance = CustomManagerNameModel.objects.create(name="Test name 1")
instance.name = "Test name 2"
instance.save()
first_record = instance.log.filter(name="Test name 1").get()
second_record = instance.log.filter(name="Test name 2").get()
self.assertEqual(second_record.prev_record, first_record)
def test_get_prev_record_with_excluded_field(self):
instance = PollWithExcludeFields.objects.create(
question="what's up?", pub_date=today
)
instance.question = "ask questions?"
instance.save()
first_record = instance.history.filter(question="what's up?").get()
second_record = instance.history.filter(question="ask questions?").get()
with self.assertNumQueries(1):
self.assertRecordsMatch(second_record.prev_record, first_record)
def test_get_next_record(self):
self.poll.question = "ask questions?"
self.poll.save()
self.poll.question = "eh?"
self.poll.save()
self.poll.question = "one more?"
self.poll.save()
first_record = self.poll.history.filter(question="what's up?").get()
second_record = self.poll.history.filter(question="ask questions?").get()
third_record = self.poll.history.filter(question="eh?").get()
fourth_record = self.poll.history.filter(question="one more?").get()
self.assertIsNone(fourth_record.next_record)
with self.assertNumQueries(1):
self.assertRecordsMatch(first_record.next_record, second_record)
with self.assertNumQueries(1):
self.assertRecordsMatch(second_record.next_record, third_record)
with self.assertNumQueries(1):
self.assertRecordsMatch(third_record.next_record, fourth_record)
def test_get_next_record_none_if_only(self):
self.assertEqual(self.poll.history.count(), 1)
record = self.poll.history.get()
self.assertIsNone(record.next_record)
def test_get_next_record_none_if_most_recent(self):
self.poll.question = "ask questions?"
self.poll.save()
recent_record = self.poll.history.filter(question="ask questions?").get()
self.assertIsNone(recent_record.next_record)
def test_get_next_record_with_custom_manager_name(self):
instance = CustomManagerNameModel.objects.create(name="Test name 1")
instance.name = "Test name 2"
instance.save()
first_record = instance.log.filter(name="Test name 1").get()
second_record = instance.log.filter(name="Test name 2").get()
self.assertEqual(first_record.next_record, second_record)
def test_get_next_record_with_excluded_field(self):
instance = PollWithExcludeFields.objects.create(
question="what's up?", pub_date=today
)
instance.question = "ask questions?"
instance.save()
first_record = instance.history.filter(question="what's up?").get()
second_record = instance.history.filter(question="ask questions?").get()
with self.assertNumQueries(1):
self.assertRecordsMatch(first_record.next_record, second_record)
| GetPrevRecordAndNextRecordTestCase |
python | run-llama__llama_index | llama-index-core/llama_index/core/response_synthesizers/context_only.py | {
"start": 220,
"end": 845
} | class ____(BaseSynthesizer):
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
return "\n\n".join(text_chunks)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
return "\n\n".join(text_chunks)
| ContextOnly |
python | plotly__plotly.py | plotly/graph_objs/box/_marker.py | {
"start": 233,
"end": 13981
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "box"
_path_str = "box.marker"
_valid_props = {
"angle",
"color",
"line",
"opacity",
"outliercolor",
"size",
"symbol",
}
@property
def angle(self):
"""
Sets the marker angle in respect to `angleref`.
The 'angle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["angle"]
@angle.setter
def angle(self, val):
self["angle"] = val
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.box.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.box.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def outliercolor(self):
"""
Sets the color of the outlier sample points.
The 'outliercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["outliercolor"]
@outliercolor.setter
def outliercolor(self, val):
self["outliercolor"] = val
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, '0', 'circle', 100, '100', 'circle-open', 200, '200',
'circle-dot', 300, '300', 'circle-open-dot', 1, '1',
'square', 101, '101', 'square-open', 201, '201',
'square-dot', 301, '301', 'square-open-dot', 2, '2',
'diamond', 102, '102', 'diamond-open', 202, '202',
'diamond-dot', 302, '302', 'diamond-open-dot', 3, '3',
'cross', 103, '103', 'cross-open', 203, '203',
'cross-dot', 303, '303', 'cross-open-dot', 4, '4', 'x',
104, '104', 'x-open', 204, '204', 'x-dot', 304, '304',
'x-open-dot', 5, '5', 'triangle-up', 105, '105',
'triangle-up-open', 205, '205', 'triangle-up-dot', 305,
'305', 'triangle-up-open-dot', 6, '6', 'triangle-down',
106, '106', 'triangle-down-open', 206, '206',
'triangle-down-dot', 306, '306', 'triangle-down-open-dot',
7, '7', 'triangle-left', 107, '107', 'triangle-left-open',
207, '207', 'triangle-left-dot', 307, '307',
'triangle-left-open-dot', 8, '8', 'triangle-right', 108,
'108', 'triangle-right-open', 208, '208',
'triangle-right-dot', 308, '308',
'triangle-right-open-dot', 9, '9', 'triangle-ne', 109,
'109', 'triangle-ne-open', 209, '209', 'triangle-ne-dot',
309, '309', 'triangle-ne-open-dot', 10, '10',
'triangle-se', 110, '110', 'triangle-se-open', 210, '210',
'triangle-se-dot', 310, '310', 'triangle-se-open-dot', 11,
'11', 'triangle-sw', 111, '111', 'triangle-sw-open', 211,
'211', 'triangle-sw-dot', 311, '311',
'triangle-sw-open-dot', 12, '12', 'triangle-nw', 112,
'112', 'triangle-nw-open', 212, '212', 'triangle-nw-dot',
312, '312', 'triangle-nw-open-dot', 13, '13', 'pentagon',
113, '113', 'pentagon-open', 213, '213', 'pentagon-dot',
313, '313', 'pentagon-open-dot', 14, '14', 'hexagon', 114,
'114', 'hexagon-open', 214, '214', 'hexagon-dot', 314,
'314', 'hexagon-open-dot', 15, '15', 'hexagon2', 115,
'115', 'hexagon2-open', 215, '215', 'hexagon2-dot', 315,
'315', 'hexagon2-open-dot', 16, '16', 'octagon', 116,
'116', 'octagon-open', 216, '216', 'octagon-dot', 316,
'316', 'octagon-open-dot', 17, '17', 'star', 117, '117',
'star-open', 217, '217', 'star-dot', 317, '317',
'star-open-dot', 18, '18', 'hexagram', 118, '118',
'hexagram-open', 218, '218', 'hexagram-dot', 318, '318',
'hexagram-open-dot', 19, '19', 'star-triangle-up', 119,
'119', 'star-triangle-up-open', 219, '219',
'star-triangle-up-dot', 319, '319',
'star-triangle-up-open-dot', 20, '20',
'star-triangle-down', 120, '120',
'star-triangle-down-open', 220, '220',
'star-triangle-down-dot', 320, '320',
'star-triangle-down-open-dot', 21, '21', 'star-square',
121, '121', 'star-square-open', 221, '221',
'star-square-dot', 321, '321', 'star-square-open-dot', 22,
'22', 'star-diamond', 122, '122', 'star-diamond-open',
222, '222', 'star-diamond-dot', 322, '322',
'star-diamond-open-dot', 23, '23', 'diamond-tall', 123,
'123', 'diamond-tall-open', 223, '223',
'diamond-tall-dot', 323, '323', 'diamond-tall-open-dot',
24, '24', 'diamond-wide', 124, '124', 'diamond-wide-open',
224, '224', 'diamond-wide-dot', 324, '324',
'diamond-wide-open-dot', 25, '25', 'hourglass', 125,
'125', 'hourglass-open', 26, '26', 'bowtie', 126, '126',
'bowtie-open', 27, '27', 'circle-cross', 127, '127',
'circle-cross-open', 28, '28', 'circle-x', 128, '128',
'circle-x-open', 29, '29', 'square-cross', 129, '129',
'square-cross-open', 30, '30', 'square-x', 130, '130',
'square-x-open', 31, '31', 'diamond-cross', 131, '131',
'diamond-cross-open', 32, '32', 'diamond-x', 132, '132',
'diamond-x-open', 33, '33', 'cross-thin', 133, '133',
'cross-thin-open', 34, '34', 'x-thin', 134, '134',
'x-thin-open', 35, '35', 'asterisk', 135, '135',
'asterisk-open', 36, '36', 'hash', 136, '136',
'hash-open', 236, '236', 'hash-dot', 336, '336',
'hash-open-dot', 37, '37', 'y-up', 137, '137',
'y-up-open', 38, '38', 'y-down', 138, '138',
'y-down-open', 39, '39', 'y-left', 139, '139',
'y-left-open', 40, '40', 'y-right', 140, '140',
'y-right-open', 41, '41', 'line-ew', 141, '141',
'line-ew-open', 42, '42', 'line-ns', 142, '142',
'line-ns-open', 43, '43', 'line-ne', 143, '143',
'line-ne-open', 44, '44', 'line-nw', 144, '144',
'line-nw-open', 45, '45', 'arrow-up', 145, '145',
'arrow-up-open', 46, '46', 'arrow-down', 146, '146',
'arrow-down-open', 47, '47', 'arrow-left', 147, '147',
'arrow-left-open', 48, '48', 'arrow-right', 148, '148',
'arrow-right-open', 49, '49', 'arrow-bar-up', 149, '149',
'arrow-bar-up-open', 50, '50', 'arrow-bar-down', 150,
'150', 'arrow-bar-down-open', 51, '51', 'arrow-bar-left',
151, '151', 'arrow-bar-left-open', 52, '52',
'arrow-bar-right', 152, '152', 'arrow-bar-right-open', 53,
'53', 'arrow', 153, '153', 'arrow-open', 54, '54',
'arrow-wide', 154, '154', 'arrow-wide-open']
Returns
-------
Any
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
@property
def _prop_descriptions(self):
return """\
angle
Sets the marker angle in respect to `angleref`.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
line
:class:`plotly.graph_objects.box.marker.Line` instance
or dict with compatible properties
opacity
Sets the marker opacity.
outliercolor
Sets the color of the outlier sample points.
size
Sets the marker size (in px).
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
"""
def __init__(
self,
arg=None,
angle=None,
color=None,
line=None,
opacity=None,
outliercolor=None,
size=None,
symbol=None,
**kwargs,
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.box.Marker`
angle
Sets the marker angle in respect to `angleref`.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
line
:class:`plotly.graph_objects.box.marker.Line` instance
or dict with compatible properties
opacity
Sets the marker opacity.
outliercolor
Sets the color of the outlier sample points.
size
Sets the marker size (in px).
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.box.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.box.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("angle", arg, angle)
self._set_property("color", arg, color)
self._set_property("line", arg, line)
self._set_property("opacity", arg, opacity)
self._set_property("outliercolor", arg, outliercolor)
self._set_property("size", arg, size)
self._set_property("symbol", arg, symbol)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
python | boto__boto3 | boto3/docs/docstring.py | {
"start": 1477,
"end": 1619
} | class ____(LazyLoadedDocstring):
def _write_docstring(self, *args, **kwargs):
document_attribute(*args, **kwargs)
| AttributeDocstring |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE796.py | {
"start": 185,
"end": 261
} | class ____(str, Enum):
A = "1"
B = "2"
C = "2" # PIE796
| FakeEnum3 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 233040,
"end": 233585
} | class ____(sgqlc.types.Input):
"""Ordering options for Enterprise Server user accounts upload
connections.
"""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(EnterpriseServerUserAccountsUploadOrderField), graphql_name="field")
"""The field to order user accounts uploads by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
| EnterpriseServerUserAccountsUploadOrder |
python | getsentry__sentry | src/sentry/notifications/notification_action/metric_alert_registry/handlers/sentry_app_metric_alert_handler.py | {
"start": 911,
"end": 2164
} | class ____(BaseMetricAlertHandler):
@classmethod
def send_alert(
cls,
notification_context: NotificationContext,
alert_context: AlertContext,
metric_issue_context: MetricIssueContext,
open_period_context: OpenPeriodContext,
trigger_status: TriggerStatus,
notification_uuid: str,
organization: Organization,
project: Project,
) -> None:
open_period = GroupOpenPeriod.objects.get(id=open_period_context.id)
if not open_period:
raise ValueError("Open period not found")
incident_serialized_response = get_incident_serializer(open_period)
logger.info(
"notification_action.execute_via_metric_alert_handler.sentry_app",
extra={
"action_id": alert_context.action_identifier_id,
},
)
send_incident_alert_notification(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
incident_serialized_response=incident_serialized_response,
organization=organization,
notification_uuid=notification_uuid,
)
| SentryAppMetricAlertHandler |
python | numba__numba | numba/tests/test_npdatetime.py | {
"start": 22515,
"end": 33079
} | class ____(TestCase):
jitargs = dict(forceobj=True)
def jit(self, pyfunc):
return jit(**self.jitargs)(pyfunc)
@contextlib.contextmanager
def silence_numpy_warnings(self):
# Numpy can raise warnings when combining e.g. a generic timedelta64
# with a non-generic datetime64.
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='Implicitly casting between incompatible kinds',
category=DeprecationWarning)
yield
def test_add_sub_timedelta(self):
"""
Test `datetime64 + timedelta64` and `datetime64 - timedelta64`.
"""
add = self.jit(add_usecase)
sub = self.jit(sub_usecase)
def check(a, b, expected):
with self.silence_numpy_warnings():
self.assertPreciseEqual(add(a, b), expected, (a, b))
self.assertPreciseEqual(add(b, a), expected, (a, b))
self.assertPreciseEqual(sub(a, -b), expected, (a, b))
# Did we get it right?
self.assertPreciseEqual(a + b, expected)
# Y + ...
check(DT('2014'), TD(2, 'Y'), DT('2016'))
check(DT('2014'), TD(2, 'M'), DT('2014-03'))
check(DT('2014'), TD(3, 'W'), DT('2014-01-16', 'W'))
check(DT('2014'), TD(4, 'D'), DT('2014-01-05'))
check(DT('2000'), TD(365, 'D'), DT('2000-12-31'))
# M + ...
check(DT('2014-02'), TD(2, 'Y'), DT('2016-02'))
check(DT('2014-02'), TD(2, 'M'), DT('2014-04'))
check(DT('2014-02'), TD(2, 'D'), DT('2014-02-03'))
# W + ...
check(DT('2014-01-07', 'W'), TD(2, 'W'), DT('2014-01-16', 'W'))
# D + ...
check(DT('2014-02-02'), TD(27, 'D'), DT('2014-03-01'))
check(DT('2012-02-02'), TD(27, 'D'), DT('2012-02-29'))
check(DT('2012-02-02'), TD(2, 'W'), DT('2012-02-16'))
# s + ...
check(DT('2000-01-01T01:02:03Z'), TD(2, 'h'), DT('2000-01-01T03:02:03Z'))
check(DT('2000-01-01T01:02:03Z'), TD(2, 'ms'), DT('2000-01-01T01:02:03.002Z'))
# More thorough checking with leap years and faraway years
for dt_str in ('600', '601', '604', '801',
'1900', '1904', '2200', '2300', '2304',
'2400', '6001'):
for dt_suffix in ('', '-01', '-12'):
dt = DT(dt_str + dt_suffix)
for td in [TD(2, 'D'), TD(2, 'W'),
TD(100, 'D'), TD(10000, 'D'),
TD(-100, 'D'), TD(-10000, 'D'),
TD(100, 'W'), TD(10000, 'W'),
TD(-100, 'W'), TD(-10000, 'W'),
TD(100, 'M'), TD(10000, 'M'),
TD(-100, 'M'), TD(-10000, 'M')]:
self.assertEqual(add(dt, td), dt + td, (dt, td))
self.assertEqual(add(td, dt), dt + td, (dt, td))
self.assertEqual(sub(dt, -td), dt + td, (dt, td))
# NaTs
check(DT('NaT'), TD(2), DT('NaT'))
check(DT('NaT', 's'), TD(2, 'h'), DT('NaT', 's'))
check(DT('NaT', 's'), TD(2, 'ms'), DT('NaT', 'ms'))
check(DT('2014'), TD('NaT', 'W'), DT('NaT', 'W'))
check(DT('2014-01-01'), TD('NaT', 'W'), DT('NaT', 'D'))
check(DT('NaT', 's'), TD('NaT', 'ms'), DT('NaT', 'ms'))
# Cannot add datetime days and timedelta months or years
for f in (add, sub):
with self.assertRaises((TypeError, TypingError)):
f(DT(1, '2014-01-01'), TD(1, 'Y'))
with self.assertRaises((TypeError, TypingError)):
f(DT(1, '2014-01-01'), TD(1, 'M'))
def datetime_samples(self):
dt_years = ['600', '601', '604', '1968', '1969', '1973',
'2000', '2004', '2005', '2100', '2400', '2401']
dt_suffixes = ['', '-01', '-12', '-02-28', '-12-31',
'-01-05T12:30:56Z', '-01-05T12:30:56.008Z']
dts = [DT(a + b) for (a, b) in itertools.product(dt_years, dt_suffixes)]
dts += [DT(s, 'W') for s in dt_years]
return dts
def test_datetime_difference(self):
"""
Test `datetime64 - datetime64`.
"""
sub = self.jit(sub_usecase)
def check(a, b, expected=None):
with self.silence_numpy_warnings():
self.assertPreciseEqual(sub(a, b), a - b, (a, b))
self.assertPreciseEqual(sub(b, a), b - a, (a, b))
# Did we get it right?
self.assertPreciseEqual(a - b, expected)
check(DT('2014'), DT('2017'), TD(-3, 'Y'))
check(DT('2014-02'), DT('2017-01'), TD(-35, 'M'))
check(DT('2014-02-28'), DT('2015-03-01'), TD(-366, 'D'))
# NaTs
check(DT('NaT', 'M'), DT('2000'), TD('NaT', 'M'))
check(DT('NaT', 'M'), DT('2000-01-01'), TD('NaT', 'D'))
check(DT('NaT'), DT('NaT'), TD('NaT'))
# Test many more values
with self.silence_numpy_warnings():
dts = self.datetime_samples()
for a, b in itertools.product(dts, dts):
if (not npdatetime_helpers.same_kind(value_unit(a), value_unit(b))):
continue
self.assertPreciseEqual(sub(a, b), a - b, (a, b))
def test_comparisons(self):
# Test all datetime comparisons all at once
eq = self.jit(eq_usecase)
ne = self.jit(ne_usecase)
lt = self.jit(lt_usecase)
le = self.jit(le_usecase)
gt = self.jit(gt_usecase)
ge = self.jit(ge_usecase)
def check_eq(a, b, expected):
expected_val = expected
not_expected_val = not expected
# since np 1.16 all NaT comparisons bar != are False, including
# NaT==NaT
if np.isnat(a) or np.isnat(b):
expected_val = False
not_expected_val = True
self.assertFalse(le(a, b), (a, b))
self.assertFalse(ge(a, b), (a, b))
self.assertFalse(le(b, a), (a, b))
self.assertFalse(ge(b, a), (a, b))
self.assertFalse(lt(a, b), (a, b))
self.assertFalse(gt(a, b), (a, b))
self.assertFalse(lt(b, a), (a, b))
self.assertFalse(gt(b, a), (a, b))
with self.silence_numpy_warnings():
self.assertPreciseEqual(eq(a, b), expected_val, (a, b, expected))
self.assertPreciseEqual(eq(b, a), expected_val, (a, b, expected))
self.assertPreciseEqual(ne(a, b), not_expected_val, (a, b, expected))
self.assertPreciseEqual(ne(b, a), not_expected_val, (a, b, expected))
if expected_val:
# If equal, then equal-ordered comparisons are true
self.assertTrue(le(a, b), (a, b))
self.assertTrue(ge(a, b), (a, b))
self.assertTrue(le(b, a), (a, b))
self.assertTrue(ge(b, a), (a, b))
# and strictly ordered comparisons are false
self.assertFalse(lt(a, b), (a, b))
self.assertFalse(gt(a, b), (a, b))
self.assertFalse(lt(b, a), (a, b))
self.assertFalse(gt(b, a), (a, b))
# Did we get it right?
self.assertPreciseEqual(a == b, expected_val)
def check_lt(a, b, expected):
expected_val = expected
not_expected_val = not expected
# since np 1.16 all NaT magnitude comparisons including equality
# are False (as NaT == NaT is now False)
if np.isnat(a) or np.isnat(b):
expected_val = False
not_expected_val = False
with self.silence_numpy_warnings():
lt = self.jit(lt_usecase)
self.assertPreciseEqual(lt(a, b), expected_val, (a, b, expected))
self.assertPreciseEqual(gt(b, a), expected_val, (a, b, expected))
self.assertPreciseEqual(ge(a, b), not_expected_val, (a, b, expected))
self.assertPreciseEqual(le(b, a), not_expected_val, (a, b, expected))
if expected_val:
# If true, then values are not equal
check_eq(a, b, False)
# Did we get it right?
self.assertPreciseEqual(a < b, expected_val)
check_eq(DT('2014'), DT('2017'), False)
check_eq(DT('2014'), DT('2014-01'), True)
check_eq(DT('2014'), DT('2014-01-01'), True)
check_eq(DT('2014'), DT('2014-01-01', 'W'), True)
check_eq(DT('2014-01'), DT('2014-01-01', 'W'), True)
# Yes, it's not transitive
check_eq(DT('2014-01-01'), DT('2014-01-01', 'W'), False)
check_eq(DT('2014-01-02'), DT('2014-01-06', 'W'), True)
# with times
check_eq(DT('2014-01-01T00:01:00Z', 's'),
DT('2014-01-01T00:01Z', 'm'), True)
check_eq(DT('2014-01-01T00:01:01Z', 's'),
DT('2014-01-01T00:01Z', 'm'), False)
# NaTs
check_lt(DT('NaT', 'Y'), DT('2017'), True)
check_eq(DT('NaT'), DT('NaT'), True)
# Check comparison between various units
dts = self.datetime_samples()
for a in dts:
# Take a number of smaller units
a_unit = a.dtype.str.split('[')[1][:-1]
i = all_units.index(a_unit)
units = all_units[i:i+6]
for unit in units:
# Force conversion
b = a.astype('M8[%s]' % unit)
if (not npdatetime_helpers.same_kind(value_unit(a),
value_unit(b))):
continue
check_eq(a, b, True)
check_lt(a, b + np.timedelta64(1, unit), True)
check_lt(b - np.timedelta64(1, unit), a, True)
def _test_min_max(self, usecase):
f = self.jit(usecase)
def check(a, b):
self.assertPreciseEqual(f(a, b), usecase(a, b))
for cases in (
(DT(0, 'ns'), DT(1, 'ns'), DT(2, 'ns'), DT('NaT', 'ns')),
(DT(0, 's'), DT(1, 's'), DT(2, 's'), DT('NaT', 's')),
):
for a, b in itertools.product(cases, cases):
check(a, b)
def test_min(self):
self._test_min_max(min_usecase)
def test_max(self):
self._test_min_max(max_usecase)
| TestDatetimeArithmetic |
python | huggingface__transformers | src/transformers/models/afmoe/modeling_afmoe.py | {
"start": 5155,
"end": 5904
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
AfmoeRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return (self.weight * hidden_states).to(input_dtype) # main diff with Llama
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| AfmoeRMSNorm |
python | cherrypy__cherrypy | cherrypy/test/test_wsgi_ns.py | {
"start": 51,
"end": 2755
} | class ____(helper.CPWebCase):
@staticmethod
def setup_server():
class WSGIResponse(object):
def __init__(self, appresults):
self.appresults = appresults
self.iter = iter(appresults)
def __iter__(self):
return self
def next(self):
return self.iter.next()
def __next__(self):
return next(self.iter)
def close(self):
if hasattr(self.appresults, 'close'):
self.appresults.close()
class ChangeCase(object):
def __init__(self, app, to=None):
self.app = app
self.to = to
def __call__(self, environ, start_response):
res = self.app(environ, start_response)
class CaseResults(WSGIResponse):
def next(this):
return getattr(this.iter.next(), self.to)()
def __next__(this):
return getattr(next(this.iter), self.to)()
return CaseResults(res)
class Replacer(object):
def __init__(self, app, map={}):
self.app = app
self.map = map
def __call__(self, environ, start_response):
res = self.app(environ, start_response)
class ReplaceResults(WSGIResponse):
def next(this):
line = this.iter.next()
for k, v in self.map.iteritems():
line = line.replace(k, v)
return line
def __next__(this):
line = next(this.iter)
for k, v in self.map.items():
line = line.replace(k, v)
return line
return ReplaceResults(res)
class Root(object):
@cherrypy.expose
def index(self):
return 'HellO WoRlD!'
root_conf = {
'wsgi.pipeline': [('replace', Replacer)],
'wsgi.replace.map': {b'L': b'X', b'l': b'r'},
}
app = cherrypy.Application(Root())
app.wsgiapp.pipeline.append(('changecase', ChangeCase))
app.wsgiapp.config['changecase'] = {'to': 'upper'}
cherrypy.tree.mount(app, config={'/': root_conf})
def test_pipeline(self):
if not cherrypy.server.httpserver:
return self.skip()
self.getPage('/')
# If body is "HEXXO WORXD!", the middleware was applied out of order.
self.assertBody('HERRO WORRD!')
| WSGI_Namespace_Test |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/code_location.py | {
"start": 2877,
"end": 12826
} | class ____(AbstractContextManager):
"""A CodeLocation represents a target containing user code which has a set of Dagster
definition objects. A given location will contain some number of uniquely named
RepositoryDefinitions, which therein contains job, op, and other definitions.
Dagster tools are typically "host" processes, meaning they load a CodeLocation and
communicate with it over an IPC/RPC layer. Currently this IPC layer is implemented by
invoking the dagster CLI in a target python interpreter (e.g. a virtual environment) in either
a) the current node
b) a container
In the near future, we may also make this communication channel able over an RPC layer, in
which case the information needed to load a CodeLocation will be a url that abides by
some RPC contract.
We also allow for InProcessCodeLocation which actually loads the user-defined artifacts
into process with the host tool. This is mostly for test scenarios.
"""
@abstractmethod
def get_repository(self, name: str) -> RemoteRepository:
pass
@abstractmethod
def has_repository(self, name: str) -> bool:
pass
@abstractmethod
def get_repositories(self) -> Mapping[str, RemoteRepository]:
pass
def get_repository_names(self) -> Sequence[str]:
return list(self.get_repositories().keys())
@property
def name(self) -> str:
return self.origin.location_name
@abstractmethod
def get_execution_plan(
self,
remote_job: RemoteJob,
run_config: Mapping[str, object],
step_keys_to_execute: Optional[Sequence[str]],
known_state: Optional[KnownExecutionState],
instance: Optional[DagsterInstance] = None,
) -> RemoteExecutionPlan: ...
@abstractmethod
async def gen_execution_plan(
self,
remote_job: RemoteJob,
run_config: Mapping[str, object],
step_keys_to_execute: Optional[Sequence[str]],
known_state: Optional[KnownExecutionState],
instance: Optional[DagsterInstance] = None,
) -> RemoteExecutionPlan: ...
def _get_remote_job_from_subset_result(
self,
selector: JobSubsetSelector,
subset_result: RemoteJobSubsetResult,
) -> RemoteJob:
if subset_result.repository_python_origin:
# Prefer the python origin from the result if it is set, in case the code location
# just updated and any origin information (most frequently the image) has changed
repo_handle = RepositoryHandle(
repository_name=selector.repository_name,
code_location_origin=self.origin,
repository_python_origin=subset_result.repository_python_origin,
display_metadata=self.get_display_metadata(),
)
else:
repo_handle = RepositoryHandle.from_location(
repository_name=selector.repository_name,
code_location=self,
)
job_data_snap = subset_result.job_data_snap
if job_data_snap is None:
error = check.not_none(subset_result.error)
if error.cls_name == "DagsterInvalidSubsetError":
raise DagsterInvalidSubsetError(check.not_none(error.message))
else:
check.failed(
f"Failed to fetch subset data, success: {subset_result.success} error: {error}"
)
return RemoteJob(job_data_snap, repo_handle)
def get_job(self, selector: JobSubsetSelector) -> RemoteJob:
"""Return the RemoteJob for a specific pipeline. Subclasses only
need to implement get_subset_remote_job_result to handle the case where
an op selection is specified, which requires access to the underlying JobDefinition
to generate the subsetted pipeline snapshot.
"""
if not selector.is_subset_selection:
return self.get_repository(selector.repository_name).get_full_job(selector.job_name)
subset_result = self._get_subset_remote_job_result(
selector,
lambda selector: self.get_repository(selector.repository_name).get_full_job(
selector.job_name
),
)
return self._get_remote_job_from_subset_result(selector, subset_result)
async def gen_subset_job(
self, selector: JobSubsetSelector, get_full_job: Callable[[JobSubsetSelector], RemoteJob]
) -> RemoteJob:
subset_result = await self._gen_subset_remote_job_result(selector, get_full_job)
return self._get_remote_job_from_subset_result(selector, subset_result)
@abstractmethod
def _get_subset_remote_job_result(
self, selector: JobSubsetSelector, get_full_job: Callable[[JobSubsetSelector], RemoteJob]
) -> RemoteJobSubsetResult:
"""Returns a snapshot about an RemoteJob with an op selection, which requires
access to the underlying JobDefinition. Callsites should likely use
`get_job` instead.
"""
@abstractmethod
async def _gen_subset_remote_job_result(
self, selector: JobSubsetSelector, get_full_job: Callable[[JobSubsetSelector], RemoteJob]
) -> RemoteJobSubsetResult:
"""Returns a snapshot about an RemoteJob with an op selection, which requires
access to the underlying JobDefinition. Callsites should likely use
`gen_job` instead.
"""
@abstractmethod
def get_partition_config(
self,
repository_handle: RepositoryHandle,
job_name: str,
partition_name: str,
instance: DagsterInstance,
) -> Union["PartitionConfigSnap", "PartitionExecutionErrorSnap"]:
pass
@abstractmethod
def get_partition_tags_from_repo(
self,
repository_handle: RepositoryHandle,
job_name: str,
partition_name: str,
instance: DagsterInstance,
) -> Union["PartitionTagsSnap", "PartitionExecutionErrorSnap"]:
pass
@abstractmethod
def get_partition_names_from_repo(
self,
repository_handle: RepositoryHandle,
job_name: str,
) -> Union["PartitionNamesSnap", "PartitionExecutionErrorSnap"]:
pass
@abstractmethod
def get_partition_set_execution_params(
self,
repository_handle: RepositoryHandle,
partition_set_name: str,
partition_names: Sequence[str],
instance: DagsterInstance,
) -> Union["PartitionSetExecutionParamSnap", "PartitionExecutionErrorSnap"]:
pass
@abstractmethod
def get_schedule_execution_data(
self,
instance: DagsterInstance,
repository_handle: RepositoryHandle,
schedule_name: str,
scheduled_execution_time: Optional[TimestampWithTimezone],
log_key: Optional[Sequence[str]],
) -> "ScheduleExecutionData":
pass
@abstractmethod
def get_sensor_execution_data(
self,
instance: DagsterInstance,
repository_handle: RepositoryHandle,
name: str,
last_tick_completion_time: Optional[float],
last_run_key: Optional[str],
cursor: Optional[str],
log_key: Optional[Sequence[str]],
last_sensor_start_time: Optional[float],
) -> "SensorExecutionData":
pass
@abstractmethod
def get_notebook_data(self, notebook_path: str) -> bytes:
pass
@property
@abstractmethod
def is_reload_supported(self) -> bool:
pass
def __del__(self):
self.cleanup()
def __exit__(self, _exception_type, _exception_value, _traceback):
self.cleanup()
def cleanup(self) -> None:
pass
@property
@abstractmethod
def origin(self) -> CodeLocationOrigin:
pass
def get_display_metadata(self) -> Mapping[str, str]:
return merge_dicts(
self.origin.get_display_metadata(),
({"image": self.container_image} if self.container_image else {}),
)
@property
@abstractmethod
def executable_path(self) -> Optional[str]:
pass
@property
@abstractmethod
def container_image(self) -> Optional[str]:
pass
@cached_property
def container_context(self) -> Optional[Mapping[str, Any]]:
return None
@property
@abstractmethod
def entry_point(self) -> Optional[Sequence[str]]:
pass
@property
@abstractmethod
def repository_code_pointer_dict(self) -> Mapping[str, Optional[CodePointer]]:
pass
def get_repository_python_origin(
self, repository_name: str
) -> Optional["RepositoryPythonOrigin"]:
code_pointer = self.repository_code_pointer_dict.get(repository_name)
if not code_pointer:
return None
return RepositoryPythonOrigin(
executable_path=self.executable_path or sys.executable,
code_pointer=code_pointer,
container_image=self.container_image,
entry_point=self.entry_point,
container_context=self.container_context,
)
@abstractmethod
def get_dagster_library_versions(self) -> Optional[Mapping[str, str]]: ...
def get_defs_state_info(self) -> Optional[DefsStateInfo]:
all_infos = list(
filter(
None,
[repo.repository_snap.defs_state_info for repo in self.get_repositories().values()],
)
)
if len(all_infos) == 0:
return None
elif len(all_infos) == 1:
return all_infos[0]
else:
# in theory this would be extremely rare, as having multiple
# repositories in the same location has long been deprecated
combined_mapping = {}
for info in all_infos:
combined_mapping.update(info.info_mapping)
return DefsStateInfo(info_mapping=combined_mapping)
| CodeLocation |
python | Pylons__pyramid | tests/test_response.py | {
"start": 6376,
"end": 6570
} | class ____:
def __init__(self):
self.adapters = []
def add_response_adapter(self, wrapped, type_or_iface):
self.adapters.append((wrapped, type_or_iface))
| DummyConfigurator |
python | pyqtgraph__pyqtgraph | pyqtgraph/GraphicsScene/mouseEvents.py | {
"start": 8718,
"end": 14267
} | class ____(object):
"""
Instances of this class are delivered to items in a :class:`GraphicsScene <pyqtgraph.GraphicsScene>` via their hoverEvent() method when the mouse is hovering over the item.
This event class both informs items that the mouse cursor is nearby and allows items to
communicate with one another about whether each item will accept *potential* mouse events.
It is common for multiple overlapping items to receive hover events and respond by changing
their appearance. This can be misleading to the user since, in general, only one item will
respond to mouse events. To avoid this, items make calls to event.acceptClicks(button)
and/or acceptDrags(button).
Each item may make multiple calls to acceptClicks/Drags, each time for a different button.
If the method returns True, then the item is guaranteed to be
the recipient of the claimed event IF the user presses the specified mouse button before
moving. If claimEvent returns False, then this item is guaranteed NOT to get the specified
event (because another has already claimed it) and the item should change its appearance
accordingly.
event.isEnter() returns True if the mouse has just entered the item's shape;
event.isExit() returns True if the mouse has just left.
"""
def __init__(self, moveEvent, acceptable):
self.enter = False
self.acceptable = acceptable
self.exit = False
self.__clickItems = weakref.WeakValueDictionary()
self.__dragItems = weakref.WeakValueDictionary()
self.currentItem = None
if moveEvent is not None:
self._scenePos = moveEvent.scenePos()
self._screenPos = moveEvent.screenPos()
self._lastScenePos = moveEvent.lastScenePos()
self._lastScreenPos = moveEvent.lastScreenPos()
self._buttons = moveEvent.buttons()
self._modifiers = moveEvent.modifiers()
else:
self.exit = True
def isEnter(self):
"""Returns True if the mouse has just entered the item's shape"""
return self.enter
def isExit(self):
"""Returns True if the mouse has just exited the item's shape"""
return self.exit
def acceptClicks(self, button):
"""Inform the scene that the item (that the event was delivered to)
would accept a mouse click event if the user were to click before
moving the mouse again.
Returns True if the request is successful, otherwise returns False (indicating
that some other item would receive an incoming click).
"""
if not self.acceptable:
return False
if button not in self.__clickItems:
self.__clickItems[button] = self.currentItem
return True
return False
def acceptDrags(self, button):
"""Inform the scene that the item (that the event was delivered to)
would accept a mouse drag event if the user were to drag before
the next hover event.
Returns True if the request is successful, otherwise returns False (indicating
that some other item would receive an incoming drag event).
"""
if not self.acceptable:
return False
if button not in self.__dragItems:
self.__dragItems[button] = self.currentItem
return True
return False
def scenePos(self):
"""Return the current scene position of the mouse."""
return Point(self._scenePos)
def screenPos(self):
"""Return the current screen position of the mouse."""
return Point(self._screenPos)
def lastScenePos(self):
"""Return the previous scene position of the mouse."""
return Point(self._lastScenePos)
def lastScreenPos(self):
"""Return the previous screen position of the mouse."""
return Point(self._lastScreenPos)
def buttons(self):
"""
Return the buttons currently pressed on the mouse.
(see QGraphicsSceneMouseEvent::buttons in the Qt documentation)
"""
return self._buttons
def pos(self):
"""
Return the current position of the mouse in the coordinate system of the item
that the event was delivered to.
"""
return Point(self.currentItem.mapFromScene(self._scenePos))
def lastPos(self):
"""
Return the previous position of the mouse in the coordinate system of the item
that the event was delivered to.
"""
return Point(self.currentItem.mapFromScene(self._lastScenePos))
def __repr__(self):
if self.exit:
return "<HoverEvent exit=True>"
if self.currentItem is None:
lp = self._lastScenePos
p = self._scenePos
else:
lp = self.lastPos()
p = self.pos()
return "<HoverEvent (%g,%g)->(%g,%g) buttons=%s enter=%s exit=%s>" % (lp.x(), lp.y(), p.x(), p.y(), str(self.buttons()), str(self.isEnter()), str(self.isExit()))
def modifiers(self):
"""Return any keyboard modifiers currently pressed.
(see QGraphicsSceneMouseEvent::modifiers in the Qt documentation)
"""
return self._modifiers
def clickItems(self):
return self.__clickItems
def dragItems(self):
return self.__dragItems
| HoverEvent |
python | fastapi__sqlmodel | docs_src/tutorial/one/tutorial008.py | {
"start": 92,
"end": 1538
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
hero = session.get(Hero, 1)
print("Hero:", hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/benchmarks/pdsds.py | {
"start": 1216,
"end": 1716
} | class ____(type):
"""Metaclass used for query lookup."""
def __getattr__(cls, name: str): # type: ignore[no-untyped-def]
"""Query lookup."""
if valid_query(name):
q_num = int(name[1:])
module: ModuleType = importlib.import_module(
f"cudf_polars.experimental.benchmarks.pdsds_queries.q{q_num}"
)
return getattr(module, cls.q_impl)
raise AttributeError(f"{name} is not a valid query name")
| PDSDSQueriesMeta |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_public_methods.py | {
"start": 141,
"end": 7734
} | class ____:
"""Test that only @public methods on @public classes are identified for validation."""
def test_identifies_only_public_methods_on_public_classes(self):
"""Test that only @public methods on @public classes are found for validation.
This is the core test that verifies the main functionality:
- @public methods on @public classes should be included
- Non-public methods on @public classes should be excluded
- All methods on non-public classes should be excluded (even if marked @public)
- Different method types (instance, static, class, property) should all work
"""
# Import our test module
module_path = "automation_tests.dagster_docs_tests.test_fixtures.test_public_class"
methods = SymbolImporter.get_all_public_annotated_methods(module_path)
# Extract just the method names from the full dotted paths for easier testing
method_names = [method.dotted_path.split(".")[-1] for method in methods]
# Methods that SHOULD be found (public methods on public classes)
expected_public_methods = {
"public_instance_method",
"public_static_method",
"public_class_method",
"public_property",
"another_public_method", # From AnotherPublicClass
}
# Methods that should NOT be found
excluded_methods = {
# Non-public methods on public classes
"non_public_instance_method",
"non_public_static_method",
"non_public_class_method",
"non_public_property",
"another_non_public_method",
# Any methods from NonPublicClass (even if marked @public)
"public_method_on_non_public_class",
"regular_method_on_non_public_class",
}
# Verify that all expected public methods are found
for expected_method in expected_public_methods:
assert expected_method in method_names, (
f"Expected @public method '{expected_method}' was not found"
)
# Verify that excluded methods are NOT found
for excluded_method in excluded_methods:
assert excluded_method not in method_names, (
f"Non-public method '{excluded_method}' should not be found"
)
# Verify the total count matches expectations
assert len(methods) == len(expected_public_methods), (
f"Expected {len(expected_public_methods)} public methods, but found {len(methods)}. "
f"Found methods: {method_names}"
)
def test_method_types_are_correctly_handled(self):
"""Test that different method types (instance, static, class, property) are all handled correctly."""
module_path = "automation_tests.dagster_docs_tests.test_fixtures.test_public_class"
methods = SymbolImporter.get_all_public_annotated_methods(module_path)
# Create a mapping of method names to their SymbolInfo objects
methods_by_name = {method.dotted_path.split(".")[-1]: method for method in methods}
# Verify we have the expected method types
assert "public_instance_method" in methods_by_name
assert "public_static_method" in methods_by_name
assert "public_class_method" in methods_by_name
assert "public_property" in methods_by_name
# Verify each method has the correct symbol type and docstring
instance_method = methods_by_name["public_instance_method"]
assert instance_method.docstring is not None
assert "public instance method" in instance_method.docstring
static_method = methods_by_name["public_static_method"]
assert static_method.docstring is not None
assert "public static method" in static_method.docstring
class_method = methods_by_name["public_class_method"]
assert class_method.docstring is not None
assert "public class method" in class_method.docstring
property_method = methods_by_name["public_property"]
assert property_method.docstring is not None
assert "public property" in property_method.docstring
def test_multiple_public_classes_are_handled(self):
"""Test that methods from multiple @public classes in the same module are found."""
module_path = "automation_tests.dagster_docs_tests.test_fixtures.test_public_class"
methods = SymbolImporter.get_all_public_annotated_methods(module_path)
# Extract class names from the dotted paths
class_names = set()
for method in methods:
parts = method.dotted_path.split(".")
if len(parts) >= 2:
class_names.add(parts[-2]) # Second to last part is class name
# Should find methods from both public classes
expected_classes = {"PublicClass", "AnotherPublicClass"}
assert expected_classes.issubset(class_names), (
f"Expected to find methods from classes {expected_classes}, "
f"but only found classes {class_names}"
)
# Should NOT find any methods from NonPublicClass
assert "NonPublicClass" not in class_names, (
"Should not find any methods from NonPublicClass"
)
def test_nonexistent_module_raises_import_error(self):
"""Test that trying to analyze a nonexistent module raises ModuleNotFoundError."""
with pytest.raises(ModuleNotFoundError, match="No module named"):
SymbolImporter.get_all_public_annotated_methods(
"nonexistent.module.that.does.not.exist"
)
def test_empty_module_returns_empty_list(self):
"""Test that a module with no @public classes returns an empty list."""
# Use a standard library module that shouldn't have @public decorators
try:
methods = SymbolImporter.get_all_public_annotated_methods("json")
assert isinstance(methods, list)
# Should be empty since standard library modules don't use @public decorators
assert len(methods) == 0
except ImportError:
pytest.skip("json module not available for testing")
def test_dotted_paths_are_correctly_formed(self):
"""Test that the dotted paths in SymbolInfo objects are correctly formed."""
module_path = "automation_tests.dagster_docs_tests.test_fixtures.test_public_class"
methods = SymbolImporter.get_all_public_annotated_methods(module_path)
for method in methods:
# Each dotted path should have the format: module.class.method
parts = method.dotted_path.split(".")
assert len(parts) >= 3, (
f"Dotted path '{method.dotted_path}' should have at least 3 parts"
)
# Module part should match what we requested
module_part = ".".join(parts[:-2]) # All but last 2 parts
assert module_part == module_path
# Class part should be one of our public classes
class_part = parts[-2]
assert class_part in {"PublicClass", "AnotherPublicClass"}
# Method part should be one of our expected public methods
method_part = parts[-1]
expected_methods = {
"public_instance_method",
"public_static_method",
"public_class_method",
"public_property",
"another_public_method",
}
assert method_part in expected_methods
| TestPublicMethodFiltering |
python | spack__spack | lib/spack/spack/cmd/create.py | {
"start": 8254,
"end": 8638
} | class ____(PackageTemplate):
"""Provides appropriate overrides for QMake-based packages"""
base_class_name = "QMakePackage"
package_class_import = "from spack_repo.builtin.build_systems.qmake import QMakePackage"
body_def = """\
def qmake_args(self):
# FIXME: If not needed delete this function
args = []
return args"""
| QMakePackageTemplate |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 301315,
"end": 301520
} | class ____(VegaLiteSchema):
"""DataFormat schema wrapper."""
_schema = {"$ref": "#/definitions/DataFormat"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| DataFormat |
python | huggingface__transformers | src/transformers/models/sam_hq/modular_sam_hq.py | {
"start": 20223,
"end": 31507
} | class ____(SamModel):
_keys_to_ignore_on_load_missing = ["prompt_encoder.shared_embedding.positional_embedding"]
def __init__(self, config):
super().__init__(config)
self.vision_encoder = SamHQVisionEncoder(config.vision_config)
self.mask_decoder = SamHQMaskDecoder(config.mask_decoder_config)
self.post_init()
@torch.no_grad()
def get_image_embeddings(
self,
pixel_values,
):
r"""
Returns the image embeddings by passing the pixel values through the vision encoder.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Input pixel values
"""
vision_output = self.vision_encoder(pixel_values=pixel_values)
image_embeddings = vision_output[0]
intermediate_embeddings = vision_output[1]
return image_embeddings, intermediate_embeddings
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
input_points: Optional[torch.FloatTensor] = None,
input_labels: Optional[torch.LongTensor] = None,
input_boxes: Optional[torch.FloatTensor] = None,
input_masks: Optional[torch.LongTensor] = None,
image_embeddings: Optional[torch.FloatTensor] = None,
multimask_output: bool = True,
hq_token_only: bool = False,
attention_similarity: Optional[torch.FloatTensor] = None,
target_embedding: Optional[torch.FloatTensor] = None,
intermediate_embeddings: Optional[list[torch.FloatTensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> list[dict[str, torch.Tensor]]:
r"""
input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):
Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much
better results. The points can be obtained by passing a list of list of list to the processor that will
create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the
second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict
per input point), the third dimension is the number of points per segmentation mask (it is possible to pass
multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
coordinates of the point. If a different number of points is passed either for each image, or for each
mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
computation of the embedding will be skipped for these points using the labels.
input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
official implementation, there are 3 types of labels
- `1`: the point is a point that contains the object of interest
- `0`: the point is a point that does not contain the object of interest
- `-1`: the point corresponds to the background
We added the label:
- `-10`: the point is a padding point, thus should be ignored by the prompt encoder
The padding labels should be automatically done by the processor.
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields to
much better generated masks. The boxes can be obtained by passing a list of list of list to the processor,
that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch
size, the number of boxes per image and the coordinates of the top left and bottom right point of the box.
In the order (`x1`, `y1`, `x2`, `y2`):
- `x1`: the x coordinate of the top left point of the input box
- `y1`: the y coordinate of the top left point of the input box
- `x2`: the x coordinate of the bottom right point of the input box
- `y2`: the y coordinate of the bottom right point of the input box
input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
SAM_HQ model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
generate a corresponding embedding, that will be fed later on to the mask decoder. These masks needs to be
manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
Image embeddings, this is used by the mask decder to generate masks and iou scores. For more memory
efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
multimask_output (`bool`, *optional*):
In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
bounding box if relevant). However, it is possible to just output a single mask, that corresponds to the
"best" mask, by specifying `multimask_output=False`.
hq_token_only (`bool`, *optional*, defaults to `False`):
Whether to use only the HQ token path for mask generation. When False, combines both standard and HQ paths.
This is specific to SAM-HQ's architecture.
attention_similarity (`torch.FloatTensor`, *optional*):
Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
target_embedding (`torch.FloatTensor`, *optional*):
Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
intermediate_embeddings (`List[torch.FloatTensor]`, *optional*):
Intermediate embeddings from vision encoder's non-windowed blocks, used by SAM-HQ for enhanced mask quality.
Required when providing pre-computed image_embeddings instead of pixel_values.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoModel, AutoProcessor
>>> model = AutoModel.from_pretrained("sushmanth/sam_hq_vit_b")
>>> processor = AutoProcessor.from_pretrained("sushmanth/sam_hq_vit_b")
>>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png"
>>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
>>> input_points = [[[400, 650]]] # 2D location of a window on the car
>>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt")
>>> # Get high-quality segmentation mask
>>> outputs = model(**inputs)
>>> # For high-quality mask only
>>> outputs = model(**inputs, hq_token_only=True)
>>> # Postprocess masks
>>> masks = processor.post_process_masks(
... outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
... )
```
"""
if pixel_values is None and image_embeddings is None:
raise ValueError("Either pixel_values or image_embeddings must be provided.")
if pixel_values is not None and image_embeddings is not None:
raise ValueError("Only one of pixel_values and image_embeddings can be provided.")
if input_points is not None and len(input_points.shape) != 4:
raise ValueError(
"The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`."
f" got {input_points.shape}."
)
if input_boxes is not None and len(input_boxes.shape) != 3:
raise ValueError(
"The input_boxes must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`."
f" got {input_boxes.shape}."
)
# Add validation for point and box batch sizes
if input_points is not None and input_boxes is not None:
point_batch_size = input_points.shape[1]
box_batch_size = input_boxes.shape[1]
if point_batch_size != box_batch_size:
raise ValueError(
f"You should provide as many bounding boxes as input points per box. Got {point_batch_size} and {box_batch_size}."
)
image_positional_embeddings = self.get_image_wide_positional_embeddings()
# repeat with batch size
batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings.shape[0]
image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)
if pixel_values is not None:
vision_outputs = self.vision_encoder(pixel_values, **kwargs)
image_embeddings = vision_outputs.last_hidden_state
intermediate_embeddings = vision_outputs.intermediate_embeddings
if input_points is not None and input_labels is None:
input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)
sparse_embeddings, dense_embeddings = self.prompt_encoder(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
input_masks=input_masks,
)
# Predict masks
mask_decoder_output = self.mask_decoder(
image_embeddings=image_embeddings,
image_positional_embeddings=image_positional_embeddings,
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
hq_token_only=hq_token_only,
intermediate_embeddings=intermediate_embeddings,
attention_similarity=attention_similarity,
target_embedding=target_embedding,
)
return SamHQImageSegmentationOutput(
iou_scores=mask_decoder_output[1],
pred_masks=mask_decoder_output[0],
vision_hidden_states=vision_outputs.hidden_states if pixel_values is not None else None,
vision_attentions=vision_outputs.attentions if pixel_values is not None else None,
)
__all__ = [
"SamHQVisionConfig",
"SamHQMaskDecoderConfig",
"SamHQPromptEncoderConfig",
"SamHQConfig",
"SamHQModel",
"SamHQPreTrainedModel",
"SamHQVisionModel",
]
| SamHQModel |
python | doocs__leetcode | solution/3300-3399/3378.Count Connected Components in LCM Graph/Solution2.py | {
"start": 0,
"end": 743
} | class ____:
def dfs(self, node, adj, vis):
if vis[node]:
return
vis[node] = True
for neighbor in adj[node]:
self.dfs(neighbor, adj, vis)
def countComponents(self, nums, threshold):
adj = [[] for _ in range(threshold + 1)]
vis = [False] * (threshold + 1)
ans = 0
for num in nums:
if num > threshold:
ans += 1
continue
for j in range(2 * num, threshold + 1, num):
adj[num].append(j)
adj[j].append(num)
for num in nums:
if num <= threshold and not vis[num]:
self.dfs(num, adj, vis)
ans += 1
return ans
| Solution |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/taskinstance.py | {
"start": 1708,
"end": 2412
} | class ____(StrictBaseModel):
"""Schema for updating TaskInstance to 'RUNNING' state with minimal required fields."""
state: Annotated[
Literal[TIState.RUNNING],
# Specify a default in the schema, but not in code.
WithJsonSchema({"type": "string", "enum": [TIState.RUNNING], "default": TIState.RUNNING}),
]
hostname: str
"""Hostname where this task has started"""
unixname: str
"""Local username of the process where this task has started"""
pid: int
"""Process Identifier on `hostname`"""
start_date: UtcDateTime
"""When the task started executing"""
# Create an enum to give a nice name in the generated datamodels
| TIEnterRunningPayload |
python | pytorch__pytorch | torch/_dynamo/utils.py | {
"start": 164246,
"end": 168577
} | class ____:
def __init__(self, s: str) -> None:
self.s = s
def __repr__(self) -> str:
return self.s
warn_once_cache: set[str] = set()
def warn_once(msg: str, stacklevel: int = 1) -> None:
# Dynamo causes all warnings.warn (in user code and in Dynamo code) to print all the time.
# https://github.com/pytorch/pytorch/issues/128427.
# warn_once is a workaround: if the msg has been warned on before, then we will not
# warn again.
# NB: it's totally ok to store a cache of all the strings: this is what warnings.warn does as well.
if msg in warn_once_cache:
return
warn_once_cache.add(msg)
warnings.warn(msg, stacklevel=stacklevel + 1)
def strip_color_from_string(text: str) -> str:
# This regular expression matches ANSI escape codes
ansi_escape = re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]")
return ansi_escape.sub("", text)
@contextlib.contextmanager
def _disable_saved_tensors_hooks_during_tracing() -> Generator[None, None, None]:
# See NOTE: [Deferring tensor pack/unpack hooks until runtime]
try:
prior = torch._C._autograd._saved_tensors_hooks_set_tracing(True)
yield
finally:
torch._C._autograd._saved_tensors_hooks_set_tracing(prior)
def is_parameter_freezing() -> bool:
return torch._inductor.config.freezing and not torch.is_grad_enabled()
def get_torch_function_mode_stack() -> list[Any]:
return [
get_torch_function_mode_stack_at(i) for i in range(_len_torch_function_stack())
]
def get_torch_function_mode_stack_at(ind: int) -> Any:
assert ind < _len_torch_function_stack() and ind >= 0
return torch._C._get_function_stack_at(ind)
def set_torch_function_mode_stack(stack: list[Any]) -> None:
for _ in range(_len_torch_function_stack()):
_pop_torch_function_stack()
for mode in stack:
_push_on_torch_function_stack(mode)
def clear_torch_function_mode_stack() -> None:
for _ in range(_len_torch_function_stack()):
_pop_torch_function_stack()
def get_current_stream(device: torch.device) -> torch.Stream:
return torch.accelerator.current_stream(device)
# call from C dynamo in order to inspect values in pdb
def _breakpoint_for_c_dynamo(*args: Any) -> None:
breakpoint()
def verify_guard_fn_signature(value: Any) -> None:
fn = value.__metadata_guard__
sig = inspect.signature(fn)
if len(sig.parameters) != 2:
from .exc import InternalTorchDynamoError
raise InternalTorchDynamoError(
"Tensor subclass method __metadata_guard__ must take exactly two subclass metadata arguments"
)
if fn.__self__ != value.__class__:
from .exc import InternalTorchDynamoError
raise InternalTorchDynamoError(
"Tensor subclass method __metadata_guard__ must be a classmethod"
)
def does_not_override_dict_iter_methods(user_cls: Any) -> bool:
return (
user_cls.items in (dict.items, OrderedDict.items)
and user_cls.values in (dict.values, OrderedDict.values)
and user_cls.keys in (dict.keys, OrderedDict.keys)
and user_cls.__iter__ in (dict.__iter__, OrderedDict.__iter__)
)
# Helper functions below are to prevent TorchDynamo to prevent tracing of
# __torch_function__ calls triggered on tensor properties in the pre graph
# bytecode.
@torch._disable_dynamo
def call_size(x: Any, i: int) -> int:
return x.size(i)
@torch._disable_dynamo
def call_stride(x: Any, i: int) -> int:
return x.stride(i)
@torch._disable_dynamo
def call_storage_offset(x: Any) -> int:
return x.storage_offset()
# Helper function to extract relevant parts of a tensor's __dict__ to store in node meta.
# To avoid ref cycles, it's important that no tensors are present here, so leave those out.
def _extract_tensor_dict(t: torch.Tensor) -> dict[str, Any]:
KEYS_TO_COPY = [
"_dynamo_static_input_type",
"tag",
]
tensor_dict = {
key: copy.copy(t.__dict__[key]) for key in KEYS_TO_COPY if key in t.__dict__
}
return tensor_dict
def build_stream(args: tuple[Any], kwargs: dict[Any, Any]) -> torch.Stream:
return torch._C.Stream(*args, **kwargs)
def build_event(args: tuple[Any], kwargs: dict[Any, Any]) -> torch.Event:
return torch._C.Event(*args, **kwargs)
| Lit |
python | kamyu104__LeetCode-Solutions | Python/maximum-product-after-k-increments.py | {
"start": 667,
"end": 1308
} | class ____(object):
def maximumProduct(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
MOD = 10**9+7
cnt = collections.Counter(nums)
min_num = min(cnt.iterkeys())
while k:
c = min(cnt[min_num], k)
cnt[min_num] -= c
cnt[min_num+1] += c
if not cnt[min_num]:
del cnt[min_num]
min_num += 1
k -= c
return reduce(lambda total, x: total*pow(x[0], x[1], MOD)%MOD, cnt.iteritems(), 1)
# Time: O(n + klogn)
# Space: O(1)
import heapq
# heap
| Solution2 |
python | paramiko__paramiko | paramiko/_winapi.py | {
"start": 7400,
"end": 7554
} | class ____(ctypes.Structure):
num = 1
_fields_ = [
("SID", ctypes.c_void_p),
("ATTRIBUTES", ctypes.wintypes.DWORD),
]
| TOKEN_USER |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 6888,
"end": 7055
} | class ____(desc_type):
"""Node for a "returns" annotation (a la -> in Python)."""
def astext(self) -> str:
return ' -> ' + super().astext()
| desc_returns |
python | pypa__setuptools | setuptools/_vendor/wheel/vendored/packaging/_parser.py | {
"start": 746,
"end": 1148
} | class ____(Node):
def serialize(self) -> str:
return str(self)
MarkerVar = Union[Variable, Value]
MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]]
# MarkerList = List[Union["MarkerList", MarkerAtom, str]]
# mypy does not support recursive type definition
# https://github.com/python/mypy/issues/731
MarkerAtom = Any
MarkerList = List[Any]
| Op |
python | pypa__pipenv | pipenv/patched/pip/_internal/metadata/pkg_resources.py | {
"start": 2359,
"end": 8539
} | class ____(BaseDistribution):
def __init__(self, dist: pkg_resources.Distribution) -> None:
self._dist = dist
# This is populated lazily, to avoid loading metadata for all possible
# distributions eagerly.
self.__extra_mapping: Optional[Mapping[NormalizedName, str]] = None
@property
def _extra_mapping(self) -> Mapping[NormalizedName, str]:
if self.__extra_mapping is None:
self.__extra_mapping = {
canonicalize_name(extra): extra for extra in self._dist.extras
}
return self.__extra_mapping
@classmethod
def from_directory(cls, directory: str) -> BaseDistribution:
dist_dir = directory.rstrip(os.sep)
# Build a PathMetadata object, from path to metadata. :wink:
base_dir, dist_dir_name = os.path.split(dist_dir)
metadata = pkg_resources.PathMetadata(base_dir, dist_dir)
# Determine the correct Distribution object type.
if dist_dir.endswith(".egg-info"):
dist_cls = pkg_resources.Distribution
dist_name = os.path.splitext(dist_dir_name)[0]
else:
assert dist_dir.endswith(".dist-info")
dist_cls = pkg_resources.DistInfoDistribution
dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0]
dist = dist_cls(base_dir, project_name=dist_name, metadata=metadata)
return cls(dist)
@classmethod
def from_metadata_file_contents(
cls,
metadata_contents: bytes,
filename: str,
project_name: str,
) -> BaseDistribution:
metadata_dict = {
"METADATA": metadata_contents,
}
dist = pkg_resources.DistInfoDistribution(
location=filename,
metadata=InMemoryMetadata(metadata_dict, filename),
project_name=project_name,
)
return cls(dist)
@classmethod
def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
try:
with wheel.as_zipfile() as zf:
info_dir, _ = parse_wheel(zf, name)
metadata_dict = {
path.split("/", 1)[-1]: read_wheel_metadata_file(zf, path)
for path in zf.namelist()
if path.startswith(f"{info_dir}/")
}
except zipfile.BadZipFile as e:
raise InvalidWheel(wheel.location, name) from e
except UnsupportedWheel as e:
raise UnsupportedWheel(f"{name} has an invalid wheel, {e}")
dist = pkg_resources.DistInfoDistribution(
location=wheel.location,
metadata=InMemoryMetadata(metadata_dict, wheel.location),
project_name=name,
)
return cls(dist)
@property
def location(self) -> Optional[str]:
return self._dist.location
@property
def installed_location(self) -> Optional[str]:
egg_link = egg_link_path_from_location(self.raw_name)
if egg_link:
location = egg_link
elif self.location:
location = self.location
else:
return None
return normalize_path(location)
@property
def info_location(self) -> Optional[str]:
return self._dist.egg_info
@property
def installed_by_distutils(self) -> bool:
# A distutils-installed distribution is provided by FileMetadata. This
# provider has a "path" attribute not present anywhere else. Not the
# best introspection logic, but pip has been doing this for a long time.
try:
return bool(self._dist._provider.path)
except AttributeError:
return False
@property
def canonical_name(self) -> NormalizedName:
return canonicalize_name(self._dist.project_name)
@property
def version(self) -> Version:
return parse_version(self._dist.version)
@property
def raw_version(self) -> str:
return self._dist.version
def is_file(self, path: InfoPath) -> bool:
return self._dist.has_metadata(str(path))
def iter_distutils_script_names(self) -> Iterator[str]:
yield from self._dist.metadata_listdir("scripts")
def read_text(self, path: InfoPath) -> str:
name = str(path)
if not self._dist.has_metadata(name):
raise FileNotFoundError(name)
content = self._dist.get_metadata(name)
if content is None:
raise NoneMetadataError(self, name)
return content
def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
for group, entries in self._dist.get_entry_map().items():
for name, entry_point in entries.items():
name, _, value = str(entry_point).partition("=")
yield EntryPoint(name=name.strip(), value=value.strip(), group=group)
def _metadata_impl(self) -> email.message.Message:
"""
:raises NoneMetadataError: if the distribution reports `has_metadata()`
True but `get_metadata()` returns None.
"""
if isinstance(self._dist, pkg_resources.DistInfoDistribution):
metadata_name = "METADATA"
else:
metadata_name = "PKG-INFO"
try:
metadata = self.read_text(metadata_name)
except FileNotFoundError:
if self.location:
displaying_path = display_path(self.location)
else:
displaying_path = repr(self.location)
logger.warning("No metadata found in %s", displaying_path)
metadata = ""
feed_parser = email.parser.FeedParser()
feed_parser.feed(metadata)
return feed_parser.close()
def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
if extras:
relevant_extras = set(self._extra_mapping) & set(
map(canonicalize_name, extras)
)
extras = [self._extra_mapping[extra] for extra in relevant_extras]
return self._dist.requires(extras)
def iter_provided_extras(self) -> Iterable[NormalizedName]:
return self._extra_mapping.keys()
| Distribution |
python | huggingface__transformers | src/transformers/models/squeezebert/modeling_squeezebert.py | {
"start": 15151,
"end": 15468
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = SqueezeBertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
@auto_docstring
| SqueezeBertOnlyMLMHead |
python | django__django | tests/generic_relations/models.py | {
"start": 1272,
"end": 1839
} | class ____(AbstractComparison):
"""
A model that tests having multiple GenericForeignKeys. One is defined
through an inherited abstract model and one defined directly on this class.
"""
content_type2 = models.ForeignKey(
ContentType, models.CASCADE, related_name="comparative2_set"
)
object_id2 = models.PositiveIntegerField()
other_obj = GenericForeignKey(ct_field="content_type2", fk_field="object_id2")
def __str__(self):
return "%s is %s than %s" % (self.first_obj, self.comparative, self.other_obj)
| Comparison |
python | kamyu104__LeetCode-Solutions | Python/number-of-smooth-descent-periods-of-a-stock.py | {
"start": 29,
"end": 396
} | class ____(object):
def getDescentPeriods(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
result = l = 0
for i in xrange(len(prices)):
l += 1
if i+1 == len(prices) or prices[i]-1 != prices[i+1]:
result += l*(l+1)//2
l = 0
return result
| Solution |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 79808,
"end": 91059
} | class ____(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
run_setup_mappers = "each"
def _fixture(self, label=True, polymorphic=False):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
stmt = (
select(func.max(addresses.c.email_address))
.where(addresses.c.user_id == users.c.id)
.correlate(users)
)
if label:
stmt = stmt.label("email_ad")
else:
stmt = stmt.scalar_subquery()
self.mapper_registry.map_imperatively(
User,
users,
properties={"ead": column_property(stmt)},
with_polymorphic="*" if polymorphic else None,
)
self.mapper_registry.map_imperatively(Address, addresses)
def _func_fixture(self, label=False):
User = self.classes.User
users = self.tables.users
if label:
self.mapper_registry.map_imperatively(
User,
users,
properties={
"foobar": column_property(
func.foob(users.c.name).label(None)
)
},
)
else:
self.mapper_registry.map_imperatively(
User,
users,
properties={
"foobar": column_property(func.foob(users.c.name))
},
)
def test_anon_label_function_auto(self):
self._func_fixture()
User = self.classes.User
s = fixture_session()
u1 = aliased(User)
self.assert_compile(
s.query(User.foobar, u1.foobar),
"SELECT foob(users.name) AS foob_1, foob(users_1.name) AS foob_2 "
"FROM users, users AS users_1",
)
def test_anon_label_function_manual(self):
self._func_fixture(label=True)
User = self.classes.User
s = fixture_session()
u1 = aliased(User)
self.assert_compile(
s.query(User.foobar, u1.foobar),
"SELECT foob(users.name) AS foob_1, foob(users_1.name) AS foob_2 "
"FROM users, users AS users_1",
)
def test_anon_label_ad_hoc_labeling(self):
self._func_fixture()
User = self.classes.User
s = fixture_session()
u1 = aliased(User)
self.assert_compile(
s.query(User.foobar.label("x"), u1.foobar.label("y")),
"SELECT foob(users.name) AS x, foob(users_1.name) AS y "
"FROM users, users AS users_1",
)
def test_order_by_column_prop_string(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = fixture_session()
q = s.query(User).order_by("email_ad")
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses "
"WHERE addresses.user_id = users.id) AS email_ad, "
"users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY email_ad",
)
def test_order_by_column_prop_aliased_string(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = fixture_session()
ua = aliased(User)
q = s.query(ua).order_by("email_ad")
assert_raises_message(
sa.exc.CompileError,
"Can't resolve label reference for ORDER BY / GROUP BY",
q.set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
).statement.compile,
)
def test_order_by_column_labeled_prop_attr_aliased_one(self):
User = self.classes.User
self._fixture(label=True)
ua = aliased(User)
s = fixture_session()
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1",
)
def test_order_by_column_labeled_prop_attr_aliased_two(self):
User = self.classes.User
self._fixture(label=True)
ua = aliased(User)
s = fixture_session()
q = s.query(ua.ead).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, "
"users AS users_1 WHERE addresses.user_id = users_1.id) "
"AS anon_1 ORDER BY anon_1",
)
# we're also testing that the state of "ua" is OK after the
# previous call, so the batching into one test is intentional
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1",
)
def test_order_by_column_labeled_prop_attr_aliased_three(self):
User = self.classes.User
self._fixture(label=True)
ua = aliased(User)
s = fixture_session()
q = s.query(User.ead, ua.ead).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users WHERE addresses.user_id = users.id) "
"AS email_ad, (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users AS users_1 WHERE addresses.user_id = "
"users_1.id) AS anon_1 ORDER BY email_ad, anon_1",
)
q = s.query(User, ua).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users.id) AS "
"email_ad, users.id AS users_id, users.name AS users_name, "
"(SELECT max(addresses.email_address) AS max_1 FROM addresses "
"WHERE addresses.user_id = users_1.id) AS anon_1, users_1.id "
"AS users_1_id, users_1.name AS users_1_name FROM users, "
"users AS users_1 ORDER BY email_ad, anon_1",
)
def test_order_by_column_labeled_prop_attr_aliased_four(self):
User = self.classes.User
self._fixture(label=True, polymorphic=True)
ua = aliased(User)
s = fixture_session()
q = s.query(ua, User.id).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 FROM "
"addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name, "
"users.id AS users_id FROM users AS users_1, "
"users ORDER BY anon_1",
)
def test_order_by_column_unlabeled_prop_attr_aliased_one(self):
User = self.classes.User
self._fixture(label=False)
ua = aliased(User)
s = fixture_session()
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1",
)
def test_order_by_column_unlabeled_prop_attr_aliased_two(self):
User = self.classes.User
self._fixture(label=False)
ua = aliased(User)
s = fixture_session()
q = s.query(ua.ead).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, "
"users AS users_1 WHERE addresses.user_id = users_1.id) "
"AS anon_1 ORDER BY anon_1",
)
# we're also testing that the state of "ua" is OK after the
# previous call, so the batching into one test is intentional
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1",
)
def test_order_by_column_unlabeled_prop_attr_aliased_three(self):
User = self.classes.User
self._fixture(label=False)
ua = aliased(User)
s = fixture_session()
q = s.query(User.ead, ua.ead).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users WHERE addresses.user_id = users.id) "
"AS anon_1, (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users AS users_1 "
"WHERE addresses.user_id = users_1.id) AS anon_2 "
"ORDER BY anon_1, anon_2",
)
q = s.query(User, ua).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users.id) AS "
"anon_1, users.id AS users_id, users.name AS users_name, "
"(SELECT max(addresses.email_address) AS max_1 FROM addresses "
"WHERE addresses.user_id = users_1.id) AS anon_2, users_1.id "
"AS users_1_id, users_1.name AS users_1_name FROM users, "
"users AS users_1 ORDER BY anon_1, anon_2",
)
def test_order_by_column_prop_attr(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = fixture_session()
q = s.query(User).order_by(User.ead)
# this one is a bit of a surprise; this is compiler
# label-order-by logic kicking in, but won't work in more
# complex cases.
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses "
"WHERE addresses.user_id = users.id) AS email_ad, "
"users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY email_ad",
)
def test_order_by_column_prop_attr_non_present(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = fixture_session()
q = s.query(User).options(defer(User.ead)).order_by(User.ead)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY "
"(SELECT max(addresses.email_address) AS max_1 "
"FROM addresses "
"WHERE addresses.user_id = users.id)",
)
| ColumnPropertyTest |
python | huggingface__transformers | src/transformers/models/zamba/modeling_zamba.py | {
"start": 28809,
"end": 32069
} | class ____(nn.Module):
def __init__(self, config: ZambaConfig, layer_idx: Optional[int] = None):
super().__init__()
self.self_attn = ZambaAttention(config, layer_idx)
self.feed_forward = ZambaMLP(config)
self.input_layernorm = ZambaRMSNorm(config.attention_hidden_size, eps=config.rms_norm_eps)
self.pre_ff_layernorm = ZambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
original_hidden_states: torch.Tensor,
layer_idx: int,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[ZambaHybridDynamicCache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
concatenated tensor is then used as input of the pre-attention RMSNorm
(see fig. 2 in https://huggingface.co/papers/2405.16712).
layer_idx (`int`): layer_idx in the forward pass. Used to distinguish Zamba's tied transformer layers.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
"""
hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
layer_idx=layer_idx,
attention_mask=attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
**kwargs,
)
# feed-forward (MLP)
hidden_states = self.pre_ff_layernorm(hidden_states)
hidden_states = self.feed_forward(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
| ZambaAttentionDecoderLayer |
python | pytorch__pytorch | torch/utils/bundled_inputs.py | {
"start": 306,
"end": 22698
} | class ____(NamedTuple):
"""Helper type for bundled inputs.
'value' is the compressed/deflated input that is stored in the model. Value
must be of the same type as the argument to the function that it is a deflated
input for.
'fmt' is a formatable code string that is executed to inflate the compressed data into
the appropriate input. It can use 'value' as an input to the format str. It must result
in a value of the same type as 'value'.
'fmt_fn' is a formatable function code string that is executed to inflate the compressed
data into the appropriate input. It must result in a value of the same type as 'value'.
The function name should be the formatable part of the string.
Note: Only top level InflatableArgs can be inflated. i.e. you cannot place
an inflatable arg inside of some other structure. You should instead create
an inflatable arg such that the fmt code string returns the full structure
of your input.
"""
value: Any
fmt: str = "{}"
fmt_fn: str = ""
def bundle_inputs(
model: torch.jit.ScriptModule,
inputs: Sequence[tuple[Any, ...]] | None | dict[Callable, Sequence[tuple[Any, ...]] | None],
info: list[str] | dict[Callable, list[str]] | None = None,
*,
_receive_inflate_expr: list[str] | None = None,
) -> torch.jit.ScriptModule:
"""Create and return a copy of the specified model with inputs attached.
The original model is not mutated or changed in any way.
Models with bundled inputs can be invoked in a uniform manner by
benchmarking and code coverage tools.
If inputs is passed in as a list then the inputs will be bundled for 'forward'.
If inputs is instead passed in as a map then all the methods specified in the map
will have their corresponding inputs bundled. Info should match watchever type is
chosen for the inputs.
The returned model will support the following methods:
`get_all_bundled_inputs_for_<function_name>() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)`
`get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]`
Returns a dictionary mapping function names to a metadata dictionary.
This nested dictionary maps preset strings like:
'get_inputs_function_name' -> the name of a function attribute in this model that can be
run to get back a list of inputs corresponding to that function.
'info' -> the user provided extra information about the bundled inputs
If forward has bundled inputs then these following functions will also be defined on the returned module:
`get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs(): model(*inp)`
`get_num_bundled_inputs() -> int`
Equivalent to `len(model.get_all_bundled_inputs())`,
but slightly easier to call from C++.
Inputs can be specified in one of two ways:
- The model can define `_generate_bundled_inputs_for_<function_name>`.
If the user chooses this method inputs[<function>] should map to None
- The `inputs` argument to this function can be a dictionary mapping functions to a
list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_<function_name>.
Alternatively if only bundling inputs for forward the map can be omitted and a singular list of inputs
can be provided instead.
The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a
list of inputs, the inner tuple is the list of args that together make up one input.
For inputs of functions that take one arg, this will be a tuple of length one. The Any, ...
is the actual data that makes up the args, e.g. a tensor.
Info is an optional parameter that maps functions to a list of strings providing extra information about that
function's bundled inputs. Alternatively if only bundling inputs for forward the map can be omitted and
a singular list of information can be provided instead. This could be descriptions, expected outputs, etc.
- Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']}
This function will attempt to optimize arguments so that (e.g.)
arguments like `torch.zeros(1000)` will be represented compactly.
Only top-level arguments will be optimized.
Tensors in lists or tuples will not.
"""
if not isinstance(model, torch.jit.ScriptModule):
raise Exception("Only ScriptModule is supported.") # noqa: TRY002
ignored_methods, ignored_attrs = _get_bundled_inputs_attributes_and_methods(model)
clone = torch._C._hack_do_not_use_clone_module_with_class( # type: ignore[attr-defined]
model._c,
ignored_methods,
ignored_attrs,
)
# The above cloning function returns a torch._C.scriptmodule and we need a torch.jit.scriptmodule.
# Fortunately there is a function in _recursive that does exactly that conversion.
cloned_module = wrap_cpp_module(clone)
if isinstance(inputs, dict):
if not isinstance(info, dict) and info is not None:
raise AssertionError("If inputs is a dict, info must be a dict or None")
augment_many_model_functions_with_bundled_inputs(cloned_module, inputs, _receive_inflate_expr, info)
else:
if not isinstance(info, list) and info is not None:
raise AssertionError("If inputs is a list, info must be a list or None")
augment_model_with_bundled_inputs(cloned_module, inputs, _receive_inflate_expr, info)
return cloned_module
def augment_model_with_bundled_inputs(
model: torch.jit.ScriptModule,
inputs: Sequence[tuple[Any, ...]] | None = None,
_receive_inflate_expr: list[str] | None = None, # For debugging.
info: list[str] | None = None, # Optional argument to provide info about forward or its inputs
skip_size_check=False,
) -> None:
"""Add bundled sample inputs to a model for the forward function.
Models with bundled inputs can be invoked in a uniform manner by
benchmarking and code coverage tools.
Augmented models will support the following methods:
`get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs(): model(*inp)`
`get_num_bundled_inputs() -> int`
Equivalent to `len(model.get_all_bundled_inputs())`,
but slightly easier to call from C++.
`get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]`
Returns a dictionary mapping function names to a metadata dictionary.
This nested dictionary maps preset strings like:
'get_inputs_function_name' -> the name of a function attribute in this model that can be
run to get back a list of inputs corresponding to that function.
'info' -> the user provided extra information about the bundled inputs
Inputs can be specified in one of two ways:
- The model can define `_generate_bundled_inputs_for_forward`.
If the user chooses this method inputs should be None
- `inputs` is a list of inputs of form List[Tuple[Any, ...]]. A list of tuples where the elements
of each tuple are the args that make up one input.
"""
if not isinstance(model, torch.jit.ScriptModule):
raise Exception("Only ScriptModule is supported.") # noqa: TRY002
forward: Callable = model.forward
# Sometimes forward won't have a name attached so just in case
if not hasattr(forward, "__name__"):
forward.__name__ = 'forward'
augment_many_model_functions_with_bundled_inputs(
model,
inputs={forward : inputs},
_receive_inflate_expr=_receive_inflate_expr,
info={forward : info} if info else None,
skip_size_check=skip_size_check,
)
def augment_many_model_functions_with_bundled_inputs(
model: torch.jit.ScriptModule,
inputs: dict[Callable, Sequence[tuple[Any, ...]] | None],
_receive_inflate_expr: list[str] | None = None, # For debugging.
info: dict[Callable, list[str]] | None = None, # Optional argument to provide info about the function or its inputs
skip_size_check=False,
) -> None:
"""Add bundled sample inputs to a model for an arbitrary list of public functions.
Models with bundled inputs can be invoked in a uniform manner by
benchmarking and code coverage tools.
Augmented models will support the following methods:
`get_all_bundled_inputs_for_<function_name>() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)`
`get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]`
Returns a dictionary mapping function names to a metadata dictionary.
This nested dictionary maps preset strings like:
'get_inputs_function_name' -> the name of a function attribute in this model that can be
run to get back a list of inputs corresponding to that function.
'info' -> the user provided extra information about the bundled inputs
If forward has bundled inputs then these following functions are also defined:
`get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs(): model(*inp)`
`get_num_bundled_inputs() -> int`
Equivalent to `len(model.get_all_bundled_inputs())`,
but slightly easier to call from C++.
Inputs can be specified in one of two ways:
- The model can define `_generate_bundled_inputs_for_<function_name>`.
If the user chooses this method inputs[<function>] should map to None
- The `inputs` argument to this function can be a dictionary mapping functions to a
list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_<function_name>.
The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a
list of inputs, the inner tuple is the list of args that together make up one input.
For inputs of functions that take one arg, this will be a tuple of length one. The Any, ...
is the actual data that makes up the args, e.g. a tensor.
Info is an optional parameter that maps functions to a list of strings providing extra information about that
function's bundled inputs. This could be descriptions, expected outputs, etc.
- Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']}
This function will attempt to optimize arguments so that (e.g.)
arguments like `torch.zeros(1000)` will be represented compactly.
Only top-level arguments will be optimized.
Tensors in lists or tuples will not.
"""
if not isinstance(model, torch.jit.ScriptModule):
raise Exception("Only ScriptModule is supported.") # noqa: TRY002
if not inputs:
raise Exception("Please provide inputs for at least 1 function") # noqa: TRY002
if hasattr(model, "get_all_bundled_inputs") or hasattr(model, "get_bundled_inputs_functions_and_info"):
raise Exception( # noqa: TRY002
"Models can only be augmented with bundled inputs once. "
"This Model seems to have already been augmented with "
"bundled inputs. Please start afresh with one that "
"doesn't have bundled inputs.",
)
get_bundled_inputs_functions_and_info_template = ""
for function, input_list in inputs.items():
if hasattr(function, "__name__"):
function_name = function.__name__
else:
if hasattr(function, "name"):
function_name = function.name # type: ignore[attr-defined]
else:
raise Exception( # noqa: TRY002
'At least one of your functions has no attribute name please ensure all have one. m.foo.name = "foo"')
if input_list is not None and not isinstance(input_list, Sequence):
raise TypeError(f"Error inputs for function {function_name} is not a Sequence")
function_arg_types = [arg.type for arg in function.schema.arguments[1:]] # type: ignore[attr-defined]
deflated_inputs_type: ListType = ListType(TupleType(function_arg_types))
model._c._register_attribute(f"_bundled_inputs_deflated_{function_name}", deflated_inputs_type, [])
if hasattr(model, "_generate_bundled_inputs_for_" + function_name):
if input_list is not None:
raise Exception( # noqa: TRY002
f"inputs[{function_name}] is not None, but _generate_bundled_inputs_for_{function_name} is already defined"
)
# Model author already defined _generate_bundled_inputs_for_<function_name>.
elif input_list is None or len(input_list) == 0:
raise Exception( # noqa: TRY002
f"inputs for {function_name} must be specified if "
f"_generate_bundled_inputs_for_{function_name} is not already defined"
)
else:
# Iterate over the inputs and args in each input.
# Accumulate `deflated_inputs` as (possibly) compressed values
# and `parts` to be joined into the expression that unpacks them.
deflated_inputs = []
parts = []
for inp_idx, args in enumerate(input_list):
if not isinstance(args, tuple) and not isinstance(args, list): # type: ignore[arg-type]
raise TypeError(
f"Error bundled input for function {function_name} idx: {inp_idx} is not a Tuple or a List"
)
deflated_args = []
parts.append("(")
for arg_idx, arg in enumerate(args):
inflate_helper_fn_name = _get_inflate_helper_fn_name(arg_idx, inp_idx, function_name)
deflated, inflater, helper_definition = _inflate_expr(
arg,
f"deflated[{inp_idx}][{arg_idx}]",
inflate_helper_fn_name,
skip_size_check=skip_size_check,
)
deflated_args.append(deflated)
parts.append(f" {inflater},")
if helper_definition:
model.define(textwrap.dedent(helper_definition))
deflated_inputs.append(tuple(deflated_args))
parts.append("),")
parts.append("")
expr = "\n".join(parts)
# Back-channel return this expr for debugging.
if _receive_inflate_expr is not None:
_receive_inflate_expr.append(expr)
setattr(model, f"_bundled_inputs_deflated_{function_name}", deflated_inputs)
definition = textwrap.dedent("""
def _generate_bundled_inputs_for_{name}(self):
deflated = self._bundled_inputs_deflated_{name}
return [
{expr}
]
""").format(expr=expr, name=function_name)
model.define(definition)
# Define get_all_bundled_inputs_for_<function_name> that caches the generated inputs.
model.define(textwrap.dedent("""
def get_all_bundled_inputs_for_{name}(self):
all_inputs = self._generate_bundled_inputs_for_{name}()
assert all_inputs is not None
return all_inputs
""").format(name=function_name))
# Add to the high level helper methods
inputs_info = repr(info[function]) if info and function in info else '[]'
get_bundled_inputs_functions_and_info_template += f"""
temp_dict : Dict[str,List[str]] = {{}}
info: List[str] = {inputs_info}
temp_dict['info'] = info
temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{function_name}']
all_inputs['{function_name}'] = temp_dict
"""
# To ensure backwards compatibility and a streamlined api for forward these wrappers are provided
if function_name == 'forward':
model.define(textwrap.dedent("""
def get_all_bundled_inputs(self):
return self.get_all_bundled_inputs_for_forward()
"""))
model.define(textwrap.dedent("""
def get_num_bundled_inputs(self):
return len(self.get_all_bundled_inputs_for_forward())
"""))
# Define some high level helper methods that act on all bundled inputs
model.define(textwrap.dedent(f"""
def get_bundled_inputs_functions_and_info(self):
all_inputs : Dict[str, Dict[str,List[str]]] = {{}}
{get_bundled_inputs_functions_and_info_template}
return all_inputs
"""))
def _inflate_expr(
arg: T, ref: str, inflate_helper_fn_name: str, skip_size_check: bool = False
) -> tuple[T | torch.Tensor, str, str | None]:
# Allow custom inflation expressions any object.
# For example, calling custom image-decoding ops.
# Or just use "{}" as the format string to ignore size limits.
if isinstance(arg, InflatableArg):
if arg.fmt_fn:
if arg.fmt not in ["{}", ""]:
raise Exception( # noqa: TRY002
f"Bundled input argument at position '{ref}' has "
f"both arg.fmt_fn => \n{arg.fmt_fn} "
f"\n and arg.fmt => {arg.fmt}. "
"Please choose `arg.fmt` if the deflater is straightforward or "
"`arg.fmt_fn` if you need a function."
)
helper_definition = arg.fmt_fn.format(inflate_helper_fn_name)
expr = f"self.{inflate_helper_fn_name}({ref})"
return arg.value, expr, helper_definition
else:
return arg.value, arg.fmt.format(ref), None
if isinstance(arg, torch.Tensor):
# Small-storage tensors can just be saved directly.
if arg._typed_storage().size() <= MAX_RAW_TENSOR_SIZE or skip_size_check:
return arg, ref, None
# Small contiguous tensors can be cloned to have small storage.
# TODO: Should we do this even for non-contiguous tensors?
if arg.is_contiguous() and arg.numel() <= MAX_RAW_TENSOR_SIZE:
return arg.clone(), ref, None
# Example inputs commonly come from torch.zeros, torch.ones, or torch.full.
# These can be represented compactly.
for fmt in [torch.contiguous_format, torch.channels_last]:
if arg.is_contiguous(memory_format=fmt) and (arg == arg.flatten()[0]).all().item():
return (arg.flatten()[0].clone().expand(*arg.size()),
f"{ref}.contiguous(memory_format={fmt})", None)
# Prevent big tensors from being bundled by default.
# TODO: Provide more useful diagnostics.
raise Exception( # noqa: TRY002
f"Bundled input argument at position '{ref}' is "
f"a tensor with storage size {arg._typed_storage().size()}. "
f"You probably don't want to bundle this as an input. "
)
else:
return arg, ref, None
def _get_bundled_inputs_attributes_and_methods(script_module: torch.jit.ScriptModule) -> tuple[list[str], list[str]]:
methods: list[str] = []
attributes: list[str] = []
# Has bundled inputs for forward
if hasattr(script_module, 'get_all_bundled_inputs'):
methods.append('get_all_bundled_inputs')
methods.append('get_num_bundled_inputs')
methods.append('run_on_bundled_input')
if hasattr(script_module, 'get_bundled_inputs_functions_and_info'):
methods.append('get_bundled_inputs_functions_and_info')
all_info = script_module.get_bundled_inputs_functions_and_info()
for function_name in all_info:
methods.append("get_all_bundled_inputs_for_" + function_name)
methods.append("_generate_bundled_inputs_for_" + function_name)
attributes.append("_bundled_inputs_deflated_" + function_name)
bundled_inputs_fn = getattr(
script_module,
f"get_all_bundled_inputs_for_{function_name}"
)
num_bundled_inputs: int = len(bundled_inputs_fn())
# Check inflate helper functions for each function, argument and bundled input
func = getattr(script_module, function_name)
for arg_idx in range(len(func.schema.arguments) - 1):
for input_idx in range(num_bundled_inputs):
helper_fn_name = _get_inflate_helper_fn_name(
arg_idx=arg_idx,
input_idx=input_idx,
function_name=function_name
)
# if the arg has an InflatableArg with fmt_fn, add the helper function name
if hasattr(script_module, helper_fn_name):
methods.append(helper_fn_name)
return (methods, attributes)
def _get_inflate_helper_fn_name(
arg_idx: int,
input_idx: int,
function_name: str,
) -> str:
return f"_inflate_helper_for_{function_name}_input_{input_idx}_arg_{arg_idx}"
def bundle_randn(*size, dtype=None):
"""Generate a tensor that will be inflated with torch.randn."""
stub = torch.zeros(1, dtype=dtype).expand(*size)
return InflatableArg(value=stub, fmt="torch.randn_like({})")
def bundle_large_tensor(t):
"""Wrap a tensor to allow bundling regardless of size."""
return InflatableArg(value=t, fmt="{}")
| InflatableArg |
python | Lightning-AI__lightning | examples/pytorch/domain_templates/semantic_segmentation.py | {
"start": 5595,
"end": 7404
} | class ____(nn.Module):
"""Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation.
Link - https://arxiv.org/abs/1505.04597
>>> UNet(num_classes=2, num_layers=3) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
UNet(
(layers): ModuleList(
(0): DoubleConv(...)
(1): Down(...)
(2): Down(...)
(3): Up(...)
(4): Up(...)
(5): Conv2d(64, 2, kernel_size=(1, 1), stride=(1, 1))
)
)
"""
def __init__(self, num_classes: int = 19, num_layers: int = 5, features_start: int = 64, bilinear: bool = False):
"""
Args:
num_classes: Number of output classes required (default 19 for KITTI dataset)
num_layers: Number of layers in each side of U-net
features_start: Number of features in first layer
bilinear: Whether to use bilinear interpolation or transposed convolutions for upsampling.
"""
super().__init__()
self.num_layers = num_layers
layers = [DoubleConv(3, features_start)]
feats = features_start
for _ in range(num_layers - 1):
layers.append(Down(feats, feats * 2))
feats *= 2
for _ in range(num_layers - 1):
layers.append(Up(feats, feats // 2, bilinear))
feats //= 2
layers.append(nn.Conv2d(feats, num_classes, kernel_size=1))
self.layers = nn.ModuleList(layers)
def forward(self, x):
xi = [self.layers[0](x)]
# Down path
for layer in self.layers[1 : self.num_layers]:
xi.append(layer(xi[-1]))
# Up path
for i, layer in enumerate(self.layers[self.num_layers : -1]):
xi[-1] = layer(xi[-1], xi[-2 - i])
return self.layers[-1](xi[-1])
| UNet |
python | sympy__sympy | sympy/core/kind.py | {
"start": 1694,
"end": 3005
} | class ____(object, metaclass=KindMeta):
"""
Base class for kinds.
Kind of the object represents the mathematical classification that
the entity falls into. It is expected that functions and classes
recognize and filter the argument by its kind.
Kind of every object must be carefully selected so that it shows the
intention of design. Expressions may have different kind according
to the kind of its arguments. For example, arguments of ``Add``
must have common kind since addition is group operator, and the
resulting ``Add()`` has the same kind.
For the performance, each kind is as broad as possible and is not
based on set theory. For example, ``NumberKind`` includes not only
complex number but expression containing ``S.Infinity`` or ``S.NaN``
which are not strictly number.
Kind may have arguments as parameter. For example, ``MatrixKind()``
may be constructed with one element which represents the kind of its
elements.
``Kind`` behaves in singleton-like fashion. Same signature will
return the same object.
"""
def __new__(cls, *args):
if args in cls._inst:
inst = cls._inst[args]
else:
inst = super().__new__(cls)
cls._inst[args] = inst
return inst
| Kind |
python | instagram__MonkeyType | demo/models.py | {
"start": 830,
"end": 934
} | class ____(enum.Enum):
COMMENTED = "commented"
FOLLOWED = "followed"
LIKED = "liked"
| EventType |
python | ansible__ansible | lib/ansible/executor/stats.py | {
"start": 838,
"end": 3180
} | class ____:
""" holds stats about per-host activity during playbook runs """
def __init__(self):
self.processed = {}
self.failures = {}
self.ok = {}
self.dark = {}
self.changed = {}
self.skipped = {}
self.rescued = {}
self.ignored = {}
# user defined stats, which can be per host or global
self.custom = {}
def increment(self, what, host):
""" helper function to bump a statistic """
self.processed[host] = 1
prev = (getattr(self, what)).get(host, 0)
getattr(self, what)[host] = prev + 1
def decrement(self, what, host):
_what = getattr(self, what)
try:
if _what[host] - 1 < 0:
# This should never happen, but let's be safe
raise KeyError("Don't be so negative")
_what[host] -= 1
except KeyError:
_what[host] = 0
def summarize(self, host):
""" return information about a particular host """
return dict(
ok=self.ok.get(host, 0),
failures=self.failures.get(host, 0),
unreachable=self.dark.get(host, 0),
changed=self.changed.get(host, 0),
skipped=self.skipped.get(host, 0),
rescued=self.rescued.get(host, 0),
ignored=self.ignored.get(host, 0),
)
def set_custom_stats(self, which, what, host=None):
""" allow setting of a custom stat"""
if host is None:
host = '_run'
if host not in self.custom:
self.custom[host] = {which: what}
else:
self.custom[host][which] = what
def update_custom_stats(self, which, what, host=None):
""" allow aggregation of a custom stat"""
if host is None:
host = '_run'
if host not in self.custom or which not in self.custom[host]:
return self.set_custom_stats(which, what, host)
# mismatching types
if not isinstance(what, type(self.custom[host][which])):
return None
if isinstance(what, MutableMapping):
self.custom[host][which] = merge_hash(self.custom[host][which], what)
else:
# let overloaded + take care of other types
self.custom[host][which] += what
| AggregateStats |
python | google__jax | tests/linalg_test.py | {
"start": 4976,
"end": 56229
} | class ____(jtu.JaxTestCase):
@jtu.sample_product(
shape=[(1, 1), (4, 4), (2, 5, 5), (200, 200), (1000, 0, 0)],
dtype=float_types + complex_types,
upper=[True, False]
)
def testCholesky(self, shape, dtype, upper):
rng = jtu.rand_default(self.rng())
def args_maker():
factor_shape = shape[:-1] + (2 * shape[-1],)
a = rng(factor_shape, dtype)
return [np.matmul(a, jnp.conj(T(a)))]
np_fun = partial(np.linalg.cholesky, upper=upper)
jnp_fun = partial(jnp.linalg.cholesky, upper=upper, symmetrize_input=True)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=1e-3)
self._CompileAndCheck(jnp_fun, args_maker)
if jnp.finfo(dtype).bits == 64:
jtu.check_grads(jnp.linalg.cholesky, args_maker(), order=2)
def testCholeskyGradPrecision(self):
rng = jtu.rand_default(self.rng())
a = rng((3, 3), np.float32)
a = np.dot(a, a.T)
jtu.assert_dot_precision(
lax.Precision.HIGHEST, partial(jvp, jnp.linalg.cholesky), (a,), (a,))
  @jtu.sample_product(
    n=[0, 2, 3, 4, 5, 25],  # TODO(mattjj): complex64 unstable on large sizes?
    dtype=float_types + complex_types,
  )
  def testDet(self, n, dtype):
    """Checks jnp.linalg.det against NumPy on random n x n matrices."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng((n, n), dtype)]

    self._CheckAgainstNumpy(np.linalg.det, jnp.linalg.det, args_maker, tol=1e-3)
    self._CompileAndCheck(jnp.linalg.det, args_maker,
                          rtol={np.float64: 1e-13, np.complex128: 1e-13})
def testDetOfSingularMatrix(self):
x = jnp.array([[-1., 3./2], [2./3, -1.]], dtype=np.float32)
self.assertAllClose(np.float32(0), jsp.linalg.det(x))
  @jtu.sample_product(
    shape=[(1, 1), (2, 2), (3, 3), (2, 2, 2), (2, 3, 3), (2, 4, 4), (5, 7, 7)],
    dtype=float_types,
  )
  @jtu.skip_on_flag("jax_skip_slow_tests", True)
  @jtu.skip_on_devices("tpu")
  def testDetGrad(self, shape, dtype):
    """Second-order gradient checks for det, including zero matrices."""
    rng = jtu.rand_default(self.rng())
    a = rng(shape, dtype)
    jtu.check_grads(jnp.linalg.det, (a,), 2, atol=1e-1, rtol=1e-1)
    # make sure there are no NaNs when a matrix is zero
    if len(shape) == 2:
      jtu.check_grads(
        jnp.linalg.det, (jnp.zeros_like(a),), 1, atol=1e-1, rtol=1e-1)
    else:
      # Batched case: zero out one batch element only.
      a[0] = 0
      jtu.check_grads(jnp.linalg.det, (a,), 1, atol=1e-1, rtol=1e-1)
def testDetGradIssue6121(self):
f = lambda x: jnp.linalg.det(x).sum()
x = jnp.ones((16, 1, 1))
jax.grad(f)(x)
jtu.check_grads(f, (x,), 2, atol=1e-1, rtol=1e-1)
def testDetGradOfSingularMatrixCorank1(self):
# Rank 2 matrix with nonzero gradient
a = jnp.array([[ 50, -30, 45],
[-30, 90, -81],
[ 45, -81, 81]], dtype=jnp.float32)
jtu.check_grads(jnp.linalg.det, (a,), 1, atol=1e-1, rtol=1e-1)
  # TODO(phawkins): Test sometimes produces NaNs on TPU.
  @jtu.skip_on_devices("tpu")
  def testDetGradOfSingularMatrixCorank2(self):
    """Gradient of det at a corank-2 (rank-1) singular matrix is zero, not NaN."""
    # Rank 1 matrix with zero gradient
    b = jnp.array([[ 36, -42, 18],
                   [-42, 49, -21],
                   [ 18, -21, 9]], dtype=jnp.float32)
    jtu.check_grads(jnp.linalg.det, (b,), 1, atol=1e-1, rtol=1e-1, eps=1e-1)
  @jtu.sample_product(
    m=[1, 5, 7, 23],
    nq=zip([2, 4, 6, 36], [(1, 2), (2, 2), (1, 2, 3), (3, 3, 1, 4)]),
    dtype=float_types,
  )
  def testTensorsolve(self, m, nq, dtype):
    """Checks jnp.linalg.tensorsolve shape handling and values against NumPy."""
    rng = jtu.rand_default(self.rng())

    # According to numpy docs the shapes are as follows:
    # Coefficient tensor (a), of shape b.shape + Q.
    # And prod(Q) == prod(b.shape)
    # Therefore, n = prod(q)
    n, q = nq
    b_shape = (n, m)
    # To accomplish prod(Q) == prod(b.shape) we append the m extra dim
    # to Q shape
    Q = q + (m,)
    args_maker = lambda: [
        rng(b_shape + Q, dtype), # = a
        rng(b_shape, dtype)]     # = b
    a, b = args_maker()
    result = jnp.linalg.tensorsolve(*args_maker())
    # The solution's shape must be exactly Q.
    self.assertEqual(result.shape, Q)

    self._CheckAgainstNumpy(np.linalg.tensorsolve,
                            jnp.linalg.tensorsolve, args_maker,
                            tol={np.float32: 1e-2, np.float64: 1e-3})
    self._CompileAndCheck(jnp.linalg.tensorsolve,
                          args_maker,
                          rtol={np.float64: 1e-13})
def testTensorsolveAxes(self):
a_shape = (2, 1, 3, 6)
b_shape = (1, 6)
axes = (0, 2)
dtype = "float32"
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]
np_fun = partial(np.linalg.tensorsolve, axes=axes)
jnp_fun = partial(jnp.linalg.tensorsolve, axes=axes)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @jtu.sample_product(
    [dict(dtype=dtype, method=method)
     for dtype in float_types + complex_types
     # The QR-based slogdet is only implemented for real dtypes.
     for method in (["lu"] if jnp.issubdtype(dtype, jnp.complexfloating)
                    else ["lu", "qr"])
    ],
    shape=[(0, 0), (1, 1), (3, 3), (4, 4), (10, 10), (200, 200), (2, 2, 2),
           (2, 3, 3), (3, 2, 2)],
  )
  @jtu.ignore_warning(message="(divide by zero|overflow|invalid value)", category=RuntimeWarning)
  def testSlogdet(self, shape, dtype, method):
    """Checks jnp.linalg.slogdet (both lu and qr methods) against NumPy."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    slogdet = partial(jnp.linalg.slogdet, method=method)
    self._CheckAgainstNumpy(np.linalg.slogdet, slogdet, args_maker,
                            tol=1e-3)
    self._CompileAndCheck(slogdet, args_maker)
@jtu.sample_product(
shape=[(1, 1), (4, 4), (5, 5), (2, 7, 7)],
dtype=float_types + complex_types,
)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testSlogdetGrad(self, shape, dtype):
rng = jtu.rand_default(self.rng())
a = rng(shape, dtype)
jtu.check_grads(jnp.linalg.slogdet, (a,), 2, atol=1e-1, rtol=2e-1)
  def testIssue1213(self):
    """Regression test for issue 1213: slogdet of small negative matrices."""
    # NOTE(review): the loop variable `n` is unused, so the identical check
    # runs five times — presumably `n` was meant to vary the input; verify.
    for n in range(5):
      mat = jnp.array([np.diag(np.ones([5], dtype=np.float32))*(-.01)] * 2)
      args_maker = lambda: [mat]
      self._CheckAgainstNumpy(np.linalg.slogdet, jnp.linalg.slogdet, args_maker,
                              tol=1e-3)
  @jtu.sample_product(
    shape=[(0, 0), (4, 4), (5, 5), (50, 50), (2, 6, 6)],
    dtype=float_types + complex_types,
    compute_left_eigenvectors=[False, True],
    compute_right_eigenvectors=[False, True],
  )
  @jtu.run_on_devices("cpu", "gpu")
  def testEig(self, shape, dtype, compute_left_eigenvectors,
              compute_right_eigenvectors):
    """Checks eigenpair residuals of lax.linalg.eig for all vector combos."""
    rng = jtu.rand_default(self.rng())
    n = shape[-1]
    args_maker = lambda: [rng(shape, dtype)]

    # Norm, adjusted for dimension and type.
    def norm(x):
      norm = np.linalg.norm(x, axis=(-2, -1))
      return norm / ((n + 1) * jnp.finfo(dtype).eps)

    def check_right_eigenvectors(a, w, vr):
      # Residual of A v = w v, in adjusted-norm units.
      self.assertTrue(
        np.all(norm(np.matmul(a, vr) - w[..., None, :] * vr) < 100))

    def check_left_eigenvectors(a, w, vl):
      # Left eigenvectors of A are right eigenvectors of A^H (with conj(w)).
      rank = len(a.shape)
      aH = jnp.conj(a.transpose(list(range(rank - 2)) + [rank - 1, rank - 2]))
      wC = jnp.conj(w)
      check_right_eigenvectors(aH, wC, vl)

    a, = args_maker()
    implementations = [None]
    # CUSOLVER geev is only available from cuSOLVER 11.7.1 and does not
    # compute left eigenvectors.
    if (
        jtu.is_device_cuda()
        and not compute_left_eigenvectors
        and cuda_versions
        and cuda_versions.cusolver_get_version() >= 11701
    ):
      implementations.append(jax.lax.linalg.EigImplementation.CUSOLVER)
    for implementation in implementations:
      results = lax.linalg.eig(
          a, compute_left_eigenvectors=compute_left_eigenvectors,
          compute_right_eigenvectors=compute_right_eigenvectors,
          implementation=implementation)
      w = results[0]

      if compute_left_eigenvectors:
        check_left_eigenvectors(a, w, results[1])
      if compute_right_eigenvectors:
        check_right_eigenvectors(a, w, results[1 + compute_left_eigenvectors])

    self._CompileAndCheck(partial(jnp.linalg.eig), args_maker, rtol=1e-3)
  @jtu.sample_product(
    shape=[(4, 4), (5, 5), (50, 50), (2, 6, 6)],
    dtype=float_types + complex_types,
    compute_left_eigenvectors=[False, True],
    compute_right_eigenvectors=[False, True],
  )
  @jtu.run_on_devices("cpu", "gpu")
  def testEigHandlesNanInputs(self, shape, dtype, compute_left_eigenvectors,
                              compute_right_eigenvectors):
    """Verifies that `eig` fails gracefully if given non-finite inputs."""
    if jtu.is_device_cuda():
      # TODO(phawkins): CUSOLVER's implementation does not pass this test.
      implementation = jax.lax.linalg.EigImplementation.LAPACK
    else:
      implementation = None
    a = jnp.full(shape, jnp.nan, dtype)
    results = lax.linalg.eig(
        a, compute_left_eigenvectors=compute_left_eigenvectors,
        compute_right_eigenvectors=compute_right_eigenvectors,
        implementation=implementation)
    # All outputs (values and requested vectors) must be NaN, not garbage.
    for result in results:
      self.assertTrue(np.all(np.isnan(result)))
  @jtu.sample_product(
    shape=[(4, 4), (5, 5), (8, 8), (7, 6, 6)],
    dtype=float_types + complex_types,
  )
  @jtu.run_on_devices("cpu", "gpu")
  def testEigvalsGrad(self, shape, dtype):
    """Forward- and reverse-mode gradient checks for jnp.linalg.eigvals."""
    # This test sometimes fails for large matrices. I (@j-towns) suspect, but
    # haven't checked, that might be because of perturbations causing the
    # ordering of eigenvalues to change, which will trip up check_grads. So we
    # just test on small-ish matrices.
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    a, = args_maker()
    tol = 1e-4 if dtype in (np.float64, np.complex128) else 1e-1
    jtu.check_grads(lambda x: jnp.linalg.eigvals(x), (a,), order=1,
                    modes=['fwd', 'rev'], rtol=tol, atol=tol)
  @jtu.sample_product(
    shape=[(4, 4), (5, 5), (50, 50)],
    dtype=float_types + complex_types,
  )
  @jtu.run_on_devices("cpu", "gpu")
  def testEigvals(self, shape, dtype):
    """eigvals agrees with eig's eigenvalues; eig returns a namedtuple."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    a, = args_maker()
    result = jnp.linalg.eig(a)
    # Check that eig returns a namedtuple with the right fields
    self.assertTrue(hasattr(result, 'eigenvalues'))
    self.assertTrue(hasattr(result, 'eigenvectors'))
    # Named fields must alias the positional entries.
    self.assertIs(result.eigenvalues, result[0])
    self.assertIs(result.eigenvectors, result[1])
    w1 = result.eigenvalues
    w2 = jnp.linalg.eigvals(a)
    self.assertAllClose(w1, w2, rtol={np.complex64: 1e-5, np.complex128: 2e-14})
@jtu.run_on_devices("cpu", "gpu")
def testEigvalsInf(self):
# https://github.com/jax-ml/jax/issues/2661
x = jnp.array([[jnp.inf]])
self.assertTrue(jnp.all(jnp.isnan(jnp.linalg.eigvals(x))))
@jtu.sample_product(
shape=[(1, 1), (4, 4), (5, 5)],
dtype=float_types + complex_types,
)
@jtu.run_on_devices("cpu", "gpu")
def testEigBatching(self, shape, dtype):
rng = jtu.rand_default(self.rng())
shape = (10,) + shape
args = rng(shape, dtype)
ws, vs = vmap(jnp.linalg.eig)(args)
self.assertTrue(np.all(np.linalg.norm(
np.matmul(args, vs) - ws[..., None, :] * vs) < 1e-3))
  @jtu.sample_product(
    n=[0, 4, 5, 50, 512],
    dtype=float_types + complex_types,
    lower=[True, False],
  )
  def testEigh(self, n, dtype, lower):
    """Checks eigh: orthonormality, eigen-residual, and values vs. NumPy."""
    rng = jtu.rand_default(self.rng())
    eps = np.finfo(dtype).eps
    args_maker = lambda: [rng((n, n), dtype)]

    uplo = "L" if lower else "U"

    a, = args_maker()
    a = (a + np.conj(a.T)) / 2
    # Only the selected triangle is passed; the other half is zeroed.
    w, v = jnp.linalg.eigh(np.tril(a) if lower else np.triu(a),
                           UPLO=uplo, symmetrize_input=False)
    w = w.astype(v.dtype)
    # V must be unitary: V^H V ~ I.
    tol = 2 * n * eps
    self.assertAllClose(
        np.eye(n, dtype=v.dtype),
        np.matmul(np.conj(T(v)), v),
        atol=tol,
        rtol=tol,
    )
    # Eigen-residual: ||A V - V diag(w)|| must be small relative to ||A||.
    with jax.numpy_rank_promotion('allow'):
      tol = 100 * eps
      self.assertLessEqual(
          np.linalg.norm(np.matmul(a, v) - w * v), tol * np.linalg.norm(a)
      )

    self._CompileAndCheck(
        partial(jnp.linalg.eigh, UPLO=uplo), args_maker, rtol=eps
    )

    # Compare eigenvalues against Numpy using double precision. We do not compare
    # eigenvectors because they are not uniquely defined, but the two checks above
    # guarantee that that they satisfy the conditions for being eigenvectors.
    double_type = dtype
    if dtype == np.float32:
      double_type = np.float64
    if dtype == np.complex64:
      double_type = np.complex128
    w_np = np.linalg.eigvalsh(a.astype(double_type))
    tol = 8 * eps
    self.assertAllClose(
        w_np.astype(w.dtype), w, atol=tol * np.linalg.norm(a), rtol=tol
    )
@jax._src.config.explicit_x64_dtypes("allow")
@jtu.run_on_devices("gpu")
@unittest.skip("Needs a large amount of GPU memory, doesn't work in CI")
def testEighLargeMatrix(self):
# https://github.com/jax-ml/jax/issues/33062
n = 16384
A = jnp.eye(n, dtype=jnp.float64)
jax.block_until_ready(jax.lax.linalg.eigh(A))
  @jtu.sample_product(
    start=[0, 1, 63, 64, 65, 255],
    end=[1, 63, 64, 65, 256],
  )
  @jtu.run_on_devices("tpu")  # TODO(rmlarsen: enable on other devices)
  def testEighSubsetByIndex(self, start, end):
    """Checks lax.linalg.eigh with subset_by_index over a 256x256 matrix."""
    if start >= end:
      # Empty/invalid range; nothing to test for this combination.
      return
    dtype = np.float32
    n = 256
    rng = jtu.rand_default(self.rng())
    eps = np.finfo(dtype).eps
    args_maker = lambda: [rng((n, n), dtype)]
    subset_by_index = (start, end)
    k = end - start
    (a,) = args_maker()
    a = (a + np.conj(a.T)) / 2

    v, w = lax.linalg.eigh(
        a, symmetrize_input=False, subset_by_index=subset_by_index
    )
    w = w.astype(v.dtype)

    # Only k eigenpairs should be returned.
    self.assertEqual(v.shape, (n, k))
    self.assertEqual(w.shape, (k,))
    with jax.numpy_rank_promotion("allow"):
      tol = 200 * eps
      self.assertLessEqual(
          np.linalg.norm(np.matmul(a, v) - w * v), tol * np.linalg.norm(a)
      )
    # The k returned eigenvectors must be orthonormal.
    tol = 3 * n * eps
    self.assertAllClose(
        np.eye(k, dtype=v.dtype),
        np.matmul(np.conj(T(v)), v),
        atol=tol,
        rtol=tol,
    )

    self._CompileAndCheck(partial(jnp.linalg.eigh), args_maker, rtol=eps)

    # Compare eigenvalues against Numpy. We do not compare eigenvectors because
    # they are not uniquely defined, but the two checks above guarantee that
    # that they satisfy the conditions for being eigenvectors.
    double_type = dtype
    if dtype == np.float32:
      double_type = np.float64
    if dtype == np.complex64:
      double_type = np.complex128
    w_np = np.linalg.eigvalsh(a.astype(double_type))[
        subset_by_index[0] : subset_by_index[1]
    ]
    tol = 20 * eps
    self.assertAllClose(
        w_np.astype(w.dtype), w, atol=tol * np.linalg.norm(a), rtol=tol
    )
  def testEighZeroDiagonal(self):
    """eigh on a symmetric matrix with an all-zero diagonal stays accurate."""
    a = np.array([[0., -1., -1., 1.],
                  [-1., 0., 1., -1.],
                  [-1., 1., 0., -1.],
                  [1., -1., -1., 0.]], dtype=np.float32)
    w, v = jnp.linalg.eigh(a)
    w = w.astype(v.dtype)
    eps = jnp.finfo(a.dtype).eps
    with jax.numpy_rank_promotion('allow'):
      # Eigen-residual relative to ||a||, within a few ulps.
      self.assertLessEqual(
          np.linalg.norm(np.matmul(a, v) - w * v), 2.5 * eps * np.linalg.norm(a)
      )
def testEighTinyNorm(self):
rng = jtu.rand_default(self.rng())
a = rng((300, 300), dtype=np.float32)
eps = jnp.finfo(a.dtype).eps
a = eps * (a + np.conj(a.T))
w, v = jnp.linalg.eigh(a)
w = w.astype(v.dtype)
with jax.numpy_rank_promotion("allow"):
self.assertLessEqual(
np.linalg.norm(np.matmul(a, v) - w * v), 80 * eps * np.linalg.norm(a)
)
  @jtu.sample_product(
    rank=[1, 3, 299],
  )
  def testEighRankDeficient(self, rank):
    """eigh stays accurate on rank-deficient PSD matrices (A = B B^H)."""
    rng = jtu.rand_default(self.rng())
    eps = jnp.finfo(np.float32).eps
    a = rng((300, rank), dtype=np.float32)
    # a @ a^H has rank at most `rank` < 300.
    a = a @ np.conj(a.T)
    w, v = jnp.linalg.eigh(a)
    w = w.astype(v.dtype)
    with jax.numpy_rank_promotion("allow"):
      self.assertLessEqual(
          np.linalg.norm(np.matmul(a, v) - w * v),
          85 * eps * np.linalg.norm(a),
      )
  @jtu.sample_product(
    n=[0, 4, 5, 50, 512],
    dtype=float_types + complex_types,
    lower=[True, False],
  )
  def testEighIdentity(self, n, dtype, lower):
    """eigh of the identity: unit eigenvalues, orthonormal eigenvectors."""
    tol = np.finfo(dtype).eps
    uplo = "L" if lower else "U"

    a = jnp.eye(n, dtype=dtype)
    w, v = jnp.linalg.eigh(a, UPLO=uplo, symmetrize_input=False)
    w = w.astype(v.dtype)
    self.assertLessEqual(
        np.linalg.norm(np.eye(n) - np.matmul(np.conj(T(v)), v)), tol
    )
    with jax.numpy_rank_promotion('allow'):
      self.assertLessEqual(np.linalg.norm(np.matmul(a, v) - w * v),
                           tol * np.linalg.norm(a))
@jtu.sample_product(
shape=[(4, 4), (5, 5), (50, 50)],
dtype=float_types + complex_types,
)
def testEigvalsh(self, shape, dtype):
rng = jtu.rand_default(self.rng())
n = shape[-1]
def args_maker():
a = rng((n, n), dtype)
a = (a + np.conj(a.T)) / 2
return [a]
self._CheckAgainstNumpy(
np.linalg.eigvalsh, jnp.linalg.eigvalsh, args_maker, tol=2e-5
)
  @jtu.sample_product(
    shape=[(1, 1), (4, 4), (5, 5), (25, 25), (2, 10, 10)],
    dtype=float_types + complex_types,
    lower=[True, False],
  )
  def testEighGrad(self, shape, dtype, lower):
    """Second-order gradient checks for eigh via a sign-normalized wrapper."""
    if platform.system() == "Windows":
      self.skipTest("Skip on Windows due to tolerance issues.")
    rng = jtu.rand_default(self.rng())
    a = rng(shape, dtype)
    a = (a + np.conj(T(a))) / 2
    ones = np.ones((a.shape[-1], a.shape[-1]), dtype=dtype)
    # Keep only the triangle the test exercises.
    a *= np.tril(ones) if lower else np.triu(ones)
    # Gradient checks will fail without symmetrization as the eigh jvp rule
    # is only correct for tangents in the symmetric subspace, whereas the
    # checker checks against unconstrained (co)tangents.
    f = partial(_normalizing_eigh, lower=lower, symmetrize_input=True)
    # Scale tolerances with the matrix norm.
    norm_a = jnp.linalg.norm(a)
    eps = 2e-5 * norm_a
    atol = 5e-3 * norm_a
    rtol = 0.025
    jtu.check_grads(f, (a,), 2, atol=atol, rtol=rtol, eps=eps)
def testEighGradPrecision(self):
rng = jtu.rand_default(self.rng())
a = rng((3, 3), np.float32)
jtu.assert_dot_precision(
lax.Precision.HIGHEST, partial(jvp, jnp.linalg.eigh), (a,), (a,))
def testEighGradRankPromotion(self):
rng = jtu.rand_default(self.rng())
a = rng((10, 3, 3), np.float32)
jvp(jnp.linalg.eigh, (a,), (a,)) # doesn't crash
  @jtu.sample_product(
    shape=[(1, 1), (4, 4), (5, 5), (300, 300)],
    dtype=float_types + complex_types,
  )
  def testEighBatching(self, shape, dtype):
    """vmap of jsp.linalg.eigh gives small eigen-residuals for every batch."""
    rng = jtu.rand_default(self.rng())
    shape = (10,) + shape
    args = rng(shape, dtype)
    # Symmetrize each batch member.
    args = (args + np.conj(T(args))) / 2
    ws, vs = vmap(jsp.linalg.eigh)(args)
    ws = ws.astype(vs.dtype)
    norm = np.max(np.linalg.norm(np.matmul(args, vs) - ws[..., None, :] * vs))
    self.assertLess(norm, 1.4e-2)
  @jtu.sample_product(
    shape=[(1,), (4,), (5,)],
    dtype=(np.int32,),
  )
  def testLuPivotsToPermutation(self, shape, dtype):
    """lu_pivots_to_permutation turns a reversing pivot sequence into the
    full reversed permutation."""
    pivots_size = shape[-1]
    permutation_size = 2 * pivots_size
    # Pivots that swap i with (permutation_size - 1 - i).
    pivots = jnp.arange(permutation_size - 1, pivots_size - 1, -1, dtype=dtype)
    pivots = jnp.broadcast_to(pivots, shape)
    actual = lax.linalg.lu_pivots_to_permutation(pivots, permutation_size)
    expected = jnp.arange(permutation_size - 1, -1, -1, dtype=dtype)
    expected = jnp.broadcast_to(expected, actual.shape)
    self.assertArraysEqual(actual, expected)
  @jtu.sample_product(
    shape=[(1,), (4,), (5,)],
    dtype=(np.int32,),
  )
  def testLuPivotsToPermutationBatching(self, shape, dtype):
    """Same as testLuPivotsToPermutation, but applied through vmap."""
    shape = (10,) + shape
    pivots_size = shape[-1]
    permutation_size = 2 * pivots_size
    pivots = jnp.arange(permutation_size - 1, pivots_size - 1, -1, dtype=dtype)
    pivots = jnp.broadcast_to(pivots, shape)
    batched_fn = vmap(
        lambda x: lax.linalg.lu_pivots_to_permutation(x, permutation_size))
    actual = batched_fn(pivots)
    expected = jnp.arange(permutation_size - 1, -1, -1, dtype=dtype)
    expected = jnp.broadcast_to(expected, actual.shape)
    self.assertArraysEqual(actual, expected)
  @jtu.sample_product(
    [dict(axis=axis, shape=shape, ord=ord)
     for axis, shape in [
       (None, (1,)), (None, (7,)), (None, (5, 8)),
       (0, (9,)), (0, (4, 5)), ((1,), (10, 7, 3)), ((-2,), (4, 8)),
       (-1, (6, 3)), ((0, 2), (3, 4, 5)), ((2, 0), (7, 8, 9)),
       (None, (7, 8, 11))]
     # Valid `ord` values depend on whether the reduction is a vector norm
     # (1-d axis) or a matrix norm (2-d axis / 2-d input).
     for ord in (
       [None] if axis is None and len(shape) > 2
       else [None, 0, 1, 2, 3, -1, -2, -3, jnp.inf, -jnp.inf]
       if (axis is None and len(shape) == 1) or
          isinstance(axis, int) or
          (isinstance(axis, tuple) and len(axis) == 1)
       else [None, 'fro', 1, 2, -1, -2, jnp.inf, -jnp.inf, 'nuc'])
    ],
    keepdims=[False, True],
    dtype=float_types + complex_types,
  )
  def testNorm(self, shape, dtype, ord, axis, keepdims):
    """Checks jnp.linalg.norm against NumPy across ord/axis/keepdims combos."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fn = partial(np.linalg.norm, ord=ord, axis=axis, keepdims=keepdims)
    jnp_fn = partial(jnp.linalg.norm, ord=ord, axis=axis, keepdims=keepdims)
    self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False,
                            tol=1e-3)
    self._CompileAndCheck(jnp_fn, args_maker)
def testStringInfNorm(self):
err, msg = ValueError, r"Invalid order 'inf' for vector norm."
with self.assertRaisesRegex(err, msg):
jnp.linalg.norm(jnp.array([1.0, 2.0, 3.0]), ord="inf")
  @jtu.sample_product(
    shape=[(2, 3), (4, 2, 3), (2, 3, 4, 5)],
    dtype=float_types + complex_types,
    keepdims=[True, False],
    ord=[1, -1, 2, -2, np.inf, -np.inf, 'fro', 'nuc'],
  )
  def testMatrixNorm(self, shape, dtype, keepdims, ord):
    """Checks jnp.linalg.matrix_norm against NumPy for all matrix orders."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fn = partial(np.linalg.matrix_norm, ord=ord, keepdims=keepdims)
    jnp_fn = partial(jnp.linalg.matrix_norm, ord=ord, keepdims=keepdims)
    self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, tol=1e-3)
    self._CompileAndCheck(jnp_fn, args_maker)
@jtu.sample_product(
shape=[(0, 2), (2, 0), (0, 0)],
dtype=float_types + complex_types,
ord=[1, 2, np.inf, 'fro', 'nuc'],
)
def testEmptyMatrixNorm(self, shape, dtype, ord):
x = jnp.zeros(shape, dtype)
norm = jnp.linalg.matrix_norm(x, ord=ord)
self.assertEqual(norm, 0)
  @jtu.sample_product(
    [
      dict(shape=shape, axis=axis)
      for shape in [(3,), (3, 4), (2, 3, 4, 5)]
      for axis in _axis_for_ndim(len(shape))
    ],
    dtype=float_types + complex_types,
    keepdims=[True, False],
    ord=[1, -1, 2, -2, np.inf, -np.inf],
  )
  def testVectorNorm(self, shape, dtype, keepdims, axis, ord):
    """Checks jnp.linalg.vector_norm against NumPy over axes and orders."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fn = partial(np.linalg.vector_norm, ord=ord, keepdims=keepdims, axis=axis)
    jnp_fn = partial(jnp.linalg.vector_norm, ord=ord, keepdims=keepdims, axis=axis)
    # TPU accumulates in lower precision; loosen tolerance there.
    tol = 1E-3 if jtu.test_device_matches(['tpu']) else None
    self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, tol=tol)
    self._CompileAndCheck(jnp_fn, args_maker, tol=tol)
@jtu.sample_product(
dtype=float_types + complex_types,
ord=[1, 2, np.inf],
)
def testEmptyVectorNorm(self, dtype, ord):
x = jnp.zeros(0, dtype)
norm = jnp.linalg.vector_norm(x, ord=ord)
self.assertEqual(norm, 0)
  # jnp.linalg.vecdot is an alias of jnp.vecdot; do a minimal test here.
  @jtu.sample_product(
    [
      dict(lhs_shape=(2, 2, 2), rhs_shape=(2, 2), axis=0),
      dict(lhs_shape=(2, 2, 2), rhs_shape=(2, 2), axis=1),
      dict(lhs_shape=(2, 2, 2), rhs_shape=(2, 2), axis=-1),
    ],
    dtype=int_types + float_types + complex_types
  )
  @jax.default_matmul_precision("float32")
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testVecdot(self, lhs_shape, rhs_shape, axis, dtype):
    """Minimal check of jnp.linalg.vecdot against np.linalg.vecdot."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    np_fn = np.linalg.vecdot
    np_fn = jtu.promote_like_jnp(partial(np_fn, axis=axis))
    jnp_fn = partial(jnp.linalg.vecdot, axis=axis)
    tol = {np.float16: 1e-2, np.float32: 2e-2, np.float64: 1e-12,
           np.complex128: 1e-12}
    self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, tol=tol)
    self._CompileAndCheck(jnp_fn, args_maker, tol=tol)

    # smoke-test for optional kwargs.
    jnp_fn = partial(jnp.linalg.vecdot, axis=axis,
                     precision=lax.Precision.HIGHEST,
                     preferred_element_type=dtype)
    self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, tol=tol)
  # jnp.linalg.matmul is an alias of jnp.matmul; do a minimal test here.
  @jtu.sample_product(
    [
      dict(lhs_shape=(3,), rhs_shape=(3,)),      # vec-vec
      dict(lhs_shape=(2, 3), rhs_shape=(3,)),    # mat-vec
      dict(lhs_shape=(3,), rhs_shape=(3, 4)),    # vec-mat
      dict(lhs_shape=(2, 3), rhs_shape=(3, 4)),  # mat-mat
    ],
    dtype=float_types + complex_types
  )
  @jax.default_matmul_precision("float32")
  def testMatmul(self, lhs_shape, rhs_shape, dtype):
    """Minimal check of jnp.linalg.matmul against np.linalg.matmul."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    np_fn = jtu.promote_like_jnp(np.linalg.matmul)
    jnp_fn = jnp.linalg.matmul
    tol = {np.float16: 1e-2, np.float32: 2e-2, np.float64: 1e-12,
           np.complex128: 1e-12}
    self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, tol=tol)
    self._CompileAndCheck(jnp_fn, args_maker, tol=tol)

    # smoke-test for optional kwargs.
    jnp_fn = partial(jnp.linalg.matmul,
                     precision=lax.Precision.HIGHEST,
                     preferred_element_type=dtype)
    self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, tol=tol)
  # jnp.linalg.tensordot is an alias of jnp.tensordot; do a minimal test here.
  @jtu.sample_product(
    [
      dict(lhs_shape=(2, 2, 2), rhs_shape=(2, 2), axes=0),
      dict(lhs_shape=(2, 2, 2), rhs_shape=(2, 2), axes=1),
      dict(lhs_shape=(2, 2, 2), rhs_shape=(2, 2), axes=2),
    ],
    dtype=float_types + complex_types
  )
  @jax.default_matmul_precision("float32")
  def testTensordot(self, lhs_shape, rhs_shape, axes, dtype):
    """Minimal check of jnp.linalg.tensordot against np.linalg.tensordot."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    np_fn = jtu.promote_like_jnp(partial(np.linalg.tensordot, axes=axes))
    jnp_fn = partial(jnp.linalg.tensordot, axes=axes)
    tol = {np.float16: 1e-2, np.float32: 2e-2, np.float64: 1e-12,
           np.complex128: 1e-12}
    self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, tol=tol)
    self._CompileAndCheck(jnp_fn, args_maker, tol=tol)

    # smoke-test for optional kwargs.
    jnp_fn = partial(jnp.linalg.tensordot, axes=axes,
                     precision=lax.Precision.HIGHEST,
                     preferred_element_type=dtype)
    self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, tol=tol)
  @parameterized.product(
    jtu.sample_product_testcases(
      [
        dict(m=m, n=n, full_matrices=full_matrices, hermitian=hermitian)
        for (m, n), full_matrices in (
            list(
                itertools.product(
                    itertools.product([0, 2, 7, 29, 32, 53], repeat=2),
                    [False, True],
                )
            )
            +
            # Test cases that ensure we are economical when computing the SVD
            # and its gradient. If we form a 400kx400k matrix explicitly we
            # will OOM.
            [((400000, 2), False), ((2, 400000), False)]
        )
        for hermitian in ([False, True] if m == n else [False])
      ],
      b=[(), (3,), (2, 3)],
      dtype=float_types + complex_types,
      compute_uv=[False, True],
    ),
    algorithm=svd_algorithms()
  )
  @jax.default_matmul_precision("float32")
  def testSVD(self, b, m, n, dtype, full_matrices, compute_uv, hermitian,
              algorithm):
    """Exhaustive SVD check: reconstruction, unitarity, values, and JVPs."""
    if hermitian and algorithm is not None:
      # Hermitian SVD doesn't support the algorithm parameter.
      self.skipTest("Hermitian SVD doesn't support the algorithm parameter")
    if jtu.is_device_rocm() and algorithm == lax.linalg.SvdAlgorithm.POLAR:
      self.skipTest("ROCM polar SVD not implemented")
    if (
        jtu.test_device_matches(["cuda"])
        and (algorithm, m, n) in [
            (lax.linalg.SvdAlgorithm.POLAR, 400000, 2),
            (lax.linalg.SvdAlgorithm.POLAR, 2, 400000),
            (lax.linalg.SvdAlgorithm.JACOBI, 400000, 2),
            (lax.linalg.SvdAlgorithm.JACOBI, 2, 400000),
        ]
    ):
      # Test fails with CUDA polar and jacobi decompositions
      self.skipTest("Test fails with CUDA polar and jacobi decompositions")

    # Relative backward error: ||A - USV|| / ||A||, maximized over the batch.
    def compute_max_backward_error(operand, reconstructed_operand):
      error_norm = np.linalg.norm(operand - reconstructed_operand,
                                  axis=(-2, -1))
      backward_error = (error_norm /
                        np.linalg.norm(operand, axis=(-2, -1)))
      max_backward_error = np.amax(backward_error)
      return max_backward_error

    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(b + (m, n), dtype)]

    tol = 100 * jnp.finfo(dtype).eps
    reconstruction_tol = 2 * tol
    unitariness_tol = 3 * tol

    a, = args_maker()
    if hermitian:
      a = a + np.conj(T(a))

    if algorithm is None:
      fun = partial(jnp.linalg.svd, hermitian=hermitian)
    else:
      fun = partial(lax.linalg.svd, algorithm=algorithm)

    out = fun(a, full_matrices=full_matrices, compute_uv=compute_uv)
    if compute_uv:
      # Check the reconstructed matrices
      out = list(out)
      out[1] = out[1].astype(out[0].dtype)  # for strict dtype promotion.
      if m and n:
        if full_matrices:
          # Only the first k columns/rows of U/V participate in A = U S V.
          k = min(m, n)
          if m < n:
            max_backward_error = compute_max_backward_error(
                a, np.matmul(out[1][..., None, :] * out[0], out[2][..., :k, :]))
            self.assertLess(max_backward_error, reconstruction_tol)
          else:
            max_backward_error = compute_max_backward_error(
                a, np.matmul(out[1][..., None, :] * out[0][..., :, :k], out[2]))
            self.assertLess(max_backward_error, reconstruction_tol)
        else:
          max_backward_error = compute_max_backward_error(
              a, np.matmul(out[1][..., None, :] * out[0], out[2]))
          self.assertLess(max_backward_error, reconstruction_tol)

      # Check the unitary properties of the singular vector matrices.
      unitary_mat = np.real(np.matmul(np.conj(T(out[0])), out[0]))
      eye_slice = np.eye(out[0].shape[-1], dtype=unitary_mat.dtype)
      self.assertAllClose(np.broadcast_to(eye_slice, b + eye_slice.shape),
                          unitary_mat, rtol=unitariness_tol,
                          atol=unitariness_tol)
      if m >= n:
        unitary_mat = np.real(np.matmul(np.conj(T(out[2])), out[2]))
        eye_slice = np.eye(out[2].shape[-1], dtype=unitary_mat.dtype)
        self.assertAllClose(np.broadcast_to(eye_slice, b + eye_slice.shape),
                            unitary_mat, rtol=unitariness_tol,
                            atol=unitariness_tol)
      else:
        unitary_mat = np.real(np.matmul(out[2], np.conj(T(out[2]))))
        eye_slice = np.eye(out[2].shape[-2], dtype=unitary_mat.dtype)
        self.assertAllClose(np.broadcast_to(eye_slice, b + eye_slice.shape),
                            unitary_mat, rtol=unitariness_tol,
                            atol=unitariness_tol)
    else:
      self.assertAllClose(np.linalg.svd(a, compute_uv=False), np.asarray(out),
                          atol=1e-4, rtol=3e-4)

    self._CompileAndCheck(partial(fun, full_matrices=full_matrices,
                                  compute_uv=compute_uv),
                          args_maker)

    # JVP check on singular values only, skipped for very large inputs.
    if not compute_uv and a.size < 100000:
      svd = partial(fun, full_matrices=full_matrices, compute_uv=compute_uv)
      # TODO(phawkins): these tolerances seem very loose.
      if dtype == np.complex128:
        jtu.check_jvp(svd, partial(jvp, svd), (a,), rtol=1e-4, atol=1e-4,
                      eps=1e-8)
      else:
        jtu.check_jvp(svd, partial(jvp, svd), (a,), rtol=5e-2, atol=2e-1)

    # Differentiate the reconstruction A(x) = U S V along a random direction
    # and verify the tangent matches the direction itself.
    if compute_uv and (not full_matrices):
      d, = args_maker()
      def f(x):
        u, s, v = jnp.linalg.svd(
          a + x * d,
          full_matrices=full_matrices,
          compute_uv=compute_uv)
        vdiag = jnp.vectorize(jnp.diag, signature='(k)->(k,k)')
        return jnp.matmul(jnp.matmul(u, vdiag(s).astype(u.dtype)), v).real
      _, t_out = jvp(f, (1.,), (1.,))
      if dtype == np.complex128:
        tol = 2e-13
      else:
        tol = 6e-4
      self.assertArraysAllClose(t_out, d.real, atol=tol, rtol=tol)
def testJspSVDBasic(self):
# since jax.scipy.linalg.svd is almost the same as jax.numpy.linalg.svd
# do not check it functionality here
jsp.linalg.svd(np.ones((2, 2), dtype=np.float32))
  @jtu.sample_product(
    shape=[(1, 1), (4, 4), (2, 5), (5, 2), (5, 5), (2, 5, 5)],
    dtype=float_types + complex_types,
    full_matrices=[True, False],
    compute_uv=[True, False],
  )
  @jax.default_matmul_precision("float32")
  def testSVDGrad(self, shape, dtype, full_matrices, compute_uv):
    """Second-order gradient checks for SVD (sign-normalized when U/V kept)."""
    rng = jtu.rand_default(self.rng())
    a = rng(shape, dtype)
    if not compute_uv:
      f = partial(jnp.linalg.svd, full_matrices=False, compute_uv=False)
    else:
      # U and V are only defined up to per-column phases; normalize first.
      f = partial(_normalizing_svd, full_matrices=full_matrices)
    if full_matrices and shape[-1] != shape[-2]:
      self.skipTest("JVP for SVD not implemented for full matrices.")
    jtu.check_grads(f, (a,), order=2, rtol=0.035, eps=1.0 / 512)
  @jtu.sample_product(
    shape=[(0, 2), (2, 0), (3, 4), (3, 3), (4, 3)],
    dtype=[np.float32],
    mode=["reduced", "r", "full", "complete", "raw"],
  )
  def testNumpyQrModes(self, shape, dtype, mode):
    """Checks every jnp.linalg.qr `mode` against np.linalg.qr."""
    rng = jtu.rand_default(self.rng())
    jnp_func = partial(jax.numpy.linalg.qr, mode=mode)
    np_func = partial(np.linalg.qr, mode=mode)
    if mode == "full":
      # NumPy deprecated mode="full"; silence its warning for the comparison.
      np_func = jtu.ignore_warning(category=DeprecationWarning, message="The 'full' option.*")(np_func)
    args_maker = lambda: [rng(shape, dtype)]
    # mode="raw" returns LAPACK-convention outputs with differing dtypes.
    self._CheckAgainstNumpy(np_func, jnp_func, args_maker, rtol=1e-5, atol=1e-5,
                            check_dtypes=(mode != "raw"))
    self._CompileAndCheck(jnp_func, args_maker)
  @jtu.sample_product(
    shape=[(0, 0), (2, 0), (0, 2), (3, 3), (3, 4), (2, 10, 5),
           (2, 200, 100), (64, 16, 5), (33, 7, 3), (137, 9, 5), (20000, 2, 2)],
    dtype=float_types + complex_types,
    full_matrices=[False, True],
  )
  @jax.default_matmul_precision("float32")
  def testQr(self, shape, dtype, full_matrices):
    """QR checks: A ~ QR, Q unitary, Q matches NumPy up to sign, and JVP."""
    if (jtu.test_device_matches(["cuda"]) and
        _is_required_cuda_version_satisfied(12000)):
      self.skipTest("Triggers a bug in cuda-12 b/287345077")
    rng = jtu.rand_default(self.rng())
    m, n = shape[-2:]

    if full_matrices:
      mode, k = "complete", m
    else:
      mode, k = "reduced", min(m, n)

    a = rng(shape, dtype)
    lq, lr = jnp.linalg.qr(a, mode=mode)

    # np.linalg.qr doesn't support batch dimensions. But it seems like an
    # inevitable extension so we support it in our version.
    nq = np.zeros(shape[:-2] + (m, k), dtype)
    nr = np.zeros(shape[:-2] + (k, n), dtype)
    for index in np.ndindex(*shape[:-2]):
      nq[index], nr[index] = np.linalg.qr(a[index], mode=mode)

    max_rank = max(m, n)

    # Norm, adjusted for dimension and type.
    def norm(x):
      n = np.linalg.norm(x, axis=(-2, -1))
      return n / (max(1, max_rank) * jnp.finfo(dtype).eps)

    def compare_orthogonal(q1, q2):
      # Q is unique up to sign, so normalize the sign first.
      ratio = np.divide(np.where(q2 == 0, 0, q1), np.where(q2 == 0, 1, q2))
      sum_of_ratios = ratio.sum(axis=-2, keepdims=True)
      phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
      q1 *= phases
      nm = norm(q1 - q2)
      max_norm = 220 if jtu.is_device_tpu(7, 'x') else 160
      self.assertTrue(np.all(nm < max_norm), msg=f"norm={np.amax(nm)}")

    # Check a ~= qr
    norm_error = norm(a - np.matmul(lq, lr))
    self.assertTrue(np.all(norm_error < 60), msg=np.amax(norm_error))

    # Compare the first 'k' vectors of Q; the remainder form an arbitrary
    # orthonormal basis for the null space.
    compare_orthogonal(nq[..., :k], lq[..., :k])

    # Check that q is close to unitary.
    self.assertTrue(np.all(
        norm(np.eye(k) - np.matmul(np.conj(T(lq)), lq)) < 10))

    # This expresses identity function, which makes us robust to, e.g., the
    # tangents flipping the direction of vectors in Q.
    def qr_and_mul(a):
      q, r = jnp.linalg.qr(a, mode=mode)
      return q @ r

    if m == n or (m > n and not full_matrices):
      jtu.check_jvp(qr_and_mul, partial(jvp, qr_and_mul), (a,), atol=3e-3)
  @jtu.skip_on_devices("tpu")
  def testQrInvalidDtypeCPU(self, shape=(5, 6), dtype=np.float16):
    """QR on float16 must raise a clear error rather than crash."""
    # Regression test for https://github.com/jax-ml/jax/issues/10530
    rng = jtu.rand_default(self.rng())
    arr = rng(shape, dtype)
    if jtu.test_device_matches(['cpu']):
      err, msg = NotImplementedError, "Unsupported dtype float16"
    else:
      err, msg = Exception, "Unsupported dtype"
    with self.assertRaisesRegex(err, msg):
      jax.block_until_ready(jnp.linalg.qr(arr))
@jtu.sample_product(
shape=[(10, 4, 5), (5, 3, 3), (7, 6, 4)],
dtype=float_types + complex_types,
)
def testQrBatching(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args = rng(shape, jnp.float32)
qs, rs = vmap(jsp.linalg.qr)(args)
self.assertTrue(np.all(np.linalg.norm(args - np.matmul(qs, rs)) < 1e-3))
  @jtu.sample_product(
    shape=[(1, 1), (4, 4), (2, 3, 5), (5, 5, 5), (20, 20), (5, 10)],
    pnorm=[jnp.inf, -jnp.inf, 1, -1, 2, -2, 'fro'],
    dtype=float_types + complex_types,
  )
  @jtu.skip_on_devices("gpu")  # TODO(#2203): numerical errors
  def testCond(self, shape, pnorm, dtype):
    """Checks jnp.linalg.cond against NumPy; non-square inputs must raise
    unless p is +-2."""
    def gen_mat():
      # arr_gen = jtu.rand_some_nan(self.rng())
      arr_gen = jtu.rand_default(self.rng())
      res = arr_gen(shape, dtype)
      return res

    def args_gen(p):
      def _args_gen():
        return [gen_mat(), p]
      return _args_gen

    args_maker = args_gen(pnorm)
    if pnorm not in [2, -2] and len(set(shape[-2:])) != 1:
      # Only the 2-norm condition number is defined for non-square matrices.
      with self.assertRaises(ValueError):
        jnp.linalg.cond(*args_maker())
    else:
      self._CheckAgainstNumpy(np.linalg.cond, jnp.linalg.cond, args_maker,
                              check_dtypes=False, tol=1e-3)
      partial_norm = partial(jnp.linalg.cond, p=pnorm)
      self._CompileAndCheck(partial_norm, lambda: [gen_mat()],
                            check_dtypes=False, rtol=1e-03, atol=1e-03)
@jtu.sample_product(
shape=[(1, 1), (4, 4), (6, 2, 3), (3, 4, 2, 6)],
dtype=float_types + complex_types,
)
def testTensorinv(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
np_fun = partial(np.linalg.tensorinv, ind=len(shape) // 2)
jnp_fun = partial(jnp.linalg.tensorinv, ind=len(shape) // 2)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=1E-4)
self._CompileAndCheck(jnp_fun, args_maker)
@jtu.sample_product(
[dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
for lhs_shape, rhs_shape in [
((1, 1), (1, 1)),
((4, 4), (4,)),
((8, 8), (8, 4)),
((2, 2), (3, 2, 2)),
((2, 1, 3, 3), (1, 4, 3, 4)),
((1, 0, 0), (1, 0, 2)),
]
],
dtype=float_types + complex_types,
)
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testSolve(self, lhs_shape, rhs_shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
self._CheckAgainstNumpy(np.linalg.solve, jnp.linalg.solve, args_maker,
tol=1e-3)
self._CompileAndCheck(jnp.linalg.solve, args_maker)
@jtu.sample_product(
lhs_shape=[(2, 2), (2, 2, 2), (2, 2, 2, 2), (2, 2, 2, 2, 2)],
rhs_shape=[(2,), (2, 2), (2, 2, 2), (2, 2, 2, 2)]
)
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testSolveBroadcasting(self, lhs_shape, rhs_shape):
# Batched solve can involve some ambiguities; this test checks
# that we match NumPy's convention in all cases.
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, 'float32'), rng(rhs_shape, 'float32')]
self._CheckAgainstNumpy(np.linalg.solve, jnp.linalg.solve, args_maker, tol=1E-3)
self._CompileAndCheck(jnp.linalg.solve, args_maker)
@jtu.sample_product(
shape=[(1, 1), (4, 4), (2, 5, 5), (100, 100), (5, 5, 5), (0, 0)],
dtype=float_types,
)
def testInv(self, shape, dtype):
rng = jtu.rand_default(self.rng())
def args_maker():
a = _random_invertible(rng=rng, shape=shape, dtype=dtype)
return [a]
self._CheckAgainstNumpy(np.linalg.inv, jnp.linalg.inv, args_maker,
tol=1e-3)
self._CompileAndCheck(jnp.linalg.inv, args_maker)
@jtu.sample_product(
[dict(shape=shape, hermitian=hermitian)
for shape in [(1, 1), (4, 4), (3, 10, 10), (2, 70, 7), (2000, 7),
(7, 1000), (70, 7, 2), (2, 0, 0), (3, 0, 2), (1, 0),
(400000, 2), (2, 400000)]
for hermitian in ([False, True] if shape[-1] == shape[-2] else [False])],
dtype=float_types + complex_types,
)
@jtu.ignore_warning(message="invalid value", category=RuntimeWarning)
def testPinv(self, shape, hermitian, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
jnp_fn = partial(jnp.linalg.pinv, hermitian=hermitian)
def np_fn(a):
# Symmetrize the input matrix to match the jnp behavior.
if hermitian:
a = (a + T(a.conj())) / 2
return np.linalg.pinv(a, hermitian=hermitian)
self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, tol=1e-4)
self._CompileAndCheck(jnp_fn, args_maker, atol=1e-5)
# TODO(phawkins): 6e-2 seems like a very loose tolerance.
jtu.check_grads(jnp_fn, args_maker(), 1, rtol=6e-2, atol=1e-3)
def testPinvRcond(self):
x = jnp.ones((3, 3))
with self.assertRaisesWithLiteralMatch(
ValueError, "pinv: only one of rtol and rcond may be specified."):
jnp.linalg.pinv(x, rcond=1E-2, rtol=1E-2)
self.assertArraysEqual(
jnp.linalg.pinv(x, rcond=1E-2),
jnp.linalg.pinv(x, rtol=1E-2)
)
def testPinvGradIssue2792(self):
def f(p):
a = jnp.array([[0., 0.],[-p, 1.]], jnp.float32) * 1 / (1 + p**2)
return jnp.linalg.pinv(a)
j = jax.jacobian(f)(jnp.float32(2.))
self.assertAllClose(jnp.array([[0., -1.], [ 0., 0.]], jnp.float32), j)
expected = jnp.array([[[[-1., 0.], [ 0., 0.]], [[0., -1.], [0., 0.]]],
[[[0., 0.], [-1., 0.]], [[0., 0.], [0., -1.]]]],
dtype=jnp.float32)
self.assertAllClose(
expected, jax.jacobian(jnp.linalg.pinv)(jnp.eye(2, dtype=jnp.float32)))
@jtu.sample_product(
shape=[(1, 1), (2, 2), (4, 4), (5, 5), (1, 2, 2), (2, 3, 3), (2, 5, 5)],
dtype=float_types + complex_types,
n=[-5, -2, -1, 0, 1, 2, 3, 4, 5, 10],
)
@jax.default_matmul_precision("float32")
def testMatrixPower(self, shape, dtype, n):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(partial(np.linalg.matrix_power, n=n),
partial(jnp.linalg.matrix_power, n=n),
args_maker, tol=1e-3)
self._CompileAndCheck(partial(jnp.linalg.matrix_power, n=n), args_maker,
rtol=1e-3)
def testMatrixPowerBool(self):
# Regression test for https://github.com/jax-ml/jax/issues/28603
mat = np.array([[True,True], [False,True]])
np_result = np.linalg.matrix_power(mat, 2)
jnp_result = jnp.linalg.matrix_power(mat, 2)
self.assertArraysEqual(np_result, jnp_result)
@jtu.sample_product(
shape=[(3, ), (1, 2), (8, 5), (4, 4), (5, 5), (50, 50), (3, 4, 5),
(2, 3, 4, 5)],
dtype=float_types + complex_types,
)
def testMatrixRank(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
a, = args_maker()
self._CheckAgainstNumpy(np.linalg.matrix_rank, jnp.linalg.matrix_rank,
args_maker, check_dtypes=False, tol=1e-3)
self._CompileAndCheck(jnp.linalg.matrix_rank, args_maker,
check_dtypes=False, rtol=1e-3)
def testMatrixRankTol(self):
x = jnp.ones((3, 3))
with self.assertRaisesWithLiteralMatch(
ValueError, "matrix_rank: only one of tol or rtol may be specified."):
jnp.linalg.matrix_rank(x, rtol=1E-2, tol=1E-2)
self.assertArraysEqual(
jnp.linalg.matrix_rank(x, rtol=1E-2),
jnp.linalg.matrix_rank(x, tol=1E-2)
)
@jtu.sample_product(
shapes=[
[(3, ), (3, 1)], # quick-out codepath
[(1, 3), (3, 5), (5, 2)], # multi_dot_three codepath
[(1, 3), (3, 5), (5, 2), (2, 7), (7, )] # dynamic programming codepath
],
dtype=float_types + complex_types,
)
def testMultiDot(self, shapes, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [[rng(shape, dtype) for shape in shapes]]
np_fun = np.linalg.multi_dot
jnp_fun = partial(jnp.linalg.multi_dot, precision=lax.Precision.HIGHEST)
tol = {np.float32: 1e-4, np.float64: 1e-10,
np.complex64: 1e-4, np.complex128: 1e-10}
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker,
atol=tol, rtol=tol)
@jtu.sample_product(
[dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
for lhs_shape, rhs_shape in [
((1, 1), (1, 1)),
((4, 6), (4,)),
((6, 6), (6, 1)),
((8, 6), (8, 4)),
((0, 3), (0,)),
((3, 0), (3,)),
((3, 1), (3, 0)),
]
],
rcond=[-1, None, 0.5],
dtype=float_types + complex_types,
)
def testLstsq(self, lhs_shape, rhs_shape, dtype, rcond):
rng = jtu.rand_default(self.rng())
np_fun = partial(np.linalg.lstsq, rcond=rcond)
jnp_fun = partial(jnp.linalg.lstsq, rcond=rcond)
jnp_fun_numpy_resid = partial(jnp.linalg.lstsq, rcond=rcond, numpy_resid=True)
tol = {np.float32: 1e-4, np.float64: 1e-12,
np.complex64: 1e-5, np.complex128: 1e-12}
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun_numpy_resid, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)
# Disabled because grad is flaky for low-rank inputs.
# TODO:
# jtu.check_grads(lambda *args: jnp_fun(*args)[0], args_maker(), order=2, atol=1e-2, rtol=1e-2)
@jtu.sample_product(
shape=[(2, 1), (2, 2), (1, 2)]
)
def testLstsqZeroMatrix(self, shape):
# Regression test for https://github.com/jax-ml/jax/issues/32666
args_maker = lambda: [np.zeros(shape), np.ones((shape))]
np_fun = np.linalg.lstsq
jnp_fun = partial(jnp.linalg.lstsq, numpy_resid=True)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
# Regression test for incorrect type for eigenvalues of a complex matrix.
def testIssue669(self):
def test(x):
val, vec = jnp.linalg.eigh(x)
return jnp.real(jnp.sum(val))
grad_test_jc = jit(grad(jit(test)))
xc = np.eye(3, dtype=np.complex64)
self.assertAllClose(xc, grad_test_jc(xc))
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testIssue1151(self):
rng = self.rng()
A = jnp.array(rng.randn(100, 3, 3), dtype=jnp.float32)
b = jnp.array(rng.randn(100, 3, 1), dtype=jnp.float32)
x = jnp.linalg.solve(A, b)
self.assertAllClose(vmap(jnp.dot)(A, x), b, atol=2e-3, rtol=1e-2)
_ = jax.jacobian(jnp.linalg.solve, argnums=0)(A, b)
_ = jax.jacobian(jnp.linalg.solve, argnums=1)(A, b)
_ = jax.jacobian(jnp.linalg.solve, argnums=0)(A[0], b[0])
_ = jax.jacobian(jnp.linalg.solve, argnums=1)(A[0], b[0])
@jtu.skip_on_flag("jax_skip_slow_tests", True)
@jax.legacy_prng_key("allow")
def testIssue1383(self):
seed = jax.random.PRNGKey(0)
tmp = jax.random.uniform(seed, (2,2))
a = jnp.dot(tmp, tmp.T)
def f(inp):
val, vec = jnp.linalg.eigh(inp)
return jnp.dot(jnp.dot(vec, inp), vec.T)
grad_func = jax.jacfwd(f)
hess_func = jax.jacfwd(grad_func)
cube_func = jax.jacfwd(hess_func)
self.assertFalse(np.any(np.isnan(cube_func(a))))
@jtu.sample_product(
[dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape, axis=axis)
for lhs_shape, rhs_shape, axis in [
[(3,), (3,), -1],
[(2, 3), (2, 3), -1],
[(3, 4), (3, 4), 0],
[(3, 5), (3, 4, 5), 0]
]],
lhs_dtype=jtu.dtypes.numeric,
rhs_dtype=jtu.dtypes.numeric,
)
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testCross(self, lhs_shape, rhs_shape, lhs_dtype, rhs_dtype, axis):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
lax_fun = partial(jnp.linalg.cross, axis=axis)
np_fun = jtu.promote_like_jnp(partial(np.linalg.cross, axis=axis))
with jtu.strict_promotion_if_dtypes_match([lhs_dtype, rhs_dtype]):
self._CheckAgainstNumpy(np_fun, lax_fun, args_maker)
self._CompileAndCheck(lax_fun, args_maker)
@jtu.sample_product(
lhs_shape=[(0,), (3,), (5,)],
rhs_shape=[(0,), (3,), (5,)],
lhs_dtype=jtu.dtypes.numeric,
rhs_dtype=jtu.dtypes.numeric,
)
def testOuter(self, lhs_shape, rhs_shape, lhs_dtype, rhs_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
lax_fun = jnp.linalg.outer
np_fun = jtu.promote_like_jnp(np.linalg.outer)
with jtu.strict_promotion_if_dtypes_match([lhs_dtype, rhs_dtype]):
self._CheckAgainstNumpy(np_fun, lax_fun, args_maker)
self._CompileAndCheck(lax_fun, args_maker)
@jtu.sample_product(
shape = [(2, 3), (3, 2), (3, 3, 4), (4, 3, 3), (2, 3, 4, 5)],
dtype = jtu.dtypes.all,
offset=range(-2, 3)
)
def testDiagonal(self, shape, dtype, offset):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
lax_fun = partial(jnp.linalg.diagonal, offset=offset)
np_fun = partial(np.linalg.diagonal, offset=offset)
self._CheckAgainstNumpy(np_fun, lax_fun, args_maker)
self._CompileAndCheck(lax_fun, args_maker)
def testTrace(self):
shape, dtype, offset, out_dtype = (3, 4), "float32", 0, None
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
lax_fun = partial(jnp.linalg.trace, offset=offset, dtype=out_dtype)
np_fun = partial(np.linalg.trace, offset=offset)
self._CheckAgainstNumpy(np_fun, lax_fun, args_maker)
self._CompileAndCheck(lax_fun, args_maker)
| NumpyLinalgTest |
python | pypa__warehouse | tests/unit/accounts/test_views.py | {
"start": 143268,
"end": 143479
} | class ____:
def test_profile_callout_returns_user(self):
user = pretend.stub()
request = pretend.stub()
assert views.profile_callout(user, request) == {"user": user}
| TestProfileCallout |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_workflows.py | {
"start": 6535,
"end": 8025
} | class ____:
@mock.patch(BASE_PATH.format("Workflow"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
def test_execute(self, mock_hook, mock_object):
timestamp = Timestamp()
timestamp.FromDatetime(
datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(minutes=5)
)
workflow_mock = mock.MagicMock()
workflow_mock.start_time = timestamp
mock_hook.return_value.list_workflows.return_value = [workflow_mock]
op = WorkflowsListWorkflowsOperator(
task_id="test_task",
location=LOCATION,
project_id=PROJECT_ID,
filter_=FILTER_,
order_by=ORDER_BY,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
context = mock.MagicMock()
result = op.execute(context=context)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.list_workflows.assert_called_once_with(
location=LOCATION,
project_id=PROJECT_ID,
filter_=FILTER_,
order_by=ORDER_BY,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
assert result == [mock_object.to_dict.return_value]
| TestWorkflowsListWorkflowsOperator |
python | etianen__django-reversion | tests/test_app/tests/base.py | {
"start": 2692,
"end": 2800
} | class ____:
def setUp(self):
super().setUp()
reversion.register(TestModel)
| TestModelMixin |
python | has2k1__plotnine | plotnine/scales/scale_linetype.py | {
"start": 782,
"end": 1092
} | class ____(scale_linetype):
"""
Scale for line patterns
"""
_aesthetics = ["linetype"]
def __post_init__(self):
super().__post_init__()
warn(
"Using linetype for an ordinal variable is not advised.",
PlotnineWarning,
)
| scale_linetype_ordinal |
python | pandas-dev__pandas | pandas/tests/frame/test_reductions.py | {
"start": 66032,
"end": 78408
} | class ____:
@pytest.mark.parametrize(
"opname, dtype, exp_value, exp_dtype",
[
("sum", np.int8, 0, np.int64),
("prod", np.int8, 1, np.int_),
("sum", np.int64, 0, np.int64),
("prod", np.int64, 1, np.int64),
("sum", np.uint8, 0, np.uint64),
("prod", np.uint8, 1, np.uint),
("sum", np.uint64, 0, np.uint64),
("prod", np.uint64, 1, np.uint64),
("sum", np.float32, 0, np.float32),
("prod", np.float32, 1, np.float32),
("sum", np.float64, 0, np.float64),
],
)
def test_df_empty_min_count_0(self, opname, dtype, exp_value, exp_dtype):
df = DataFrame({0: [], 1: []}, dtype=dtype)
result = getattr(df, opname)(min_count=0)
expected = Series([exp_value, exp_value], dtype=exp_dtype, index=range(2))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"opname, dtype, exp_dtype",
[
("sum", np.int8, np.float64),
("prod", np.int8, np.float64),
("sum", np.int64, np.float64),
("prod", np.int64, np.float64),
("sum", np.uint8, np.float64),
("prod", np.uint8, np.float64),
("sum", np.uint64, np.float64),
("prod", np.uint64, np.float64),
("sum", np.float32, np.float32),
("prod", np.float32, np.float32),
("sum", np.float64, np.float64),
],
)
def test_df_empty_min_count_1(self, opname, dtype, exp_dtype):
df = DataFrame({0: [], 1: []}, dtype=dtype)
result = getattr(df, opname)(min_count=1)
expected = Series([np.nan, np.nan], dtype=exp_dtype, index=Index([0, 1]))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"opname, dtype, exp_value, exp_dtype",
[
("sum", "Int8", 0, ("Int32" if is_windows_np2_or_is32 else "Int64")),
("prod", "Int8", 1, ("Int32" if is_windows_np2_or_is32 else "Int64")),
("sum", "Int64", 0, "Int64"),
("prod", "Int64", 1, "Int64"),
("sum", "UInt8", 0, ("UInt32" if is_windows_np2_or_is32 else "UInt64")),
("prod", "UInt8", 1, ("UInt32" if is_windows_np2_or_is32 else "UInt64")),
("sum", "UInt64", 0, "UInt64"),
("prod", "UInt64", 1, "UInt64"),
("sum", "Float32", 0, "Float32"),
("prod", "Float32", 1, "Float32"),
("sum", "Float64", 0, "Float64"),
],
)
def test_df_empty_nullable_min_count_0(self, opname, dtype, exp_value, exp_dtype):
df = DataFrame({0: [], 1: []}, dtype=dtype)
result = getattr(df, opname)(min_count=0)
expected = Series([exp_value, exp_value], dtype=exp_dtype, index=Index([0, 1]))
tm.assert_series_equal(result, expected)
# TODO: why does min_count=1 impact the resulting Windows dtype
# differently than min_count=0?
@pytest.mark.parametrize(
"opname, dtype, exp_dtype",
[
("sum", "Int8", ("Int32" if is_windows_or_is32 else "Int64")),
("prod", "Int8", ("Int32" if is_windows_or_is32 else "Int64")),
("sum", "Int64", "Int64"),
("prod", "Int64", "Int64"),
("sum", "UInt8", ("UInt32" if is_windows_or_is32 else "UInt64")),
("prod", "UInt8", ("UInt32" if is_windows_or_is32 else "UInt64")),
("sum", "UInt64", "UInt64"),
("prod", "UInt64", "UInt64"),
("sum", "Float32", "Float32"),
("prod", "Float32", "Float32"),
("sum", "Float64", "Float64"),
],
)
def test_df_empty_nullable_min_count_1(self, opname, dtype, exp_dtype):
df = DataFrame({0: [], 1: []}, dtype=dtype)
result = getattr(df, opname)(min_count=1)
expected = Series([pd.NA, pd.NA], dtype=exp_dtype, index=Index([0, 1]))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
{"a": [0, 1, 2], "b": [pd.NaT, pd.NaT, pd.NaT]},
{"a": [0, 1, 2], "b": [Timestamp("1990-01-01"), pd.NaT, pd.NaT]},
{
"a": [0, 1, 2],
"b": [
Timestamp("1990-01-01"),
Timestamp("1991-01-01"),
Timestamp("1992-01-01"),
],
},
{
"a": [0, 1, 2],
"b": [pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.NaT],
},
{
"a": [0, 1, 2],
"b": [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
],
},
],
)
def test_df_cov_pd_nat(self, data):
# GH #53115
df = DataFrame(data)
with pytest.raises(TypeError, match="not supported for cov"):
df.cov()
def test_sum_timedelta64_skipna_false():
# GH#17235
arr = np.arange(8).astype(np.int64).view("m8[s]").reshape(4, 2)
arr[-1, -1] = "Nat"
df = DataFrame(arr)
assert (df.dtypes == arr.dtype).all()
result = df.sum(skipna=False)
expected = Series([pd.Timedelta(seconds=12), pd.NaT], dtype="m8[s]")
tm.assert_series_equal(result, expected)
result = df.sum(axis=0, skipna=False)
tm.assert_series_equal(result, expected)
result = df.sum(axis=1, skipna=False)
expected = Series(
[
pd.Timedelta(seconds=1),
pd.Timedelta(seconds=5),
pd.Timedelta(seconds=9),
pd.NaT,
],
dtype="m8[s]",
)
tm.assert_series_equal(result, expected)
def test_mixed_frame_with_integer_sum():
# https://github.com/pandas-dev/pandas/issues/34520
df = DataFrame([["a", 1]], columns=list("ab"))
df = df.astype({"b": "Int64"})
result = df.sum()
expected = Series(["a", 1], index=["a", "b"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numeric_only", [True, False, None])
@pytest.mark.parametrize("method", ["min", "max"])
def test_minmax_extensionarray(method, numeric_only):
# https://github.com/pandas-dev/pandas/issues/32651
int64_info = np.iinfo("int64")
ser = Series([int64_info.max, None, int64_info.min], dtype=pd.Int64Dtype())
df = DataFrame({"Int64": ser})
result = getattr(df, method)(numeric_only=numeric_only)
expected = Series(
[getattr(int64_info, method)],
dtype="Int64",
index=Index(["Int64"]),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ts_value", [Timestamp("2000-01-01"), pd.NaT])
def test_frame_mixed_numeric_object_with_timestamp(ts_value):
# GH 13912
df = DataFrame({"a": [1], "b": [1.1], "c": ["foo"], "d": [ts_value]})
with pytest.raises(TypeError, match="does not support operation|Cannot perform"):
df.sum()
def test_prod_sum_min_count_mixed_object():
# https://github.com/pandas-dev/pandas/issues/41074
df = DataFrame([1, "a", True])
result = df.prod(axis=0, min_count=1, numeric_only=False)
expected = Series(["a"], dtype=object)
tm.assert_series_equal(result, expected)
msg = re.escape("unsupported operand type(s) for +: 'int' and 'str'")
with pytest.raises(TypeError, match=msg):
df.sum(axis=0, min_count=1, numeric_only=False)
@pytest.mark.parametrize("method", ["min", "max", "mean", "median", "skew", "kurt"])
@pytest.mark.parametrize("numeric_only", [True, False])
@pytest.mark.parametrize("dtype", ["float64", "Float64"])
def test_reduction_axis_none_returns_scalar(method, numeric_only, dtype):
# GH#21597 As of 2.0, axis=None reduces over all axes.
df = DataFrame(np.random.default_rng(2).standard_normal((4, 4)), dtype=dtype)
result = getattr(df, method)(axis=None, numeric_only=numeric_only)
np_arr = df.to_numpy(dtype=np.float64)
if method in {"skew", "kurt"}:
comp_mod = pytest.importorskip("scipy.stats")
if method == "kurt":
method = "kurtosis"
expected = getattr(comp_mod, method)(np_arr, bias=False, axis=None)
tm.assert_almost_equal(result, expected)
else:
expected = getattr(np, method)(np_arr, axis=None)
assert result == expected
@pytest.mark.parametrize(
"kernel",
[
"corr",
"corrwith",
"cov",
"idxmax",
"idxmin",
"kurt",
"max",
"mean",
"median",
"min",
"prod",
"quantile",
"sem",
"skew",
"std",
"sum",
"var",
],
)
def test_fails_on_non_numeric(kernel):
# GH#46852
df = DataFrame({"a": [1, 2, 3], "b": object})
args = (df,) if kernel == "corrwith" else ()
msg = "|".join(
[
"not allowed for this dtype",
"argument must be a string or a number",
"not supported between instances of",
"unsupported operand type",
"argument must be a string or a real number",
]
)
if kernel == "median":
# slightly different message on different builds
msg1 = (
r"Cannot convert \[\[<class 'object'> <class 'object'> "
r"<class 'object'>\]\] to numeric"
)
msg2 = (
r"Cannot convert \[<class 'object'> <class 'object'> "
r"<class 'object'>\] to numeric"
)
msg = "|".join([msg1, msg2])
with pytest.raises(TypeError, match=msg):
getattr(df, kernel)(*args)
@pytest.mark.parametrize(
"method",
[
"all",
"any",
"count",
"idxmax",
"idxmin",
"kurt",
"kurtosis",
"max",
"mean",
"median",
"min",
"nunique",
"prod",
"product",
"sem",
"skew",
"std",
"sum",
"var",
],
)
@pytest.mark.parametrize("min_count", [0, 2])
def test_numeric_ea_axis_1(
method, skipna, min_count, any_numeric_ea_dtype, using_nan_is_na
):
# GH 54341
df = DataFrame(
{
"a": Series([0, 1, 2, 3], dtype=any_numeric_ea_dtype),
"b": Series([0, 1, pd.NA, 3], dtype=any_numeric_ea_dtype),
},
)
expected_df = DataFrame(
{
"a": [0.0, 1.0, 2.0, 3.0],
"b": [0.0, 1.0, np.nan, 3.0],
},
)
if method in ("count", "nunique"):
expected_dtype = "int64"
elif method in ("all", "any"):
expected_dtype = "boolean"
elif method in (
"kurt",
"kurtosis",
"mean",
"median",
"sem",
"skew",
"std",
"var",
) and not any_numeric_ea_dtype.startswith("Float"):
expected_dtype = "Float64"
else:
expected_dtype = any_numeric_ea_dtype
kwargs = {}
if method not in ("count", "nunique", "quantile"):
kwargs["skipna"] = skipna
if method in ("prod", "product", "sum"):
kwargs["min_count"] = min_count
if not skipna and method in ("idxmax", "idxmin"):
with pytest.raises(ValueError, match="encountered an NA value"):
getattr(df, method)(axis=1, **kwargs)
with pytest.raises(ValueError, match="Encountered an NA value"):
getattr(expected_df, method)(axis=1, **kwargs)
return
result = getattr(df, method)(axis=1, **kwargs)
expected = getattr(expected_df, method)(axis=1, **kwargs)
if method not in ("idxmax", "idxmin"):
if using_nan_is_na:
expected = expected.astype(expected_dtype)
else:
mask = np.isnan(expected)
expected[mask] = 0
expected = expected.astype(expected_dtype)
expected[mask] = pd.NA
tm.assert_series_equal(result, expected)
def test_mean_nullable_int_axis_1():
# GH##36585
df = DataFrame(
{"a": [1, 2, 3, 4], "b": Series([1, 2, 4, None], dtype=pd.Int64Dtype())}
)
result = df.mean(axis=1, skipna=True)
expected = Series([1.0, 2.0, 3.5, 4.0], dtype="Float64")
tm.assert_series_equal(result, expected)
result = df.mean(axis=1, skipna=False)
expected = Series([1.0, 2.0, 3.5, pd.NA], dtype="Float64")
tm.assert_series_equal(result, expected)
| TestEmptyDataFrameReductions |
python | openai__openai-python | src/openai/types/beta/realtime/transcription_session_update.py | {
"start": 2208,
"end": 3920
} | class ____(BaseModel):
create_response: Optional[bool] = None
"""Whether or not to automatically generate a response when a VAD stop event
occurs.
Not available for transcription sessions.
"""
eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
"""Used only for `semantic_vad` mode.
The eagerness of the model to respond. `low` will wait longer for the user to
continue speaking, `high` will respond more quickly. `auto` is the default and
is equivalent to `medium`.
"""
interrupt_response: Optional[bool] = None
"""
Whether or not to automatically interrupt any ongoing response with output to
the default conversation (i.e. `conversation` of `auto`) when a VAD start event
occurs. Not available for transcription sessions.
"""
prefix_padding_ms: Optional[int] = None
"""Used only for `server_vad` mode.
Amount of audio to include before the VAD detected speech (in milliseconds).
Defaults to 300ms.
"""
silence_duration_ms: Optional[int] = None
"""Used only for `server_vad` mode.
Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
With shorter values the model will respond more quickly, but may jump in on
short pauses from the user.
"""
threshold: Optional[float] = None
"""Used only for `server_vad` mode.
Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
threshold will require louder audio to activate the model, and thus might
perform better in noisy environments.
"""
type: Optional[Literal["server_vad", "semantic_vad"]] = None
"""Type of turn detection."""
| SessionTurnDetection |
python | ray-project__ray | python/ray/tune/callback.py | {
"start": 11739,
"end": 17047
} | class ____(Callback):
"""Call multiple callbacks at once."""
IS_CALLBACK_CONTAINER = True
CKPT_FILE_TMPL = "callback-states-{}.pkl"
def __init__(self, callbacks: List[Callback]):
self._callbacks = callbacks
def setup(self, **info):
for callback in self._callbacks:
try:
callback.setup(**info)
except TypeError as e:
if "argument" in str(e):
warnings.warn(
"Please update `setup` method in callback "
f"`{callback.__class__}` to match the method signature"
" in `ray.tune.callback.Callback`.",
FutureWarning,
)
callback.setup()
else:
raise e
def on_step_begin(self, **info):
for callback in self._callbacks:
callback.on_step_begin(**info)
def on_step_end(self, **info):
for callback in self._callbacks:
callback.on_step_end(**info)
def on_trial_start(self, **info):
for callback in self._callbacks:
callback.on_trial_start(**info)
def on_trial_restore(self, **info):
for callback in self._callbacks:
callback.on_trial_restore(**info)
def on_trial_save(self, **info):
for callback in self._callbacks:
callback.on_trial_save(**info)
def on_trial_result(self, **info):
for callback in self._callbacks:
callback.on_trial_result(**info)
def on_trial_complete(self, **info):
for callback in self._callbacks:
callback.on_trial_complete(**info)
def on_trial_recover(self, **info):
for callback in self._callbacks:
callback.on_trial_recover(**info)
def on_trial_error(self, **info):
for callback in self._callbacks:
callback.on_trial_error(**info)
def on_checkpoint(self, **info):
for callback in self._callbacks:
callback.on_checkpoint(**info)
def on_experiment_end(self, **info):
for callback in self._callbacks:
callback.on_experiment_end(**info)
def get_state(self) -> Optional[Dict]:
"""Gets the state of all callbacks contained within this list.
If there are no stateful callbacks, then None will be returned in order
to avoid saving an unnecessary callback checkpoint file."""
state = {}
any_stateful_callbacks = False
for i, callback in enumerate(self._callbacks):
callback_state = callback.get_state()
if callback_state:
any_stateful_callbacks = True
state[i] = callback_state
if not any_stateful_callbacks:
return None
return state
def set_state(self, state: Dict):
"""Sets the state for all callbacks contained within this list.
Skips setting state for all stateless callbacks where `get_state`
returned None."""
for i, callback in enumerate(self._callbacks):
callback_state = state.get(i, None)
if callback_state:
callback.set_state(callback_state)
def save_to_dir(self, checkpoint_dir: str, session_str: str = "default"):
"""Save the state of the callback list to the checkpoint_dir.
Args:
checkpoint_dir: directory where the checkpoint is stored.
session_str: Unique identifier of the current run session (ex: timestamp).
"""
state_dict = self.get_state()
if state_dict:
file_name = self.CKPT_FILE_TMPL.format(session_str)
tmp_file_name = f".tmp-{file_name}"
_atomic_save(
state=state_dict,
checkpoint_dir=checkpoint_dir,
file_name=file_name,
tmp_file_name=tmp_file_name,
)
def restore_from_dir(self, checkpoint_dir: str):
"""Restore the state of the list of callbacks from the checkpoint_dir.
You should check if it's possible to restore with `can_restore`
before calling this method.
Args:
checkpoint_dir: directory where the checkpoint is stored.
Raises:
RuntimeError: if unable to find checkpoint.
NotImplementedError: if the `set_state` method is not implemented.
"""
state_dict = _load_newest_checkpoint(
checkpoint_dir, self.CKPT_FILE_TMPL.format("*")
)
if not state_dict:
raise RuntimeError(
"Unable to find checkpoint in {}.".format(checkpoint_dir)
)
self.set_state(state_dict)
def can_restore(self, checkpoint_dir: str) -> bool:
"""Check if the checkpoint_dir contains the saved state for this callback list.
Returns:
can_restore: True if the checkpoint_dir contains a file of the
format `CKPT_FILE_TMPL`. False otherwise.
"""
return any(
glob.iglob(Path(checkpoint_dir, self.CKPT_FILE_TMPL.format("*")).as_posix())
)
def __len__(self) -> int:
return len(self._callbacks)
def __getitem__(self, i: int) -> "Callback":
return self._callbacks[i]
| CallbackList |
python | mwaskom__seaborn | seaborn/_core/properties.py | {
"start": 10606,
"end": 10909
} | class ____(IntervalProperty):
"""Thickness of a line mark, in points."""
@property
def default_range(self) -> tuple[float, float]:
"""Min and max values used by default for semantic mapping."""
base = mpl.rcParams["lines.linewidth"]
return base * .5, base * 2
| LineWidth |
python | pypa__pipenv | pipenv/patched/pip/_internal/index/package_finder.py | {
"start": 14100,
"end": 21762
} | class ____:
"""
Responsible for filtering and sorting candidates for installation based
on what tags are valid.
"""
@classmethod
def create(
cls,
project_name: str,
target_python: Optional[TargetPython] = None,
prefer_binary: bool = False,
allow_all_prereleases: bool = False,
specifier: Optional[specifiers.BaseSpecifier] = None,
hashes: Optional[Hashes] = None,
) -> "CandidateEvaluator":
"""Create a CandidateEvaluator object.
:param target_python: The target Python interpreter to use when
checking compatibility. If None (the default), a TargetPython
object will be constructed from the running Python.
:param specifier: An optional object implementing `filter`
(e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
versions.
:param hashes: An optional collection of allowed hashes.
"""
if target_python is None:
target_python = TargetPython()
if specifier is None:
specifier = specifiers.SpecifierSet()
supported_tags = target_python.get_sorted_tags()
return cls(
project_name=project_name,
supported_tags=supported_tags,
specifier=specifier,
prefer_binary=prefer_binary,
allow_all_prereleases=allow_all_prereleases,
hashes=hashes,
)
def __init__(
self,
project_name: str,
supported_tags: List[Tag],
specifier: specifiers.BaseSpecifier,
prefer_binary: bool = False,
allow_all_prereleases: bool = False,
hashes: Optional[Hashes] = None,
) -> None:
"""
:param supported_tags: The PEP 425 tags supported by the target
Python in order of preference (most preferred first).
"""
self._allow_all_prereleases = allow_all_prereleases
self._hashes = hashes
self._prefer_binary = prefer_binary
self._project_name = project_name
self._specifier = specifier
self._supported_tags = supported_tags
# Since the index of the tag in the _supported_tags list is used
# as a priority, precompute a map from tag to index/priority to be
# used in wheel.find_most_preferred_tag.
self._wheel_tag_preferences = {
tag: idx for idx, tag in enumerate(supported_tags)
}
def get_applicable_candidates(
self,
candidates: List[InstallationCandidate],
) -> List[InstallationCandidate]:
"""
Return the applicable candidates from a list of candidates.
"""
# Using None infers from the specifier instead.
allow_prereleases = self._allow_all_prereleases or None
specifier = self._specifier
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
candidates_and_versions = [(c, str(c.version)) for c in candidates]
versions = set(
specifier.filter(
(v for _, v in candidates_and_versions),
prereleases=allow_prereleases,
)
)
applicable_candidates = [c for c, v in candidates_and_versions if v in versions]
filtered_applicable_candidates = filter_unallowed_hashes(
candidates=applicable_candidates,
hashes=self._hashes,
project_name=self._project_name,
)
return sorted(filtered_applicable_candidates, key=self._sort_key)
def _sort_key(
self,
candidate: InstallationCandidate,
ignore_compatibility: bool = True,
) -> CandidateSortingKey:
"""
Function to pass as the `key` argument to a call to sorted() to sort
InstallationCandidates by preference.
Returns a tuple such that tuples sorting as greater using Python's
default comparison operator are more preferred.
The preference is as follows:
First and foremost, candidates with allowed (matching) hashes are
always preferred over candidates without matching hashes. This is
because e.g. if the only candidate with an allowed hash is yanked,
we still want to use that candidate.
Second, excepting hash considerations, candidates that have been
yanked (in the sense of PEP 592) are always less preferred than
candidates that haven't been yanked. Then:
If not finding wheels, they are sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min(self._supported_tags)
3. source archives
If prefer_binary was set, then all wheels are sorted above sources.
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
valid_tags = self._supported_tags
support_num = len(valid_tags)
build_tag: BuildTag = ()
binary_preference = 0
link = candidate.link
if link.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(link.filename)
try:
pri = -(
wheel.find_most_preferred_tag(
valid_tags, self._wheel_tag_preferences
)
)
except ValueError:
if not ignore_compatibility:
raise UnsupportedWheel(
f"{wheel.filename} is not a supported wheel for this platform. It "
"can't be sorted."
)
pri = -support_num
if self._prefer_binary:
binary_preference = 1
build_tag = wheel.build_tag
else: # sdist
pri = -(support_num)
has_allowed_hash = int(link.is_hash_allowed(self._hashes))
yank_value = -1 * int(link.is_yanked) # -1 for yanked.
return (
has_allowed_hash,
yank_value,
binary_preference,
candidate.version,
pri,
build_tag,
)
def sort_best_candidate(
self,
candidates: List[InstallationCandidate],
) -> Optional[InstallationCandidate]:
"""
Return the best candidate per the instance's sort order, or None if
no candidate is acceptable.
"""
if not candidates:
return None
best_candidate = max(candidates, key=self._sort_key)
return best_candidate
def compute_best_candidate(
self,
candidates: List[InstallationCandidate],
) -> BestCandidateResult:
"""
Compute and return a `BestCandidateResult` instance.
"""
applicable_candidates = self.get_applicable_candidates(candidates)
best_candidate = self.sort_best_candidate(applicable_candidates)
return BestCandidateResult(
candidates,
applicable_candidates=applicable_candidates,
best_candidate=best_candidate,
)
| CandidateEvaluator |
python | ray-project__ray | python/ray/train/_internal/backend_executor.py | {
"start": 1800,
"end": 2375
class ____:
    """
    Resource configuration for resource_ids to share between workers.
    Args:
        resource_name: The name of the resource to configure
            (Example: "neuron_cores" or "gpu").
        resource_enable_sharing_env_var: The environment variable to
            check if the resource should be shared.
        share_resource_ids_env_var: The environment variable to configure for
            sharing the resources with other workers.
    """
    # NOTE(review): plain annotated fields — presumably decorated as a
    # @dataclass just above this chunk; confirm against the full file.
    resource_name: str
    resource_enable_sharing_env_var: str
    share_resource_ids_env_var: str
| ResourceConfig |
python | doocs__leetcode | lcp/LCP 34. 二叉树染色/Solution.py | {
"start": 164,
"end": 662
class ____:
    def maxValue(self, root: TreeNode, k: int) -> int:
        """Return the maximum total value obtainable under the coloring rule.

        Tree DP: for each node, track the best value per "budget" c, where c
        is the number of colored nodes forming a chain downward from that node.
        """

        def best_per_budget(node: TreeNode) -> List[int]:
            # table[c] = best value in node's subtree with c colored nodes
            # chained down from `node` (c == 0 means `node` is uncolored).
            table = [0] * (k + 1)
            if node is None:
                return table
            left = best_per_budget(node.left)
            right = best_per_budget(node.right)
            # Uncolored node: each child subtree picks its own best freely.
            table[0] = max(left) + max(right)
            for used_left in range(k):
                for used_right in range(k - used_left):
                    total = left[used_left] + right[used_right] + node.val
                    budget = used_left + used_right + 1
                    if total > table[budget]:
                        table[budget] = total
            return table

        return max(best_per_budget(root))
| Solution |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_numeric.py | {
"start": 1427,
"end": 2308
class ____:
    """Unit tests for the ``bcpn.Angle`` property type."""

    def test_valid(self) -> None:
        # Plain ints and floats are accepted as angle values.
        prop = bcpn.Angle()
        assert prop.is_valid(0)
        assert prop.is_valid(1)
        assert prop.is_valid(0.0)
        assert prop.is_valid(1.0)

    def test_invalid(self) -> None:
        # Booleans are rejected even though bool subclasses int, and complex
        # numbers, strings, containers, and models are all rejected.
        prop = bcpn.Angle()
        assert not prop.is_valid(None)
        assert not prop.is_valid(False)
        assert not prop.is_valid(True)
        assert not prop.is_valid(1.0+1.0j)
        assert not prop.is_valid("")
        assert not prop.is_valid(())
        assert not prop.is_valid([])
        assert not prop.is_valid({})
        assert not prop.is_valid(_TestHasProps())
        assert not prop.is_valid(_TestModel())

    def test_has_ref(self) -> None:
        # Angle is a plain scalar property: it holds no model references.
        prop = bcpn.Angle()
        assert not prop.has_ref

    def test_str(self) -> None:
        prop = bcpn.Angle()
        assert str(prop) == "Angle"
| Test_Angle |
python | PrefectHQ__prefect | src/prefect/_vendor/croniter/croniter.py | {
"start": 4076,
"end": 4134
class ____(TypeError):
    """TypeError subclass raised by croniter for a bad type in a range."""
| CroniterBadTypeRangeError |
python | doocs__leetcode | solution/0400-0499/0480.Sliding Window Median/Solution2.py | {
"start": 0,
"end": 580
class ____:
    def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
        """Return the median of every length-k window of ``nums``.

        Maintains two sorted halves: ``low`` holds the smaller half (its
        maximum is the lower median) and ``high`` holds the larger half.
        """
        low, high = SortedList(), SortedList()
        medians = []
        for right, value in enumerate(nums):
            # Route the newcomer through `high` so low's max stays correct,
            # then rebalance so len(low) - len(high) is 0 or 1.
            high.add(value)
            low.add(high.pop(0))
            while len(low) - len(high) > 1:
                high.add(low.pop())
            left = right - k + 1
            if left < 0:
                continue
            if k & 1:
                medians.append(low[-1])
            else:
                medians.append((low[-1] + high[0]) / 2)
            # Evict the element sliding out of the window.
            outgoing = nums[left]
            if outgoing in low:
                low.remove(outgoing)
            else:
                high.remove(outgoing)
        return medians
| Solution |
python | PrefectHQ__prefect | tests/_internal/schemas/test_v2_schema.py | {
"start": 157,
"end": 4027
} | class ____:
"""Test the process_v2_params functions with and without FieldInfo."""
def test_process_v2_params_with_existing_fieldinfo(self):
"""Test parameter processing with an existing FieldInfo object as default."""
existing_field = Field(
default="default_name",
description="Existing field description",
json_schema_extra={"position": 99},
)
param = inspect.Parameter(
"name",
inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=existing_field,
annotation=str,
)
name, type_, field = process_v2_params(
param, position=0, docstrings={"name": "Docstring description"}, aliases={}
)
assert name == "name"
assert type_ is str
assert field.default == "default_name"
assert field.title == "name"
assert field.description == "Existing field description"
assert field.json_schema_extra == {"position": 99}
def test_process_v2_params_with_existing_fieldinfo_no_description(self):
"""Test parameter processing with existing FieldInfo that has no description."""
existing_field = Field(
default="default_name", description=None, json_schema_extra={"position": 99}
)
param = inspect.Parameter(
"name",
inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=existing_field,
annotation=str,
)
name, type_, field = process_v2_params(
param, position=0, docstrings={"name": "Docstring description"}, aliases={}
)
assert name == "name"
assert field.description == "Docstring description"
def test_process_v2_params_with_existing_fieldinfo_empty_description(self):
"""Test parameter processing with existing FieldInfo that has empty description."""
existing_field = Field(
default="default_name", description="", json_schema_extra={"position": 99}
)
param = inspect.Parameter(
"name",
inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=existing_field,
annotation=str,
)
name, type_, field = process_v2_params(
param, position=0, docstrings={"name": "Docstring description"}, aliases={}
)
assert name == "name"
assert field.description == "Docstring description"
def test_process_v2_params_with_existing_fieldinfo_required(self):
"""Test parameter processing with existing FieldInfo for a required field."""
existing_field = Field(description="Required field with existing info")
param = inspect.Parameter(
"name",
inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=existing_field,
annotation=str,
)
name, type_, field = process_v2_params(
param, position=0, docstrings={}, aliases={}
)
assert name == "name"
assert field.default is PydanticUndefined # Required field
assert field.description == "Required field with existing info"
def test_process_v2_params_with_existing_fieldinfo_no_json_schema_extra(self):
"""Test parameter processing with existing FieldInfo that has no json_schema_extra."""
existing_field = Field(
default="default_value", description="Field without json_schema_extra"
)
param = inspect.Parameter(
"name",
inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=existing_field,
annotation=str,
)
name, type_, field = process_v2_params(
param, position=5, docstrings={}, aliases={}
)
assert name == "name"
assert field.json_schema_extra == {"position": 5}
| TestProcessV2Params |
python | yandexdataschool__Practical_RL | week05_explore/replay_buffer.py | {
"start": 157,
"end": 2433
class ____(object):
    """Fixed-capacity ring buffer of (obs, action, reward, obs', done) tuples."""

    def __init__(self, size):
        """Create Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        """
        self._storage = []
        self._maxsize = size
        self._next_idx = 0

    def __len__(self):
        return len(self._storage)

    def add(self, obs_t, action, reward, obs_tp1, done):
        """Append one transition, overwriting the oldest slot when full."""
        transition = (obs_t, action, reward, obs_tp1, done)
        if self._next_idx < len(self._storage):
            # Buffer is full up to this slot: ring-buffer overwrite.
            self._storage[self._next_idx] = transition
        else:
            self._storage.append(transition)
        self._next_idx = (self._next_idx + 1) % self._maxsize

    def _encode_sample(self, idxes):
        # Transpose the selected transitions into five parallel arrays.
        columns = ([], [], [], [], [])
        for idx in idxes:
            obs_t, action, reward, obs_tp1, done = self._storage[idx]
            columns[0].append(np.array(obs_t, copy=False))
            columns[1].append(np.array(action, copy=False))
            columns[2].append(reward)
            columns[3].append(np.array(obs_tp1, copy=False))
            columns[4].append(done)
        return tuple(np.array(column) for column in columns)

    def sample(self, batch_size):
        """Sample a batch of experiences (with replacement).

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        (obs_batch, act_batch, rew_batch, next_obs_batch, done_mask):
            five parallel np.arrays; done_mask[i] is truthy iff act_batch[i]
            ended an episode.
        """
        last = len(self._storage) - 1
        idxes = [random.randint(0, last) for _ in range(batch_size)]
        return self._encode_sample(idxes)
| ReplayBuffer |
python | Pylons__pyramid | tests/test_config/test_assets.py | {
"start": 36921,
"end": 37365
class ____:
    """Test double mimicking a PEP 302 loader.

    Each method records the argument it was called with on the instance so a
    test can assert what the code under test asked for, and returns a fixed
    canned value.
    """

    # Sentinels showing "not called yet".
    _got_data = _is_package = None

    def get_data(self, path):
        self._got_data = path
        return b'DEADBEEF'

    def get_code(self, fullname):
        self._got_code = fullname
        return b'DEADBEEF'

    def get_source(self, fullname):
        self._got_source = fullname
        return 'def foo():\n pass'

    def is_package(self, fullname):
        self._is_package = fullname
        return True
| DummyLoader |
python | doocs__leetcode | solution/1900-1999/1964.Find the Longest Valid Obstacle Course at Each Position/Solution.py | {
"start": 412,
"end": 793
class ____:
    def longestObstacleCourseAtEachPosition(self, obstacles: List[int]) -> List[int]:
        """For each index i, the length of the longest non-decreasing course
        ending at i, using a BIT (max-query) over compressed heights."""
        distinct = sorted(set(obstacles))
        tree = BinaryIndexedTree(len(distinct))
        result = []
        for height in obstacles:
            # 1-based rank of this height among the distinct values.
            rank = bisect_left(distinct, height) + 1
            best = tree.query(rank) + 1
            result.append(best)
            tree.update(rank, best)
        return result
| Solution |
python | kamyu104__LeetCode-Solutions | Python/design-a-3d-binary-matrix-with-efficient-layer-tracking.py | {
"start": 2149,
"end": 3268
class ____(object):
    """Tracks set cells of an n x n x n binary matrix and reports the x-layer
    with the most set cells (largest x wins ties), via a lazily-pruned heap."""

    def __init__(self, n):
        """
        :type n: int
        """
        # (x, y, z) -> 1 for every currently-set cell.
        self.__cells = {}
        # Number of set cells per x-layer.
        self.__layer_count = collections.defaultdict(int)
        # Min-heap of (-count, -x); seeded so the empty matrix reports n-1.
        self.__heap = [(0, -(n - 1))]

    def setCell(self, x, y, z):
        """
        :type x: int
        :type y: int
        :type z: int
        :rtype: None
        """
        key = (x, y, z)
        if key in self.__cells:
            return
        self.__cells[key] = 1
        self.__layer_count[x] += 1
        heapq.heappush(self.__heap, (-self.__layer_count[x], -x))

    def unsetCell(self, x, y, z):
        """
        :type x: int
        :type y: int
        :type z: int
        :rtype: None
        """
        key = (x, y, z)
        if key not in self.__cells:
            return
        del self.__cells[key]
        self.__layer_count[x] -= 1
        heapq.heappush(self.__heap, (-self.__layer_count[x], -x))

    def largestMatrix(self):
        """
        :rtype: int
        """
        # Drop stale entries whose recorded count no longer matches reality.
        while self.__heap and -self.__heap[0][0] != self.__layer_count[-self.__heap[0][1]]:
            heapq.heappop(self.__heap)
        return -self.__heap[0][1]
| Matrix3D_2 |
python | pytorch__pytorch | test/inductor/test_cutedsl_grouped_mm.py | {
"start": 662,
"end": 5373
} | class ____(InductorTestCase):
def _get_inputs(
self,
group_size: int,
M_hint: int,
K: int,
N: int,
device: str,
dtype: torch.dtype,
alignment: int = 16,
) -> tuple[Tensor, Tensor, Tensor]:
# --- Random, tile-aligned M sizes ---
M_sizes = (
torch.randint(1, (M_hint // alignment) + 1, (group_size,), dtype=torch.int)
* alignment
)
M_total = torch.sum(M_sizes).item()
# --- Construct input tensors ---
A = torch.randn(int(M_total), K, dtype=dtype, device=device) * 0.1
B = torch.randn((group_size, K, N), dtype=dtype, device=device) * 0.01
# --- Build offsets (no leading zero, strictly increasing) ---
offsets = torch.cumsum(M_sizes, dim=0).to(dtype=torch.int32, device=device)
return (A, B, offsets)
@parametrize("group_size", (2, 8))
@parametrize("M_hint", (256, 1024))
@parametrize("K", (64, 128))
@parametrize("N", (128, 256))
def test_grouped_gemm_basic(self, group_size: int, M_hint: int, K: int, N: int):
device = "cuda"
dtype = torch.bfloat16
A, B, offsets = self._get_inputs(group_size, M_hint, K, N, device, dtype)
def grouped_gemm_fn(A_packed, B_batched, offs):
return F.grouped_mm(A_packed, B_batched, offs=offs)
# Eager execution
c_eager = grouped_gemm_fn(A, B, offsets)
# Test with Cute backend
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "CUTEDSL",
"test_configs.autotune_choice_name_regex": "cutedsl",
"autotune_fallback_to_aten": False,
}
):
grouped_gemm_compiled = torch.compile(
grouped_gemm_fn, backend="inductor", dynamic=False
)
c_compiled = grouped_gemm_compiled(A, B, offsets)
self.assertEqual(c_eager.dtype, dtype)
self.assertEqual(c_compiled.dtype, dtype)
torch.testing.assert_close(c_eager, c_compiled)
@parametrize("layout_A", ("contiguous", "offset", "padded", "view"))
@parametrize("layout_B", ("contiguous", "broadcasted"))
def test_grouped_gemm_assorted_layouts(
self,
layout_A: str,
layout_B: str,
):
device = "cuda"
dtype = torch.bfloat16
G, K, N = 8, 64, 128
M_sizes = [128] * G
sum_M = sum(M_sizes)
offsets = torch.tensor(
[sum(M_sizes[: i + 1]) for i in range(G)], dtype=torch.int32, device=device
)
A_base = torch.randn(sum_M, K, device=device, dtype=dtype)
A = A_base
if layout_A == "offset":
# allocate bigger buffer than needed, use nonzero storage offset
storage = torch.randn(sum_M * K + 512, device=device, dtype=dtype)
offset = 128 # skip first 128 elements
A = torch.as_strided(storage[offset:], (sum_M, K), (K, 1))
elif layout_A == "padded":
# simulate row pitch > K (row_stride = K + pad)
row_pitch = K + 8
storage = torch.randn(sum_M * row_pitch, device=device, dtype=dtype)
A = torch.as_strided(storage, (sum_M, K), (row_pitch, 1))
elif layout_A == "view":
A_storage = torch.randn(sum_M * K, device=device, dtype=dtype)
A = A_storage.view(sum_M, K)
assert A._base is not None
assert A.shape == (sum_M, K)
B = torch.randn((G, K, N), dtype=dtype, device=device) * 0.01
if layout_B == "broadcasted":
# Broadcast B across groups (zero stride along G)
B = B[0].expand(G, K, N)
assert B.stride(0) == 0
def grouped_gemm_fn(A_packed, B_batched, offs):
return F.grouped_mm(A_packed, B_batched, offs=offs)
# --- eager ---
c_eager = grouped_gemm_fn(A, B, offsets)
# --- compiled (CUTE backend) ---
with config.patch(
{
"max_autotune": True,
"max_autotune_gemm_backends": "CUTEDSL",
"test_configs.autotune_choice_name_regex": "cutedsl",
"autotune_fallback_to_aten": False,
}
):
grouped_gemm_compiled = torch.compile(
grouped_gemm_fn, backend="inductor", dynamic=False
)
c_compiled = grouped_gemm_compiled(A, B, offsets)
self.assertEqual(c_eager.dtype, dtype)
self.assertEqual(c_compiled.dtype, dtype)
torch.testing.assert_close(c_eager, c_compiled)
if __name__ == "__main__":
run_tests()
| TestCuTeDSLGroupedGemm |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_blocks/test_captions.py | {
"start": 42558,
"end": 47076
} | class ____(util.MdCase):
"""Test Blocks caption cases with `auto` level."""
extension = ['pymdownx.blocks.caption']
extension_configs = {
'pymdownx.blocks.caption': {
'auto_level': 2,
'prepend': True
}
}
def test_caption(self):
"""Test basic caption with `auto` level and `prepend`."""
self.check_markdown(
R'''
A paragraph with a caption.
/// figure-caption
This is the caption.
///
''',
R'''
<figure id="__figure-caption_1">
<figcaption>
<p><span class="caption-prefix">Figure 1.</span> This is the caption.</p>
</figcaption>
<p>A paragraph with a caption.</p>
</figure>
''',
True
)
def test_nested_captions(self):
"""Test nested captions with `auto` level and `prepend`."""
self.check_markdown(
R'''
A paragraph with a caption.
/// figure-caption
Level 3 caption.
///
/// figure-caption
Level 2 caption.
///
/// figure-caption
Level 1 caption.
///
''',
R'''
<figure id="__figure-caption_1">
<figcaption>
<p><span class="caption-prefix">Figure 1.</span> Level 1 caption.</p>
</figcaption>
<figure id="__figure-caption_1_1">
<figcaption>
<p><span class="caption-prefix">Figure 1.1.</span> Level 2 caption.</p>
</figcaption>
<figure>
<figcaption>
<p>Level 3 caption.</p>
</figcaption>
<p>A paragraph with a caption.</p>
</figure>
</figure>
</figure>
''',
True
)
def test_nested_consecutive_captions(self):
"""Test nested consecutive captions with `auto` level and `prepend`."""
self.check_markdown(
R'''
A paragraph with a caption.
/// figure-caption
Level 3 caption.
///
/// figure-caption
Level 2 caption.
///
/// figure-caption
Level 1 caption.
///
A paragraph with a caption.
/// figure-caption
Level 2 caption.
///
/// figure-caption
Level 1 caption.
///
''',
R'''
<figure id="__figure-caption_1">
<figcaption>
<p><span class="caption-prefix">Figure 1.</span> Level 1 caption.</p>
</figcaption>
<figure id="__figure-caption_1_1">
<figcaption>
<p><span class="caption-prefix">Figure 1.1.</span> Level 2 caption.</p>
</figcaption>
<figure>
<figcaption>
<p>Level 3 caption.</p>
</figcaption>
<p>A paragraph with a caption.</p>
</figure>
</figure>
</figure>
<figure id="__figure-caption_2">
<figcaption>
<p><span class="caption-prefix">Figure 2.</span> Level 1 caption.</p>
</figcaption>
<figure id="__figure-caption_2_1">
<figcaption>
<p><span class="caption-prefix">Figure 2.1.</span> Level 2 caption.</p>
</figcaption>
<p>A paragraph with a caption.</p>
</figure>
</figure>
''',
True
)
def test_manual_prepend(self):
"""Test manual prepend."""
self.check_markdown(
R"""
Text
/// figure-caption | <
Prepended
///
Text
/// figure-caption | >
Appended
///
""",
R"""
<figure id="__figure-caption_1">
<figcaption>
<p><span class="caption-prefix">Figure 1.</span> Prepended</p>
</figcaption>
<p>Text</p>
</figure>
<figure id="__figure-caption_2">
<p>Text</p>
<figcaption>
<p><span class="caption-prefix">Figure 2.</span> Appended</p>
</figcaption>
</figure>
""",
True
)
| TestBlocksCaptionAutoLevelPrepend |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 31853,
"end": 33233
} | class ____(_SelectIsNotFrom, _CoerceLiterals, RoleImpl):
__slots__ = ()
_coerce_consts = True
_coerce_numerics = True
_coerce_star = True
_guess_straight_column = re.compile(r"^\w\S*$", re.I)
def _raise_for_expected(
self, element, argname=None, resolved=None, *, advice=None, **kw
):
if not advice and isinstance(element, list):
advice = (
f"Did you mean to say select("
f"{', '.join(repr(e) for e in element)})?"
)
return super()._raise_for_expected(
element, argname=argname, resolved=resolved, advice=advice, **kw
)
def _text_coercion(self, element, argname=None):
element = str(element)
guess_is_literal = not self._guess_straight_column.match(element)
raise exc.ArgumentError(
"Textual column expression %(column)r %(argname)sshould be "
"explicitly declared with text(%(column)r), "
"or use %(literal_column)s(%(column)r) "
"for more specificity"
% {
"column": util.ellipses_string(element),
"argname": "for argument %s" % (argname,) if argname else "",
"literal_column": (
"literal_column" if guess_is_literal else "column"
),
}
)
| ColumnsClauseImpl |
python | wandb__wandb | wandb/sdk/data_types/helper_types/classes.py | {
"start": 289,
"end": 1906
} | class ____(Media):
_log_type = "classes"
_class_set: Sequence[dict]
def __init__(self, class_set: Sequence[dict]) -> None:
"""Classes is holds class metadata intended to be used in concert with other objects when visualizing artifacts.
Args:
class_set (list): list of dicts in the form of {"id":int|str, "name":str}
"""
super().__init__()
for class_obj in class_set:
assert "id" in class_obj and "name" in class_obj
self._class_set = class_set
@classmethod
def from_json(
cls: Type["Classes"],
json_obj: dict,
source_artifact: Optional["Artifact"],
) -> "Classes":
return cls(json_obj.get("class_set")) # type: ignore
def to_json(self, run_or_artifact: Optional[Union["LocalRun", "Artifact"]]) -> dict:
json_obj = {}
# This is a bit of a hack to allow _ClassesIdType to
# be able to operate fully without an artifact in play.
# In all other cases, artifact should be a true artifact.
if run_or_artifact is not None:
json_obj = super().to_json(run_or_artifact)
json_obj["_type"] = Classes._log_type
json_obj["class_set"] = self._class_set
return json_obj
def get_type(self) -> "_ClassesIdType":
return _ClassesIdType(self)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __eq__(self, other: object) -> bool:
if isinstance(other, Classes):
return self._class_set == other._class_set
else:
return False
| Classes |
python | scrapy__scrapy | tests/CrawlerRunner/ip_address.py | {
"start": 999,
"end": 1962
class ____(Spider):
    """Spider that logs the host it contacted and the response's IP address."""

    name = "localhost_spider"

    async def start(self):
        # self.url is supplied as a crawl kwarg (see __main__ below).
        yield Request(self.url)

    def parse(self, response):
        # Log host plus the resolved IP so the test harness can grep for them.
        netloc = urlparse_cached(response).netloc
        host = netloc.split(":")[0]
        self.logger.info(f"Host: {host}")
        self.logger.info(f"Type: {type(response.ip_address)}")
        self.logger.info(f"IP address: {response.ip_address}")
if __name__ == "__main__":
    from twisted.internet import reactor
    # Point a fake domain at the mock DNS server, then crawl the mock HTTP
    # server through it and stop the reactor when the crawl finishes.
    with MockServer() as mock_http_server, MockDNSServer() as mock_dns_server:
        port = mock_http_server.http_port
        url = f"http://not.a.real.domain:{port}/echo"
        servers = [(mock_dns_server.host, mock_dns_server.port)]
        reactor.installResolver(createResolver(servers=servers))
        configure_logging()
        runner = CrawlerRunner()
        d = runner.crawl(LocalhostSpider, url=url)
        d.addBoth(lambda _: reactor.stop())
        reactor.run()
| LocalhostSpider |
python | vyperlang__vyper | vyper/codegen/function_definitions/common.py | {
"start": 2699,
"end": 3271
class ____:
    """Descriptor for one external function entry point.

    NOTE(review): plain annotated fields with a __post_init__ — presumably a
    @dataclass whose decorator sits just above this chunk; confirm in file.
    """
    func_t: ContractFunctionT
    min_calldatasize: int  # the min calldata required for this entry point
    ir_node: IRnode  # the ir for this entry point
    def __post_init__(self):
        # sanity check ABI v2 properties guaranteed by the spec.
        # https://docs.soliditylang.org/en/v0.8.21/abi-spec.html#formal-specification-of-the-encoding states: # noqa: E501
        # > Note that for any X, len(enc(X)) is a multiple of 32.
        # i.e. 4 selector bytes plus 32-byte-aligned ABI-encoded arguments.
        assert self.min_calldatasize >= 4
        assert (self.min_calldatasize - 4) % 32 == 0
# NOTE(review): decorator below belongs to the next (out-of-view) definition.
@dataclass
| EntryPointInfo |
python | requests__requests-oauthlib | tests/test_compliance_fixes.py | {
"start": 5331,
"end": 8114
} | class ____(TestCase):
def setUp(self):
mocker = requests_mock.Mocker()
mocker.post(
"https://slack.com/api/oauth.access",
json={"access_token": "xoxt-23984754863-2348975623103", "scope": "read"},
)
for method in ("GET", "POST"):
mocker.request(
method=method,
url="https://slack.com/api/auth.test",
json={
"ok": True,
"url": "https://myteam.slack.com/",
"team": "My Team",
"user": "cal",
"team_id": "T12345",
"user_id": "U12345",
},
)
mocker.start()
self.addCleanup(mocker.stop)
slack = OAuth2Session("someclientid", redirect_uri="https://i.b")
self.session = slack_compliance_fix(slack)
def test_protected_request(self):
self.session.token = {"access_token": "dummy-access-token"}
response = self.session.get("https://slack.com/api/auth.test")
url = response.request.url
query = parse_qs(urlparse(url).query)
self.assertNotIn("token", query)
body = response.request.body
data = parse_qs(body)
self.assertEqual(data["token"], ["dummy-access-token"])
def test_protected_request_override_token_get(self):
self.session.token = {"access_token": "dummy-access-token"}
response = self.session.get(
"https://slack.com/api/auth.test", data={"token": "different-token"}
)
url = response.request.url
query = parse_qs(urlparse(url).query)
self.assertNotIn("token", query)
body = response.request.body
data = parse_qs(body)
self.assertEqual(data["token"], ["different-token"])
def test_protected_request_override_token_post(self):
self.session.token = {"access_token": "dummy-access-token"}
response = self.session.post(
"https://slack.com/api/auth.test", data={"token": "different-token"}
)
url = response.request.url
query = parse_qs(urlparse(url).query)
self.assertNotIn("token", query)
body = response.request.body
data = parse_qs(body)
self.assertEqual(data["token"], ["different-token"])
def test_protected_request_override_token_url(self):
self.session.token = {"access_token": "dummy-access-token"}
response = self.session.get(
"https://slack.com/api/auth.test?token=different-token"
)
url = response.request.url
query = parse_qs(urlparse(url).query)
self.assertEqual(query["token"], ["different-token"])
self.assertIsNone(response.request.body)
| SlackComplianceFixTest |
python | dagster-io__dagster | .buildkite/buildkite-shared/buildkite_shared/git.py | {
"start": 524,
"end": 1695
class ____:
    """Collects the files changed relative to a base branch into ``cls.all``.

    Results are cached per repository directory so the expensive
    ``git fetch`` / ``git diff`` runs at most once per directory.
    """
    _repositories: set[Path] = set()
    all: set[Path] = set()
    @classmethod
    def load_from_git(cls, git_info: GitInfo) -> None:
        """Populate ``cls.all`` with paths changed vs origin/<base_branch>."""
        # Only do the expensive git diffing once per repository.
        if git_info.directory in cls._repositories:
            return None
        # Fix: record the directory so repeated calls actually short-circuit;
        # previously nothing in this block ever added to _repositories, so the
        # guard above could never fire.
        cls._repositories.add(git_info.directory)
        original_directory = os.getcwd()
        os.chdir(git_info.directory)
        try:
            subprocess.call(["git", "fetch", "origin", str(git_info.base_branch)])
            origin = get_commit(f"origin/{git_info.base_branch}")
            head = get_commit("HEAD")
            logging.info(
                f"Changed files between origin/{git_info.base_branch} ({origin}) and HEAD ({head}):"
            )
            paths = (
                subprocess.check_output(
                    [
                        "git",
                        "diff",
                        f"origin/{git_info.base_branch}...HEAD",
                        "--name-only",
                    ]
                )
                .decode("utf-8")
                .strip()
                .split("\n")
            )
            for path in sorted(paths):
                logging.info(" - " + path)
                cls.all.add(git_info.directory / path)
        finally:
            # Restore the caller's working directory even if a git step fails.
            os.chdir(original_directory)
| ChangedFiles |
python | huggingface__transformers | tests/models/altclip/test_modeling_altclip.py | {
"start": 16674,
"end": 19662
} | class ____(unittest.TestCase):
@slow
def test_inference(self):
model_name = "BAAI/AltCLIP"
model = AltCLIPModel.from_pretrained(model_name).to(torch_device)
processor = AltCLIPProcessor.from_pretrained(model_name)
image = prepare_img()
inputs = processor(text=["一张猫的照片", "一张狗的照片"], images=image, padding=True, return_tensors="pt").to(torch_device) # fmt: skip
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
self.assertEqual(
outputs.logits_per_image.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
probs = outputs.logits_per_image.softmax(dim=1)
expected_probs = torch.tensor([[9.9942e-01, 5.7805e-04]], device=torch_device)
torch.testing.assert_close(probs, expected_probs, rtol=5e-3, atol=5e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
model_name = "BAAI/AltCLIP"
model = AltCLIPModel.from_pretrained(model_name).to(torch_device)
image_processor = AltCLIPProcessor.from_pretrained(
model_name, size={"shortest_edge": 180}, crop_size={"height": 180, "width": 180}
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device)
# interpolate_pos_encodiung false should return value error
with self.assertRaises(ValueError, msg="doesn't match model"):
with torch.no_grad():
model(**inputs, interpolate_pos_encoding=False)
# forward pass
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
# verify the logits
expected_shape = torch.Size((1, 145, 1024))
print("nilesh ")
print(outputs.vision_model_output.last_hidden_state.shape)
print(outputs.vision_model_output.last_hidden_state[0, :3, :3])
self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[-0.3589, -0.5939, 0.3534], [0.4346, 0.1647, 0.7071], [1.1404, -0.4716, 0.1664]]
).to(torch_device)
torch.testing.assert_close(
outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4
)
| AltCLIPModelIntegrationTest |
python | django__django | tests/auth_tests/test_templatetags.py | {
"start": 188,
"end": 1574
class ____(SimpleTestCase):
    """Tests for the render_password_as_hash template-tag helper."""

    @override_settings(
        PASSWORD_HASHERS=["django.contrib.auth.hashers.PBKDF2PasswordHasher"]
    )
    def test_valid_password(self):
        # A well-formed PBKDF2 hash renders algorithm/iterations/salt/hash,
        # with the salt and hash partially masked by asterisks.
        value = (
            "pbkdf2_sha256$100000$a6Pucb1qSFcD$WmCkn9Hqidj48NVe5x0FEM6A9YiOqQcl/83m2Z5u"
            "dm0="
        )
        hashed_html = (
            "<p><strong>algorithm</strong>: <bdi>pbkdf2_sha256</bdi> "
            "<strong>iterations</strong>: <bdi>100000</bdi> "
            "<strong>salt</strong>: <bdi>a6Pucb******</bdi> "
            "<strong>hash</strong>: <bdi>WmCkn9**************************************"
            "</bdi></p>"
        )
        self.assertEqual(render_password_as_hash(value), hashed_html)

    def test_invalid_password(self):
        # Malformed or unknown-algorithm strings all render the same notice.
        expected = (
            "<p><strong>Invalid password format or unknown hashing algorithm.</strong>"
            "</p>"
        )
        for value in ["pbkdf2_sh", "md5$password", "invalid", "testhash$password"]:
            with self.subTest(value=value):
                self.assertEqual(render_password_as_hash(value), expected)

    def test_no_password(self):
        # Empty string, None, and an unusable password all count as "no password".
        expected = "<p><strong>No password set.</strong></p>"
        for value in ["", None, make_password(None)]:
            with self.subTest(value=value):
                self.assertEqual(render_password_as_hash(value), expected)
| RenderPasswordAsHashTests |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py | {
"start": 50729,
"end": 52034
} | class ____(nn.Module):
def __init__(self, config: Sam3TrackerVideoPromptEncoderConfig):
super().__init__()
self.mask_input_channels = config.mask_input_channels // 4
self.activation = ACT2FN[config.hidden_act]
self.conv1 = nn.Conv2d(1, self.mask_input_channels, kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(self.mask_input_channels, config.mask_input_channels, kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(config.mask_input_channels, config.hidden_size, kernel_size=1)
self.layer_norm1 = Sam3TrackerVideoLayerNorm(
self.mask_input_channels, eps=config.layer_norm_eps, data_format="channels_first"
)
self.layer_norm2 = Sam3TrackerVideoLayerNorm(
self.mask_input_channels * 4, eps=config.layer_norm_eps, data_format="channels_first"
)
def forward(self, masks):
hidden_states = self.conv1(masks)
hidden_states = self.layer_norm1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.activation(hidden_states)
dense_embeddings = self.conv3(hidden_states)
return dense_embeddings
| Sam3TrackerVideoMaskEmbedding |
python | encode__starlette | tests/test_applications.py | {
"start": 1710,
"end": 2800
} | class ____(HTTPEndpoint):
def get(self, request: Request) -> PlainTextResponse:
return PlainTextResponse("Hello, world!")
def all_users_page(request: Request) -> PlainTextResponse:
return PlainTextResponse("Hello, everyone!")
def user_page(request: Request) -> PlainTextResponse:
username = request.path_params["username"]
return PlainTextResponse(f"Hello, {username}!")
def custom_subdomain(request: Request) -> PlainTextResponse:
return PlainTextResponse("Subdomain: " + request.path_params["subdomain"])
def runtime_error(request: Request) -> None:
raise RuntimeError()
async def websocket_endpoint(session: WebSocket) -> None:
await session.accept()
await session.send_text("Hello, world!")
await session.close()
async def websocket_raise_websocket_exception(websocket: WebSocket) -> None:
await websocket.accept()
raise WebSocketException(code=status.WS_1003_UNSUPPORTED_DATA)
async def websocket_raise_http_exception(websocket: WebSocket) -> None:
raise HTTPException(status_code=401, detail="Unauthorized")
| Homepage |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 227875,
"end": 229661
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of EnablePullRequestAutoMerge"""
__schema__ = github_schema
__field_names__ = (
"pull_request_id",
"commit_headline",
"commit_body",
"merge_method",
"author_email",
"expected_head_oid",
"client_mutation_id",
)
pull_request_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="pullRequestId")
"""ID of the pull request to enable auto-merge on."""
commit_headline = sgqlc.types.Field(String, graphql_name="commitHeadline")
"""Commit headline to use for the commit when the PR is mergable; if
omitted, a default message will be used. NOTE: when merging with a
merge queue any input value for commit headline is ignored.
"""
commit_body = sgqlc.types.Field(String, graphql_name="commitBody")
"""Commit body to use for the commit when the PR is mergable; if
omitted, a default message will be used. NOTE: when merging with a
merge queue any input value for commit message is ignored.
"""
merge_method = sgqlc.types.Field(PullRequestMergeMethod, graphql_name="mergeMethod")
"""The merge method to use. If omitted, defaults to `MERGE`. NOTE:
when merging with a merge queue any input value for merge method
is ignored.
"""
author_email = sgqlc.types.Field(String, graphql_name="authorEmail")
"""The email address to associate with this merge."""
expected_head_oid = sgqlc.types.Field(GitObjectID, graphql_name="expectedHeadOid")
"""The expected head OID of the pull request."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| EnablePullRequestAutoMergeInput |
python | ray-project__ray | python/ray/serve/tests/unit/test_batching.py | {
"start": 781,
"end": 28490
} | class ____:
def __init__(self):
self.messages = []
def write(self, buf):
self.messages.append(buf)
def reset_message(self):
self.messages = []
# We use a single event loop for the entire test session. Without this
# fixture, the event loop is sometimes prematurely terminated by pytest.
@pytest.fixture(scope="session")
def event_loop():
loop = get_or_create_event_loop()
yield loop
loop.close()
@pytest.mark.asyncio
async def test_decorator_validation():
@serve.batch
async def function():
pass
@serve.batch(max_batch_size=10, batch_wait_timeout_s=1.5)
async def function2():
pass
class Class:
@serve.batch
async def method(self):
pass
class Class2:
@serve.batch(max_batch_size=10, batch_wait_timeout_s=1.5)
async def method(self):
pass
with pytest.raises(TypeError, match="async def"):
@serve.batch
def non_async_function():
pass
with pytest.raises(TypeError, match="async def"):
class NotAsync:
@serve.batch
def method(self, requests):
pass
with pytest.raises(ValueError):
class ZeroBatch:
@serve.batch(max_batch_size=0)
async def method(self, requests):
pass
with pytest.raises(TypeError):
class FloatNonIntBatch:
@serve.batch(max_batch_size=1.1)
async def method(self, requests):
pass
class FloatIntegerBatch:
@serve.batch(max_batch_size=1.0)
async def method(self, requests):
pass
with pytest.raises(ValueError):
class NegativeTimeout:
@serve.batch(batch_wait_timeout_s=-0.1)
async def method(self, requests):
pass
class FloatZeroTimeout:
@serve.batch(batch_wait_timeout_s=0.0)
async def method(self, requests):
pass
class IntZeroTimeout:
@serve.batch(batch_wait_timeout_s=0)
async def method(self, requests):
pass
with pytest.raises(TypeError):
class NonTimeout:
@serve.batch(batch_wait_timeout_s="a")
async def method(self, requests):
pass
@pytest.mark.asyncio
@pytest.mark.parametrize("use_class", [True, False])
async def test_batch_size_one_long_timeout(use_class):
@serve.batch(max_batch_size=1, batch_wait_timeout_s=1000)
async def long_timeout(requests):
if "raise" in requests:
_ = 1 / 0
return requests
class LongTimeout:
@serve.batch(max_batch_size=1, batch_wait_timeout_s=1000)
async def long_timeout(self, requests):
if "raise" in requests:
_ = 1 / 0
return requests
cls = LongTimeout()
async def call(arg):
if use_class:
return await cls.long_timeout(arg)
else:
return await long_timeout(arg)
assert await call("hi") == "hi"
with pytest.raises(ZeroDivisionError):
await call("raise")
@pytest.mark.asyncio
@pytest.mark.parametrize("use_class", [True, False])
async def test_batch_size_multiple_zero_timeout(use_class):
block_execution_event = asyncio.Event()
@serve.batch(max_batch_size=2, batch_wait_timeout_s=0)
async def zero_timeout(requests):
await block_execution_event.wait()
if "raise" in requests:
_ = 1 / 0
return requests
class ZeroTimeout:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=0)
async def zero_timeout(self, requests):
await block_execution_event.wait()
if "raise" in requests:
_ = 1 / 0
return requests
cls = ZeroTimeout()
async def call(arg):
if use_class:
return await cls.zero_timeout(arg)
else:
return await zero_timeout(arg)
block_execution_event.set()
assert await call("hi") == "hi"
with pytest.raises(ZeroDivisionError):
await call("raise")
block_execution_event.clear()
# Check that 2 requests will be executed together if available.
# The first should cause a size-one batch to be executed, then
# the next two should be executed together (signaled by both
# having the exception).
t1 = get_or_create_event_loop().create_task(call("hi1"))
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(asyncio.shield(t1), timeout=0.001)
t2 = get_or_create_event_loop().create_task(call("hi2"))
t3 = get_or_create_event_loop().create_task(call("raise"))
block_execution_event.set()
assert await t1 == "hi1"
with pytest.raises(ZeroDivisionError):
await t2
with pytest.raises(ZeroDivisionError):
await t3
@pytest.mark.asyncio
async def test_batch_timeout_empty_queue():
"""Check that Serve waits when creating batches.
Serve should wait a full batch_wait_timeout_s after receiving the first
request in the next batch before processing the batch.
"""
@serve.batch(max_batch_size=10, batch_wait_timeout_s=0.25)
async def no_op(requests):
return ["No-op"] * len(requests)
num_iterations = 2
for iteration in range(num_iterations):
tasks = [get_or_create_event_loop().create_task(no_op(None)) for _ in range(9)]
done, _ = await asyncio.wait(tasks, timeout=0.05)
# Due to the long timeout, none of the tasks should finish until a tenth
# request is submitted
assert len(done) == 0
tasks.append(get_or_create_event_loop().create_task(no_op(None)))
done, _ = await asyncio.wait(tasks, timeout=0.05)
# All the timeout tasks should be finished
assert set(tasks) == set(done)
assert all(t.result() == "No-op" for t in tasks)
if iteration < num_iterations - 1:
# Leave queue empty for batch_wait_timeout_s between batches
time.sleep(0.25)
@pytest.mark.asyncio
async def test_batch_wait_queue_exceeds_batch_size_race_condition():
"""Check that the wait queue can exceed the batch size.
This test was added to guard against a race condition documented in
https://github.com/ray-project/ray/pull/42705#discussion_r1466653910.
"""
@serve.batch(max_batch_size=2, batch_wait_timeout_s=10000)
async def no_op(requests):
return ["No-op"] * len(requests)
tasks = [get_or_create_event_loop().create_task(no_op(None)) for _ in range(10)]
# All the tasks should finish.
done, pending = await asyncio.wait(tasks, timeout=0.5)
assert len(done) == len(tasks)
assert len(pending) == 0
@pytest.mark.asyncio
@pytest.mark.parametrize("use_class", [True, False])
async def test_batch_size_multiple_long_timeout(use_class):
@serve.batch(max_batch_size=3, batch_wait_timeout_s=1000)
async def long_timeout(requests):
if "raise" in requests:
_ = 1 / 0
return requests
class LongTimeout:
@serve.batch(max_batch_size=3, batch_wait_timeout_s=1000)
async def long_timeout(self, requests):
if "raise" in requests:
_ = 1 / 0
return requests
cls = LongTimeout()
async def call(arg):
if use_class:
return await cls.long_timeout(arg)
else:
return await long_timeout(arg)
t1 = get_or_create_event_loop().create_task(call("hi1"))
t2 = get_or_create_event_loop().create_task(call("hi2"))
done, pending = await asyncio.wait([t1, t2], timeout=0.1)
assert len(done) == 0
t3 = get_or_create_event_loop().create_task(call("hi3"))
done, pending = await asyncio.wait([t1, t2, t3], timeout=100)
assert set(done) == {t1, t2, t3}
assert [t1.result(), t2.result(), t3.result()] == ["hi1", "hi2", "hi3"]
t1 = get_or_create_event_loop().create_task(call("hi1"))
t2 = get_or_create_event_loop().create_task(call("raise"))
done, pending = await asyncio.wait([t1, t2], timeout=0.1)
assert len(done) == 0
t3 = get_or_create_event_loop().create_task(call("hi3"))
done, pending = await asyncio.wait([t1, t2, t3], timeout=100)
assert set(done) == {t1, t2, t3}
assert all(isinstance(t.exception(), ZeroDivisionError) for t in done)
with pytest.raises(ZeroDivisionError):
t1.result()
with pytest.raises(ZeroDivisionError):
t2.result()
with pytest.raises(ZeroDivisionError):
t3.result()
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["args", "kwargs", "mixed", "out-of-order"])
@pytest.mark.parametrize("use_class", [True, False])
async def test_batch_args_kwargs(mode, use_class):
if use_class:
class MultipleArgs:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def method(self, key1, key2):
return [(key1[i], key2[i]) for i in range(len(key1))]
instance = MultipleArgs()
func = instance.method
else:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def func(key1, key2):
return [(key1[i], key2[i]) for i in range(len(key1))]
if mode == "args":
coros = [func("hi1", "hi2"), func("hi3", "hi4")]
elif mode == "kwargs":
coros = [func(key1="hi1", key2="hi2"), func(key1="hi3", key2="hi4")]
elif mode == "mixed":
coros = [func("hi1", key2="hi2"), func("hi3", key2="hi4")]
elif mode == "out-of-order":
coros = [func(key2="hi2", key1="hi1"), func(key2="hi4", key1="hi3")]
result = await asyncio.gather(*coros)
assert result == [("hi1", "hi2"), ("hi3", "hi4")]
@pytest.mark.asyncio
@pytest.mark.parametrize("use_class", [True, False])
@pytest.mark.parametrize("use_gen", [True, False])
async def test_batch_cancellation(use_class, use_gen):
block_requests = asyncio.Event()
request_was_cancelled = True
async def unary_implementation(key1, key2):
nonlocal block_requests, request_was_cancelled
await block_requests.wait()
request_was_cancelled = False
return [(key1[i], key2[i]) for i in range(len(key1))]
async def streaming_implementation(key1, key2):
nonlocal block_requests, request_was_cancelled
await block_requests.wait()
request_was_cancelled = False
yield [(key1[i], key2[i]) for i in range(len(key1))]
if use_class:
class MultipleArgs:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def unary_method(self, key1, key2):
return await unary_implementation(key1, key2)
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def streaming_method(self, key1, key2):
async for value in streaming_implementation(key1, key2):
yield value
instance = MultipleArgs()
if use_gen:
func = instance.streaming_method
else:
func = instance.unary_method
else:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def unary_func(key1, key2):
return await unary_implementation(key1, key2)
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def streaming_func(key1, key2):
async for value in streaming_implementation(key1, key2):
yield value
if use_gen:
func = streaming_func
else:
func = unary_func
if use_gen:
gens = [func("hi1", "hi2"), func("hi3", "hi4")]
tasks = [asyncio.create_task(gen.__anext__()) for gen in gens]
else:
tasks = [
asyncio.create_task(func("hi1", "hi2")),
asyncio.create_task(func("hi3", "hi4")),
]
print("Submitted requests.")
# The requests should be blocked on the long request_timeout
done, pending = await asyncio.wait(tasks, timeout=0.01)
assert len(done) == 0
assert len(pending) == 2
print("Requests are blocked, as expected.")
# Cancel the first request. The second request should still be blocked on
# the long request_timeout
tasks[0].cancel()
pending, done = await asyncio.wait(tasks, timeout=0.01)
assert len(done) == 1
assert len(pending) == 1
print("Cancelled first request.")
# Cancel the second request. Both requests should be done.
tasks[1].cancel()
done, pending = await asyncio.wait(tasks, timeout=0.01)
assert len(done) == 2
assert len(pending) == 0
print("Cancelled second request. Sending new requests with no timeout.")
# Sanity check that the request was actually cancelled.
assert request_was_cancelled
# Unblock requests. The requests should succeed.
block_requests.set()
if use_gen:
gens = [func("hi1", "hi2"), func("hi3", "hi4")]
tasks = [asyncio.create_task(gen.__anext__()) for gen in gens]
else:
tasks = [
asyncio.create_task(func("hi1", "hi2")),
asyncio.create_task(func("hi3", "hi4")),
]
result = await asyncio.gather(*tasks)
assert result == [("hi1", "hi2"), ("hi3", "hi4")]
@pytest.mark.asyncio
@pytest.mark.parametrize("use_class", [True, False])
@pytest.mark.parametrize("use_gen", [True, False])
async def test_cancellation_after_error(use_class, use_gen):
"""Cancelling a request after it errors should be supported."""
raise_error = asyncio.Event()
async def unary_implementation(key1, key2):
if not raise_error.is_set():
raise ValueError()
return [(key1[i], key2[i]) for i in range(len(key1))]
async def streaming_implementation(key1, key2):
if not raise_error.is_set():
raise ValueError()
yield [(key1[i], key2[i]) for i in range(len(key1))]
if use_class:
class MultipleArgs:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def unary_method(self, key1, key2):
return await unary_implementation(key1, key2)
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def streaming_method(self, key1, key2):
async for value in streaming_implementation(key1, key2):
yield value
instance = MultipleArgs()
if use_gen:
func = instance.streaming_method
else:
func = instance.unary_method
else:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def unary_func(key1, key2):
return await unary_implementation(key1, key2)
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def streaming_func(key1, key2):
async for value in streaming_implementation(key1, key2):
yield value
if use_gen:
func = streaming_func
else:
func = unary_func
# Submit requests and then cancel them.
if use_gen:
gens = [func("hi1", "hi2"), func("hi3", "hi4")]
tasks = [asyncio.create_task(gen.__anext__()) for gen in gens]
else:
tasks = [
asyncio.create_task(func("hi1", "hi2")),
asyncio.create_task(func("hi3", "hi4")),
]
print("Submitted initial batch of requests.")
for task in tasks:
task.cancel()
print("Closed initial batch of requests.")
raise_error.set()
# Submit requests and check that they still work.
if use_gen:
gens = [func("hi1", "hi2"), func("hi3", "hi4")]
tasks = [asyncio.create_task(gen.__anext__()) for gen in gens]
else:
tasks = [
asyncio.create_task(func("hi1", "hi2")),
asyncio.create_task(func("hi3", "hi4")),
]
print("Submitted new batch of requests.")
result = await asyncio.gather(*tasks)
assert result == [("hi1", "hi2"), ("hi3", "hi4")]
@pytest.mark.asyncio
@pytest.mark.parametrize("use_class", [True, False])
async def test_batch_setters(use_class):
if use_class:
class C:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def method(self, key1, key2):
return [(key1[i], key2[i]) for i in range(len(key1))]
instance = C()
func = instance.method
else:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def func(key1, key2):
return [(key1[i], key2[i]) for i in range(len(key1))]
assert func._get_max_batch_size() == 2
assert func._get_batch_wait_timeout_s() == 1000
# @serve.batch should create batches of size 2
tasks = [
get_or_create_event_loop().create_task(func("hi1", "hi2")),
get_or_create_event_loop().create_task(func("hi3", "hi4")),
]
done, pending = await asyncio.wait(tasks, timeout=0.1)
assert len(pending) == 0
assert {task.result() for task in done} == {("hi1", "hi2"), ("hi3", "hi4")}
# Set new values
func.set_max_batch_size(3)
func.set_batch_wait_timeout_s(15000)
assert func._get_max_batch_size() == 3
assert func._get_batch_wait_timeout_s() == 15000
# @serve.batch should create batches of size 3
tasks = [
get_or_create_event_loop().create_task(func("hi1", "hi2")),
get_or_create_event_loop().create_task(func("hi3", "hi4")),
get_or_create_event_loop().create_task(func("hi5", "hi6")),
]
done, pending = await asyncio.wait(tasks, timeout=0.1)
assert len(pending) == 0
assert {task.result() for task in done} == {
("hi1", "hi2"),
("hi3", "hi4"),
("hi5", "hi6"),
}
@pytest.mark.asyncio
async def test_batch_use_earliest_setters():
"""@serve.batch should use the right settings when constructing a batch.
When the @serve.batch setters get called before a batch has started
accumulating, the next batch should use the setters' values. When they
get called while a batch is accumulating, the previous values should be
used.
"""
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def func(key1, key2):
return [(key1[i], key2[i]) for i in range(len(key1))]
assert func._get_max_batch_size() == 2
assert func._get_batch_wait_timeout_s() == 1000
# Set new values
func.set_max_batch_size(3)
func.set_batch_wait_timeout_s(15000)
assert func._get_max_batch_size() == 3
assert func._get_batch_wait_timeout_s() == 15000
# Should create batches of size 3, even if setters are called while
# batch is accumulated
tasks = [
get_or_create_event_loop().create_task(func("hi1", "hi2")),
get_or_create_event_loop().create_task(func("hi3", "hi4")),
]
# Batch should be waiting for last request
done, pending = await asyncio.wait(tasks, timeout=0.1)
assert len(done) == 0 and len(pending) == 2
func.set_max_batch_size(1)
func.set_batch_wait_timeout_s(0)
assert func._get_max_batch_size() == 1
assert func._get_batch_wait_timeout_s() == 0
# Batch should still be waiting for last request
done, pending = await asyncio.wait(pending, timeout=0.1)
assert len(done) == 0 and len(pending) == 2
# Batch should execute after last request
pending.add(get_or_create_event_loop().create_task(func("hi5", "hi6")))
done, pending = await asyncio.wait(pending, timeout=0.1)
assert len(pending) == 0
assert {task.result() for task in done} == {
("hi1", "hi2"),
("hi3", "hi4"),
("hi5", "hi6"),
}
# Next batch should use updated values
tasks = [get_or_create_event_loop().create_task(func("hi1", "hi2"))]
done, pending = await asyncio.wait(tasks, timeout=0.1)
assert len(done) == 1 and len(pending) == 0
assert done.pop().result() == ("hi1", "hi2")
@pytest.mark.asyncio
@pytest.mark.parametrize("mode", ["args", "kwargs", "mixed", "out-of-order"])
@pytest.mark.parametrize("use_class", [True, False])
@pytest.mark.parametrize("generator_length", [0, 2, 5])
async def test_batch_generator_basic(mode, use_class, generator_length):
if use_class:
class MultipleArgs:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def method(self, key1, key2):
for gen_idx in range(generator_length):
yield [(gen_idx, key1[i], key2[i]) for i in range(len(key1))]
instance = MultipleArgs()
func = instance.method
else:
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def func(key1, key2):
for gen_idx in range(generator_length):
yield [(gen_idx, key1[i], key2[i]) for i in range(len(key1))]
if mode == "args":
generators = [func("hi1", "hi2"), func("hi3", "hi4")]
elif mode == "kwargs":
generators = [func(key1="hi1", key2="hi2"), func(key1="hi3", key2="hi4")]
elif mode == "mixed":
generators = [func("hi1", key2="hi2"), func("hi3", key2="hi4")]
elif mode == "out-of-order":
generators = [func(key2="hi2", key1="hi1"), func(key2="hi4", key1="hi3")]
results = [
[result async for result in generators[0]],
[result async for result in generators[1]],
]
assert results == [
[(gen_idx, "hi1", "hi2") for gen_idx in range(generator_length)],
[(gen_idx, "hi3", "hi4") for gen_idx in range(generator_length)],
]
@pytest.mark.asyncio
@pytest.mark.parametrize("error_type", ["runtime_error", "mismatched_lengths"])
async def test_batch_generator_exceptions(error_type):
GENERATOR_LENGTH = 5
ERROR_IDX = 2
ERROR_MSG = "Testing error"
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def func(key1, key2):
for gen_idx in range(GENERATOR_LENGTH):
results = [(gen_idx, key1[i], key2[i]) for i in range(len(key1))]
if gen_idx == ERROR_IDX:
if error_type == "runtime_error":
raise RuntimeError(ERROR_MSG)
elif error_type == "mismatched_lengths":
yield results * 2
yield results
generators = [func("hi1", "hi2"), func("hi3", "hi4")]
for generator in generators:
for _ in range(ERROR_IDX):
await generator.__anext__()
if error_type == "runtime_error":
with pytest.raises(RuntimeError, match=ERROR_MSG):
await generator.__anext__()
elif error_type == "mismatched_lengths":
with pytest.raises(RayServeException):
await generator.__anext__()
with pytest.raises(StopAsyncIteration):
await generator.__anext__()
@pytest.mark.asyncio
@pytest.mark.parametrize("stop_token", [StopAsyncIteration, StopIteration])
async def test_batch_generator_early_termination(stop_token):
NUM_CALLERS = 4
event = asyncio.Event()
@serve.batch(max_batch_size=NUM_CALLERS, batch_wait_timeout_s=1000)
async def sequential_terminator(ids: List[int]):
"""Terminates callers one-after-another in order of call."""
for num_finished_callers in range(1, NUM_CALLERS + 1):
event.clear()
responses = [stop_token for _ in range(num_finished_callers)]
responses += [ids[idx] for idx in range(num_finished_callers, NUM_CALLERS)]
yield responses
await event.wait()
ids = list(range(NUM_CALLERS))
generators = [sequential_terminator(id) for id in ids]
for id, generator in zip(ids, generators):
async for result in generator:
assert result == id
# Each terminated caller frees the sequential_terminator to process
# another iteration.
event.set()
@pytest.mark.asyncio
async def test_batch_generator_setters():
"""@serve.batch setters should succeed while the current batch streams."""
@serve.batch(max_batch_size=2, batch_wait_timeout_s=1000)
async def yield_three_times(key1, key2):
for _ in range(3):
yield [(key1[i], key2[i]) for i in range(len(key1))]
assert yield_three_times._get_max_batch_size() == 2
assert yield_three_times._get_batch_wait_timeout_s() == 1000
args_list = [("hi1", "hi2"), ("hi3", "hi4")]
coros = [yield_three_times(*args) for args in args_list]
# Partially consume generators
for coro, expected_result in zip(coros, args_list):
for _ in range(2):
await coro.__anext__() == expected_result
# Set new values
yield_three_times.set_max_batch_size(3)
yield_three_times.set_batch_wait_timeout_s(15000)
assert yield_three_times._get_max_batch_size() == 3
assert yield_three_times._get_batch_wait_timeout_s() == 15000
# Execute three more requests
args_list_2 = [("hi1", "hi2"), ("hi3", "hi4"), ("hi5", "hi6")]
coros_2 = [yield_three_times(*args) for args in args_list_2]
# Finish consuming original requests
for coro, expected_result in zip(coros, args_list):
await coro.__anext__() == expected_result
with pytest.raises(StopAsyncIteration):
await coro.__anext__()
# Consume new requests
for coro, expected_result in zip(coros_2, args_list_2):
for _ in range(3):
await coro.__anext__() == expected_result
with pytest.raises(StopAsyncIteration):
await coro.__anext__()
def test_warn_if_max_batch_size_exceeds_max_ongoing_requests():
"""Test warn_if_max_batch_size_exceeds_max_ongoing_requests() logged the warning
message correctly.
When the queue starts with or updated `max_batch_size` to be larger than
max_ongoing_requests, log the warning to suggest configuring `max_ongoing_requests`.
When the queue starts with or updated `max_batch_size` to be smaller or equal than
max_ongoing_requests, no warning should be logged.
"""
logger = logging.getLogger(SERVE_LOGGER_NAME)
stream = FakeStream()
stream_handler = logging.StreamHandler(stream)
logger.addHandler(stream_handler)
bound = default_deployment_config.max_ongoing_requests
over_bound = bound + 1
under_bound = bound - 1
over_bound_warning_message = (
f"`max_batch_size` ({over_bound}) * `max_concurrent_batches` "
f"({1}) is larger than `max_ongoing_requests` "
f"({bound}). This means the replica will never achieve "
"the configured `max_batch_size` concurrently. Please update "
"`max_ongoing_requests` to be >= `max_batch_size` * `max_concurrent_batches`.\n"
)
# Start queue above the bound will log warning. Start at under or at the bound will
# not log warning
for max_batch_size in [over_bound, under_bound, bound]:
queue = _BatchQueue(
max_batch_size=max_batch_size,
batch_wait_timeout_s=1000,
max_concurrent_batches=1,
)
if max_batch_size > bound:
assert over_bound_warning_message in stream.messages
else:
assert over_bound_warning_message not in stream.messages
stream.reset_message()
# Update queue above the bound will log warning. Update at under or at the bound
# will not log warning
for max_batch_size in [over_bound, under_bound, bound]:
queue.set_max_batch_size(max_batch_size)
if max_batch_size > bound:
assert over_bound_warning_message in stream.messages
else:
assert over_bound_warning_message not in stream.messages
stream.reset_message()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| FakeStream |
python | pennersr__django-allauth | allauth/socialaccount/providers/untappd/provider.py | {
"start": 626,
"end": 1729
} | class ____(OAuth2Provider):
id = "untappd"
name = "Untappd"
account_class = UntappdAccount
oauth2_adapter_class = UntappdOAuth2Adapter
def get_auth_params_from_request(self, request, action):
params = super().get_auth_params_from_request(request, action)
# Untappd uses redirect_url instead of redirect_uri
params["redirect_url"] = request.build_absolute_uri(
reverse(self.id + "_callback")
)
return params
def extract_uid(self, data):
return str(data["response"]["user"]["uid"])
def extract_common_fields(self, data):
user = data["response"]["user"]
return dict(
username=user["user_name"],
name=user["first_name"] + " " + user["last_name"],
)
def extract_email_addresses(self, data):
ret = [
EmailAddress(
email=data["response"]["user"]["settings"]["email_address"],
verified=True,
primary=True,
)
]
return ret
provider_classes = [UntappdProvider]
| UntappdProvider |
python | davidhalter__jedi | jedi/inference/context.py | {
"start": 5507,
"end": 6828
} | class ____(AbstractContext):
"""
Should be defined, otherwise the API returns empty types.
"""
def __init__(self, value):
super().__init__(value.inference_state)
self._value = value
@property
def tree_node(self):
return self._value.tree_node
@property
def parent_context(self):
return self._value.parent_context
def is_module(self):
return self._value.is_module()
def is_builtins_module(self):
return self._value == self.inference_state.builtins_module
def is_class(self):
return self._value.is_class()
def is_stub(self):
return self._value.is_stub()
def is_instance(self):
return self._value.is_instance()
def is_compiled(self):
return self._value.is_compiled()
def is_bound_method(self):
return self._value.is_bound_method()
def py__name__(self):
return self._value.py__name__()
@property
def name(self):
return self._value.name
def get_qualified_names(self):
return self._value.get_qualified_names()
def py__doc__(self):
return self._value.py__doc__()
def get_value(self):
return self._value
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._value)
| ValueContext |
python | realpython__materials | inheritance-and-composition/composition/employees.py | {
"start": 108,
"end": 1074
} | class ____:
def __init__(self):
self._employees = [
{"id": 1, "name": "Mary Poppins", "role": "manager"},
{"id": 2, "name": "John Smith", "role": "secretary"},
{"id": 3, "name": "Kevin Bacon", "role": "sales"},
{"id": 4, "name": "Jane Doe", "role": "factory"},
{"id": 5, "name": "Robin Williams", "role": "secretary"},
]
self.productivity = ProductivitySystem()
self.payroll = PayrollSystem()
self.employee_addresses = AddressBook()
@property
def employees(self):
return [self._create_employee(**data) for data in self._employees]
def _create_employee(self, id, name, role):
address = self.employee_addresses.get_employee_address(id)
employee_role = self.productivity.get_role(role)
payroll_policy = self.payroll.get_policy(id)
return Employee(id, name, address, employee_role, payroll_policy)
| EmployeeDatabase |
python | walkccc__LeetCode | solutions/444. Sequence Reconstruction/444.py | {
"start": 0,
"end": 919
} | class ____:
def sequenceReconstruction(
self,
org: list[int],
seqs: list[list[int]],
) -> bool:
if not seqs:
return False
n = len(org)
graph = [[] for _ in range(n)]
inDegrees = [0] * n
# Build the graph.
for seq in seqs:
if len(seq) == 1 and seq[0] < 1 or seq[0] > n:
return False
for u, v in zip(seq, seq[1:]):
if u < 1 or u > n or v < 1 or v > n:
return False
graph[u - 1].append(v - 1)
inDegrees[v - 1] += 1
# Perform topological sorting.
q = collections.deque([i for i, d in enumerate(inDegrees) if d == 0])
i = 0 # org's index
while q:
if len(q) > 1:
return False
u = q.popleft()
if u != org[i] - 1:
return False
i += 1
for v in graph[u]:
inDegrees[v] -= 1
if inDegrees[v] == 0:
q.append(v)
return i == n
| Solution |
python | facebook__pyre-check | client/commands/profile.py | {
"start": 2011,
"end": 3511
} | class ____(Event):
description: Final[Optional[str]]
def _parse_tags(input: List[List[str]]) -> Dict[str, str]:
return {key: value for [key, value] in input}
def _parse_metadata(input_json: Dict[str, Any]) -> EventMetadata:
pid = input_json["pid"]
return EventMetadata(
name=input_json["name"],
worker_id=input_json.get("worker_id", pid),
pid=pid,
timestamp=input_json["timestamp"],
tags=_parse_tags(input_json.get("tags", [])),
)
def parse_event(input_string: str) -> Event:
input_json: Dict[str, Any] = json.loads(input_string)
event_type = input_json["event_type"]
metadata = _parse_metadata(input_json)
if event_type[0] == "Duration":
duration = event_type[1]
return DurationEvent(duration=duration, metadata=metadata)
elif event_type[0] == "Counter":
description = None if len(event_type) <= 1 else event_type[1]
return CounterEvent(description=description, metadata=metadata)
else:
raise ValueError(f"Unrecognized event type: {input}")
def parse_events(input_string: str) -> List[Event]:
output: List[Event] = []
for index, line in enumerate(input_string.splitlines()):
try:
line = line.strip()
if len(line) == 0:
continue
output.append(parse_event(line))
except Exception:
raise RuntimeError(f"Malformed log entry detected on line {index + 1}")
return output
| CounterEvent |
python | psf__black | tests/data/cases/class_methods_new_line.py | {
"start": 68,
"end": 129
} | class ____:
"""Just a docstring."""
| ClassWithJustTheDocstring |
python | fastai__fastai | fastai/torch_core.py | {
"start": 5476,
"end": 15023
} | class ____(ArrayImageBase):
"An array representing an image mask"
_show_args = {'alpha':0.5, 'cmap':'tab20', 'interpolation':'nearest'}
# %% ../nbs/00_torch_core.ipynb 37
@patch
def __array_eq__(self:Tensor,b):
return torch.equal(self,b) if self.dim() else self==b
# %% ../nbs/00_torch_core.ipynb 38
def _array2tensor(x, requires_grad=False, pin_memory=False, **kwargs):
if x.dtype==np.uint16: x = x.astype(np.float32)
# windows default numpy int dtype is int32, while torch tensor default int dtype is int64
# https://github.com/numpy/numpy/issues/9464
if sys.platform == "win32" and x.dtype==int: x = x.astype(np.int64)
t = torch.as_tensor(x, **kwargs)
t.requires_grad_(requires_grad)
if pin_memory: t.pin_memory()
return t
# %% ../nbs/00_torch_core.ipynb 39
@use_kwargs_dict(dtype=None, device=None, requires_grad=False, pin_memory=False)
def tensor(x, *rest, **kwargs):
"Like `torch.as_tensor`, but handle lists too, and can pass multiple vector elements directly."
if len(rest): x = (x,)+rest
# There was a Pytorch bug in dataloader using num_workers>0. Haven't confirmed if fixed
# if isinstance(x, (tuple,list)) and len(x)==0: return tensor(0)
res = (x if isinstance(x, Tensor)
else torch.tensor(x, **kwargs) if isinstance(x, (tuple,list,numbers.Number))
else _array2tensor(x, **kwargs) if isinstance(x, ndarray)
else as_tensor(x.values, **kwargs) if isinstance(x, (pd.Series, pd.DataFrame))
# else as_tensor(array(x, **kwargs)) if hasattr(x, '__array__') or is_iter(x)
else _array2tensor(array(x), **kwargs))
if res.dtype is torch.float64: return res.float()
return res
# %% ../nbs/00_torch_core.ipynb 42
def set_seed(s, reproducible=False):
"Set random seed for `random`, `torch`, and `numpy` (where available)"
try: torch.manual_seed(s)
except NameError: pass
try: torch.cuda.manual_seed_all(s)
except NameError: pass
try: np.random.seed(s%(2**32-1))
except NameError: pass
random.seed(s)
if reproducible:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# %% ../nbs/00_torch_core.ipynb 47
def get_random_states():
"Gets states for `random`, `torch`, and `numpy` random number generators"
return {'random_state':random.getstate(),
'numpy_state':np.random.get_state(),
'torch_state':torch.get_rng_state(),
'torch_cuda_state':torch.cuda.get_rng_state_all(),
'torch_deterministic':torch.backends.cudnn.deterministic,
'torch_benchmark':torch.backends.cudnn.benchmark}
# %% ../nbs/00_torch_core.ipynb 48
def set_random_states(random_state,numpy_state,torch_state,torch_cuda_state,torch_deterministic,torch_benchmark):
"Set states for `random`, `torch`, and `numpy` random number generators"
random.setstate(random_state)
np.random.set_state(numpy_state)
torch.set_rng_state(torch_state)
torch.cuda.set_rng_state_all(torch_cuda_state)
torch.backends.cudnn.deterministic=torch_deterministic
torch.backends.cudnn.benchmark=torch_benchmark
# %% ../nbs/00_torch_core.ipynb 53
@contextmanager
def no_random(seed=42,reproducible=True):
"Stores and retrieves state of random number generators. Sets random seed for `random`, `torch`, and `numpy`."
states = get_random_states()
set_seed(seed,reproducible=reproducible)
try:
yield #we are managing global variables
finally:
set_random_states(**states)
# %% ../nbs/00_torch_core.ipynb 59
def unsqueeze(x, dim=-1, n=1):
"Same as `torch.unsqueeze` but can add `n` dims"
for _ in range(n): x = x.unsqueeze(dim)
return x
# %% ../nbs/00_torch_core.ipynb 61
def unsqueeze_(x, dim=-1, n=1):
"Same as `torch.unsqueeze_` but can add `n` dims"
for _ in range(n): x.unsqueeze_(dim)
return x
# %% ../nbs/00_torch_core.ipynb 63
def _fa_rebuild_tensor (cls, *args, **kwargs): return cls(torch._utils._rebuild_tensor_v2(*args, **kwargs))
def _fa_rebuild_qtensor(cls, *args, **kwargs): return cls(torch._utils._rebuild_qtensor (*args, **kwargs))
# %% ../nbs/00_torch_core.ipynb 64
def apply(func, x, *args, **kwargs):
"Apply `func` recursively to `x`, passing on args"
if is_listy(x): return type(x)([apply(func, o, *args, **kwargs) for o in x])
if isinstance(x,(dict,MutableMapping)): return {k: apply(func, v, *args, **kwargs) for k,v in x.items()}
res = func(x, *args, **kwargs)
return res if x is None else retain_type(res, x)
# %% ../nbs/00_torch_core.ipynb 65
def maybe_gather(x, axis=0):
"Gather copies of `x` on `axis` (if training is distributed)"
if num_distrib()<=1: return x
ndim = x.ndim
res = [x.new_zeros(*x.shape if ndim > 0 else (1,)) for _ in range(num_distrib())]
torch.distributed.all_gather(res, x.contiguous() if ndim > 0 else x[None])
return torch.cat(res, dim=axis) if ndim > 0 else torch.cat(res, dim=axis).mean()
# %% ../nbs/00_torch_core.ipynb 66
def to_detach(b, cpu=True, gather=True):
"Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`."
def _inner(x, cpu=True, gather=True):
if not isinstance(x,Tensor): return x
x = x.detach()
if gather: x = maybe_gather(x)
return x.cpu() if cpu else x
return apply(_inner, b, cpu=cpu, gather=gather)
# %% ../nbs/00_torch_core.ipynb 68
def to_half(b):
"Recursively map floating point tensors in `b ` to FP16."
return apply(lambda x: x.half() if torch.is_floating_point(x) else x, b)
# %% ../nbs/00_torch_core.ipynb 69
def to_float(b):
"Recursively map floating point tensors in `b ` to float."
return apply(lambda x: x.float() if torch.is_floating_point(x) else x, b)
# %% ../nbs/00_torch_core.ipynb 70
# None: True if available; True: error if not available; False: use CPU
defaults.use_cuda = None
# %% ../nbs/00_torch_core.ipynb 71
def _has_mps():
if nested_attr(torch, 'backends.mps.is_available', noop)(): return True
return nested_attr(torch, 'backends.mps.is_built', False)()
def default_device(use=-1):
"Return or set default device; `use_cuda`: -1 - CUDA/mps if available; True - error if not available; False - CPU"
if use == -1: use = defaults.use_cuda
else: defaults.use_cuda=use
if use is None:
if torch.cuda.is_available() or _has_mps(): use = True
if use:
if torch.cuda.is_available(): return torch.device(torch.cuda.current_device())
if _has_mps(): return torch.device('mps')
return torch.device('cpu')
# %% ../nbs/00_torch_core.ipynb 73
def to_device(b, device=None, non_blocking=False):
"Recursively put `b` on `device`."
if defaults.use_cuda==False: device='cpu'
elif device is None: device=default_device()
def _inner(o):
# ToDo: add TensorDict when released
if isinstance(o,Tensor): return o.to(device, non_blocking=non_blocking)
return o
return apply(_inner, b)
# %% ../nbs/00_torch_core.ipynb 76
def to_cpu(b):
"Recursively map tensors in `b ` to the cpu."
return to_device(b,'cpu')
# %% ../nbs/00_torch_core.ipynb 78
def to_np(x):
"Convert a tensor to a numpy array."
return apply(lambda o: o.data.cpu().numpy(), x)
# %% ../nbs/00_torch_core.ipynb 80
def to_concat(xs, dim=0):
"Concat the element in `xs` (recursively if they are tuples/lists of tensors)"
if not xs: return xs
if is_listy(xs[0]): return type(xs[0])([to_concat([x[i] for x in xs], dim=dim) for i in range_of(xs[0])])
if isinstance(xs[0],dict): return {k: to_concat([x[k] for x in xs], dim=dim) for k in xs[0].keys()}
#We may receive xs that are not concatenable (inputs of a text classifier for instance),
# in this case we return a big list
try: return retain_type(torch.cat(xs, dim=dim), xs[0])
except: return sum([L(retain_type(o_.index_select(dim, tensor(i)).squeeze(dim), xs[0])
for i in range_of(o_)) for o_ in xs], L())
# %% ../nbs/00_torch_core.ipynb 84
# Parsed PyTorch versions for faster version checking
_torch_version = parse(torch.__version__)
_torch_20 = parse('2.0')
_torch_113 = parse('1.13')
_torch_112 = parse('1.12')
# %% ../nbs/00_torch_core.ipynb 85
@patch
def set_meta(self:Tensor, x, as_copy=False):
"Set all metadata in `__dict__`"
if not hasattr(x,'__dict__'): return
# XXX: change to `deepcopy` once PyTorch 1.7.1 is out, and check nb 23 segmentation fit works
self.__dict__ = copy(x.__dict__) if as_copy else x.__dict__
# %% ../nbs/00_torch_core.ipynb 86
if not hasattr(torch,'as_subclass'): torch.as_subclass = torch.Tensor.as_subclass
# %% ../nbs/00_torch_core.ipynb 87
@patch
def as_subclass(self:Tensor, typ):
"Cast to `typ` and include `__dict__` and meta"
return retain_meta(self, torch.as_subclass(self, typ))
# %% ../nbs/00_torch_core.ipynb 90
def _torch_handled(args, opt, func):
if func not in opt: return False
for oks in opt[func]:
if all(isinstance(arg,ok) for arg,ok in zip(args,oks) if ok): return True
# %% ../nbs/00_torch_core.ipynb 91
# from https://github.com/pytorch/pytorch/blob/13c975684a220ec096216ec6468ccd0dc90ff50a/torch/_tensor.py#L34
def _rebuild_from_type(func, type, args, dict):
ret = func(*args).as_subclass(type)
ret.__dict__ = dict
return ret
# %% ../nbs/00_torch_core.ipynb 92
def _find_args(x):
x0 = x[0] if is_listy(x[0]) and x[0] else x
return [a for a in x0 if hasattr(a,'__dict__')]
# %% ../nbs/00_torch_core.ipynb 93
| ArrayMask |
python | django__django | tests/modeladmin/tests.py | {
"start": 34455,
"end": 39849
} | class ____(SimpleTestCase):
class MockUser:
def has_module_perms(self, app_label):
return app_label == "modeladmin"
class MockViewUser(MockUser):
def has_perm(self, perm, obj=None):
return perm == "modeladmin.view_band"
class MockAddUser(MockUser):
def has_perm(self, perm, obj=None):
return perm == "modeladmin.add_band"
class MockChangeUser(MockUser):
def has_perm(self, perm, obj=None):
return perm == "modeladmin.change_band"
class MockDeleteUser(MockUser):
def has_perm(self, perm, obj=None):
return perm == "modeladmin.delete_band"
def test_has_view_permission(self):
"""
has_view_permission() returns True for users who can view objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockViewUser()
self.assertIs(ma.has_view_permission(request), True)
request.user = self.MockAddUser()
self.assertIs(ma.has_view_permission(request), False)
request.user = self.MockChangeUser()
self.assertIs(ma.has_view_permission(request), True)
request.user = self.MockDeleteUser()
self.assertIs(ma.has_view_permission(request), False)
def test_has_add_permission(self):
"""
has_add_permission returns True for users who can add objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockViewUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockAddUser()
self.assertTrue(ma.has_add_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_add_permission(request))
def test_inline_has_add_permission_uses_obj(self):
class ConcertInline(TabularInline):
model = Concert
def has_add_permission(self, request, obj):
return bool(obj)
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertEqual(ma.get_inline_instances(request), [])
band = Band(name="The Doors", bio="", sign_date=date(1965, 1, 1))
inline_instances = ma.get_inline_instances(request, band)
self.assertEqual(len(inline_instances), 1)
self.assertIsInstance(inline_instances[0], ConcertInline)
def test_has_change_permission(self):
"""
has_change_permission returns True for users who can edit objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockViewUser()
self.assertIs(ma.has_change_permission(request), False)
request.user = self.MockAddUser()
self.assertFalse(ma.has_change_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_change_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_change_permission(request))
def test_has_delete_permission(self):
"""
has_delete_permission returns True for users who can delete objects and
False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockViewUser()
self.assertIs(ma.has_delete_permission(request), False)
request.user = self.MockAddUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_delete_permission(request))
def test_has_module_permission(self):
"""
as_module_permission returns True for users who have any permission
for the module and False for users who don't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockViewUser()
self.assertIs(ma.has_module_permission(request), True)
request.user = self.MockAddUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_module_permission(request))
original_app_label = ma.opts.app_label
ma.opts.app_label = "anotherapp"
try:
request.user = self.MockViewUser()
self.assertIs(ma.has_module_permission(request), False)
request.user = self.MockAddUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_module_permission(request))
finally:
ma.opts.app_label = original_app_label
| ModelAdminPermissionTests |
python | kamyu104__LeetCode-Solutions | Python/valid-word-abbreviation.py | {
"start": 29,
"end": 693
} | class ____(object):
def validWordAbbreviation(self, word, abbr):
"""
:type word: str
:type abbr: str
:rtype: bool
"""
i , digit = 0, 0
for c in abbr:
if c.isdigit():
if digit == 0 and c == '0':
return False
digit *= 10
digit += int(c)
else:
if digit:
i += digit
digit = 0
if i >= len(word) or word[i] != c:
return False
i += 1
if digit:
i += digit
return i == len(word)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/saved_model/model_utils/export_output.py | {
"start": 3249,
"end": 6664
} | class ____(ExportOutput):
"""Represents the output of a classification head.
Either classes or scores or both must be set.
The classes `Tensor` must provide string labels, not integer class IDs.
If only classes is set, it is interpreted as providing top-k results in
descending order.
If only scores is set, it is interpreted as providing a score for every class
in order of class ID.
If both classes and scores are set, they are interpreted as zipped, so each
score corresponds to the class at the same index. Clients should not depend
on the order of the entries.
"""
def __init__(self, scores=None, classes=None):
"""Constructor for `ClassificationOutput`.
Args:
scores: A float `Tensor` giving scores (sometimes but not always
interpretable as probabilities) for each class. May be `None`, but
only if `classes` is set. Interpretation varies-- see class doc.
classes: A string `Tensor` giving predicted class labels. May be `None`,
but only if `scores` is set. Interpretation varies-- see class doc.
Raises:
ValueError: if neither classes nor scores is set, or one of them is not a
`Tensor` with the correct dtype.
"""
if (scores is not None
and not (isinstance(scores, tensor.Tensor)
and scores.dtype.is_floating)):
raise ValueError('Classification scores must be a float32 Tensor; '
'got {}'.format(scores))
if (classes is not None
and not (isinstance(classes, tensor.Tensor)
and dtypes.as_dtype(classes.dtype) == dtypes.string)):
raise ValueError('Classification classes must be a string Tensor; '
'got {}'.format(classes))
if scores is None and classes is None:
raise ValueError('Cannot create a ClassificationOutput with empty '
'arguments. At least one of `scores` and `classes` '
'must be defined.')
self._scores = scores
self._classes = classes
@property
def scores(self):
return self._scores
@property
def classes(self):
return self._classes
def as_signature_def(self, receiver_tensors):
if len(receiver_tensors) != 1:
raise ValueError(
'Classification signatures can only accept a single tensor input of '
'type tf.string. Please check to make sure that you have structured '
'the serving_input_receiver_fn so that it creates a single string '
'placeholder. If your model function expects multiple inputs, then '
'use `tf.io.parse_example()` to parse the string into multiple '
f'tensors.\n Received: {receiver_tensors}')
(_, examples), = receiver_tensors.items()
if dtypes.as_dtype(examples.dtype) != dtypes.string:
raise ValueError(
'Classification signatures can only accept a single tensor input of '
'type tf.string. Please check to make sure that you have structured '
'the serving_input_receiver_fn so that it creates a single string '
'placeholder. If your model function expects multiple inputs, then '
'use `tf.io.parse_example()` to parse the string into multiple '
f'tensors.\n Received: {receiver_tensors}')
return signature_def_utils.classification_signature_def(
examples, self.classes, self.scores)
| ClassificationOutput |
python | django__django | tests/backends/mysql/test_introspection.py | {
"start": 171,
"end": 1399
} | class ____(TestCase):
def test_parse_constraint_columns(self):
_parse_constraint_columns = connection.introspection._parse_constraint_columns
tests = (
("`height` >= 0", ["height"], ["height"]),
("`cost` BETWEEN 1 AND 10", ["cost"], ["cost"]),
("`ref1` > `ref2`", ["id", "ref1", "ref2"], ["ref1", "ref2"]),
(
"`start` IS NULL OR `end` IS NULL OR `start` < `end`",
["id", "start", "end"],
["start", "end"],
),
("JSON_VALID(`json_field`)", ["json_field"], ["json_field"]),
("CHAR_LENGTH(`name`) > 2", ["name"], ["name"]),
("lower(`ref1`) != 'test'", ["id", "owe", "ref1"], ["ref1"]),
("lower(`ref1`) != 'test'", ["id", "lower", "ref1"], ["ref1"]),
("`name` LIKE 'test%'", ["name"], ["name"]),
)
for check_clause, table_columns, expected_columns in tests:
with self.subTest(check_clause):
check_columns = _parse_constraint_columns(check_clause, table_columns)
self.assertEqual(list(check_columns), expected_columns)
@skipUnless(connection.vendor == "mysql", "MySQL tests")
| ParsingTests |
python | getlogbook__logbook | tests/test_deadlock.py | {
"start": 250,
"end": 928
} | class ____:
def __init__(self):
self._acquired = False
self._deadlock_occurred = False
def acquire(self):
if self._acquired:
self._deadlock_occurred = True
self._acquired = True
def release(self):
self._acquired = False
def test_deadlock_in_emit():
logbook_logger = logbook.Logger("logbook")
obj = MyObject(logbook_logger.info)
stream_handler = logbook.StreamHandler(stream=sys.stderr, level=logbook.DEBUG)
stream_handler.lock = FakeLock()
with stream_handler.applicationbound():
logbook_logger.info("format this: {}", obj)
assert not stream_handler.lock._deadlock_occurred
| FakeLock |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.