language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | realpython__materials | django-diary/source_code_final/entries/views.py | {
"start": 817,
"end": 1094
} | class ____(LockedView, SuccessMessageMixin, UpdateView):
model = Entry
fields = ["title", "content"]
success_message = "Your entry was updated!"
def get_success_url(self):
return reverse_lazy("entry-detail", kwargs={"pk": self.object.pk})
| EntryUpdateView |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/react/types.py | {
"start": 1254,
"end": 1786
} | class ____(BaseReasoningStep):
"""Response reasoning step."""
thought: str
response: str
is_streaming: bool = False
def get_content(self) -> str:
"""Get content."""
if self.is_streaming:
return f"Thought: {self.thought}\nAnswer (Starts With): {self.response} ..."
else:
return f"Thought: {self.thought}\nAnswer: {self.response}"
@property
def is_done(self) -> bool:
"""Is the reasoning step the last one."""
return True
| ResponseReasoningStep |
python | viewflow__viewflow | viewflow/jsonstore.py | {
"start": 5484,
"end": 5671
} | class ____(JSONFieldMixin, fields.BooleanField):
def __init__(self, *args, **kwargs):
super(BooleanField, self).__init__(*args, **kwargs)
self.blank = False
| BooleanField |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 84767,
"end": 84982
} | class ____(BlockwiseHead):
def _task(self, name: Key, index: int) -> Task:
return Task(
name, operator.getitem, TaskRef((self.frame._name, index)), slice(0, self.n)
)
| BlockwiseHeadIndex |
python | huggingface__transformers | src/transformers/loss/loss_for_object_detection.py | {
"start": 19518,
"end": 24347
} | class ____:
def __init__(self, tensors, mask: Tensor | None):
self.tensors = tensors
self.mask = mask
def to(self, device):
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: list[Tensor]):
if tensor_list[0].ndim == 3:
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
batch_shape = [len(tensor_list)] + max_size
batch_size, num_channels, height, width = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], : img.shape[2]] = False
else:
raise ValueError("Only 3-dimensional tensors are supported")
return NestedTensor(tensor, mask)
# taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py
def _set_aux_loss(outputs_class, outputs_coord):
return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
def ForSegmentationLoss(
logits, labels, device, pred_boxes, pred_masks, config, outputs_class=None, outputs_coord=None, **kwargs
):
# First: create the matcher
matcher = HungarianMatcher(class_cost=config.class_cost, bbox_cost=config.bbox_cost, giou_cost=config.giou_cost)
# Second: create the criterion
losses = ["labels", "boxes", "cardinality", "masks"]
criterion = ImageLoss(
matcher=matcher,
num_classes=config.num_labels,
eos_coef=config.eos_coefficient,
losses=losses,
)
criterion.to(device)
# Third: compute the losses, based on outputs and labels
outputs_loss = {}
outputs_loss["logits"] = logits
outputs_loss["pred_boxes"] = pred_boxes
outputs_loss["pred_masks"] = pred_masks
auxiliary_outputs = None
if config.auxiliary_loss:
auxiliary_outputs = _set_aux_loss(outputs_class, outputs_coord)
outputs_loss["auxiliary_outputs"] = auxiliary_outputs
loss_dict = criterion(outputs_loss, labels)
# Fourth: compute total loss, as a weighted sum of the various losses
weight_dict = {"loss_ce": 1, "loss_bbox": config.bbox_loss_coefficient}
weight_dict["loss_giou"] = config.giou_loss_coefficient
weight_dict["loss_mask"] = config.mask_loss_coefficient
weight_dict["loss_dice"] = config.dice_loss_coefficient
if config.auxiliary_loss:
aux_weight_dict = {}
for i in range(config.decoder_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict if k in weight_dict)
return loss, loss_dict, auxiliary_outputs
def ForObjectDetectionLoss(
logits, labels, device, pred_boxes, config, outputs_class=None, outputs_coord=None, **kwargs
):
# First: create the matcher
matcher = HungarianMatcher(class_cost=config.class_cost, bbox_cost=config.bbox_cost, giou_cost=config.giou_cost)
# Second: create the criterion
losses = ["labels", "boxes", "cardinality"]
criterion = ImageLoss(
matcher=matcher,
num_classes=config.num_labels,
eos_coef=config.eos_coefficient,
losses=losses,
)
criterion.to(device)
# Third: compute the losses, based on outputs and labels
outputs_loss = {}
auxiliary_outputs = None
outputs_loss["logits"] = logits
outputs_loss["pred_boxes"] = pred_boxes
if config.auxiliary_loss:
auxiliary_outputs = _set_aux_loss(outputs_class, outputs_coord)
outputs_loss["auxiliary_outputs"] = auxiliary_outputs
loss_dict = criterion(outputs_loss, labels)
# Fourth: compute total loss, as a weighted sum of the various losses
weight_dict = {"loss_ce": 1, "loss_bbox": config.bbox_loss_coefficient}
weight_dict["loss_giou"] = config.giou_loss_coefficient
if config.auxiliary_loss:
aux_weight_dict = {}
for i in range(config.decoder_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict if k in weight_dict)
return loss, loss_dict, auxiliary_outputs
| NestedTensor |
python | huggingface__transformers | tests/pipelines/test_pipelines_translation.py | {
"start": 3791,
"end": 6408
} | class ____(unittest.TestCase):
@require_torch
@slow
def test_default_translations(self):
# We don't provide a default for this pair
with self.assertRaises(ValueError):
pipeline(task="translation_cn_to_ar")
# but we do for this one
translator = pipeline(task="translation_en_to_de")
self.assertEqual(translator._preprocess_params["src_lang"], "en")
self.assertEqual(translator._preprocess_params["tgt_lang"], "de")
@require_torch
@slow
def test_multilingual_translation(self):
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
translator = pipeline(task="translation", model=model, tokenizer=tokenizer)
# Missing src_lang, tgt_lang
with self.assertRaises(ValueError):
translator("This is a test")
outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR")
self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}])
outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN")
self.assertEqual(outputs, [{"translation_text": "यह एक परीक्षण है"}])
# src_lang, tgt_lang can be defined at pipeline call time
translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR")
outputs = translator("This is a test")
self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}])
@require_torch
def test_translation_on_odd_language(self):
model = "patrickvonplaten/t5-tiny-random"
translator = pipeline(task="translation_cn_to_ar", model=model)
self.assertEqual(translator._preprocess_params["src_lang"], "cn")
self.assertEqual(translator._preprocess_params["tgt_lang"], "ar")
@require_torch
def test_translation_default_language_selection(self):
model = "patrickvonplaten/t5-tiny-random"
with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"):
translator = pipeline(task="translation", model=model)
self.assertEqual(translator.task, "translation_en_to_de")
self.assertEqual(translator._preprocess_params["src_lang"], "en")
self.assertEqual(translator._preprocess_params["tgt_lang"], "de")
@require_torch
def test_translation_with_no_language_no_model_fails(self):
with self.assertRaises(ValueError):
pipeline(task="translation")
| TranslationNewFormatPipelineTests |
python | ray-project__ray | python/ray/serve/batching.py | {
"start": 1081,
"end": 1242
} | class ____:
self_arg: Any
flattened_args: List[Any]
future: asyncio.Future
request_context: serve.context._RequestContext
@dataclass
| _SingleRequest |
python | huggingface__transformers | src/transformers/models/wav2vec2/modeling_wav2vec2.py | {
"start": 57846,
"end": 67718
} | class ____(Wav2Vec2PreTrainedModel):
def __init__(self, config: Wav2Vec2Config):
super().__init__(config)
self.wav2vec2 = Wav2Vec2Model(config)
self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)
self.quantizer = Wav2Vec2GumbelVectorQuantizer(config)
self.project_hid = nn.Linear(config.hidden_size, config.proj_codevector_dim)
self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
# Initialize weights and apply final processing
self.post_init()
def set_gumbel_temperature(self, temperature: int):
"""
Set the Gumbel softmax temperature to a given value. Only necessary for training
"""
self.quantizer.temperature = temperature
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.wav2vec2.feature_extractor._freeze_parameters()
@staticmethod
def compute_contrastive_logits(
target_features: torch.FloatTensor,
negative_features: torch.FloatTensor,
predicted_features: torch.FloatTensor,
temperature: float = 0.1,
):
"""
Compute logits for contrastive loss based using cosine similarity as the distance measure between
`[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
"""
target_features = torch.cat([target_features, negative_features], dim=0)
logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1).type_as(
target_features
)
# apply temperature
logits = logits / temperature
return logits
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.BoolTensor] = None,
sampled_negative_indices: Optional[torch.BoolTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, Wav2Vec2ForPreTrainingOutput]:
r"""
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*):
Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss.
Required input for pre-training.
Example:
```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, Wav2Vec2ForPreTraining
>>> from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices
>>> from datasets import load_dataset
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
>>> model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values # Batch size 1
>>> # compute masked indices
>>> batch_size, raw_sequence_length = input_values.shape
>>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length).item()
>>> mask_time_indices = _compute_mask_indices(
... shape=(batch_size, sequence_length), mask_prob=0.2, mask_length=2
... )
>>> sampled_negative_indices = _sample_negative_indices(
... features_shape=(batch_size, sequence_length),
... num_negatives=model.config.num_negatives,
... mask_time_indices=mask_time_indices,
... )
>>> mask_time_indices = torch.tensor(data=mask_time_indices, device=input_values.device, dtype=torch.long)
>>> sampled_negative_indices = torch.tensor(
... data=sampled_negative_indices, device=input_values.device, dtype=torch.long
... )
>>> with torch.no_grad():
... outputs = model(input_values, mask_time_indices=mask_time_indices)
>>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states)
>>> cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1)
>>> # show that cosine similarity is much higher than random
>>> cosine_sim[mask_time_indices.to(torch.bool)].mean() > 0.5
tensor(True)
>>> # for contrastive loss training model should be put into train mode
>>> model = model.train()
>>> loss = model(
... input_values, mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices
... ).loss
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if mask_time_indices is not None:
mask_time_indices = mask_time_indices.to(torch.bool)
outputs = self.wav2vec2(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
mask_time_indices=mask_time_indices,
return_dict=return_dict,
)
# 1. project all transformed features (including masked) to final vq dim
transformer_features = self.project_hid(outputs[0])
# 2. quantize all (unmasked) extracted features and project to final vq dim
extract_features = self.dropout_features(outputs[1])
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(
extract_features.shape[1], attention_mask, add_adapter=False
)
quantized_features, codevector_perplexity = self.quantizer(
extract_features, mask_time_indices=mask_time_indices
)
quantized_features = quantized_features.to(self.project_q.weight.dtype)
quantized_features = self.project_q(quantized_features)
loss = contrastive_loss = diversity_loss = None
if sampled_negative_indices is not None:
batch_size, sequence_length, hidden_size = quantized_features.shape
# for training, we sample negatives
# 3. sample K negatives (distractors) quantized states for contrastive loss
# if attention_mask is passed, make sure that padded feature vectors cannot be sampled
# sample negative quantized vectors BTC => (BxT)C
negative_quantized_features = quantized_features.view(-1, hidden_size)[
sampled_negative_indices.long().view(-1)
]
negative_quantized_features = negative_quantized_features.view(
batch_size, sequence_length, -1, hidden_size
).permute(2, 0, 1, 3)
# 4. compute logits, corresponding to `logs = sim(c_t, [q_t, \sim{q}_t]) / \kappa`
# of equation (3) in https://huggingface.co/papers/2006.11477
logits = self.compute_contrastive_logits(
quantized_features[None, :],
negative_quantized_features,
transformer_features,
self.config.contrastive_logits_temperature,
)
# 5. if a negative vector is identical to the positive (i.e. when codebook utilization is low),
# its cosine similarity will be masked
neg_is_pos = (quantized_features == negative_quantized_features).all(-1)
if neg_is_pos.any():
logits[1:][neg_is_pos] = float("-inf")
# 6. compute contrastive loss \mathbf{L}_m = cross_entropy(logs) =
# -log(exp(sim(c_t, q_t)/\kappa) / \sum_{\sim{q}} exp(sim(c_t, \sim{q})/\kappa))
logits = logits.transpose(0, 2).reshape(-1, logits.size(0))
target = ((1 - mask_time_indices.long()) * -100).transpose(0, 1).flatten()
contrastive_loss = nn.functional.cross_entropy(logits.float(), target, reduction="sum")
# 7. compute diversity loss: \mathbf{L}_d
num_codevectors = self.config.num_codevectors_per_group * self.config.num_codevector_groups
diversity_loss = ((num_codevectors - codevector_perplexity) / num_codevectors) * mask_time_indices.sum()
# 8. \mathbf{L} = \mathbf{L}_m + \alpha * \mathbf{L}_d
loss = contrastive_loss + self.config.diversity_loss_weight * diversity_loss
if not return_dict:
if loss is not None:
return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
return Wav2Vec2ForPreTrainingOutput(
loss=loss,
projected_states=transformer_features,
projected_quantized_states=quantized_features,
codevector_perplexity=codevector_perplexity,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
contrastive_loss=contrastive_loss,
diversity_loss=diversity_loss,
)
@auto_docstring
| Wav2Vec2ForPreTraining |
python | great-expectations__great_expectations | great_expectations/data_context/data_context/ephemeral_data_context.py | {
"start": 909,
"end": 3567
} | class ____(AbstractDataContext):
"""Subclass of AbstractDataContext that uses runtime values to generate a temporary or in-memory DataContext.""" # noqa: E501 # FIXME CoP
def __init__(
self,
project_config: Union[DataContextConfig, Mapping],
runtime_environment: Optional[dict] = None,
user_agent_str: str | None = None,
) -> None:
"""EphemeralDataContext constructor
project_config: config for in-memory EphemeralDataContext
runtime_environment: a dictionary of config variables tha
override both those set in config_variables.yml and the environment
"""
self._project_config = self._init_project_config(project_config)
super().__init__(runtime_environment=runtime_environment, user_agent_str=user_agent_str)
@property
@override
def mode(self) -> Literal["ephemeral"]:
return "ephemeral"
@override
def _init_project_config(
self, project_config: Union[DataContextConfig, Mapping]
) -> DataContextConfig:
return EphemeralDataContext.get_or_create_data_context_config(project_config)
@override
def _init_variables(self) -> EphemeralDataContextVariables:
variables = EphemeralDataContextVariables(
config=self._project_config,
config_provider=self.config_provider,
)
return variables
@override
def _init_datasource_store(self) -> DatasourceStore:
from great_expectations.data_context.store.datasource_store import (
DatasourceStore,
)
store_name: str = "datasource_store" # Never explicitly referenced but adheres
# to the convention set by other internal Stores
store_backend: dict = {"class_name": "InMemoryStoreBackend"}
datasource_store = DatasourceStore(
store_name=store_name,
store_backend=store_backend,
)
return datasource_store
@public_api
def convert_to_file_context(self) -> FileDataContext:
"""Convert existing EphemeralDataContext into a FileDataContext.
Scaffolds a file-backed project structure in the current working directory.
Returns:
A FileDataContext with an updated config to reflect the state of the
current context.
"""
self._synchronize_fluent_datasources()
migrator = FileMigrator(
primary_stores=self.stores,
datasource_store=self._datasource_store,
variables=self.variables,
fluent_config=self.fluent_config,
)
return migrator.migrate()
| EphemeralDataContext |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 5269,
"end": 6495
} | class ____(RecognitionException):
def __init__(self, *args):
self.foundChar = None
if len(args) == 2:
self.foundChar = args[0]
scanner = args[1]
RecognitionException.__init__(self, "NoViableAlt",
scanner.getFilename(),
scanner.getLine(),
scanner.getColumn())
elif len(args) == 4:
self.foundChar = args[0]
fileName = args[1]
line = args[2]
column = args[3]
RecognitionException.__init__(self, "NoViableAlt",
fileName, line, column)
else:
RecognitionException.__init__(self, "NoViableAlt",
'', -1, -1)
def __str__(self):
mesg = "unexpected char: "
if self.foundChar >= ' ' and self.foundChar <= '~':
mesg += "'" + self.foundChar + "'"
elif self.foundChar:
mesg += "0x" + hex(ord(self.foundChar)).upper()[2:]
else:
mesg += "<None>"
return mesg
__repr__ = __str__
| NoViableAltForCharException |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 48883,
"end": 49414
} | class ____(TestCase):
def test_unique_choice_field(self):
class TestUniqueChoiceSerializer(serializers.ModelSerializer):
class Meta:
model = UniqueChoiceModel
fields = '__all__'
UniqueChoiceModel.objects.create(name='choice1')
serializer = TestUniqueChoiceSerializer(data={'name': 'choice1'})
assert not serializer.is_valid()
assert serializer.errors == {'name': ['unique choice model with this name already exists.']}
| Test5004UniqueChoiceField |
python | scrapy__scrapy | tests/test_command_parse.py | {
"start": 943,
"end": 1257
} | class ____(BaseSpider):
name = "asyncdef_asyncio_return"
async def parse(self, response):
await asyncio.sleep(0.2)
status = await get_from_asyncio_queue(response.status)
self.logger.info(f"Got response {{status}}")
return [{{'id': 1}}, {{'id': 2}}]
| AsyncDefAsyncioReturnSpider |
python | cython__cython | Cython/Debugger/Tests/test_libcython_in_gdb.py | {
"start": 10826,
"end": 11364
} | class ____(DebugTestCase):
def test_functions(self):
self.break_and_run('c = 2')
result = gdb.execute('print $cy_cname("b")', to_string=True)
assert re.search('__pyx_.*b', result), result
result = gdb.execute('print $cy_lineno()', to_string=True)
supposed_lineno = test_libcython.source_to_lineno['c = 2']
assert str(supposed_lineno) in result, (supposed_lineno, result)
result = gdb.execute('print $cy_cvalue("b")', to_string=True)
assert '= 1' in result
| TestFunctions |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/workspace/load_target.py | {
"start": 1073,
"end": 1271
} | class ____(WorkspaceLoadTarget):
paths: Sequence[str]
def create_origins(self) -> Sequence[CodeLocationOrigin]:
return location_origins_from_yaml_paths(self.paths)
| WorkspaceFileTarget |
python | PrefectHQ__prefect | tests/runner/test_runner.py | {
"start": 15850,
"end": 76602
} | class ____:
async def test_add_flows_to_runner(self, prefect_client: PrefectClient):
"""Runner.add should create a deployment for the flow passed to it"""
runner = Runner()
deployment_id_1 = await runner.add_flow(dummy_flow_1, __file__, interval=3600)
deployment_id_2 = await runner.add_flow(
dummy_flow_2, __file__, cron="* * * * *"
)
deployment_1 = await prefect_client.read_deployment(deployment_id_1)
deployment_2 = await prefect_client.read_deployment(deployment_id_2)
assert deployment_1 is not None
assert deployment_1.name == "test_runner"
assert deployment_1.schedules[0].schedule.interval == datetime.timedelta(
seconds=3600
)
assert deployment_2 is not None
assert deployment_2.name == "test_runner"
assert deployment_2.schedules[0].schedule.cron == "* * * * *"
async def test_add_flow_to_runner_always_updates_openapi_schema(
self, prefect_client: PrefectClient
):
"""Runner.add should create a deployment for the flow passed to it"""
runner = Runner()
@flow
def one(num: int):
pass
deployment_id = await runner.add_flow(one, name="test-openapi")
deployment = await prefect_client.read_deployment(deployment_id)
assert deployment.name == "test-openapi"
assert deployment.description == "None"
assert set(deployment.parameter_openapi_schema["properties"].keys()) == {"num"}
@flow(name="one")
def two(num: int):
"description now"
pass
deployment_id = await runner.add_flow(two, name="test-openapi")
deployment = await prefect_client.read_deployment(deployment_id)
assert deployment.name == "test-openapi"
assert deployment.description == "description now"
assert set(deployment.parameter_openapi_schema["properties"].keys()) == {"num"}
@flow(name="one")
def three(name: str):
pass
deployment_id = await runner.add_flow(three, name="test-openapi")
deployment = await prefect_client.read_deployment(deployment_id)
assert deployment.name == "test-openapi"
assert deployment.description is None
assert set(deployment.parameter_openapi_schema["properties"].keys()) == {"name"}
async def test_runner_deployment_updates_pull_steps(
self, prefect_client: PrefectClient, work_pool
):
@flow
def one(num: int):
pass
deployment = RunnerDeployment(
name="test-pullsteps",
flow_name="one",
work_pool_name=work_pool.name,
storage=_PullStepStorage(
pull_steps=[dict(name="step-one"), dict(name="step-two")]
),
)
deployment_id = await deployment.apply()
api_deployment = await prefect_client.read_deployment(deployment_id)
assert api_deployment.name == "test-pullsteps"
assert api_deployment.pull_steps == [
dict(name="step-one"),
dict(name="step-two"),
]
deployment = RunnerDeployment(
name="test-pullsteps",
flow_name="one",
work_pool_name=work_pool.name,
storage=_PullStepStorage(
pull_steps=[dict(name="step-one"), dict(name="step-two-b")]
),
)
deployment_id = await deployment.apply()
api_deployment = await prefect_client.read_deployment(deployment_id)
assert api_deployment.name == "test-pullsteps"
assert api_deployment.pull_steps == [
dict(name="step-one"),
dict(name="step-two-b"),
]
async def test_runner_deployment_clears_pull_steps_when_storage_removed(
self, prefect_client: PrefectClient, work_pool
):
"""Test that pull steps are cleared when storage is removed from a deployment.
This addresses issue #18335 where pull steps would persist after removing
storage, causing flow runs to fail.
"""
@flow
def test_flow():
pass
# Create deployment with storage
deployment_with_storage = RunnerDeployment(
name="test-clear-pullsteps",
flow_name="test_flow",
work_pool_name=work_pool.name,
storage=_PullStepStorage(
pull_steps=[dict(name="step-one"), dict(name="step-two")]
),
)
deployment_id = await deployment_with_storage.apply()
api_deployment = await prefect_client.read_deployment(deployment_id)
# Verify pull steps exist
assert api_deployment.pull_steps == [
dict(name="step-one"),
dict(name="step-two"),
]
# Update deployment without storage (simulating switch to Docker)
deployment_no_storage = RunnerDeployment(
name="test-clear-pullsteps",
flow_name="test_flow",
work_pool_name=work_pool.name,
# No storage - pull steps should be cleared
)
await deployment_no_storage.apply()
api_deployment = await prefect_client.read_deployment(deployment_id)
# Verify pull steps were cleared
assert api_deployment.pull_steps is None
@pytest.mark.parametrize(
"kwargs",
[
{**d1, **d2}
for d1, d2 in combinations(
[
{"interval": 3600},
{"cron": "* * * * *"},
{"rrule": "FREQ=MINUTELY"},
{
"schedules": [
DeploymentScheduleCreate(
schedule=CronSchedule(cron="* * * * *"), active=True
)
]
},
{"schedule": Cron("* * * * *")},
],
2,
)
],
)
async def test_add_flow_raises_on_multiple_schedule_parameters(self, kwargs):
with warnings.catch_warnings():
# `schedule` parameter is deprecated and will raise a warning
warnings.filterwarnings("ignore", category=DeprecationWarning)
expected_message = "Only one of interval, cron, rrule, schedule, or schedules can be provided."
runner = Runner()
with pytest.raises(ValueError, match=expected_message):
await runner.add_flow(dummy_flow_1, __file__, **kwargs)
async def test_add_deployments_to_runner(self, prefect_client: PrefectClient):
"""Runner.add_deployment should apply the deployment passed to it"""
runner = Runner()
deployment_1 = await dummy_flow_1.to_deployment(__file__, interval=3600)
deployment_2 = await dummy_flow_2.to_deployment(__file__, cron="* * * * *")
deployment_id_1 = await runner.add_deployment(deployment_1)
deployment_id_2 = await runner.add_deployment(deployment_2)
deployment_1 = await prefect_client.read_deployment(deployment_id_1)
deployment_2 = await prefect_client.read_deployment(deployment_id_2)
assert deployment_1 is not None
assert deployment_1.name == "test_runner"
assert deployment_1.schedules[0].schedule.interval == datetime.timedelta(
seconds=3600
)
assert deployment_2 is not None
assert deployment_2.name == "test_runner"
assert deployment_2.schedules[0].schedule.cron == "* * * * *"
async def test_runner_can_pause_schedules_on_stop(
self, prefect_client: PrefectClient, caplog
):
runner = Runner()
deployment_1 = await dummy_flow_1.to_deployment(__file__, interval=3600)
deployment_2 = await dummy_flow_2.to_deployment(__file__, cron="* * * * *")
await runner.add_deployment(deployment_1)
await runner.add_deployment(deployment_2)
deployment_1 = await prefect_client.read_deployment_by_name(
name="dummy-flow-1/test_runner"
)
deployment_2 = await prefect_client.read_deployment_by_name(
name="dummy-flow-2/test_runner"
)
assert not deployment_1.paused
assert not deployment_2.paused
await runner.start(run_once=True)
deployment_1 = await prefect_client.read_deployment_by_name(
name="dummy-flow-1/test_runner"
)
deployment_2 = await prefect_client.read_deployment_by_name(
name="dummy-flow-2/test_runner"
)
assert deployment_1.paused
assert deployment_2.paused
assert "Pausing all deployments" in caplog.text
assert "All deployments have been paused" in caplog.text
@pytest.mark.usefixtures("use_hosted_api_server")
async def test_runner_does_not_emit_heartbeats_if_not_set(
self,
prefect_client: PrefectClient,
mock_events_client: AssertingEventsClient,
):
runner = Runner()
deployment = await dummy_flow_1.to_deployment(__file__)
await runner.add_deployment(deployment)
await runner.start(run_once=True)
deployment = await prefect_client.read_deployment_by_name(
name="dummy-flow-1/test_runner"
)
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment.id
)
await runner.start(run_once=True)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
assert flow_run.state.is_completed()
heartbeat_events = list(
filter(
lambda e: e.event == "prefect.flow-run.heartbeat",
mock_events_client.events,
)
)
assert len(heartbeat_events) == 0
@pytest.mark.usefixtures("use_hosted_api_server")
async def test_runner_executes_flow_runs(
self,
prefect_client: PrefectClient,
mock_events_client: AssertingEventsClient,
):
runner = Runner(heartbeat_seconds=30)
deployment = await dummy_flow_1.to_deployment(__file__)
await runner.add_deployment(deployment)
await runner.start(run_once=True)
deployment = await prefect_client.read_deployment_by_name(
name="dummy-flow-1/test_runner"
)
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment.id
)
await runner.start(run_once=True)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
assert flow_run.state.is_completed()
heartbeat_events = list(
filter(
lambda e: e.event == "prefect.flow-run.heartbeat",
mock_events_client.events,
)
)
assert len(heartbeat_events) == 1
assert heartbeat_events[0].resource.id == f"prefect.flow-run.{flow_run.id}"
related = [dict(r.items()) for r in heartbeat_events[0].related]
assert related == [
{
"prefect.resource.id": f"prefect.deployment.{deployment.id}",
"prefect.resource.role": "deployment",
"prefect.resource.name": "test_runner",
},
{
"prefect.resource.id": f"prefect.flow.{flow_run.flow_id}",
"prefect.resource.role": "flow",
"prefect.resource.name": dummy_flow_1.name,
},
]
async def test_runner_does_not_duplicate_heartbeats(
self,
prefect_client: PrefectClient,
mock_events_client: AssertingEventsClient,
):
"""
Regression test for issue where multiple invocations of `execute_flow_run`
would result in multiple heartbeats being emitted for each flow run.
"""
deployment_id = await (await dummy_flow_1.to_deployment(__file__)).apply()
flow_run_1 = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
flow_run_2 = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
async with Runner(heartbeat_seconds=30, limit=None) as runner:
first_task = asyncio.create_task(runner.execute_flow_run(flow_run_1.id))
second_task = asyncio.create_task(runner.execute_flow_run(flow_run_2.id))
await asyncio.gather(first_task, second_task)
flow_run_1 = await prefect_client.read_flow_run(flow_run_id=flow_run_1.id)
assert flow_run_1.state
assert flow_run_1.state.is_completed()
flow_run_2 = await prefect_client.read_flow_run(flow_run_id=flow_run_2.id)
assert flow_run_2.state
assert flow_run_2.state.is_completed()
heartbeat_events = list(
filter(
lambda e: e.event == "prefect.flow-run.heartbeat",
mock_events_client.events,
)
)
assert len(heartbeat_events) == 2
assert {e.resource.id for e in heartbeat_events} == {
f"prefect.flow-run.{flow_run_1.id}",
f"prefect.flow-run.{flow_run_2.id}",
}
async def test_runner_sends_heartbeats_on_a_cadence(
self,
prefect_client: PrefectClient,
mock_events_client: AssertingEventsClient,
):
runner = Runner()
# Ain't I a stinker?
runner.heartbeat_seconds = 1
deployment_id = await (
await short_but_not_too_short.to_deployment(__file__)
).apply()
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
await runner.execute_flow_run(flow_run.id)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
assert flow_run.state.is_completed()
heartbeat_events = list(
filter(
lambda e: e.event == "prefect.flow-run.heartbeat",
mock_events_client.events,
)
)
# We should get at least 5 heartbeats since the flow should take about 5 seconds to run
assert len(heartbeat_events) > 5
    async def test_runner_heartbeats_include_deployment_version(
        self,
        prefect_client: PrefectClient,
        mock_events_client: AssertingEventsClient,
    ):
        """Heartbeat events carry deployment version metadata when the API
        returns a deployment with `version_id`/`version_info` populated."""
        runner = Runner(heartbeat_seconds=30)
        await runner.add_deployment(await dummy_flow_1.to_deployment(__file__))
        # Mock the client to return a DeploymentResponse with a version_id and
        # version_info, as would be the case if the deployment was created with
        # Prefect Cloud's experimental deployment versioning support.
        deployment = await prefect_client.read_deployment_by_name(
            name="dummy-flow-1/test_runner"
        )
        deployment.version_id = uuid.UUID("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
        deployment.version_info = VersionInfo(
            type="githubulous",
            version="1.2.3.4.5.6",
        )
        with mock.patch(
            "prefect.client.orchestration.PrefectClient.read_deployment"
        ) as mock_read_deployment:
            mock_read_deployment.return_value = deployment
            await prefect_client.create_flow_run_from_deployment(
                deployment_id=deployment.id
            )
            await runner.start(run_once=True)

        heartbeat_events: list[Event] = list(
            filter(
                lambda e: e.event == "prefect.flow-run.heartbeat",
                mock_events_client.events,
            )
        )
        assert len(heartbeat_events) == 1
        heartbeat = heartbeat_events[0]
        # The deployment-role related resource should carry the version fields.
        resource = heartbeat.resource_in_role["deployment"]
        assert resource["prefect.resource.id"] == f"prefect.deployment.{deployment.id}"
        assert (
            resource["prefect.deployment.version-id"]
            == "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
        )
        assert resource["prefect.deployment.version-type"] == "githubulous"
        assert resource["prefect.deployment.version"] == "1.2.3.4.5.6"
    async def test_runner_does_not_try_to_cancel_flow_run_if_no_process_id_is_found(
        self, prefect_client: PrefectClient
    ):
        """
        Regression test for https://github.com/PrefectHQ/prefect/issues/18106
        """
        # runner_1 executes the run; runner_2 observes the cancellation but owns
        # no process for the run, so it must not act on it.
        runner_1 = Runner()
        runner_2 = Runner()
        runner_2._mark_flow_run_as_cancelled = AsyncMock()
        runner_2._kill_process = AsyncMock()
        deployment_id = await runner_1.add_deployment(
            await tired_flow.to_deployment(__file__)
        )
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id
        )
        async with runner_1, runner_2:
            execute_task = asyncio.create_task(runner_1.execute_flow_run(flow_run.id))
            # Poll until the run is actually executing before requesting
            # cancellation.
            while True:
                await anyio.sleep(0.5)
                flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
                assert flow_run.state
                if flow_run.state.is_running():
                    break
            await prefect_client.set_flow_run_state(
                flow_run_id=flow_run.id,
                state=flow_run.state.model_copy(
                    update={"name": "Cancelling", "type": StateType.CANCELLING}
                ),
            )
            await execute_task
        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state.is_cancelled()
        # runner_2 never owned the process, so it must not have tried to kill
        # it or to mark the run as cancelled.
        runner_2._mark_flow_run_as_cancelled.assert_not_called()
        runner_2._kill_process.assert_not_called()
    @pytest.mark.usefixtures("use_hosted_api_server")
    async def test_runner_runs_on_cancellation_hooks_for_remotely_stored_flows(
        self,
        prefect_client: PrefectClient,
        caplog: pytest.LogCaptureFixture,
        in_temporary_runner_directory: None,
        temp_storage: MockStorage,
    ):
        """Cancelling a remotely-stored flow run executes its on_cancellation
        hooks; the hook logs a sentinel message asserted on below."""
        runner = Runner(query_seconds=1)
        # Flow source is pulled from mock storage at execution time.
        temp_storage.code = dedent(
            """\
            from time import sleep

            from prefect import flow
            from prefect.logging.loggers import flow_run_logger

            def on_cancellation(flow, flow_run, state):
                logger = flow_run_logger(flow_run, flow)
                logger.info("This flow was cancelled!")

            @flow(on_cancellation=[on_cancellation], log_prints=True)
            def cancel_flow(sleep_time: int = 100):
                sleep(sleep_time)
            """
        )
        deployment_id = await runner.add_flow(
            await flow.from_source(
                source=temp_storage, entrypoint="flows.py:cancel_flow"
            ),
            name=__file__,
        )
        async with runner:
            flow_run = await prefect_client.create_flow_run_from_deployment(
                deployment_id=deployment_id
            )
            execute_task = asyncio.create_task(runner.execute_flow_run(flow_run.id))
            # Need to wait for polling loop to pick up flow run and
            # start execution
            while True:
                await anyio.sleep(0.5)
                flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
                assert flow_run.state
                if flow_run.state.is_running():
                    break
            await prefect_client.set_flow_run_state(
                flow_run_id=flow_run.id,
                state=flow_run.state.model_copy(
                    update={"name": "Cancelling", "type": StateType.CANCELLING}
                ),
            )
            await execute_task
        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state.is_cancelled()
        # check to make sure on_cancellation hook was called
        assert "This flow was cancelled!" in caplog.text
    @pytest.mark.usefixtures("use_hosted_api_server")
    async def test_runner_warns_if_unable_to_load_cancellation_hooks(
        self,
        prefect_client: PrefectClient,
        caplog: pytest.LogCaptureFixture,
        in_temporary_runner_directory: None,
        temp_storage: MockStorage,
    ):
        """If the deployment is deleted before a run is cancelled, the runner
        still cancels the run but warns that the hooks could not be loaded."""
        runner = Runner(query_seconds=2)
        temp_storage.code = dedent(
            """\
            from time import sleep

            from prefect import flow
            from prefect.logging.loggers import flow_run_logger

            def on_cancellation(flow, flow_run, state):
                logger = flow_run_logger(flow_run, flow)
                logger.info("This flow was cancelled!")

            @flow(on_cancellation=[on_cancellation], log_prints=True)
            def cancel_flow(sleep_time: int = 100):
                sleep(sleep_time)
            """
        )
        deployment_id = await runner.add_flow(
            await flow.from_source(
                source=temp_storage, entrypoint="flows.py:cancel_flow"
            ),
            name=__file__,
        )
        async with runner:
            flow_run = await prefect_client.create_flow_run_from_deployment(
                deployment_id=deployment_id
            )
            execute_task = asyncio.create_task(runner.execute_flow_run(flow_run.id))
            # Need to wait for polling loop to pick up flow run and
            # start execution
            while True:
                await anyio.sleep(0.5)
                flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
                assert flow_run.state
                if flow_run.state.is_running():
                    break
            # Deleting the deployment makes the hook source unloadable.
            await prefect_client.delete_deployment(deployment_id=deployment_id)
            await prefect_client.set_flow_run_state(
                flow_run_id=flow_run.id,
                state=flow_run.state.model_copy(
                    update={"name": "Cancelling", "type": StateType.CANCELLING}
                ),
            )
            await execute_task
        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        # Cancellation hook should not have been called successfully
        # but the flow run should still be cancelled correctly
        assert flow_run.state.is_cancelled()
        assert "This flow was cancelled!" not in caplog.text
        assert (
            "Runner failed to retrieve flow to execute on_cancellation hooks for flow run"
            in caplog.text
        )
    @pytest.mark.usefixtures("use_hosted_api_server")
    async def test_runner_runs_on_crashed_hooks_for_remotely_stored_flows(
        self,
        prefect_client: PrefectClient,
        caplog: pytest.LogCaptureFixture,
        in_temporary_runner_directory: None,
        temp_storage: MockStorage,
    ):
        """A remotely-stored flow that kills its own process runs its
        on_crashed hooks and ends in a crashed state."""
        runner = Runner()
        # The stored flow SIGTERMs itself; the hook logs a sentinel message.
        temp_storage.code = dedent(
            """\
            import os
            import signal

            from prefect import flow
            from prefect.logging.loggers import flow_run_logger

            def on_crashed(flow, flow_run, state):
                logger = flow_run_logger(flow_run, flow)
                logger.info("This flow crashed!")

            @flow(on_crashed=[on_crashed], log_prints=True)
            def crashing_flow():
                print("Oh boy, here I go crashing again...")
                os.kill(os.getpid(), signal.SIGTERM)
            """
        )
        deployment_id = await runner.add_flow(
            await flow.from_source(
                source=temp_storage, entrypoint="flows.py:crashing_flow"
            ),
            name=__file__,
        )
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id
        )
        await runner.execute_flow_run(flow_run.id)
        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state
        assert flow_run.state.is_crashed()
        # check to make sure the on_crashed hook was called
        assert "This flow crashed!" in caplog.text
    @pytest.mark.parametrize(
        "exception,hook_type",
        [
            # Test various exceptions that can occur when loading flows
            (
                ScriptError(
                    user_exc=FileNotFoundError("File not found"), path="/missing.py"
                ),
                "crashed",
            ),
            (ValueError("Flow run does not have an associated deployment"), "crashed"),
            (RuntimeError("Unexpected error!"), "crashed"),
            # Also test cancellation hooks
            (
                ScriptError(
                    user_exc=FileNotFoundError("File not found"), path="/missing.py"
                ),
                "cancellation",
            ),
        ],
    )
    async def test_runner_handles_exceptions_in_hooks(
        self,
        exception,
        hook_type,
        caplog: pytest.LogCaptureFixture,
    ):
        """Test that exceptions during flow loading don't crash the runner"""
        # The hook methods are invoked directly; the runner is never started.
        runner = Runner()
        # Create a mock flow run
        mock_flow_run = MagicMock()
        mock_flow_run.id = "test-flow-run-id"
        mock_flow_run.deployment_id = "test-deployment-id"
        mock_flow_run.name = "test-flow-run"
        # Mock load_flow_from_flow_run to raise the exception
        with patch(
            "prefect.runner.runner.load_flow_from_flow_run", side_effect=exception
        ):
            # Run the appropriate hook method
            if hook_type == "crashed":
                state = Crashed(message="Test crash")
                await runner._run_on_crashed_hooks(mock_flow_run, state)
                expected_msg = (
                    "Runner failed to retrieve flow to execute on_crashed hooks"
                )
            else:
                state = Cancelling(message="Test cancellation")
                await runner._run_on_cancellation_hooks(mock_flow_run, state)
                expected_msg = (
                    "Runner failed to retrieve flow to execute on_cancellation hooks"
                )
            # Verify warning was logged with exception details
            assert expected_msg in caplog.text
            assert type(exception).__name__ in caplog.text
@pytest.mark.usefixtures("use_hosted_api_server")
async def test_runner_does_not_emit_heartbeats_for_single_flow_run_if_not_set(
self, prefect_client: PrefectClient, mock_events_client: AssertingEventsClient
):
runner = Runner()
deployment_id = await (await dummy_flow_1.to_deployment(__file__)).apply()
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
await runner.execute_flow_run(flow_run.id)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
assert flow_run.state.is_completed()
heartbeat_events = list(
filter(
lambda e: e.event == "prefect.flow-run.heartbeat",
mock_events_client.events,
)
)
assert len(heartbeat_events) == 0
@pytest.mark.usefixtures("use_hosted_api_server")
@pytest.mark.parametrize(
"dummy_flow",
[
dummy_flow_1,
ClassNameClassmethod.dummy_flow_classmethod,
ClassNameStaticmethod.dummy_flow_staticmethod,
],
)
async def test_runner_can_execute_a_single_flow_run(
self,
dummy_flow: Flow,
prefect_client: PrefectClient,
mock_events_client: AssertingEventsClient,
):
runner = Runner(heartbeat_seconds=30, limit=None)
deployment_id = await (await dummy_flow.to_deployment(__file__)).apply()
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
await runner.execute_flow_run(flow_run.id)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
assert flow_run.state.is_completed()
heartbeat_events = list(
filter(
lambda e: e.event == "prefect.flow-run.heartbeat",
mock_events_client.events,
)
)
assert len(heartbeat_events) == 1
assert heartbeat_events[0].resource.id == f"prefect.flow-run.{flow_run.id}"
related = [dict(r.items()) for r in heartbeat_events[0].related]
assert related == [
{
"prefect.resource.id": f"prefect.deployment.{deployment_id}",
"prefect.resource.role": "deployment",
"prefect.resource.name": "test_runner",
},
{
"prefect.resource.id": f"prefect.flow.{flow_run.flow_id}",
"prefect.resource.role": "flow",
"prefect.resource.name": dummy_flow.name,
},
]
@pytest.mark.usefixtures("use_hosted_api_server")
async def test_runner_respects_set_limit(
self, prefect_client: PrefectClient, caplog
):
async with Runner(limit=1) as runner:
deployment_id = await (await dummy_flow_1.to_deployment(__file__)).apply()
good_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
bad_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
runner._acquire_limit_slot(good_run.id)
await runner.execute_flow_run(bad_run.id)
assert "run limit reached" in caplog.text
flow_run = await prefect_client.read_flow_run(flow_run_id=bad_run.id)
assert flow_run.state.is_scheduled()
runner._release_limit_slot(good_run.id)
await runner.execute_flow_run(bad_run.id)
flow_run = await prefect_client.read_flow_run(flow_run_id=bad_run.id)
assert flow_run.state.is_completed()
    async def test_handles_spaces_in_sys_executable(self, monkeypatch, prefect_client):
        """
        Regression test for https://github.com/PrefectHQ/prefect/issues/10820
        """
        import sys

        # Fake a successful subprocess so no real flow process is spawned.
        mock_process = AsyncMock()
        mock_process.returncode = 0
        mock_process.pid = 4242
        mock_run_process_call = AsyncMock(
            return_value=mock_process,
        )
        monkeypatch.setattr(prefect.runner.runner, "run_process", mock_run_process_call)
        # An interpreter path containing a space must not be split on whitespace.
        monkeypatch.setattr(sys, "executable", "C:/Program Files/Python38/python.exe")
        runner = Runner()
        deployment_id = await (await dummy_flow_1.to_deployment(__file__)).apply()
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id
        )
        await runner._run_process(flow_run)
        # Previously the command would have been
        # ["C:/Program", "Files/Python38/python.exe", "-m", "prefect.engine"]
        assert mock_run_process_call.call_args[1]["command"] == [
            "C:/Program Files/Python38/python.exe",
            "-m",
            "prefect.engine",
        ]
    async def test_runner_sets_flow_run_env_var_with_dashes(
        self, monkeypatch, prefect_client
    ):
        """
        Regression test for https://github.com/PrefectHQ/prefect/issues/10851
        """
        env_var_value = None
        mock_process = AsyncMock()
        mock_process.returncode = 0
        mock_process.pid = 4242

        def capture_env_var(*args, **kwargs):
            # Record the env var handed to the subprocess instead of spawning one.
            nonlocal env_var_value
            nonlocal mock_process
            env_var_value = kwargs["env"].get("PREFECT__FLOW_RUN_ID")
            return mock_process

        mock_run_process_call = AsyncMock(side_effect=capture_env_var)
        monkeypatch.setattr(prefect.runner.runner, "run_process", mock_run_process_call)
        runner = Runner()
        deployment_id = await (await dummy_flow_1.to_deployment(__file__)).apply()
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id
        )
        await runner._run_process(flow_run)
        # The UUID must keep its dashes (str form), not the 32-char hex form.
        assert env_var_value == str(flow_run.id)
        assert env_var_value != flow_run.id.hex
@pytest.mark.usefixtures("use_hosted_api_server")
async def test_runner_runs_a_remotely_stored_flow(
self,
prefect_client: PrefectClient,
temp_storage: MockStorage,
):
runner = Runner()
deployment = await (
await flow.from_source(source=temp_storage, entrypoint="flows.py:test_flow")
).to_deployment(__file__)
deployment_id = await runner.add_deployment(deployment)
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
await runner.start(run_once=True)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
assert flow_run.state.is_completed()
    @pytest.mark.usefixtures("use_hosted_api_server")
    async def test_runner_caches_adhoc_pulls(self, prefect_client):
        """Ad hoc code pulls for the same deployment are cached between runs,
        so repeated flow runs do not re-pull the source."""
        runner = Runner()
        pull_code_spy = MagicMock()
        with tempfile.TemporaryDirectory() as temp_dir:
            storage = MockStorage(base_path=Path(temp_dir), pull_code_spy=pull_code_spy)
            deployment = await RunnerDeployment.afrom_storage(
                storage=storage,
                entrypoint="flows.py:test_flow",
                name=__file__,
            )
            deployment_id = await runner.add_deployment(deployment)
            await prefect_client.create_flow_run_from_deployment(
                deployment_id=deployment_id
            )
            await runner.start(run_once=True)
            # 1 for deployment creation, 1 for runner start up, 1 for ad hoc pull
            assert isinstance(runner._storage_objs[0], MockStorage)
            assert runner._storage_objs[0]._pull_code_spy is not None
            assert runner._storage_objs[0]._pull_code_spy.call_count == 3
            await prefect_client.create_flow_run_from_deployment(
                deployment_id=deployment_id
            )
            # Should be 3 because the ad hoc pull should have been cached
            assert runner._storage_objs[0]._pull_code_spy.call_count == 3
    @pytest.mark.usefixtures("use_hosted_api_server")
    async def test_runner_does_not_raise_on_duplicate_submission(self, prefect_client):
        """
        Regression test for https://github.com/PrefectHQ/prefect/issues/11093

        The runner has a race condition where it can try to borrow a limit slot
        that it already has. This test ensures that the runner does not raise
        an exception in this case.
        """
        async with Runner(pause_on_shutdown=False) as runner:
            deployment = RunnerDeployment.from_flow(
                flow=tired_flow,
                name=__file__,
            )
            deployment_id = await runner.add_deployment(deployment)
            flow_run = await prefect_client.create_flow_run_from_deployment(
                deployment_id=deployment_id
            )
            # acquire the limit slot and then try to borrow it again
            # during submission to simulate race condition
            runner._acquire_limit_slot(flow_run.id)
            await runner._get_and_submit_flow_runs()
            # shut down cleanly
            runner.started = False
            runner.stopping = True
            runner._cancelling_flow_run_ids.add(flow_run.id)
            await runner._cancel_run(flow_run)
    @pytest.mark.parametrize(
        "exit_code,help_message",
        [
            (-9, "This indicates that the process exited due to a SIGKILL signal"),
            (
                247,
                "This indicates that the process was terminated due to high memory usage.",
            ),
        ],
    )
    async def test_runner_logs_exit_code_help_message(
        self,
        exit_code: int,
        help_message: str,
        caplog: pytest.LogCaptureFixture,
        patch_run_process: MagicMock,
        prefect_client: PrefectClient,
        monkeypatch: pytest.MonkeyPatch,
        tmp_path: Path,
    ):
        """Known non-zero exit codes produce a helpful log record at the
        appropriate level."""
        # Change directory to avoid polluting the working directory
        monkeypatch.chdir(str(tmp_path))
        flow_id = await prefect_client.create_flow(
            flow=dummy_flow_1,
        )
        deployment_id = await prefect_client.create_deployment(
            flow_id=flow_id,
            name=f"test-runner-deployment-{uuid.uuid4()}",
            path=str(
                prefect.__development_base_path__
                / "tests"
                / "test-projects"
                / "import-project"
            ),
            entrypoint="my_module/flow.py:test_flow",
        )
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id,
        )
        # Force the (mocked) flow run process to exit with the given code.
        patch_run_process(returncode=exit_code)
        async with Runner() as runner:
            result = await runner.execute_flow_run(
                flow_run_id=flow_run.id,
            )
            assert result
            assert result.returncode == exit_code
        record = next(r for r in caplog.records if help_message in r.message)
        # -9 (SIGKILL) is logged at INFO; other known codes are logged at ERROR.
        if exit_code == -9:
            assert record.levelname == "INFO"
        else:
            assert record.levelname == "ERROR"
    @pytest.mark.skipif(
        sys.platform != "win32",
        reason="subprocess.CREATE_NEW_PROCESS_GROUP is only defined in Windows",
    )
    async def test_windows_process_worker_run_sets_process_group_creation_flag(
        self,
        patch_run_process: MagicMock,
        prefect_client: PrefectClient,
    ):
        """On Windows, the flow-run subprocess is started with
        CREATE_NEW_PROCESS_GROUP (presumably so the child can be signalled
        independently of the runner — TODO confirm against Runner._run_process)."""
        mock = patch_run_process()
        deployment = await dummy_flow_1.ato_deployment(__file__)
        deployment_id_coro = deployment.apply()
        if TYPE_CHECKING:
            assert isinstance(deployment_id_coro, Coroutine)
        deployment_id = await deployment_id_coro
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id
        )
        async with Runner() as runner:
            await runner.execute_flow_run(
                flow_run_id=flow_run.id,
            )
        mock.assert_awaited_once()
        (_, kwargs) = mock.call_args
        assert kwargs.get("creationflags") == mock.CREATE_NEW_PROCESS_GROUP
    @pytest.mark.skipif(
        sys.platform == "win32",
        reason=(
            "The asyncio.open_process_*.creationflags argument is only supported on Windows"
        ),
    )
    async def test_unix_process_worker_run_does_not_set_creation_flag(
        self, patch_run_process: MagicMock, prefect_client: PrefectClient
    ):
        """On non-Windows platforms no Windows-only creationflags are passed to
        the flow-run subprocess."""
        mock = patch_run_process()
        deployment = await dummy_flow_1.ato_deployment(__file__)
        deployment_id_coro = deployment.apply()
        if TYPE_CHECKING:
            assert isinstance(deployment_id_coro, Coroutine)
        deployment_id = await deployment_id_coro
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id
        )
        async with Runner() as runner:
            await runner.execute_flow_run(
                flow_run_id=flow_run.id,
            )
        mock.assert_awaited_once()
        (_, kwargs) = mock.call_args
        assert kwargs.get("creationflags") is None
    async def test_reschedule_flow_runs(
        self,
        monkeypatch: pytest.MonkeyPatch,
        prefect_client: PrefectClient,
    ):
        """`reschedule_current_flow_runs` returns an in-flight run to the
        Scheduled state."""
        # Create a flow run that will take a while to run
        deployment_id = await (await tired_flow.to_deployment(__file__)).apply()
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id
        )
        runner = Runner()
        # Run the flow run in a new process with a Runner
        execute_flow_run_task = asyncio.create_task(
            runner.execute_flow_run(flow_run_id=flow_run.id)
        )
        # Wait for the flow run to start
        while True:
            await anyio.sleep(0.5)
            flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
            assert flow_run.state
            if flow_run.state.is_running():
                break
        runner.reschedule_current_flow_runs()
        await execute_flow_run_task
        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state
        assert flow_run.state.is_scheduled()
async def test_runner_marks_flow_run_as_crashed_when_unabled_to_start_process(
self, prefect_client: PrefectClient, monkeypatch: pytest.MonkeyPatch
):
mock = AsyncMock(side_effect=Exception("Test error"))
monkeypatch.setattr(prefect.runner.runner, "run_process", mock)
runner = Runner()
deployment_id = await (await dummy_flow_1.to_deployment(__file__)).apply()
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
await runner.execute_flow_run(flow_run_id=flow_run.id)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
assert flow_run.state.is_crashed()
async def test_runner_handles_output_stream_errors(
self, prefect_client: PrefectClient, monkeypatch: pytest.MonkeyPatch
):
"""
Regression test for https://github.com/PrefectHQ/prefect/issues/17316
"""
# Simulate stream output error
mock = AsyncMock(side_effect=Exception("Test error"))
monkeypatch.setattr(processutils, "consume_process_output", mock)
runner = Runner()
deployment_id = await (await dummy_flow_1.to_deployment(__file__)).apply()
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
# Runner shouldn't crash
await runner.execute_flow_run(flow_run_id=flow_run.id)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
assert flow_run.state.is_completed()
async def test_runner_temp_dir_creation_is_idempotent(self):
"""
Test that Runner temp directory creation is idempotent
and handles the case where the directory already exists.
This tests the fix for the race condition where multiple flow runs
could try to create the same temp directory when the runner is
entered as a context manager.
"""
runner = Runner()
# Manually create the temp directory to simulate race condition
runner._tmp_dir.mkdir(parents=True, exist_ok=False)
# Now entering the runner context should not fail even though
# the directory already exists (with exist_ok=True fix)
async with runner:
assert runner.started
assert runner._tmp_dir.exists()
# Directory should be cleaned up after exiting context
assert not runner._tmp_dir.exists()
    async def test_runner_handles_deleted_flow_run_in_propose_crashed_state(
        self, prefect_client: PrefectClient, monkeypatch: pytest.MonkeyPatch
    ):
        """
        Regression test for https://github.com/PrefectHQ/prefect/issues/19141

        Ensures the runner doesn't crash when trying to propose a crashed state
        for a flow run that has been deleted.
        """
        from prefect.exceptions import ObjectNotFound

        runner = Runner()
        deployment_id = await (await dummy_flow_1.to_deployment(__file__)).apply()
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id
        )
        # Mock propose_state to raise ObjectNotFound (simulating a deleted flow run)
        original_propose_state = prefect.runner.runner.propose_state

        async def mock_propose_state(*args, **kwargs):
            # Only fail for the run under test; other proposals behave normally.
            if kwargs.get("flow_run_id") == flow_run.id:
                raise ObjectNotFound(
                    Exception("Flow run not found"), help_message="Flow run was deleted"
                )
            return await original_propose_state(*args, **kwargs)

        monkeypatch.setattr(prefect.runner.runner, "propose_state", mock_propose_state)
        async with runner:
            # This should not crash the runner
            await runner._propose_crashed_state(flow_run, "Test crash message")
            # Runner should continue running without crashing
    async def test_runner_handles_deleted_flow_run_in_mark_as_cancelled(
        self, prefect_client: PrefectClient, monkeypatch: pytest.MonkeyPatch
    ):
        """
        Regression test for https://github.com/PrefectHQ/prefect/issues/19141

        Ensures the runner doesn't crash when trying to mark a deleted flow run
        as cancelled.
        """
        from prefect.exceptions import ObjectNotFound

        runner = Runner()
        deployment_id = await (await dummy_flow_1.to_deployment(__file__)).apply()
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id
        )
        async with runner:
            # Mock set_flow_run_state to raise ObjectNotFound
            async def mock_set_state(*args, **kwargs):
                raise ObjectNotFound(
                    Exception("Flow run not found"), help_message="Flow run was deleted"
                )

            monkeypatch.setattr(runner._client, "set_flow_run_state", mock_set_state)
            # This should not crash the runner
            await runner._mark_flow_run_as_cancelled(flow_run)
            # Runner should continue running without crashing
    async def test_runner_handles_deleted_flow_run_after_completion(
        self, prefect_client: PrefectClient, monkeypatch: pytest.MonkeyPatch
    ):
        """
        Regression test for https://github.com/PrefectHQ/prefect/issues/19141

        Ensures the runner doesn't crash when trying to read a flow run after
        completion if the flow run has been deleted.
        """
        from prefect.exceptions import ObjectNotFound

        runner = Runner()
        deployment_id = await (await dummy_flow_1.to_deployment(__file__)).apply()
        flow_run = await prefect_client.create_flow_run_from_deployment(
            deployment_id=deployment_id
        )
        # Mock read_flow_run to raise ObjectNotFound after the process completes
        original_read = prefect_client.read_flow_run
        call_count = {"count": 0}

        async def mock_read_flow_run(*args, **kwargs):
            call_count["count"] += 1
            # First call succeeds (for initial read), second call fails (after completion)
            if call_count["count"] > 1:
                raise ObjectNotFound(
                    Exception("Flow run not found"), help_message="Flow run was deleted"
                )
            return await original_read(*args, **kwargs)

        monkeypatch.setattr(prefect_client, "read_flow_run", mock_read_flow_run)
        # This should not crash the runner - it should complete successfully
        await runner.execute_flow_run(flow_run_id=flow_run.id)
        # Runner should have handled the ObjectNotFound gracefully
class TestRunnerBundleExecution:
    """Tests for `Runner.execute_bundle`, which executes a serialized
    flow-run bundle (flow + run metadata) in a subprocess."""

    @pytest.fixture(autouse=True)
    def mock_subprocess_check_call(self, monkeypatch: pytest.MonkeyPatch):
        # Prevent real dependency installation (uv pip install) during bundle
        # execution; tests assert against this mock instead.
        mock_subprocess_check_call = AsyncMock()
        monkeypatch.setattr(subprocess, "check_call", mock_subprocess_check_call)
        return mock_subprocess_check_call

    async def test_basic(
        self, prefect_client: PrefectClient, mock_subprocess_check_call: AsyncMock
    ):
        """A bundle runs to completion and installs dependencies via uv."""
        runner = Runner()

        @flow(persist_result=True)
        def simple_flow():
            return "Be a simple kind of flow"

        flow_run = await prefect_client.create_flow_run(simple_flow)
        bundle = create_bundle_for_flow_run(simple_flow, flow_run)

        await runner.execute_bundle(bundle)

        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state
        assert flow_run.state.is_completed()
        assert await flow_run.state.result() == "Be a simple kind of flow"

        # Ensure that the dependencies are installed
        assert mock_subprocess_check_call.call_count == 1
        assert mock_subprocess_check_call.call_args[0][0][:3] == [
            uv.find_uv_bin(),
            "pip",
            "install",
        ]

    async def test_with_parameters(self, prefect_client: PrefectClient):
        """Bundle execution forwards the flow run's parameters to the flow."""
        runner = Runner()

        @flow(persist_result=True)
        def flow_with_parameters(x: int, y: str):
            return f"Be a simple kind of flow with {x} and {y}"

        flow_run = await prefect_client.create_flow_run(
            flow_with_parameters,
            parameters={"x": 42, "y": "hello"},
        )
        bundle = create_bundle_for_flow_run(flow_with_parameters, flow_run)

        await runner.execute_bundle(bundle)

        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state
        assert flow_run.state.is_completed()
        assert (
            await flow_run.state.result()
            == "Be a simple kind of flow with 42 and hello"
        )

    async def test_failed_flow(self, prefect_client: PrefectClient):
        """A flow that raises leaves the run in a failed state."""
        runner = Runner()

        @flow
        def total_and_utter_failure():
            raise ValueError("This flow failed!")

        flow_run = await prefect_client.create_flow_run(total_and_utter_failure)
        bundle = create_bundle_for_flow_run(total_and_utter_failure, flow_run)

        await runner.execute_bundle(bundle)

        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state
        assert flow_run.state.is_failed()

    async def test_cancel_bundle_execution(
        self, prefect_client: PrefectClient, caplog: pytest.LogCaptureFixture
    ):
        """Cancelling a bundle-executed run stops it and fires its
        on_cancellation hooks."""
        runner = Runner(query_seconds=1)

        @flow
        def flow_to_cancel():
            sleep(100)

        @flow_to_cancel.on_cancellation
        def da_hook(
            flow: "Flow[Any, Any]", flow_run: "FlowRun", state: "State[Any]"
        ):
            flow_run_logger(flow_run, flow).info("This flow was cancelled!")

        flow_run = await prefect_client.create_flow_run(flow_to_cancel)
        bundle = create_bundle_for_flow_run(flow_to_cancel, flow_run)

        execution_task = asyncio.create_task(runner.execute_bundle(bundle))

        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state is not None
        while not flow_run.state.is_running():
            assert not execution_task.done(), (
                "Execution ended earlier than expected"
            )
            # Sleep between polls so we don't hammer the API while waiting for
            # the run to start (matches the 0.5s polling cadence used by the
            # other polling loops in this module).
            await anyio.sleep(0.5)
            flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
            assert flow_run.state is not None

        await prefect_client.set_flow_run_state(
            flow_run_id=flow_run.id,
            state=Cancelling(),
        )

        await execution_task

        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state
        assert flow_run.state.is_cancelled()
        assert "This flow was cancelled!" in caplog.text

    async def test_crashed_bundle_execution(
        self, prefect_client: PrefectClient, caplog: pytest.LogCaptureFixture
    ):
        """A run killed by SIGTERM ends crashed and fires its on_crashed hooks."""
        runner = Runner()

        @flow
        def crashed_flow():
            os.kill(os.getpid(), signal.SIGTERM)

        @crashed_flow.on_crashed
        def da_hook(
            flow: "Flow[Any, Any]", flow_run: "FlowRun", state: "State[Any]"
        ):
            flow_run_logger(flow_run, flow).info("This flow crashed!")

        flow_run = await prefect_client.create_flow_run(crashed_flow)
        bundle = create_bundle_for_flow_run(crashed_flow, flow_run)

        await runner.execute_bundle(bundle)

        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state
        assert flow_run.state.is_crashed()
        assert "This flow crashed!" in caplog.text

    async def test_heartbeats_for_bundle_execution(
        self,
        prefect_client: PrefectClient,
        mock_events_client: AssertingEventsClient,
    ):
        """Bundle execution emits one heartbeat with the flow as a related
        resource (no deployment resource, since bundles have no deployment)."""
        runner = Runner(heartbeat_seconds=30)

        @flow
        def heartbeat_flow():
            return "a low, dull, quick sound — much such a sound as a watch makes when enveloped in cotton"

        flow_run = await prefect_client.create_flow_run(heartbeat_flow)
        bundle = create_bundle_for_flow_run(heartbeat_flow, flow_run)

        await runner.execute_bundle(bundle)

        flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
        assert flow_run.state
        assert flow_run.state.is_completed()

        heartbeat_events = list(
            filter(
                lambda e: e.event == "prefect.flow-run.heartbeat",
                mock_events_client.events,
            )
        )
        assert len(heartbeat_events) == 1
        assert heartbeat_events[0].resource.id == f"prefect.flow-run.{flow_run.id}"
        related = [dict(r.items()) for r in heartbeat_events[0].related]
        assert related == [
            {
                "prefect.resource.id": f"prefect.flow.{flow_run.flow_id}",
                "prefect.resource.role": "flow",
                "prefect.resource.name": heartbeat_flow.name,
            },
        ]
@pytest.mark.usefixtures("use_hosted_api_server")
async def test_runner_emits_cancelled_event(
mock_events_client: AssertingEventsClient,
reset_worker_events,
prefect_client: PrefectClient,
temp_storage: MockStorage,
in_temporary_runner_directory: None,
):
runner = Runner(query_seconds=1)
temp_storage.code = dedent(
"""\
from time import sleep
from prefect import flow
from prefect.logging.loggers import flow_run_logger
def on_cancellation(flow, flow_run, state):
logger = flow_run_logger(flow_run, flow)
logger.info("This flow was cancelled!")
@flow(on_cancellation=[on_cancellation], log_prints=True)
def cancel_flow(sleep_time: int = 100):
sleep(sleep_time)
"""
)
deployment_id = await runner.add_flow(
await flow.from_source(source=temp_storage, entrypoint="flows.py:cancel_flow"),
name=__file__,
tags=["test"],
)
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id,
tags=["flow-run-one"],
)
api_flow = await prefect_client.read_flow(flow_run.flow_id)
async with runner:
execute_task = asyncio.create_task(
runner.execute_flow_run(flow_run_id=flow_run.id)
)
while True:
await anyio.sleep(0.5)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
if flow_run.state.is_running():
break
await prefect_client.set_flow_run_state(
flow_run_id=flow_run.id,
state=flow_run.state.model_copy(
update={"name": "Cancelling", "type": StateType.CANCELLING}
),
)
await execute_task
cancelled_events = list(
filter(
lambda e: e.event == "prefect.runner.cancelled-flow-run",
mock_events_client.events,
)
)
assert len(cancelled_events) == 1
assert dict(cancelled_events[0].resource.items()) == {
"prefect.resource.id": f"prefect.runner.{slugify(runner.name)}",
"prefect.resource.name": runner.name,
"prefect.version": str(__version__),
}
related = [dict(r.items()) for r in cancelled_events[0].related]
assert related == [
{
"prefect.resource.id": f"prefect.deployment.{deployment_id}",
"prefect.resource.role": "deployment",
"prefect.resource.name": "test_runner",
},
{
"prefect.resource.id": f"prefect.flow.{api_flow.id}",
"prefect.resource.role": "flow",
"prefect.resource.name": api_flow.name,
},
{
"prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
"prefect.resource.role": "flow-run",
"prefect.resource.name": flow_run.name,
},
{
"prefect.resource.id": "prefect.tag.flow-run-one",
"prefect.resource.role": "tag",
},
{
"prefect.resource.id": "prefect.tag.test",
"prefect.resource.role": "tag",
},
]
async def test_runner_can_execute_instance_method_flow(
prefect_client: PrefectClient,
):
"""Test that instance method flows can be executed via multiprocessing."""
runner = Runner(query_seconds=1)
# Create an instance and add its flow method
flow_instance = ClassWithInstanceMethod(10)
deployment_id = await runner.add_flow(
flow_instance.instance_method_flow,
name="instance-method-test",
)
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id,
parameters={"x": 5},
)
async with runner:
await runner.execute_flow_run(flow_run_id=flow_run.id)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
assert flow_run.state.is_completed()
async def test_runner_runs_on_cancellation_hooks_for_instance_method_flows(
prefect_client: PrefectClient,
caplog: pytest.LogCaptureFixture,
):
"""Test that cancellation hooks work correctly for instance method flows."""
runner = Runner(query_seconds=1)
# Create an instance and add its flow method with cancellation hook
flow_instance = ClassWithCancellableFlow()
deployment_id = await runner.add_flow(
flow_instance.cancellable_flow,
name="cancellable-instance-method",
)
async with runner:
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
execute_task = asyncio.create_task(runner.execute_flow_run(flow_run.id))
# Wait for flow to start running
while True:
await anyio.sleep(0.5)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
if flow_run.state.is_running():
break
# Cancel the flow run
await prefect_client.set_flow_run_state(
flow_run_id=flow_run.id,
state=flow_run.state.model_copy(
update={"name": "Cancelling", "type": StateType.CANCELLING}
),
)
await execute_task
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state.is_cancelled()
assert "Instance method flow was cancelled!" in caplog.text
async def test_runner_runs_on_crashed_hooks_for_instance_method_flows(
prefect_client: PrefectClient,
caplog: pytest.LogCaptureFixture,
):
"""Test that crashed hooks work correctly for instance method flows."""
runner = Runner()
# Create an instance and add its flow method with crashed hook
flow_instance = ClassWithCrashingFlow()
deployment_id = await runner.add_flow(
flow_instance.crashing_flow,
name="crashing-instance-method",
)
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
await runner.execute_flow_run(flow_run.id)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
assert flow_run.state.is_crashed()
assert "Instance method flow crashed!" in caplog.text
| TestRunner |
python | TheAlgorithms__Python | graphs/graph_list.py | {
"start": 230,
"end": 6550
} | class ____[T]:
"""
Adjacency List type Graph Data Structure that accounts for directed and undirected
Graphs. Initialize graph object indicating whether it's directed or undirected.
Directed graph example:
>>> d_graph = GraphAdjacencyList()
>>> print(d_graph)
{}
>>> d_graph.add_edge(0, 1)
{0: [1], 1: []}
>>> d_graph.add_edge(1, 2).add_edge(1, 4).add_edge(1, 5)
{0: [1], 1: [2, 4, 5], 2: [], 4: [], 5: []}
>>> d_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7)
{0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
>>> d_graph
{0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
>>> print(repr(d_graph))
{0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
Undirected graph example:
>>> u_graph = GraphAdjacencyList(directed=False)
>>> u_graph.add_edge(0, 1)
{0: [1], 1: [0]}
>>> u_graph.add_edge(1, 2).add_edge(1, 4).add_edge(1, 5)
{0: [1], 1: [0, 2, 4, 5], 2: [1], 4: [1], 5: [1]}
>>> u_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7)
{0: [1, 2], 1: [0, 2, 4, 5], 2: [1, 0, 6, 7], 4: [1], 5: [1], 6: [2], 7: [2]}
>>> u_graph.add_edge(4, 5)
{0: [1, 2],
1: [0, 2, 4, 5],
2: [1, 0, 6, 7],
4: [1, 5],
5: [1, 4],
6: [2],
7: [2]}
>>> print(u_graph)
{0: [1, 2],
1: [0, 2, 4, 5],
2: [1, 0, 6, 7],
4: [1, 5],
5: [1, 4],
6: [2],
7: [2]}
>>> print(repr(u_graph))
{0: [1, 2],
1: [0, 2, 4, 5],
2: [1, 0, 6, 7],
4: [1, 5],
5: [1, 4],
6: [2],
7: [2]}
>>> char_graph = GraphAdjacencyList(directed=False)
>>> char_graph.add_edge('a', 'b')
{'a': ['b'], 'b': ['a']}
>>> char_graph.add_edge('b', 'c').add_edge('b', 'e').add_edge('b', 'f')
{'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']}
>>> char_graph
{'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']}
"""
def __init__(self, directed: bool = True) -> None:
"""
Parameters:
directed: (bool) Indicates if graph is directed or undirected. Default is True.
"""
self.adj_list: dict[T, list[T]] = {} # dictionary of lists
self.directed = directed
def add_edge(
self, source_vertex: T, destination_vertex: T
) -> GraphAdjacencyList[T]:
"""
Connects vertices together. Creates and Edge from source vertex to destination
vertex.
Vertices will be created if not found in graph
"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(source_vertex)
self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
self.adj_list[source_vertex] = [destination_vertex]
self.adj_list[destination_vertex] = [source_vertex]
# For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
elif source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
self.adj_list[source_vertex] = [destination_vertex]
self.adj_list[destination_vertex] = []
return self
def __repr__(self) -> str:
return pformat(self.adj_list)
| GraphAdjacencyList |
python | huggingface__transformers | src/transformers/models/xcodec/modeling_xcodec.py | {
"start": 1960,
"end": 2262
} | class ____(ModelOutput):
"""
Args:
audio_values (`torch.FloatTensor` of shape `(batch_size, channels, num_samples)`, *optional*):
Decoded audio values obtained using the decoder part of Xcodec.
"""
audio_values: Optional[torch.FloatTensor] = None
| XcodecDecoderOutput |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/operators/test_hive.py | {
"start": 2198,
"end": 3891
} | class ____(TestHiveEnvironment):
def test_hiveconf_jinja_translate(self):
hql = "SELECT ${num_col} FROM ${hiveconf:table};"
op = HiveOperator(hiveconf_jinja_translate=True, task_id="dry_run_basic_hql", hql=hql, dag=self.dag)
op.prepare_template()
assert op.hql == "SELECT {{ num_col }} FROM {{ table }};"
def test_hiveconf(self):
hql = "SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});"
op = HiveOperator(
hiveconfs={"table": "static_babynames", "day": "{{ ds }}"},
task_id="dry_run_basic_hql",
hql=hql,
dag=self.dag,
)
op.prepare_template()
assert op.hql == "SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});"
@mock.patch("airflow.providers.apache.hive.operators.hive.HiveOperator.hook", mock.MagicMock())
def test_mapred_job_name(self, mock_hook):
op = HiveOperator(task_id="test_mapred_job_name", hql=self.hql, dag=self.dag)
fake_run_id = "test_mapred_job_name"
fake_logical_date = timezone.datetime(2018, 6, 19)
fake_ti = TaskInstance(task=op)
fake_ti.dag_run = DagRun(run_id=fake_run_id, logical_date=fake_logical_date)
fake_ti.hostname = "fake_hostname"
fake_context = {"ti": fake_ti}
op.execute(fake_context)
assert (
"Airflow HiveOperator task for "
f"{fake_ti.hostname}.{self.dag.dag_id}.{op.task_id}.{fake_logical_date.isoformat()}"
== mock_hook.mapred_job_name
)
@pytest.mark.skipif(
"AIRFLOW_RUNALL_TESTS" not in os.environ, reason="Skipped because AIRFLOW_RUNALL_TESTS is not set"
)
| HiveOperatorTest |
python | explosion__spaCy | spacy/lang/sl/__init__.py | {
"start": 255,
"end": 505
} | class ____(BaseDefaults):
stop_words = STOP_WORDS
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
prefixes = TOKENIZER_PREFIXES
infixes = TOKENIZER_INFIXES
suffixes = TOKENIZER_SUFFIXES
lex_attr_getters = LEX_ATTRS
| SlovenianDefaults |
python | ZoranPandovski__al-go-rithms | machine_learning/cluster_analysis/dbscan/python/DBScan.py | {
"start": 63,
"end": 2855
} | class ____:
def __init__(self, points, eps, min_points):
"""
DBScan is initialized by passing a list of points, the eps, and
a minimum number of points per cluster.
Args:
param1: The dataset of points to be analyzed
param2: The eps neighborhood of a point
param3: The minimum number of elements per cluster.
"""
self._points = points
self._clusters = 0 # initial clusters
for point in self._points:
if point.is_visited():
continue
point.visit()
neighbors_point = self.find_neighbours(point, eps)
if len(neighbors_point) < min_points:
point.set_cluster(-1) # a noise point
else:
self._clusters = self._clusters + 1
self.expand_cluster(point, neighbors_point,
self._clusters, eps, min_points)
def expand_cluster(self, point, neighbors_points,
cluster, eps, min_points):
"""
This method is resposible for expanding the neighbors of a point.
Args:
param1: Point itself
param2: The actual cluster being of that points
param3: The eps neighborhood of a point
param4: The minimum number of elements per cluster
"""
point.set_cluster(cluster)
for p in neighbors_points:
if not p.is_visited():
p.visit()
neighbors_p = self.find_neighbours(p, eps)
if len(neighbors_p) >= min_points:
neighbors_points.extend(neighbors_p)
if p.get_cluster() == 0:
p.set_cluster(cluster)
def find_neighbours(self, point, eps):
"""
This method is resposible for finding the neighbors of a points.
The metric being used is the Euclidean distance.
Args:
param1: Point itself
param2: The eps neighborhood of a point
"""
x, y = point.get_values()
neighbors = []
for p in self._points:
m, n = p.get_values()
distance = ((m - x) * (m - x) + (n - y) * (n - y))
distance = sqrt(distance)
if distance <= eps:
neighbors.append(p)
return neighbors
def get_clusters(self):
"""
This method is resposible for returning the clusters found, and their
respective elements.
"""
clusters = [[] for i in range(0, self._clusters)]
for point in self._points:
if (point.get_cluster() != -1):
clusters[point.get_cluster() - 1].append(point)
return clusters
# DBScan.py
| DBScan |
python | astropy__astropy | astropy/table/table.py | {
"start": 12973,
"end": 13979
} | class ____(MetaAttribute):
"""
Descriptor to define a custom attribute for a Table subclass.
The value of the ``TableAttribute`` will be stored in a dict named
``__attributes__`` that is stored in the table ``meta``. The attribute
can be accessed and set in the usual way, and it can be provided when
creating the object.
Defining an attribute by this mechanism ensures that it will persist if
the table is sliced or serialized, for example as a pickle or ECSV file.
See the `~astropy.utils.metadata.MetaAttribute` documentation for additional
details.
Parameters
----------
default : object
Default value for attribute
Examples
--------
>>> from astropy.table import Table, TableAttribute
>>> class MyTable(Table):
... identifier = TableAttribute(default=1)
>>> t = MyTable(identifier=10)
>>> t.identifier
10
>>> t.meta
{'__attributes__': {'identifier': 10}}
"""
| TableAttribute |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_context.py | {
"start": 3871,
"end": 6608
} | class ____:
def test_context_to_airflow_vars_empty_context(self):
assert context_to_airflow_vars({}) == {}
def test_context_to_airflow_vars_all_context(self, create_runtime_ti):
task = BaseOperator(
task_id="test_context_vars",
owner=["owner1", "owner2"],
email="email1@test.com",
)
rti = create_runtime_ti(
task=task,
dag_id="dag_id",
run_id="dag_run_id",
logical_date="2017-05-21T00:00:00Z",
try_number=1,
)
context = rti.get_template_context()
assert context_to_airflow_vars(context) == {
"airflow.ctx.dag_id": "dag_id",
"airflow.ctx.logical_date": "2017-05-21T00:00:00+00:00",
"airflow.ctx.task_id": "test_context_vars",
"airflow.ctx.dag_run_id": "dag_run_id",
"airflow.ctx.try_number": "1",
"airflow.ctx.dag_owner": "owner1,owner2",
"airflow.ctx.dag_email": "email1@test.com",
}
assert context_to_airflow_vars(context, in_env_var_format=True) == {
"AIRFLOW_CTX_DAG_ID": "dag_id",
"AIRFLOW_CTX_LOGICAL_DATE": "2017-05-21T00:00:00+00:00",
"AIRFLOW_CTX_TASK_ID": "test_context_vars",
"AIRFLOW_CTX_TRY_NUMBER": "1",
"AIRFLOW_CTX_DAG_RUN_ID": "dag_run_id",
"AIRFLOW_CTX_DAG_OWNER": "owner1,owner2",
"AIRFLOW_CTX_DAG_EMAIL": "email1@test.com",
}
def test_context_to_airflow_vars_from_policy(self):
with mock.patch("airflow.settings.get_airflow_context_vars") as mock_method:
airflow_cluster = "cluster-a"
mock_method.return_value = {"airflow_cluster": airflow_cluster}
context_vars = context_to_airflow_vars({})
assert context_vars["airflow.ctx.airflow_cluster"] == airflow_cluster
context_vars = context_to_airflow_vars({}, in_env_var_format=True)
assert context_vars["AIRFLOW_CTX_AIRFLOW_CLUSTER"] == airflow_cluster
with mock.patch("airflow.settings.get_airflow_context_vars") as mock_method:
mock_method.return_value = {"airflow_cluster": [1, 2]}
with pytest.raises(TypeError) as error:
context_to_airflow_vars({})
assert str(error.value) == "value of key <airflow_cluster> must be string, not <class 'list'>"
with mock.patch("airflow.settings.get_airflow_context_vars") as mock_method:
mock_method.return_value = {1: "value"}
with pytest.raises(TypeError) as error:
context_to_airflow_vars({})
assert str(error.value) == "key <1> must be string"
| TestAirflowContextHelpers |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_integration.py | {
"start": 2082,
"end": 5954
} | class ____:
"""Test against real Dagster symbols to ensure they work in practice."""
def setup_method(self):
"""Set up test fixtures."""
self.runner = CliRunner()
@pytest.mark.parametrize(
"symbol",
[
"dagster.asset",
"dagster.op",
"dagster.job",
"dagster.resource",
"dagster.Config",
"dagster.DagsterInstance",
],
)
def test_check_docstrings_real_dagster_symbols(self, symbol):
"""Test checking docstrings for real Dagster symbols."""
result = self.runner.invoke(main, ["check", "docstrings", "--symbol", symbol])
# Should complete successfully (exit code 0 or 1 for validation errors)
assert result.exit_code in [0, 1]
# Should either validate the symbol or show it's excluded
assert (
f"Validating docstring for: {symbol}" in result.output
or f"Symbol '{symbol}' is in the exclude list" in result.output
)
# Should show some result (valid or invalid or excluded)
assert (
"✓" in result.output
or "✗" in result.output
or "ERROR" in result.output
or "WARNING" in result.output
or "excluded" in result.output
)
def test_check_docstrings_dagster_package(self):
"""Test checking docstrings for the main dagster package."""
result = self.runner.invoke(main, ["check", "docstrings", "--package", "dagster"])
# Should complete (may have validation errors but should not crash)
assert result.exit_code in [0, 1]
assert "Validating" in result.output
assert "public symbols in dagster" in result.output
assert "Summary:" in result.output
@pytest.mark.parametrize(
"package",
[
"dagster._core.definitions",
"dagster._core.types",
],
)
def test_check_docstrings_smaller_packages(self, package):
"""Test checking docstrings for smaller packages."""
result = self.runner.invoke(main, ["check", "docstrings", "--package", package])
# Should complete (may have validation errors but should not crash)
assert result.exit_code in [0, 1]
assert "Validating" in result.output
assert f"public symbols in {package}" in result.output
assert "Summary:" in result.output
def test_ls_symbols_dagster_package(self):
"""Test listing symbols for the main dagster package."""
result = self.runner.invoke(main, ["ls", "symbols", "--package", "dagster"])
# Should complete successfully
assert result.exit_code == 0
# Should have some output (at least one symbol)
lines = result.output.strip().split("\n")
assert len(lines) > 0
assert all(line.startswith("dagster") for line in lines if line.strip())
# Should contain some @public-decorated dagster symbols
output = result.output
assert "dagster.Component" in output
assert "dagster.ComponentLoadContext" in output
assert "dagster.definitions" in output
@pytest.mark.parametrize(
"package",
[
"dagster._core.definitions",
"dagster._core.types",
],
)
def test_ls_symbols_other_packages(self, package):
"""Test listing symbols for other packages."""
result = self.runner.invoke(main, ["ls", "symbols", "--package", package])
# Should complete successfully
assert result.exit_code == 0
# Output may be empty for packages with no @public symbols, which is valid
lines = [line for line in result.output.strip().split("\n") if line.strip()]
if len(lines) > 0:
assert all(line.startswith(package) for line in lines)
| TestRealDagsterSymbols |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/designerExample.py | {
"start": 604,
"end": 1122
} | class ____(TemplateBaseClass):
def __init__(self):
TemplateBaseClass.__init__(self)
self.setWindowTitle('pyqtgraph example: Qt Designer')
# Create the main window
self.ui = WindowTemplate()
self.ui.setupUi(self)
self.ui.plotBtn.clicked.connect(self.plot)
self.show()
def plot(self):
self.ui.plot.plot(np.random.normal(size=100), clear=True)
win = MainWindow()
if __name__ == '__main__':
pg.exec()
| MainWindow |
python | networkx__networkx | networkx/algorithms/components/tests/test_semiconnected.py | {
"start": 68,
"end": 1792
} | class ____:
def test_undirected(self):
pytest.raises(nx.NetworkXNotImplemented, nx.is_semiconnected, nx.Graph())
pytest.raises(nx.NetworkXNotImplemented, nx.is_semiconnected, nx.MultiGraph())
def test_empty(self):
pytest.raises(nx.NetworkXPointlessConcept, nx.is_semiconnected, nx.DiGraph())
pytest.raises(
nx.NetworkXPointlessConcept, nx.is_semiconnected, nx.MultiDiGraph()
)
def test_single_node_graph(self):
G = nx.DiGraph()
G.add_node(0)
assert nx.is_semiconnected(G)
def test_path(self):
G = nx.path_graph(100, create_using=nx.DiGraph())
assert nx.is_semiconnected(G)
G.add_edge(100, 99)
assert not nx.is_semiconnected(G)
def test_cycle(self):
G = nx.cycle_graph(100, create_using=nx.DiGraph())
assert nx.is_semiconnected(G)
G = nx.path_graph(100, create_using=nx.DiGraph())
G.add_edge(0, 99)
assert nx.is_semiconnected(G)
def test_tree(self):
G = nx.DiGraph()
G.add_edges_from(
chain.from_iterable([(i, 2 * i + 1), (i, 2 * i + 2)] for i in range(100))
)
assert not nx.is_semiconnected(G)
def test_dumbbell(self):
G = nx.cycle_graph(100, create_using=nx.DiGraph())
G.add_edges_from((i + 100, (i + 1) % 100 + 100) for i in range(100))
assert not nx.is_semiconnected(G) # G is disconnected.
G.add_edge(100, 99)
assert nx.is_semiconnected(G)
def test_alternating_path(self):
G = nx.DiGraph(
chain.from_iterable([(i, i - 1), (i, i + 1)] for i in range(0, 100, 2))
)
assert not nx.is_semiconnected(G)
| TestIsSemiconnected |
python | bokeh__bokeh | src/bokeh/models/widgets/buttons.py | {
"start": 2866,
"end": 3749
} | class ____(Widget, ButtonLike):
''' A base class that defines common properties for all button types.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
label = Either(Instance(DOMNode), String, default="Button", help="""
Either HTML or plain text label for the button to display.
""")
icon = Nullable(Instance(Icon), help="""
An optional image appearing to the left of button's text. An instance of
:class:`~bokeh.models.Icon` (such as :class:`~bokeh.models.BuiltinIcon`,
:class:`~bokeh.models.SVGIcon`, or :class:`~bokeh.models.TablerIcon`).`
""")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
| AbstractButton |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 10872,
"end": 11027
} | class ____:
def m0(self):
return A24.tito(self.m1())
def m1(self):
return 0
@staticmethod
def tito(a):
return a
| A24 |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/random.py | {
"start": 984,
"end": 3889
} | class ____(Random, abc.ABC):
"""A subclass of Random designed to expose the seed it was initially
provided with."""
def __init__(self, *, note_method_calls: bool) -> None:
self._note_method_calls = note_method_calls
def __deepcopy__(self, table):
return self.__copy__()
@abc.abstractmethod
def seed(self, seed):
raise NotImplementedError
@abc.abstractmethod
def getstate(self):
raise NotImplementedError
@abc.abstractmethod
def setstate(self, state):
raise NotImplementedError
@abc.abstractmethod
def _hypothesis_do_random(self, method, kwargs):
raise NotImplementedError
def _hypothesis_log_random(self, method, kwargs, result):
if not (self._note_method_calls and should_note()):
return
args, kwargs = convert_kwargs(method, kwargs)
argstr = ", ".join(
list(map(repr, args)) + [f"{k}={v!r}" for k, v in kwargs.items()]
)
report(f"{self!r}.{method}({argstr}) -> {result!r}")
RANDOM_METHODS = [
name
for name in [
"_randbelow",
"betavariate",
"binomialvariate",
"choice",
"choices",
"expovariate",
"gammavariate",
"gauss",
"getrandbits",
"lognormvariate",
"normalvariate",
"paretovariate",
"randint",
"random",
"randrange",
"sample",
"shuffle",
"triangular",
"uniform",
"vonmisesvariate",
"weibullvariate",
"randbytes",
]
if hasattr(Random, name)
]
# Fake shims to get a good signature
def getrandbits(self, n: int) -> int: # type: ignore
raise NotImplementedError
def random(self) -> float: # type: ignore
raise NotImplementedError
def _randbelow(self, n: int) -> int: # type: ignore
raise NotImplementedError
STUBS = {f.__name__: f for f in [getrandbits, random, _randbelow]}
SIGNATURES: dict[str, inspect.Signature] = {}
def sig_of(name):
try:
return SIGNATURES[name]
except KeyError:
pass
target = getattr(Random, name)
result = inspect.signature(STUBS.get(name, target))
SIGNATURES[name] = result
return result
def define_copy_method(name):
target = getattr(Random, name)
def implementation(self, **kwargs):
result = self._hypothesis_do_random(name, kwargs)
self._hypothesis_log_random(name, kwargs, result)
return result
sig = inspect.signature(STUBS.get(name, target))
result = define_function_signature(target.__name__, target.__doc__, sig)(
implementation
)
result.__module__ = __name__
result.__qualname__ = "HypothesisRandom." + result.__name__
setattr(HypothesisRandom, name, result)
for r in RANDOM_METHODS:
define_copy_method(r)
@dataclass(slots=True, frozen=False)
| HypothesisRandom |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/common/test_dagbag.py | {
"start": 1084,
"end": 3065
} | class ____:
"""Tests to ensure that DagBag is instantiated only once per app lifecycle."""
dagbag_call_counter = {"count": 0}
def setup(self):
clear_db_runs()
clear_db_dags()
clear_db_serialized_dags()
def teardown(self):
clear_db_runs()
clear_db_dags()
clear_db_serialized_dags()
@pytest.fixture(autouse=True)
def patch_dagbag_once_before_app(self):
"""Patch DagBag once before app is created, and reset counter."""
self.dagbag_call_counter["count"] = 0
from airflow.models.dagbag import DBDagBag as RealDagBag
def factory(*args, **kwargs):
self.dagbag_call_counter["count"] += 1
return RealDagBag(*args, **kwargs)
with mock.patch("airflow.api_fastapi.common.dagbag.DBDagBag", side_effect=factory):
purge_cached_app()
yield
def test_dagbag_used_as_singleton_in_dependency(self, session, dag_maker, test_client):
"""
Ensure DagBag is created only once and reused across multiple API requests.
This test sets up a single DAG, patches the DagBag constructor to track instantiation count,
and verifies that two calls to the `/api/v2/dags/{dag_id}` endpoint both return 200 OK,
while only one DagBag instance is created.
This validates that the FastAPI DagBag dependency correctly resolves to app.state.dag_bag,
maintaining singleton behavior instead of creating a new DagBag per request.
"""
dag_id = "dagbag_singleton_test"
with dag_maker(dag_id=dag_id, session=session, serialized=True):
BaseOperator(task_id="test_task")
session.commit()
resp1 = test_client.get(f"/api/v2/dags/{dag_id}")
assert resp1.status_code == 200
resp2 = test_client.get(f"/api/v2/dags/{dag_id}")
assert resp2.status_code == 200
assert self.dagbag_call_counter["count"] == 1
| TestDagBagSingleton |
python | getsentry__sentry | src/sentry/snuba/metrics/query_builder.py | {
"start": 17923,
"end": 22692
} | class ____:
"""
Class meant to serve as a thin layer that converts API request params to the fields necessary to
instantiate an instance of `MetricsQuery`
Adapted from [`sentry.snuba.sessions_v2`] and meant to keep consistency in naming between
sessions v2 and metrics APIs.
"""
def __init__(
self,
projects,
query_params,
allow_mri: bool = False,
paginator_kwargs: dict | None = None,
):
self._projects = projects
paginator_kwargs = paginator_kwargs or {}
self.query = query_params.get("query", "")
self.groupby = [
MetricGroupByField(groupby_col) for groupby_col in query_params.getlist("groupBy", [])
]
self.fields = [
parse_field(
key,
allow_mri=allow_mri,
)
for key in query_params.getlist("field", [])
]
self.orderby = self._parse_orderby(query_params, allow_mri)
self.limit: Limit | None = self._parse_limit(paginator_kwargs)
self.offset: Offset | None = self._parse_offset(paginator_kwargs)
self.having: ConditionGroup | None = query_params.getlist("having")
self.where = parse_conditions(
self.query, projects, environments=query_params.getlist("environment")
)
start, end, rollup = get_date_range(query_params)
self.rollup = rollup
self.start = start
self.end = end
self.include_series = query_params.get("includeSeries", "1") == "1"
self.include_totals = query_params.get("includeTotals", "1") == "1"
def to_metrics_query(self) -> DeprecatingMetricsQuery:
return DeprecatingMetricsQuery(
org_id=org_id_from_projects(self._projects),
project_ids=[project.id for project in self._projects],
include_totals=self.include_totals,
include_series=self.include_series,
select=self.fields,
start=self.start,
end=self.end,
where=self.where,
having=self.having,
groupby=self.groupby,
orderby=self.orderby,
limit=self.limit,
offset=self.offset,
granularity=Granularity(self.rollup),
)
@staticmethod
def _parse_orderby(query_params, allow_mri: bool = False):
orderbys = query_params.getlist("orderBy", [])
if not orderbys:
return None
orderby_list = []
for orderby in orderbys:
direction = Direction.ASC
if orderby[0] == "-":
orderby = orderby[1:]
direction = Direction.DESC
field = parse_field(orderby, allow_mri=allow_mri)
orderby_list.append(MetricsOrderBy(field=field, direction=direction))
return orderby_list
@staticmethod
def _parse_limit(paginator_kwargs) -> Limit | None:
if "limit" not in paginator_kwargs:
return None
return Limit(paginator_kwargs["limit"])
@staticmethod
def _parse_offset(paginator_kwargs) -> Offset | None:
if "offset" not in paginator_kwargs:
return None
return Offset(paginator_kwargs["offset"])
def get_date_range(params: Mapping) -> tuple[datetime, datetime, int]:
"""Get start, end, rollup for the given parameters.
Apply a similar logic as `sessions_v2.get_constrained_date_range`,
but with fewer constraints. More constraints may be added in the future.
Note that this function returns a right-exclusive date range [start, end),
contrary to the one used in sessions_v2.
"""
interval_td = parse_stats_period(params.get("interval", "1h"))
interval = int(3600 if interval_td is None else interval_td.total_seconds())
start, end = get_date_range_from_params(params, default_stats_period=timedelta(days=1))
date_range = timedelta(
seconds=int(interval * get_num_intervals(end=end, start=start, granularity=interval))
)
end = to_datetime(int(interval * get_num_intervals(start=None, end=end, granularity=interval)))
start = end - date_range
# NOTE: The sessions_v2 implementation cuts the `end` time to now + 1 minute
# if `end` is in the future. This allows for better real time results when
# caching is enabled on the snuba queries. Removed here for simplicity,
# but we might want to reconsider once caching becomes an issue for metrics.
return start, end, interval
def get_metric_object_from_metric_field(
metric_field: MetricField,
) -> MetricExpressionBase:
"""Get the metric object from a metric field"""
return metric_object_factory(op=metric_field.op, metric_mri=metric_field.metric_mri)
| QueryDefinition |
python | django__django | django/forms/widgets.py | {
"start": 19690,
"end": 21231
} | class ____(Input):
input_type = "checkbox"
template_name = "django/forms/widgets/checkbox.html"
def __init__(self, attrs=None, check_test=None):
super().__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def format_value(self, value):
"""Only return the 'value' attribute if value isn't empty."""
if value is True or value is False or value is None or value == "":
return
return str(value)
def get_context(self, name, value, attrs):
if self.check_test(value):
attrs = {**(attrs or {}), "checked": True}
return super().get_context(name, value, attrs)
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {"true": True, "false": False}
if isinstance(value, str):
value = values.get(value.lower(), value)
return bool(value)
def value_omitted_from_data(self, data, files, name):
# HTML checkboxes don't appear in POST data if not checked, so it's
# never known if the value is actually omitted.
return False
| CheckboxInput |
python | pytorch__pytorch | test/test_tensorexpr_pybind.py | {
"start": 659,
"end": 14336
} | class ____(JitTestCase):
def test_simple_sum(self):
n = 32
cg = construct_adder(n)
tA = torch.randn(n)
tB = torch.randn(n)
tC = torch.empty(n)
cg.call([tA, tB, tC])
torch.testing.assert_close(tA + tB, tC)
def test_call_raw(self):
n = 16
cg = construct_adder(n, dtype=torch.float64)
tA = torch.randn(n, dtype=torch.float64)
tB = torch.randn(n, dtype=torch.float64)
tC = torch.empty(n, dtype=torch.float64)
cg.call_raw([tA.data_ptr(), tB.data_ptr(), tC.data_ptr()])
torch.testing.assert_close(tA + tB, tC)
def test_external_calls(self):
dtype = torch.float32
A = te.BufHandle("A", [1, 4], dtype)
B = te.BufHandle("B", [4, 1], dtype)
C = te.BufHandle("C", [1, 1], dtype)
s = te.ExternalCall(C, "nnc_aten_matmul", [A, B], [])
loopnest = te.LoopNest(s, [C])
loopnest.prepare_for_codegen()
codegen = te.construct_codegen("ir_eval", s, [A, B, C])
tA = torch.ones(1, 4)
tB = torch.ones(4, 1)
tC = torch.empty(1, 1)
codegen.call([tA, tB, tC])
torch.testing.assert_close(torch.matmul(tA, tB), tC)
def test_dynamic_shape(self):
dN = te.VarHandle(torch.int32)
A = te.BufHandle([dN], torch.float64)
B = te.BufHandle([dN], torch.float64)
def compute(i):
return A.load(i) - B.load(i)
C = te.Compute("C", [dN], compute)
loopnest = te.LoopNest([C])
loopnest.prepare_for_codegen()
cg = te.construct_codegen("ir_eval", loopnest.simplify(), [A, B, C, dN])
def test_with_shape(n):
tA = torch.randn(n, dtype=torch.double)
tB = torch.randn(n, dtype=torch.double)
tC = torch.empty(n, dtype=torch.double)
cg.call([tA, tB, tC, n])
torch.testing.assert_close(tA - tB, tC)
test_with_shape(8)
test_with_shape(31)
def test_dynamic_shape_2d(self):
dN = te.VarHandle(torch.int32)
dM = te.VarHandle(torch.int32)
A = te.BufHandle([dN, dM], torch.float64)
B = te.BufHandle([dN, dM], torch.float64)
def compute(i, j):
return A.load([i, j]) - B.load([i, j])
C = te.Compute("C", [dN, dM], compute)
loopnest = te.LoopNest([C])
loopnest.prepare_for_codegen()
cg = te.construct_codegen("ir_eval", loopnest.simplify(), [A, B, C, dN, dM])
def test_with_shape(n, m):
tA = torch.randn(n, m, dtype=torch.double)
tB = torch.randn(n, m, dtype=torch.double)
tC = torch.empty(n, m, dtype=torch.double)
cg.call([tA, tB, tC, n, m])
torch.testing.assert_close(tA - tB, tC)
test_with_shape(2, 4)
test_with_shape(5, 3)
def test_dtype_error(self):
te.BufHandle("a", [1], torch.float32) # ok
self.assertRaises(TypeError, lambda: te.BufHandle("a", [1], "float55"))
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_tensor_inputs(self):
def f(a, b, c):
return a + b + c
device, size = "cpu", (4, 4)
x = torch.rand(size, device=device)
y = torch.rand(size, device=device)
z = torch.rand(size, device=device)
graph_str = """
graph(%a.1 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu),
%b.1 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu),
%c.1 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu)):
%6 : int = prim::Constant[value=1]()
%7 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu) = aten::add(%a.1, %b.1, %6)
%3 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu) = aten::add(%7, %c.1, %6)
return (%3)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x, y, z))
res2 = kernel.fallback((x, y, z))
correct = f(x, y, z)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_scalar_inputs(self):
def f(a, b, c):
return a + b + c
x = torch.tensor(0.1, dtype=torch.float, device="cpu")
y = torch.tensor(0.6, dtype=torch.float, device="cpu")
z = torch.tensor(0.7, dtype=torch.float, device="cpu")
graph_str = """
graph(%a.1 : Float(requires_grad=0, device=cpu),
%b.1 : Float(requires_grad=0, device=cpu),
%c.1 : Float(requires_grad=0, device=cpu)):
%3 : int = prim::Constant[value=1]()
%6 : Float(requires_grad=0, device=cpu) = aten::add(%a.1, %b.1, %3)
%9 : Float(requires_grad=0, device=cpu) = aten::add(%6, %c.1, %3)
return (%9)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x, y, z))
res2 = kernel.fallback((x, y, z))
correct = f(x, y, z)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_shape_prop(self):
device, size = "cpu", (4, 4)
x = torch.rand(size, device=device)
y = torch.rand(size, device=device)
graph_str = """
graph(%a : Tensor, %b : Tensor):
%c : Tensor = aten::mul(%a, %b)
return (%c)
"""
graph = torch._C.parse_ir(graph_str)
exception_thrown = False
try:
kernel = te.TensorExprKernel(graph)
except RuntimeError:
# Graph doesn't have shape info for inputs => compilation should
# fail
exception_thrown = True
assert exception_thrown
# Inject shape info and try compiling again
example_inputs = [torch.rand(4, 4), torch.rand(4, 4)]
torch._C._te.annotate_input_shapes(graph, example_inputs)
torch._C._jit_pass_propagate_shapes_on_graph(graph)
# Now compilation should pass
kernel = te.TensorExprKernel(graph)
res = kernel.run((x, y))
correct = torch.mul(x, y)
np.testing.assert_allclose(res.numpy(), correct.numpy(), atol=1e-5)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_shape_prop_module(self):
class TestModule(torch.nn.Module):
def forward(self, x, y):
return x * x + y
graph = torch.jit.script(TestModule()).graph
# Try compiling the graph as-is. It should fail because it doesn't have
# shape info.
exception_thrown = False
try:
kernel = te.TensorExprKernel(graph)
except RuntimeError:
exception_thrown = True
assert exception_thrown
# Try injecting shape info for graph inputs
example_inputs = [torch.rand(4, 4), torch.rand(4, 4)]
exception_thrown = False
try:
torch._C._te.annotate_input_shapes(graph, example_inputs)
except RuntimeError:
# Graph has a 'self' argument for which we can't set shapes
exception_thrown = True
assert exception_thrown
# Remove 'self' argument and try annotating shapes one more time
torch._C._te.remove_unused_self_argument(graph)
# Inject shape info and try compiling again
torch._C._te.annotate_input_shapes(graph, example_inputs)
torch._C._jit_pass_propagate_shapes_on_graph(graph)
# Now compilation should pass
kernel = te.TensorExprKernel(graph)
device, size = "cpu", (4, 4)
x = torch.rand(size, device=device)
y = torch.rand(size, device=device)
res = kernel.run((x, y))
correct = TestModule().forward(x, y)
np.testing.assert_allclose(res.numpy(), correct.numpy(), atol=1e-5)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_t(self):
def f(a):
return a.t()
device, size = "cpu", (3, 4)
x = torch.rand(size, device=device)
graph_str = """
graph(%a.1 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
%3 : Float(4, 3, strides=[4, 1], requires_grad=0, device=cpu) = aten::t(%a.1)
return (%3)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x,))
res2 = kernel.fallback((x,))
correct = f(x)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_transpose(self):
def f(a):
return a.transpose(-1, -2)
device, size = "cpu", (3, 4)
x = torch.rand(size, device=device)
graph_str = """
graph(%a.1 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
%2 : int = prim::Constant[value=-1]()
%3 : int = prim::Constant[value=-2]()
%4 : Float(4, 3, strides=[4, 1], requires_grad=0, device=cpu) = aten::transpose(%a.1, %2, %3)
return (%4)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x,))
res2 = kernel.fallback((x,))
correct = f(x)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_permute(self):
def f(a):
return a.permute([2, 1, 0])
device, size = "cpu", (3, 4, 5)
x = torch.rand(size, device=device)
graph_str = """
graph(%a.1 : Float(3, 4, 5, strides=[20, 5, 1], requires_grad=0, device=cpu)):
%1 : int = prim::Constant[value=2]()
%2 : int = prim::Constant[value=1]()
%3 : int = prim::Constant[value=0]()
%4 : int[] = prim::ListConstruct(%1, %2, %3)
%5 : Float(5, 4, 3, strides=[12, 3, 1], requires_grad=0, device=cpu) = aten::permute(%a.1, %4)
return (%5)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x,))
res2 = kernel.fallback((x,))
correct = f(x)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_custom_lowering(self):
def f(a):
return a.nan_to_num()
device = "cpu"
x = torch.ones((2, 2), device=device)
x[0, 0] = x[1, 1] = torch.nan
graph_str = """
graph(%x : Float(2, 2, strides=[2, 1], requires_grad=0, device=cpu)):
%none : NoneType = prim::Constant()
%y : Float(2, 2, strides=[2, 1], requires_grad=0, device=cpu) = aten::nan_to_num(%x, %none, %none, %none)
return (%y)
"""
graph = torch._C.parse_ir(graph_str)
def my_custom_lowering(inputs, out_shape, out_stride, out_type, device):
def compute(idxs):
load = inputs[0].as_buf().load(idxs)
return te.ifThenElse(
te.ExprHandle.isnan(load), te.ExprHandle.float(0.0), load
)
return te.Compute2("custom_nan_to_num", out_shape, compute)
kernel = te.TensorExprKernel(graph, {"aten::nan_to_num": my_custom_lowering})
res1 = kernel.run((x,))
res2 = kernel.fallback((x,))
correct = f(x)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_kernel_with_expand(self):
def f(a):
return a.expand((2, 3, 4))
device = "cpu"
x = torch.rand((1, 3, 1), device=device)
graph_str = """
graph(%a : Float(1, 3, 1, strides=[3, 1, 1], requires_grad=0, device=cpu)):
%1 : int = prim::Constant[value=2]()
%2 : int = prim::Constant[value=3]()
%3 : int = prim::Constant[value=4]()
%4 : int[] = prim::ListConstruct(%1, %2, %3)
%5 : bool = prim::Constant[value=0]()
%6 : Float(2, 3, 4, strides=[12, 4, 0], requires_grad=0, device=cpu) = aten::expand(%a, %4, %5)
return (%6)
"""
graph = torch._C.parse_ir(graph_str)
kernel = te.TensorExprKernel(graph)
res1 = kernel.run((x,))
res2 = kernel.fallback((x,))
correct = f(x)
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
@unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
def test_alloc_in_loop(self):
a, tmp, b = (
te.BufHandle(name, [1], torch.float32) for name in ["a", "tmp", "b"]
)
body = te.Block([tmp.store([0], a.load([0])), b.store([0], tmp.load([0]))])
for _ in range(4):
i = te.VarHandle("i", torch.int32)
body = te.For.make(i, 0, 100, body)
nest = te.LoopNest(body, [b])
nest.prepare_for_codegen()
f = te.construct_codegen("llvm", nest.simplify(), [a, b])
ta, tb = (torch.ones(1) for _ in range(2))
f.call([ta.data_ptr(), tb.data_ptr()])
| TestTensorExprPyBind |
python | ethereum__web3.py | web3/contract/base_contract.py | {
"start": 32968,
"end": 47561
} | class ____:
"""
Base class for Contract proxy classes.
First you need to create your Contract classes using
:meth:`web3.eth.Eth.contract` that takes compiled Solidity contract
ABI definitions as input. The created class object will be a subclass of
this base class.
After you have your Contract proxy class created you can interact with
smart contracts
* Create a Contract proxy object for an existing deployed smart contract by
its address using :meth:`__init__`
* Deploy a new smart contract using :py:meth:`Contract.constructor.transact()`
"""
w3: Union["Web3", "AsyncWeb3[Any]"] = None
# instance level properties
address: ChecksumAddress = None
# class properties (overridable at instance level)
abi: ABI = None
asm = None
ast = None
bytecode = None
bytecode_runtime = None
clone_bin = None
decode_tuples = None
dev_doc = None
interface = None
metadata = None
opcodes = None
src_map = None
src_map_runtime = None
user_doc = None
# Public API
#
@combomethod
def encode_abi(
cls,
abi_element_identifier: str,
args: Any | None = None,
kwargs: Any | None = None,
data: HexStr | None = None,
) -> HexStr:
"""
Encodes the arguments using the Ethereum ABI for the contract function
that matches the given name and arguments.
:param data: defaults to function selector
"""
args = args or tuple()
kwargs = kwargs or {}
element_info = get_abi_element_info(
cls.abi,
abi_element_identifier,
*args,
abi_codec=cls.w3.codec,
**kwargs,
)
if data is None:
data = element_info["selector"]
return encode_abi(cls.w3, element_info["abi"], element_info["arguments"], data)
#
# Functions API
#
@combomethod
def all_functions(
self,
) -> list["BaseContractFunction"]:
"""
Return all functions in the contract.
"""
return self.find_functions_by_identifier(
self.abi, self.w3, self.address, lambda _: True
)
@combomethod
def get_function_by_signature(self, signature: str) -> "BaseContractFunction":
"""
Return a distinct function with matching signature.
Raises a Web3ValueError if the signature is invalid or if there is no match or
more than one is found.
"""
if " " in signature:
raise Web3ValueError(
"Function signature should not contain any spaces. "
f"Found spaces in input: {signature}"
)
def callable_check(fn_abi: ABIFunction) -> bool:
return abi_to_signature(fn_abi) == signature
fns = self.find_functions_by_identifier(
self.abi, self.w3, self.address, callable_check
)
return self.get_function_by_identifier(fns, "signature")
@combomethod
def find_functions_by_name(self, fn_name: str) -> list["BaseContractFunction"]:
"""
Return all functions with matching name.
Raises a Web3ValueError if there is no match or more than one is found.
"""
def callable_check(fn_abi: ABIFunction) -> bool:
return fn_abi["name"] == fn_name
return self.find_functions_by_identifier(
self.abi, self.w3, self.address, callable_check
)
@combomethod
def get_function_by_name(self, fn_name: str) -> "BaseContractFunction":
"""
Return a distinct function with matching name.
Raises a Web3ValueError if there is no match or more than one is found.
"""
fns = self.find_functions_by_name(fn_name)
return self.get_function_by_identifier(fns, "name")
@combomethod
def get_function_by_selector(
self, selector: bytes | int | HexStr
) -> "BaseContractFunction":
"""
Return a distinct function with matching 4byte selector.
Raises a Web3ValueError if there is no match or more than one is found.
"""
def callable_check(fn_abi: ABIFunction) -> bool:
return encode_hex(function_abi_to_4byte_selector(fn_abi)) == to_4byte_hex(
selector
)
fns = self.find_functions_by_identifier(
self.abi, self.w3, self.address, callable_check
)
return self.get_function_by_identifier(fns, "selector")
@combomethod
def decode_function_input(
self, data: HexStr
) -> tuple["BaseContractFunction", dict[str, Any]]:
"""
Return a Tuple of the function selector and decoded arguments.
"""
func = self.get_function_by_selector(HexBytes(data)[:4])
arguments = decode_transaction_data(
func.abi, data, normalizers=BASE_RETURN_NORMALIZERS
)
return func, arguments
@combomethod
def find_functions_by_args(self, *args: Any) -> "BaseContractFunction":
"""
Return all functions with matching args, checking each argument can be encoded
with the type.
"""
def callable_check(fn_abi: ABIFunction) -> bool:
return check_if_arguments_can_be_encoded(
fn_abi,
*args,
abi_codec=self.w3.codec,
**{},
)
return self.find_functions_by_identifier(
self.abi, self.w3, self.address, callable_check
)
@combomethod
def get_function_by_args(self, *args: Any) -> "BaseContractFunction":
"""
Return a distinct function with matching args, checking each argument can be
encoded with the type.
Raises a Web3ValueError if there is no match or more than one is found.
"""
fns = self.find_functions_by_args(*args)
return self.get_function_by_identifier(fns, "args")
#
# Events API
#
@combomethod
def all_events(self) -> list["BaseContractEvent"]:
"""
Return all events in the contract.
"""
return self.find_events_by_identifier(
self.abi, self.w3, self.address, lambda _: True
)
@combomethod
def get_event_by_signature(self, signature: str) -> "BaseContractEvent":
"""
Return a distinct event with matching signature.
Raises a Web3ValueError if the signature is invalid or if there is no match or
more than one is found.
"""
def callable_check(event_abi: ABIEvent) -> bool:
return abi_to_signature(event_abi) == signature.replace(" ", "")
events = self.find_events_by_identifier(
self.abi, self.w3, self.address, callable_check
)
return self.get_event_by_identifier(events, "signature")
@combomethod
def find_events_by_name(self, event_name: str) -> list["BaseContractEvent"]:
"""
Return all events with matching name.
Raises a Web3ValueError if there is no match or more than one is found.
"""
def callable_check(fn_abi: ABIFunction) -> bool:
return fn_abi["name"] == event_name
return self.find_events_by_identifier(
self.abi, self.w3, self.address, callable_check
)
@combomethod
def get_event_by_name(self, event_name: str) -> "BaseContractEvent":
"""
Return a distinct event with matching name.
Raises a Web3ValueError if there is no match or more than one is found.
"""
events = self.find_events_by_name(event_name)
return self.get_event_by_identifier(events, "name")
@combomethod
def find_events_by_selector(
self, selector: bytes | int | HexStr
) -> list["BaseContractEvent"]:
"""
Return all events with matching selector.
Raises a Web3ValueError if there is no match or more than one is found.
"""
def callable_check(event_abi: ABIEvent) -> bool:
return encode_hex(
keccak(text=abi_to_signature(event_abi).replace(" ", ""))
) == encode_hex(hexstr_if_str(to_bytes, selector))
return self.find_events_by_identifier(
self.abi, self.w3, self.address, callable_check
)
@combomethod
def get_event_by_selector(
self, selector: bytes | int | HexStr
) -> "BaseContractEvent":
"""
Return a distinct event with matching keccak selector.
Raises a Web3ValueError if there is no match or more than one is found.
"""
events = self.find_events_by_selector(selector)
return self.get_event_by_identifier(events, "selector")
@combomethod
def find_events_by_topic(self, topic: HexStr) -> list["BaseContractEvent"]:
"""
Return all events with matching topic.
Raises a Web3ValueError if there is no match or more than one is found.
"""
def callable_check(event_abi: ABIEvent) -> bool:
return (
encode_hex(keccak(text=abi_to_signature(event_abi).replace(" ", "")))
== topic
)
return self.find_events_by_identifier(
self.abi, self.w3, self.address, callable_check
)
@combomethod
def get_event_by_topic(self, topic: HexStr) -> "BaseContractEvent":
"""
Return a distinct event with matching topic.
Raises a Web3ValueError if there is no match or more than one is found.
"""
events = self.find_events_by_topic(topic)
return self.get_event_by_identifier(events, "topic")
@combomethod
def find_functions_by_identifier(
cls,
contract_abi: ABI,
w3: Union["Web3", "AsyncWeb3[Any]"],
address: ChecksumAddress,
callable_check: Callable[..., Any],
) -> list[Any]:
raise NotImplementedError(
"This method should be implemented in the inherited class"
)
@combomethod
def get_function_by_identifier(
cls, fns: Sequence["BaseContractFunction"], identifier: str
) -> "BaseContractFunction":
raise NotImplementedError(
"This method should be implemented in the inherited class"
)
@combomethod
def find_events_by_identifier(
cls,
contract_abi: ABI,
w3: Union["Web3", "AsyncWeb3[Any]"],
address: ChecksumAddress,
callable_check: Callable[..., Any],
) -> list[Any]:
raise NotImplementedError(
"This method should be implemented in the inherited class"
)
@combomethod
def get_event_by_identifier(
cls, fns: Sequence["BaseContractEvent"], identifier: str
) -> "BaseContractEvent":
raise NotImplementedError(
"This method should be implemented in the inherited class"
)
@staticmethod
def get_fallback_function(
abi: ABI,
w3: Union["Web3", "AsyncWeb3[Any]"],
function_type: type["BaseContractFunction"],
address: ChecksumAddress | None = None,
) -> "BaseContractFunction":
if abi and fallback_func_abi_exists(abi):
fallback_abi = filter_abi_by_type("fallback", abi)[0]
return function_type.factory(
"fallback",
w3=w3,
contract_abi=abi,
address=address,
abi_element_identifier=FallbackFn,
abi=fallback_abi,
)()
return cast(function_type, NonExistentFallbackFunction()) # type: ignore
@staticmethod
def get_receive_function(
abi: ABI,
w3: Union["Web3", "AsyncWeb3[Any]"],
function_type: type["BaseContractFunction"],
address: ChecksumAddress | None = None,
) -> "BaseContractFunction":
if abi and receive_func_abi_exists(abi):
receive_abi = filter_abi_by_type("receive", abi)[0]
return function_type.factory(
"receive",
w3=w3,
contract_abi=abi,
address=address,
abi_element_identifier=ReceiveFn,
abi=receive_abi,
)()
return cast(function_type, NonExistentReceiveFunction()) # type: ignore
#
# Private Helpers
#
_return_data_normalizers: tuple[Callable[..., Any], ...] = tuple()
@classmethod
def _prepare_transaction(
cls,
abi_element_identifier: ABIElementIdentifier,
fn_args: Any | None = None,
fn_kwargs: Any | None = None,
transaction: TxParams | None = None,
) -> TxParams:
return prepare_transaction(
cls.address,
cls.w3,
abi_element_identifier=abi_element_identifier,
contract_abi=cls.abi,
transaction=transaction,
fn_args=fn_args,
fn_kwargs=fn_kwargs,
)
@classmethod
def _find_matching_fn_abi(
cls,
fn_identifier: ABIElementIdentifier | None = None,
*args: Sequence[Any],
**kwargs: dict[str, Any],
) -> ABIElement:
if not args and not kwargs:
fn_identifier = get_abi_element_signature(fn_identifier)
return get_abi_element(
cls.abi,
fn_identifier,
*args,
abi_codec=cls.w3.codec,
**kwargs,
)
@classmethod
def _get_event_abi(
cls,
event_name: str | None = None,
argument_names: Sequence[str] | None = None,
) -> ABIEvent:
return cast(
ABIEvent,
get_abi_element(
abi=cls.abi,
abi_element_identifier=event_name,
argument_names=argument_names,
),
)
@combomethod
def _encode_constructor_data(
cls, *args: Sequence[Any], **kwargs: dict[str, Any]
) -> HexStr:
constructor_abi = find_constructor_abi_element_by_type(cls.abi)
if constructor_abi:
arguments = get_normalized_abi_inputs(constructor_abi, *args, **kwargs)
deploy_data = add_0x_prefix(
encode_abi(cls.w3, constructor_abi, arguments, data=cls.bytecode)
)
else:
if args or kwargs:
msg = "Constructor args were provided, but no constructor function was provided." # noqa: E501
raise Web3TypeError(msg)
deploy_data = to_hex(cls.bytecode)
return deploy_data
| BaseContract |
python | langchain-ai__langchain | libs/core/langchain_core/embeddings/fake.py | {
"start": 1907,
"end": 3886
} | class ____(Embeddings, BaseModel):
"""Deterministic fake embedding model for unit testing purposes.
This embedding model creates embeddings by sampling from a normal distribution
with a seed based on the hash of the text.
!!! danger "Toy model"
Do not use this outside of testing, as it is not a real embedding model.
Instantiate:
```python
from langchain_core.embeddings import DeterministicFakeEmbedding
embed = DeterministicFakeEmbedding(size=100)
```
Embed single text:
```python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
```
```python
[-0.700234640213188, -0.581266257710429, -1.1328482266445354]
```
Embed multiple texts:
```python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
```
```python
2
[-0.5670477847544458, -0.31403828652395727, -0.5840547508955257]
```
"""
size: int
"""The size of the embedding vector."""
def _get_embedding(self, seed: int) -> list[float]:
# set the seed for the random generator
rng = np.random.default_rng(seed)
return list(rng.normal(size=self.size))
@staticmethod
def _get_seed(text: str) -> int:
"""Get a seed for the random generator, using the hash of the text."""
return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8
@override
def embed_documents(self, texts: list[str]) -> list[list[float]]:
return [self._get_embedding(seed=self._get_seed(_)) for _ in texts]
@override
def embed_query(self, text: str) -> list[float]:
return self._get_embedding(seed=self._get_seed(text))
| DeterministicFakeEmbedding |
python | django-extensions__django-extensions | django_extensions/mongodb/fields/__init__.py | {
"start": 7011,
"end": 9465
} | class ____(StringField):
"""
UUIDField
By default uses UUID version 1 (generate from host ID, sequence number and current time)
The field support all uuid versions which are natively supported by the uuid python module.
For more information see: https://docs.python.org/lib/module-uuid.html
""" # noqa: E501
def __init__(
self,
verbose_name=None,
name=None,
auto=True,
version=1,
node=None,
clock_seq=None,
namespace=None,
**kwargs,
):
kwargs["max_length"] = 36
self.auto = auto
self.version = version
if version == 1:
self.node, self.clock_seq = node, clock_seq
elif version == 3 or version == 5:
self.namespace, self.name = namespace, name
StringField.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return StringField.__name__
def contribute_to_class(self, cls, name):
if self.primary_key:
assert not cls._meta.has_auto_field, (
"A model can't have more than one AutoField: %s %s %s; have %s"
% (self, cls, name, cls._meta.auto_field)
)
super().contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
else:
super().contribute_to_class(cls, name)
def create_uuid(self):
if not self.version or self.version == 4:
return uuid.uuid4()
elif self.version == 1:
return uuid.uuid1(self.node, self.clock_seq)
elif self.version == 2:
raise UUIDVersionError("UUID version 2 is not supported.")
elif self.version == 3:
return uuid.uuid3(self.namespace, self.name)
elif self.version == 5:
return uuid.uuid5(self.namespace, self.name)
else:
raise UUIDVersionError("UUID version %s is not valid." % self.version)
def pre_save(self, model_instance, add):
if self.auto and add:
value = str(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
else:
value = super().pre_save(model_instance, add)
if self.auto and not value:
value = str(self.create_uuid())
setattr(model_instance, self.attname, value)
return value
| UUIDField |
python | keras-team__keras | keras/src/ops/numpy_test.py | {
"start": 542,
"end": 2852
} | class ____(testing.TestCase):
def test_basic_rotation(self):
array = np.array([[1, 2, 3], [4, 5, 6]])
rotated = knp.rot90(array)
expected = np.rot90(array)
self.assertAllClose(rotated, expected)
@parameterized.named_parameters(
("k_0", 0, [[1, 2], [3, 4]]),
("k_1", 1, [[2, 4], [1, 3]]),
("k_2", 2, [[4, 3], [2, 1]]),
("k_neg1", -1, [[3, 1], [4, 2]]),
("k_5", 5, [[2, 4], [1, 3]]), # k=5 ≡ k=1 (mod 4)
("k_6", 6, [[4, 3], [2, 1]]), # k=6 ≡ k=2 (mod 4)
)
def test_k_parameter_variations(self, k, expected):
array = np.array([[1, 2], [3, 4]])
rotated = knp.rot90(array, k=k)
expected = np.array(expected)
self.assertAllClose(rotated, expected)
@parameterized.named_parameters(
("axes_0_1", (0, 1)), ("axes_1_2", (1, 2)), ("axes_0_2", (0, 2))
)
def test_3d_operations(self, axes):
array_3d = np.arange(12).reshape(3, 2, 2)
rotated = knp.rot90(array_3d, axes=axes)
expected = np.rot90(array_3d, axes=axes)
self.assertAllClose(rotated, expected)
@parameterized.named_parameters(
("single_image", np.random.random((4, 4, 3))),
("batch_images", np.random.random((2, 4, 4, 3))),
)
def test_image_processing(self, array):
np.random.seed(0)
rotated = knp.rot90(array, axes=(0, 1))
expected = np.rot90(array, axes=(0, 1))
self.assertAllClose(rotated, expected)
@parameterized.named_parameters(
("single_row", [[1, 2, 3]]),
("single_column", [[1], [2], [3]]),
("negative_values", [[-1, 0], [1, -2]]),
)
def test_edge_conditions(self, array):
numpy_array = np.array(array)
rotated = knp.rot90(numpy_array)
expected = np.rot90(numpy_array)
self.assertAllClose(rotated, expected)
@parameterized.named_parameters(
("1D_array", np.array([1, 2, 3]), None),
("duplicate_axes", np.array([[1, 2], [3, 4]]), (0, 0)),
)
def test_error_conditions(self, array, axes):
if axes is None:
with self.assertRaises(ValueError):
knp.rot90(array)
else:
with self.assertRaises(ValueError):
knp.rot90(array, axes=axes)
| NumPyTestRot90 |
python | catalyst-team__catalyst | catalyst/runners/supervised.py | {
"start": 5503,
"end": 9085
} | class ____(ISupervisedRunner, Runner):
"""Runner for experiments with supervised model.
Args:
model: Torch model instance
engine: Engine instance
input_key: key in ``runner.batch`` dict mapping for model input
output_key: key for ``runner.batch`` to store model output
target_key: key in ``runner.batch`` dict mapping for target
loss_key: key for ``runner.batch_metrics`` to store criterion loss output
SupervisedRunner logic pseudocode:
.. code-block:: python
batch = {"input_key": tensor, "target_key": tensor}
output = model(batch["input_key"])
batch["output_key"] = output
loss = criterion(batch["output_key"], batch["target_key"])
batch_metrics["loss_key"] = loss
.. note::
Please follow the `minimal examples`_ sections for use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
model: RunnerModel = None,
engine: Engine = None,
input_key: Any = "features",
output_key: Any = "logits",
target_key: str = "targets",
loss_key: str = "loss",
):
"""Init."""
ISupervisedRunner.__init__(
self,
input_key=input_key,
output_key=output_key,
target_key=target_key,
loss_key=loss_key,
)
Runner.__init__(self, model=model, engine=engine)
@torch.no_grad()
def predict_batch(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:
"""
Run model inference on specified data batch.
.. warning::
You should not override this method.
If you need specific model call, override runner.forward() method.
Args:
batch: dictionary with data batch from DataLoader.
**kwargs: additional kwargs to pass to the model
Returns:
Mapping[str, Any]: model output dictionary
"""
batch = self._process_batch(batch)
output = self.forward(batch, **kwargs)
return output
def get_callbacks(self) -> "OrderedDict[str, Callback]":
"""Returns the callbacks for the experiment."""
callbacks = sort_callbacks_by_order(super().get_callbacks())
callback_exists = lambda callback_fn: any(
callback_isinstance(x, callback_fn) for x in callbacks.values()
)
if isinstance(self._criterion, TorchCriterion) and not callback_exists(
ICriterionCallback
):
callbacks["_criterion"] = CriterionCallback(
input_key=self._output_key,
target_key=self._target_key,
metric_key=self._loss_key,
)
if isinstance(self._optimizer, TorchOptimizer) and not callback_exists(
IBackwardCallback
):
callbacks["_backward"] = BackwardCallback(metric_key=self._loss_key)
if isinstance(self._optimizer, TorchOptimizer) and not callback_exists(
IOptimizerCallback
):
callbacks["_optimizer"] = OptimizerCallback(metric_key=self._loss_key)
if isinstance(self._scheduler, TorchScheduler) and not callback_exists(
ISchedulerCallback
):
callbacks["_scheduler"] = SchedulerCallback(
loader_key=self._valid_loader, metric_key=self._valid_metric
)
return callbacks
__all__ = ["ISupervisedRunner", "SupervisedRunner"]
| SupervisedRunner |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_created_by.py | {
"start": 659,
"end": 836
} | class ____(GQLResult):
typename__: Typename[Literal["User"]]
ArtifactCreatedBy.model_rebuild()
ArtifactCreatedByArtifact.model_rebuild()
| ArtifactCreatedByArtifactCreatedByUser |
python | giampaolo__psutil | tests/test_memleaks.py | {
"start": 2454,
"end": 9154
} | class ____(MemoryLeakTestCase):
"""Test leaks of Process class methods."""
proc = thisproc
def execute_w_exc(self, exc, fun, **kwargs):
"""Run MemoryLeakTestCase.execute() expecting fun() to raise
exc on every call.
"""
def call():
try:
fun()
except exc:
pass
else:
return self.fail(f"{fun} did not raise {exc}")
self.execute(call, **kwargs)
def test_coverage(self):
ns = process_namespace(None)
ns.test_class_coverage(self, ns.getters + ns.setters)
@fewtimes_if_linux()
def test_name(self):
self.execute(self.proc.name)
@fewtimes_if_linux()
def test_cmdline(self):
self.execute(self.proc.cmdline)
@fewtimes_if_linux()
def test_exe(self):
self.execute(self.proc.exe)
@fewtimes_if_linux()
def test_ppid(self):
self.execute(self.proc.ppid)
@pytest.mark.skipif(not POSIX, reason="POSIX only")
@fewtimes_if_linux()
def test_uids(self):
self.execute(self.proc.uids)
@pytest.mark.skipif(not POSIX, reason="POSIX only")
@fewtimes_if_linux()
def test_gids(self):
self.execute(self.proc.gids)
@fewtimes_if_linux()
def test_status(self):
self.execute(self.proc.status)
def test_nice(self):
self.execute(self.proc.nice)
def test_nice_set(self):
niceness = thisproc.nice()
self.execute(lambda: self.proc.nice(niceness))
@pytest.mark.skipif(not HAS_IONICE, reason="not supported")
def test_ionice(self):
self.execute(self.proc.ionice)
@pytest.mark.skipif(not HAS_IONICE, reason="not supported")
def test_ionice_set(self):
if WINDOWS:
value = thisproc.ionice()
self.execute(lambda: self.proc.ionice(value))
else:
self.execute(lambda: self.proc.ionice(psutil.IOPRIO_CLASS_NONE))
@pytest.mark.skipif(not HAS_IONICE, reason="not supported")
@pytest.mark.skipif(WINDOWS, reason="not on WINDOWS")
def test_ionice_set_badarg(self):
fun = functools.partial(cext.proc_ioprio_set, os.getpid(), -1, 0)
self.execute_w_exc(OSError, fun)
@pytest.mark.skipif(not HAS_PROC_IO_COUNTERS, reason="not supported")
@fewtimes_if_linux()
def test_io_counters(self):
self.execute(self.proc.io_counters)
@pytest.mark.skipif(POSIX, reason="worthless on POSIX")
def test_username(self):
# always open 1 handle on Windows (only once)
psutil.Process().username()
self.execute(self.proc.username)
@fewtimes_if_linux()
def test_create_time(self):
self.execute(self.proc.create_time)
@fewtimes_if_linux()
@skip_on_access_denied(only_if=OPENBSD)
def test_num_threads(self):
self.execute(self.proc.num_threads)
@pytest.mark.skipif(not WINDOWS, reason="WINDOWS only")
def test_num_handles(self):
self.execute(self.proc.num_handles)
@pytest.mark.skipif(not POSIX, reason="POSIX only")
@fewtimes_if_linux()
def test_num_fds(self):
self.execute(self.proc.num_fds)
@fewtimes_if_linux()
def test_num_ctx_switches(self):
self.execute(self.proc.num_ctx_switches)
@fewtimes_if_linux()
@skip_on_access_denied(only_if=OPENBSD)
def test_threads(self):
self.execute(self.proc.threads)
@fewtimes_if_linux()
def test_cpu_times(self):
self.execute(self.proc.cpu_times)
@fewtimes_if_linux()
@pytest.mark.skipif(not HAS_PROC_CPU_NUM, reason="not supported")
def test_cpu_num(self):
self.execute(self.proc.cpu_num)
@fewtimes_if_linux()
def test_memory_info(self):
self.execute(self.proc.memory_info)
@fewtimes_if_linux()
def test_memory_full_info(self):
self.execute(self.proc.memory_full_info)
@pytest.mark.skipif(not POSIX, reason="POSIX only")
@fewtimes_if_linux()
def test_terminal(self):
self.execute(self.proc.terminal)
def test_resume(self):
times = FEW_TIMES if POSIX else self.times
self.execute(self.proc.resume, times=times)
@fewtimes_if_linux()
def test_cwd(self):
self.execute(self.proc.cwd)
@pytest.mark.skipif(not HAS_CPU_AFFINITY, reason="not supported")
def test_cpu_affinity(self):
self.execute(self.proc.cpu_affinity)
@pytest.mark.skipif(not HAS_CPU_AFFINITY, reason="not supported")
def test_cpu_affinity_set(self):
affinity = thisproc.cpu_affinity()
self.execute(lambda: self.proc.cpu_affinity(affinity))
@pytest.mark.skipif(not HAS_CPU_AFFINITY, reason="not supported")
def test_cpu_affinity_set_badarg(self):
self.execute_w_exc(ValueError, lambda: self.proc.cpu_affinity([-1]))
@fewtimes_if_linux()
def test_open_files(self):
with open(get_testfn(), 'w'):
self.execute(self.proc.open_files)
@pytest.mark.skipif(not HAS_MEMORY_MAPS, reason="not supported")
@fewtimes_if_linux()
def test_memory_maps(self):
self.execute(self.proc.memory_maps)
@pytest.mark.skipif(not LINUX, reason="LINUX only")
@pytest.mark.skipif(not HAS_RLIMIT, reason="not supported")
def test_rlimit(self):
self.execute(lambda: self.proc.rlimit(psutil.RLIMIT_NOFILE))
@pytest.mark.skipif(not LINUX, reason="LINUX only")
@pytest.mark.skipif(not HAS_RLIMIT, reason="not supported")
def test_rlimit_set(self):
limit = thisproc.rlimit(psutil.RLIMIT_NOFILE)
self.execute(lambda: self.proc.rlimit(psutil.RLIMIT_NOFILE, limit))
@pytest.mark.skipif(not LINUX, reason="LINUX only")
@pytest.mark.skipif(not HAS_RLIMIT, reason="not supported")
def test_rlimit_set_badarg(self):
self.execute_w_exc((OSError, ValueError), lambda: self.proc.rlimit(-1))
@fewtimes_if_linux()
# Windows implementation is based on a single system-wide
# function (tested later).
@pytest.mark.skipif(WINDOWS, reason="worthless on WINDOWS")
def test_net_connections(self):
# TODO: UNIX sockets are temporarily implemented by parsing
# 'pfiles' cmd output; we don't want that part of the code to
# be executed.
with create_sockets():
kind = 'inet' if SUNOS else 'all'
self.execute(lambda: self.proc.net_connections(kind))
@pytest.mark.skipif(not HAS_ENVIRON, reason="not supported")
def test_environ(self):
self.execute(self.proc.environ)
@pytest.mark.skipif(not WINDOWS, reason="WINDOWS only")
def test_proc_info(self):
self.execute(lambda: cext.proc_info(os.getpid()))
| TestProcessObjectLeaks |
python | altair-viz__altair | tools/versioning.py | {
"start": 2342,
"end": 8243
} | class ____:
_TABLE_PATH: ClassVar[Sequence[LiteralString]] = "tool", "altair", "vega"
"""
The table header path split by ``"."``::
[tool.altair.vega] -> "tool", "altair", "vega"
"""
_PY_DEPS_PATH: ClassVar[Sequence[LiteralString]] = (
"project",
"optional-dependencies",
)
_PY_DEPS: ClassVar[frozenset[VegaProjectPy]] = frozenset(
("vl-convert-python", "vegafusion")
)
_CONST_NAME: ClassVar[Literal["VERSIONS"]] = "VERSIONS"
"""Variable name for the exported literal."""
_mapping: Mapping[VegaProject, str]
def __init__(self) -> None:
pyproject = _read_pyproject_toml()
py_deps = _keypath(pyproject, self._PY_DEPS_PATH)
js_deps = _keypath(pyproject, self._TABLE_PATH)
all_deps = chain(js_deps.items(), self._iter_py_deps_versions(py_deps))
self._mapping = dict(sorted(all_deps))
def __getitem__(self, key: VegaProject) -> str:
return self._mapping[key]
def __repr__(self) -> str:
return repr(self._mapping)
def projects(self) -> KeysView[VegaProject]:
return self._mapping.keys()
def items(self) -> ItemsView[VegaProject, str]:
return self._mapping.items()
@property
def vlc_vega(self) -> str:
"""
Returns version of `Vega`_ bundled with `vl-convert`_.
.. _Vega:
https://github.com/vega/vega
.. _vl-convert:
https://github.com/vega/vl-convert
"""
return vlc.get_vega_version()
@property
def vlc_vega_embed(self) -> str:
"""
Returns version of `Vega-Embed`_ bundled with `vl-convert`_.
.. _Vega-Embed:
https://github.com/vega/vega-embed
.. _vl-convert:
https://github.com/vega/vl-convert
"""
return vlc.get_vega_embed_version()
@property
def vlc_vega_themes(self) -> str:
"""
Returns version of `Vega-Themes`_ bundled with `vl-convert`_.
.. _Vega-Themes:
https://github.com/vega/vega-themes
.. _vl-convert:
https://github.com/vega/vl-convert.
"""
return vlc.get_vega_themes_version()
@property
def vlc_vegalite(self) -> list[str]:
"""
Returns versions of `Vega-Lite`_ bundled with `vl-convert`_.
.. _Vega-Lite:
https://github.com/vega/vega-lite
.. _vl-convert:
https://github.com/vega/vl-convert
"""
return vlc.get_vegalite_versions()
@property
def _annotation(self) -> str:
return f"Mapping[{spell_literal(self.projects())}, str]"
@property
def _header(self) -> str:
return f"[{'.'.join(self._TABLE_PATH)}]"
def iter_inline_literal(self) -> Iterator[str]:
"""
Yields the ``[tool.altair.vega]`` table as an inline ``dict``.
Includes a type annotation and docstring.
Notes
-----
- Write at the bottom of ``altair.utils.schemapi``.
- Used in ``altair.utils._importers``.
"""
yield f"{self._CONST_NAME}: {self._annotation} = {self!r}\n"
yield '"""\n'
yield (
"Version pins for non-``python`` `vega projects`_.\n\n"
"Notes\n"
"-----\n"
f"When cutting a new release, make sure to update ``{self._header}`` in ``pyproject.toml``.\n\n"
".. _vega projects:\n"
" https://github.com/vega\n"
)
yield '"""\n'
def update_all(self) -> None:
"""Update all static version pins."""
print("Updating Vega project pins")
self.update_vega_embed()
def update_vega_embed(self) -> None:
"""Updates the **Vega-Lite** version used in ``JupyterChart``."""
fp = _REPO_ROOT / _JUPYTER_INDEX
embed = self["vega-embed"]
vega = parse_version(self.vlc_vega).major
vegalite = self["vega-lite"].lstrip("v")
stmt = f'import vegaEmbed from "https://esm.sh/vega-embed@{embed}?deps=vega@{vega}&deps=vega-lite@{vegalite}";\n'
with fp.open("r", encoding="utf-8", newline="\n") as f:
lines = deque(f.readlines())
lines.popleft()
print(f"Updating import in {fp.as_posix()!r}, to:\n {stmt!r}")
lines.appendleft(stmt)
with fp.open("w", encoding="utf-8", newline="\n") as f:
f.writelines(lines)
def _iter_py_deps_versions(
self, dep_groups: dict[str, Sequence[str]], /
) -> Iterator[tuple[VegaProjectPy, str]]:
"""
Extract the name and lower version bound for all Vega python packages.
Parameters
----------
dep_groups
Mapping of dependency/extra groups to requirement strings.
.. note::
It is expected that this is **either** `project.optional-dependencies`_ or `dependency-groups`_.
.. _project.optional-dependencies:
https://packaging.python.org/en/latest/specifications/pyproject-toml/#dependencies-optional-dependencies
.. _dependency-groups:
https://peps.python.org/pep-0735/
"""
for deps in dep_groups.values():
for req_string in deps:
req = Requirement(req_string)
if req.name in self._PY_DEPS:
it = (
parse_version(sp.version)
for sp in req.specifier
if sp.operator in _LOWER_BOUNDS
)
version = str(min(it))
yield req.name, version # type: ignore[misc]
def __getattr__(name: str) -> _Versions:
if name == "VERSIONS":
global VERSIONS
VERSIONS = _Versions()
return VERSIONS
else:
msg = f"module {__name__!r} has no attribute {name!r}"
raise AttributeError(msg)
| _Versions |
python | mamba-org__mamba | micromamba/tests/test_update.py | {
"start": 308,
"end": 8661
} | class ____:
current_root_prefix = os.environ["MAMBA_ROOT_PREFIX"]
current_prefix = os.environ["CONDA_PREFIX"]
env_name = helpers.random_string()
root_prefix = os.path.expanduser(os.path.join("~", "tmproot" + helpers.random_string()))
prefix = os.path.join(root_prefix, "envs", env_name)
old_version = "0.21.10"
medium_old_version = "0.22"
@staticmethod
@pytest.fixture(scope="class")
def root(existing_cache):
os.environ["MAMBA_ROOT_PREFIX"] = TestUpdate.root_prefix
os.environ["CONDA_PREFIX"] = TestUpdate.prefix
yield
os.environ["MAMBA_ROOT_PREFIX"] = TestUpdate.current_root_prefix
os.environ["CONDA_PREFIX"] = TestUpdate.current_prefix
shutil.rmtree(TestUpdate.root_prefix)
@staticmethod
@pytest.fixture
def env_created(root):
if helpers.dry_run_tests == helpers.DryRun.OFF:
helpers.create(
f"xtensor={TestUpdate.old_version}",
"-n",
TestUpdate.env_name,
"--json",
no_dry_run=True,
)
res = helpers.umamba_list("xtensor", "-n", TestUpdate.env_name, "--json")
assert len(res) == 1
assert res[0]["version"].startswith(TestUpdate.old_version)
yield TestUpdate.env_name
shutil.rmtree(TestUpdate.prefix)
def test_constrained_update(self, env_created):
update_res = helpers.update(
"xtensor<=" + self.medium_old_version, "-n", env_created, "--json"
)
xtensor_link = [
to_link for to_link in update_res["actions"]["LINK"] if to_link["name"] == "xtensor"
][0]
assert xtensor_link["version"].startswith(self.medium_old_version)
# test that we relink noarch packages
def test_update_python_noarch(self, root):
if helpers.dry_run_tests == helpers.DryRun.OFF:
helpers.create(
"python=3.9",
"six",
"requests",
"-n",
TestUpdate.env_name,
"--json",
no_dry_run=True,
)
else:
return
res = helpers.umamba_list("python", "-n", TestUpdate.env_name, "--json")
assert len(res) >= 1
pyelem = [r for r in res if r["name"] == "python"][0]
assert pyelem["version"].startswith("3.9")
res = helpers.umamba_list("requests", "-n", TestUpdate.env_name, "--json")
prev_requests = [r for r in res if r["name"] == "requests"][0]
assert prev_requests["version"]
def site_packages_path(p, pyver):
if platform.system() == "Windows":
return os.path.join(self.prefix, "Lib\\site-packages\\", p)
else:
return os.path.join(self.prefix, f"lib/python{pyver}/site-packages", p)
assert os.path.exists(site_packages_path("requests/__pycache__", "3.9"))
prev_six = helpers.umamba_list("six", "-n", TestUpdate.env_name, "--json")[0]
update_res = helpers.update("-n", TestUpdate.env_name, "python=3.10", "--json")
six_link = [
to_link for to_link in update_res["actions"]["LINK"] if to_link["name"] == "six"
][0]
assert six_link["version"] == prev_six["version"]
assert six_link["build_string"] == prev_six["build_string"]
requests_link = [
to_link for to_link in update_res["actions"]["LINK"] if to_link["name"] == "requests"
][0]
requests_unlink = [
to_link for to_link in update_res["actions"]["UNLINK"] if to_link["name"] == "requests"
][0]
assert requests_link["version"] == requests_unlink["version"]
if platform.system() != "Windows":
assert not os.path.exists(site_packages_path("", "3.9"))
assert os.path.exists(site_packages_path("requests/__pycache__", "3.10"))
assert requests_link["version"] == prev_requests["version"]
assert requests_link["build_string"] == prev_requests["build_string"]
def test_further_constrained_update(self, env_created):
update_res = helpers.update("xtensor==0.24.5=*_0", "--json")
xtensor_link = [
to_link for to_link in update_res["actions"]["LINK"] if to_link["name"] == "xtensor"
][0]
assert xtensor_link["version"] == "0.24.5"
assert xtensor_link["build_number"] == 0
def test_classic_spec(self, env_created):
update_res = helpers.update("xtensor", "--json", "-n", TestUpdate.env_name)
xtensor_link = [
to_link for to_link in update_res["actions"]["LINK"] if to_link["name"] == "xtensor"
][0]
assert TestUpdate.old_version != xtensor_link["version"]
if helpers.dry_run_tests == helpers.DryRun.OFF:
pkg = helpers.get_concrete_pkg(update_res, "xtensor")
pkg_info = helpers.get_concrete_pkg_info(helpers.get_env(TestUpdate.env_name), pkg)
version = pkg_info["version"]
assert TestUpdate.old_version != version
# This should do nothing since python is not installed!
update_res = helpers.update("python", "-n", TestUpdate.env_name, "--json")
# TODO fix this?!
assert update_res["message"] == "All requested packages already installed"
assert update_res["success"] is True
assert "action" not in update_res
def test_update_all(self, env_created):
update_res = helpers.update("--all", "--json")
xtensor_link = [
to_link for to_link in update_res["actions"]["LINK"] if to_link["name"] == "xtensor"
][0]
assert TestUpdate.old_version != xtensor_link["version"]
if helpers.dry_run_tests == helpers.DryRun.OFF:
pkg = helpers.get_concrete_pkg(update_res, "xtensor")
pkg_info = helpers.get_concrete_pkg_info(helpers.get_env(TestUpdate.env_name), pkg)
version = pkg_info["version"]
assert TestUpdate.old_version != version
with open(Path(self.prefix) / "conda-meta" / "history") as h:
history = h.readlines()
print("".join(history))
for el in reversed(history):
x = el.strip()
if x.startswith(">=="):
break
assert not x.startswith("update specs:")
@pytest.mark.parametrize(
"alias",
[
None,
"https://conda.anaconda.org/",
"https://repo.mamba.pm/",
"https://repo.mamba.pm",
],
)
def test_channel_alias(self, alias, env_created):
if alias:
res = helpers.update(
"-n",
TestUpdate.env_name,
"xtensor",
"--json",
"--dry-run",
"--channel-alias",
alias,
)
else:
res = helpers.update("-n", TestUpdate.env_name, "xtensor", "--json", "--dry-run")
for to_link in res["actions"]["LINK"]:
assert to_link["channel"] == "conda-forge"
@pytest.mark.parametrize("output_flag", ["", "--json", "--quiet"])
def test_update_check_logs(self, env_created, output_flag):
res = helpers.update("-n", TestUpdate.env_name, "xtensor=0.24.5", output_flag)
if output_flag == "--json":
assert res["success"]
elif output_flag == "--quiet":
assert res == ""
else:
assert "To activate this environment, use:" not in res
def test_update_explains_problems(self, env_created):
# Non-regression test for: https://github.com/mamba-org/mamba/issues/3828
with pytest.raises(helpers.subprocess.CalledProcessError) as e:
helpers.update("-n", TestUpdate.env_name, "xtensor=0.24.5", "xtensor=0.25.0")
err_string = str(e.value.stderr.decode("utf-8"))
assert "The following packages are incompatible" in err_string
def test_update_explains_problems_json(self, env_created):
with pytest.raises(helpers.subprocess.CalledProcessError) as e:
helpers.update("-n", TestUpdate.env_name, "xtensor=0.24.5", "xtensor=0.25.0", "--json")
out_string = str(e.value.stdout.decode("utf-8"))
assert "cannot install both" in out_string
| TestUpdate |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 35272,
"end": 36830
} | class ____(unittest.TestCase):
"""Tests person in the hi_IN locale"""
def setUp(self):
self.fake = Faker("hi_IN")
Faker.seed(0)
def test_first_name(self):
"""Verify that gender specific names are set correctly"""
name = self.fake.first_name_male()
assert name in HiINProvider.first_names_male
name = self.fake.first_name_female()
assert name in HiINProvider.first_names_female
name = self.fake.first_name()
assert name in HiINProvider.first_names_male
def test_last_name(self):
last_name = self.fake.last_name()
assert last_name in HiINProvider.last_names
def test_name(self):
name = self.fake.name().split()
assert all(isinstance(n, str) for n in name)
prefixes = HiINProvider.prefixes_male + HiINProvider.prefixes_female + HiINProvider.prefixes
# name should always be 2-3 words. If 3, first word should be a prefix.
if len(name) == 3:
assert all(
[
name[0] in prefixes,
name[1] in HiINProvider.first_names,
name[2] in HiINProvider.last_names,
]
)
else:
assert name[0] in HiINProvider.first_names
if name[-1].endswith(HiINProvider.suffixes):
assert name[-1][:-1] in HiINProvider.last_names
assert name[-1][-1] in HiINProvider.suffixes
else:
assert name[-1] in HiINProvider.last_names
| TestHiIN |
python | django__django | tests/field_subclassing/tests.py | {
"start": 339,
"end": 1245
} | class ____(SimpleTestCase):
def test_descriptor_class(self):
class CustomDescriptorModel(models.Model):
name = CustomDescriptorField(max_length=32)
m = CustomDescriptorModel()
self.assertFalse(hasattr(m, "_name_get_count"))
# The field is set to its default in the model constructor.
self.assertEqual(m._name_set_count, 1)
m.name = "foo"
self.assertFalse(hasattr(m, "_name_get_count"))
self.assertEqual(m._name_set_count, 2)
self.assertEqual(m.name, "foo")
self.assertEqual(m._name_get_count, 1)
self.assertEqual(m._name_set_count, 2)
m.name = "bar"
self.assertEqual(m._name_get_count, 1)
self.assertEqual(m._name_set_count, 3)
self.assertEqual(m.name, "bar")
self.assertEqual(m._name_get_count, 2)
self.assertEqual(m._name_set_count, 3)
| DescriptorClassTest |
python | walkccc__LeetCode | solutions/210. Course Schedule II/210.py | {
"start": 85,
"end": 773
} | class ____:
def findOrder(
self,
numCourses: int,
prerequisites: list[list[int]],
) -> list[int]:
ans = []
graph = [[] for _ in range(numCourses)]
states = [State.INIT] * numCourses
for v, u in prerequisites:
graph[u].append(v)
def hasCycle(u: int) -> bool:
if states[u] == State.VISITING:
return True
if states[u] == State.VISITED:
return False
states[u] = State.VISITING
if any(hasCycle(v) for v in graph[u]):
return True
states[u] = State.VISITED
ans.append(u)
return False
if any(hasCycle(i) for i in range(numCourses)):
return []
return ans[::-1]
| Solution |
python | spyder-ide__spyder | external-deps/python-lsp-server/test/fixtures.py | {
"start": 1217,
"end": 5027
} | class ____(Endpoint):
"""
Fake Endpoint representing the editor / LSP client.
The `dispatcher` dict will be used to synchronously calculate the responses
for calls to `.request` and resolve the futures with the value or errors.
Fake methods in the `dispatcher` should raise `JsonRpcException` for any
error.
"""
def request(self, method, params=None):
request_future = super().request(method, params)
try:
request_future.set_result(self._dispatcher[method](params))
except JsonRpcException as e:
request_future.set_exception(e)
return request_future
@pytest.fixture
def pylsp(tmpdir):
"""Return an initialized python LS"""
ls = FakePythonLSPServer(StringIO, StringIO, endpoint_cls=FakeEndpoint)
ls.m_initialize(
processId=1, rootUri=uris.from_fs_path(str(tmpdir)), initializationOptions={}
)
return ls
@pytest.fixture
def pylsp_w_workspace_folders(tmpdir):
"""Return an initialized python LS"""
ls = FakePythonLSPServer(StringIO, StringIO, endpoint_cls=FakeEndpoint)
folder1 = tmpdir.mkdir("folder1")
folder2 = tmpdir.mkdir("folder2")
ls.m_initialize(
processId=1,
rootUri=uris.from_fs_path(str(folder1)),
initializationOptions={},
workspaceFolders=[
{"uri": uris.from_fs_path(str(folder1)), "name": "folder1"},
{"uri": uris.from_fs_path(str(folder2)), "name": "folder2"},
],
)
workspace_folders = [folder1, folder2]
return (ls, workspace_folders)
@pytest.fixture()
def consumer():
return MagicMock()
@pytest.fixture()
def endpoint(consumer):
class Dispatcher(FakeEditorMethodsMixin, MethodDispatcher):
pass
return FakeEndpoint(Dispatcher(), consumer, id_generator=lambda: "id")
@pytest.fixture
def workspace(tmpdir, endpoint) -> None:
"""Return a workspace."""
ws = Workspace(uris.from_fs_path(str(tmpdir)), endpoint)
ws._config = Config(ws.root_uri, {}, 0, {})
yield ws
ws.close()
@pytest.fixture
def workspace_other_root_path(tmpdir, endpoint):
"""Return a workspace with a root_path other than tmpdir."""
ws_path = str(tmpdir.mkdir("test123").mkdir("test456"))
ws = Workspace(uris.from_fs_path(ws_path), endpoint)
ws._config = Config(ws.root_uri, {}, 0, {})
return ws
@pytest.fixture
def config(workspace):
"""Return a config object."""
cfg = Config(workspace.root_uri, {}, 0, {})
cfg._plugin_settings = {
"plugins": {"pylint": {"enabled": False, "args": [], "executable": None}}
}
return cfg
@pytest.fixture
def doc(workspace):
return Document(DOC_URI, workspace, DOC)
@pytest.fixture
def temp_workspace_factory(workspace):
"""
Returns a function that creates a temporary workspace from the files dict.
The dict is in the format {"file_name": "file_contents"}
"""
def fn(files):
def create_file(name, content):
fn = os.path.join(workspace.root_path, name)
with open(fn, "w", encoding="utf-8") as f:
f.write(content)
workspace.put_document(uris.from_fs_path(fn), content)
for name, content in files.items():
create_file(name, content)
return workspace
return fn
@pytest.fixture
def client_server_pair() -> None:
"""A fixture that sets up a client/server pair and shuts down the server"""
client_server_pair_obj = ClientServerPair()
yield (client_server_pair_obj.client, client_server_pair_obj.server)
shutdown_response = client_server_pair_obj.client._endpoint.request(
"shutdown"
).result(timeout=CALL_TIMEOUT_IN_SECONDS)
assert shutdown_response is None
client_server_pair_obj.client._endpoint.notify("exit")
| FakeEndpoint |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/github_app_auth.py | {
"start": 346,
"end": 450
} | class ____(Exception):
"""Raised when GitHub App authentication fails."""
| GitHubAppAuthenticationError |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/padding_fifo_queue_test.py | {
"start": 60468,
"end": 62732
} | class ____(test.TestCase):
def testQueueFromListShapes(self):
which = constant_op.constant(1)
def _cmp(expected, *shapes):
qs = [
data_flow_ops.PaddingFIFOQueue(10, [dtypes_lib.float32],
[tensor_shape.TensorShape(s)])
for s in shapes
]
s_expected = tensor_shape.TensorShape(expected)
s = data_flow_ops.QueueBase.from_list(which, qs).shapes[0]
if s_expected.ndims is None:
self.assertEqual(s_expected.ndims, s.ndims)
else:
self.assertEqual(s_expected.as_list(), s.as_list())
_cmp(None, [1, None], [None])
_cmp([None], [1], [2])
_cmp([1, None], [1, 1], [1, 2])
_cmp([1, None], [1, 1], [1, None])
_cmp([None, None], [None, 1], [1, None])
_cmp([1], [1], [1], [1])
_cmp([None], [1], [None], [1])
_cmp(None, [1, None], [1], [1])
def testQueueFromListShapesMultipleComponents(self):
q_u_u = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])])
q_u_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([None]), tensor_shape.TensorShape([1, 2])])
q_f_f = data_flow_ops.PaddingFIFOQueue(
10, [dtypes_lib.float32, dtypes_lib.int32],
[tensor_shape.TensorShape([3, 4]), tensor_shape.TensorShape([1, 2])])
which = constant_op.constant(1)
s_cmp_1 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_u]).shapes
self.assertEqual([1, 1], [x.ndims for x in s_cmp_1])
self.assertEqual([None, None], [x.as_list()[0] for x in s_cmp_1])
s_cmp_2 = data_flow_ops.QueueBase.from_list(which,
[q_u_u, q_u_u, q_u_f]).shapes
self.assertEqual([1, None], [x.ndims for x in s_cmp_2])
self.assertEqual([None], s_cmp_2[0].as_list())
s_cmp_3 = data_flow_ops.QueueBase.from_list(which, [q_f_f, q_f_f]).shapes
self.assertEqual([2, 2], [x.ndims for x in s_cmp_3])
self.assertEqual([[3, 4], [1, 2]], [x.as_list() for x in s_cmp_3])
if __name__ == "__main__":
test.main()
| QueueFromListTest |
python | sdispater__pendulum | src/pendulum/tz/exceptions.py | {
"start": 218,
"end": 430
} | class ____(TimezoneError):
message = "The datetime {} does not exist."
def __init__(self, dt: datetime) -> None:
message = self.message.format(dt)
super().__init__(message)
| NonExistingTime |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 26346,
"end": 27102
} | class ____(Structure):
_fields_ = (
("init_address", p_uint32),
("init_module", p_uint32),
("reserved1", p_uint32),
("reserved2", p_uint32),
("reserved3", p_uint32),
("reserved4", p_uint32),
("reserved5", p_uint32),
("reserved6", p_uint32),
)
def describe(self):
s = {}
s["init_address"] = int(self.init_address)
s["init_module"] = int(self.init_module)
s["reserved1"] = int(self.reserved1)
s["reserved2"] = int(self.reserved2)
s["reserved3"] = int(self.reserved3)
s["reserved4"] = int(self.reserved4)
s["reserved5"] = int(self.reserved5)
s["reserved6"] = int(self.reserved6)
return s
| routines_command |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_twodim_base.py | {
"start": 15168,
"end": 16977
} | class ____(TestCase):
def test_triu_indices(self):
iu1 = triu_indices(4)
iu2 = triu_indices(4, k=2)
iu3 = triu_indices(4, m=5)
iu4 = triu_indices(4, k=2, m=5)
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
b = np.arange(1, 21).reshape(4, 5)
# Both for indexing:
assert_array_equal(a[iu1], array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
assert_array_equal(
b[iu3], array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20])
)
# And for assigning values:
a[iu1] = -1
assert_array_equal(
a,
array(
[[-1, -1, -1, -1], [5, -1, -1, -1], [9, 10, -1, -1], [13, 14, 15, -1]]
),
)
b[iu3] = -1
assert_array_equal(
b,
array(
[
[-1, -1, -1, -1, -1],
[6, -1, -1, -1, -1],
[11, 12, -1, -1, -1],
[16, 17, 18, -1, -1],
]
),
)
# These cover almost the whole array (two diagonals right of the
# main one):
a[iu2] = -10
assert_array_equal(
a,
array(
[
[-1, -1, -10, -10],
[5, -1, -1, -10],
[9, 10, -1, -1],
[13, 14, 15, -1],
]
),
)
b[iu4] = -10
assert_array_equal(
b,
array(
[
[-1, -1, -10, -10, -10],
[6, -1, -1, -10, -10],
[11, 12, -1, -1, -10],
[16, 17, 18, -1, -1],
]
),
)
| TestTriuIndices |
python | getsentry__sentry | src/sentry/replays/usecases/query/__init__.py | {
"start": 7663,
"end": 18366
} | class ____:
response: list[Any]
has_more: bool
source: str
def _has_viewed_by_filter(search_filter: QueryToken) -> bool:
if isinstance(search_filter, SearchFilter):
return search_filter.key.name in VIEWED_BY_KEYS
if isinstance(search_filter, ParenExpression):
return any([_has_viewed_by_filter(child) for child in search_filter.children])
return False # isinstance(search_filter, str) - not parseable
def query_using_optimized_search(
fields: list[str],
search_filters: Sequence[QueryToken],
environments: list[str],
sort: str | None,
pagination: Paginators,
organization: Organization | None,
project_ids: list[int],
period_start: datetime,
period_stop: datetime,
request_user_id: int | None = None,
preferred_source: PREFERRED_SOURCE = "scalar",
):
tenant_id = _make_tenant_id(organization)
# Environments is provided to us outside of the ?query= url parameter. It's stil filtered like
# the values in that parameter so let's shove it inside and process it like any other filter.
if environments:
search_filters = [
*search_filters,
SearchFilter(SearchKey("environment"), "IN", SearchValue(environments)),
]
viewed_by_denylist = options.get("replay.viewed-by.project-denylist")
if any([project_id in viewed_by_denylist for project_id in project_ids]):
# Skip all viewed by filters if in denylist
for search_filter in search_filters:
if _has_viewed_by_filter(search_filter):
raise BadRequest(message=VIEWED_BY_DENYLIST_MSG)
else:
# Translate "viewed_by_me" filters, which are aliases for "viewed_by_id"
search_filters = handle_viewed_by_me_filters(search_filters, request_user_id)
if preferred_source == "aggregated":
query, referrer, source = _query_using_aggregated_strategy(
search_filters,
sort,
project_ids,
period_start,
period_stop,
)
else:
query, referrer, source = _query_using_scalar_strategy(
search_filters,
sort,
project_ids,
period_start,
period_stop,
)
query = query.set_limit(pagination.limit)
query = query.set_offset(pagination.offset)
subquery_response = execute_query(query, tenant_id, referrer)
# The query "has more rows" if the number of rows found matches the limit (which is
# the requested limit + 1).
has_more = len(subquery_response.get("data", [])) == pagination.limit
if has_more:
subquery_response["data"].pop()
# These replay_ids are ordered by the OrderBy expression in the query above.
replay_ids = [row["replay_id"] for row in subquery_response.get("data", [])]
if not replay_ids:
return QueryResponse(
response=[],
has_more=has_more,
source=source,
)
# The final aggregation step. Here we pass the replay_ids as the only filter. In this step
# we select everything and use as much memory as we need to complete the operation.
#
# If this step runs out of memory your pagination size is about 1,000,000 rows too large.
# That's a joke. This will complete very quickly at normal pagination sizes.
results = execute_query(
make_full_aggregation_query(
fields=fields,
replay_ids=replay_ids,
project_ids=project_ids,
period_start=period_start,
period_end=period_stop,
request_user_id=request_user_id,
),
tenant_id,
referrer="replays.query.browse_query",
)["data"]
return QueryResponse(
response=_make_ordered(replay_ids, results),
has_more=has_more,
source=source,
)
def _query_using_scalar_strategy(
search_filters: Sequence[QueryToken],
sort: str | None,
project_ids: list[int],
period_start: datetime,
period_stop: datetime,
):
can_scalar_search = can_scalar_search_subquery(search_filters, period_start)
can_scalar_sort = sort_is_scalar_compatible(sort or DEFAULT_SORT_FIELD)
if not can_scalar_search or not can_scalar_sort:
return _query_using_aggregated_strategy(
search_filters,
sort,
project_ids,
period_start,
period_stop,
)
# NOTE: This query may return replay-ids which do not have a segment_id 0 row. These replays
# will be removed from the final output and could lead to pagination peculiarities. In
# practice, this is not expected to be noticable by the end-user.
#
# To fix this issue remove the ability to search against "varying" columns and apply a
# "segment_id = 0" condition to the WHERE clause.
try:
where = handle_search_filters(scalar_search_config, search_filters)
orderby = handle_ordering(
agg_sort_config,
sort or "-" + DEFAULT_SORT_FIELD,
tiebreaker="replay_id", # Ensure stable sort when ordering by column with duplicates
)
except RetryAggregated:
return _query_using_aggregated_strategy(
search_filters,
sort,
project_ids,
period_start,
period_stop,
)
query = Query(
match=Entity("replays"),
select=[Column("replay_id")],
where=[
Condition(Column("project_id"), Op.IN, project_ids),
Condition(Column("timestamp"), Op.LT, period_stop),
Condition(Column("timestamp"), Op.GTE, period_start),
*where,
],
orderby=orderby,
groupby=[Column("replay_id")],
granularity=Granularity(3600),
)
return (query, "replays.query.browse_scalar_conditions_subquery", "scalar-subquery")
def _query_using_aggregated_strategy(
search_filters: Sequence[QueryToken],
sort: str | None,
project_ids: list[int],
period_start: datetime,
period_stop: datetime,
):
orderby = handle_ordering(
agg_sort_config,
sort or "-" + DEFAULT_SORT_FIELD,
tiebreaker="replay_id", # Ensure stable sort when ordering by column with duplicates
)
having: list[Condition] = handle_search_filters(agg_search_config, search_filters)
having.append(Condition(Function("min", parameters=[Column("segment_id")]), Op.EQ, 0))
query = Query(
match=Entity("replays"),
select=[Column("replay_id")],
where=[
Condition(Column("project_id"), Op.IN, project_ids),
Condition(Column("timestamp"), Op.LT, period_stop),
Condition(Column("timestamp"), Op.GTE, period_start),
],
having=having,
orderby=orderby,
groupby=[Column("replay_id")],
granularity=Granularity(3600),
)
return (query, "replays.query.browse_aggregated_conditions_subquery", "aggregated-subquery")
def make_full_aggregation_query(
fields: list[str],
replay_ids: list[str],
project_ids: list[int],
period_start: datetime,
period_end: datetime,
request_user_id: int | None,
) -> Query:
"""Return a query to fetch every replay in the set.
Arguments:
fields -- if non-empty, used to query a subset of fields. Corresponds to the keys in QUERY_ALIAS_COLUMN_MAP.
"""
from sentry.replays.query import select_from_fields
select = select_from_fields(fields, user_id=request_user_id)
return Query(
match=Entity("replays"),
select=select,
where=[
Condition(Column("project_id"), Op.IN, project_ids),
# Replay-ids were pre-calculated so no having clause and no aggregating significant
# amounts of data.
Condition(Column("replay_id"), Op.IN, replay_ids),
# We can scan an extended time range to account for replays which span either end of
# the range. These timestamps are an optimization and could be removed with minimal
# performance impact. It's a point query. Its super fast.
Condition(Column("timestamp"), Op.GTE, period_start - timedelta(hours=1)),
Condition(Column("timestamp"), Op.LT, period_end + timedelta(hours=1)),
],
# NOTE: Refer to this note: "make_scalar_search_conditions_query".
#
# This condition ensures that every replay shown to the user is valid.
having=[Condition(Function("min", parameters=[Column("segment_id")]), Op.EQ, 0)],
groupby=[Column("replay_id")],
granularity=Granularity(3600),
)
def execute_query(query: Query, tenant_id: dict[str, int], referrer: str) -> Mapping[str, Any]:
try:
return raw_snql_query(
Request(
dataset="replays",
app_id="replay-backend-web",
query=query,
tenant_ids=tenant_id,
),
referrer,
)
except RateLimitExceeded as exc:
sentry_sdk.set_tag("replay-rate-limit-exceeded", True)
sentry_sdk.set_tag("org_id", tenant_id.get("organization_id"))
sentry_sdk.set_extra("referrer", referrer)
sentry_sdk.capture_exception(exc)
raise
def handle_ordering(
config: dict[str, Expression], sort: str, tiebreaker: str | None = None
) -> list[OrderBy]:
direction = Direction.DESC if sort.startswith("-") else Direction.ASC
bare_sort = sort[1:] if sort.startswith("-") else sort
orderby = [OrderBy(_get_sort_column(config, bare_sort), direction)]
if tiebreaker:
orderby.append(OrderBy(Column(tiebreaker), direction))
return orderby
def _get_sort_column(config: dict[str, Expression], column_name: str) -> Function:
try:
return config[column_name]
except KeyError:
raise ParseError(f"The field `{column_name}` is not a sortable field.")
def _make_tenant_id(organization: Organization | None) -> dict[str, int]:
if organization is None:
return {}
else:
return {"organization_id": organization.id}
def _make_ordered(replay_ids: list[str], results: Any) -> list[Any]:
if not replay_ids:
return []
elif not results:
return []
replay_id_to_index = {}
i = 0
for replay_id in replay_ids:
if replay_id not in replay_id_to_index:
replay_id_to_index[replay_id] = i
i += 1
ordered_results = [None] * len(replay_id_to_index)
for result in results:
index = replay_id_to_index[result["replay_id"]]
ordered_results[index] = result
return list(filter(None, ordered_results))
| QueryResponse |
python | PyCQA__pylint | tests/regrtest_data/max_inferable_limit_for_classes/main.py | {
"start": 198,
"end": 239
} | class ____(HasMemoized):
...
| Generative |
python | mlflow__mlflow | mlflow/utils/search_logged_model_utils.py | {
"start": 2076,
"end": 4245
} | class ____:
entity: Entity
op: str
value: str | float
def parse_filter_string(filter_string: str | None) -> list[Comparison]:
if not filter_string:
return []
try:
parsed = sqlparse.parse(filter_string)
except Exception as e:
raise MlflowException.invalid_parameter_value(
f"Invalid filter string: {filter_string!r}. {e!r}"
) from e
if len(parsed) != 1:
raise MlflowException.invalid_parameter_value(
f"Invalid filter string: {filter_string!r}. Expected a single SQL expression.",
)
comparisons: list[sqlalchemy.BinaryExpression] = []
for stmt in _join_in_comparison_tokens(parsed[0].tokens):
# while index < len(statements):
if isinstance(stmt, sqlparse.sql.Comparison):
non_whitespace_tokens = [str(t) for t in stmt.tokens if not t.is_whitespace]
if len(non_whitespace_tokens) != 3:
raise MlflowException.invalid_parameter_value(
f"Invalid comparison: {stmt}. Expected a comparison with 3 tokens."
)
identifier, op, value = non_whitespace_tokens
entity = Entity.from_str(identifier)
entity.validate_op(op)
value = float(value) if entity.is_numeric() else value.strip("'")
if entity.is_numeric():
value = float(value)
else:
if value.startswith("(") and value.endswith(")"):
value = ast.literal_eval(value)
value = (value,) if isinstance(value, str) else value
else:
value = value.strip("'")
comparisons.append(Comparison(entity=entity, op=op, value=value))
elif stmt.value.strip().upper() == "AND":
# Do nothing, this is just a separator
pass
else:
raise MlflowException.invalid_parameter_value(
f"Invalid filter string: {filter_string!r}. Expected a list of comparisons "
f"separated by 'AND' (e.g. 'metrics.loss > 0.1 AND params.lr = 0.01')."
)
return comparisons
| Comparison |
python | tensorflow__tensorflow | tensorflow/python/framework/c_api_util.py | {
"start": 4546,
"end": 4841
} | class ____(object):
"""An internal class to help manage the TF_Buffer lifetime."""
__slots__ = ["buffer"]
def __init__(self, buf_string):
self.buffer = c_api.TF_NewBufferFromString(compat.as_bytes(buf_string))
def __del__(self):
c_api.TF_DeleteBuffer(self.buffer)
| ScopedTFBuffer |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_tasks.py | {
"start": 3703,
"end": 4524
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_get_queue(self, mock_hook):
mock_hook.return_value.get_queue.return_value = TEST_QUEUE
operator = CloudTasksQueueGetOperator(location=LOCATION, queue_name=QUEUE_ID, task_id="id")
result = operator.execute(context=mock.MagicMock())
assert result == {"name": FULL_QUEUE_PATH, "state": 0}
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.get_queue.assert_called_once_with(
location=LOCATION,
queue_name=QUEUE_ID,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudTasksQueueGet |
python | uqfoundation__dill | dill/tests/test_classdef.py | {
"start": 825,
"end": 5458
} | class ____(type):
pass
def __call__(self):
pass
def ok(self):
return True
_mclass = _meta("_mclass", (object,), {"__call__": __call__, "ok": ok})
del __call__
del ok
o = _class()
oc = _class2()
n = _newclass()
nc = _newclass2()
m = _mclass()
if sys.hexversion < 0x03090000:
import typing
class customIntList(typing.List[int]):
pass
else:
class customIntList(list[int]):
pass
# test pickles for class instances
def test_class_instances():
assert dill.pickles(o)
assert dill.pickles(oc)
assert dill.pickles(n)
assert dill.pickles(nc)
assert dill.pickles(m)
def test_class_objects():
clslist = [_class,_class2,_newclass,_newclass2,_mclass]
objlist = [o,oc,n,nc,m]
_clslist = [dill.dumps(obj) for obj in clslist]
_objlist = [dill.dumps(obj) for obj in objlist]
for obj in clslist:
globals().pop(obj.__name__)
del clslist
for obj in ['o','oc','n','nc']:
globals().pop(obj)
del objlist
del obj
for obj,cls in zip(_objlist,_clslist):
_cls = dill.loads(cls)
_obj = dill.loads(obj)
assert _obj.ok()
assert _cls.ok(_cls())
if _cls.__name__ == "_mclass":
assert type(_cls).__name__ == "_meta"
# test NoneType
def test_specialtypes():
assert dill.pickles(type(None))
assert dill.pickles(type(NotImplemented))
assert dill.pickles(type(Ellipsis))
assert dill.pickles(type(EnumMeta))
from collections import namedtuple
Z = namedtuple("Z", ['a','b'])
Zi = Z(0,1)
X = namedtuple("Y", ['a','b'])
X.__name__ = "X"
X.__qualname__ = "X" #XXX: name must 'match' or fails to pickle
Xi = X(0,1)
Bad = namedtuple("FakeName", ['a','b'])
Badi = Bad(0,1)
Defaults = namedtuple('Defaults', ['x', 'y'], defaults=[1])
Defaultsi = Defaults(2)
# test namedtuple
def test_namedtuple():
assert Z is dill.loads(dill.dumps(Z))
assert Zi == dill.loads(dill.dumps(Zi))
assert X is dill.loads(dill.dumps(X))
assert Xi == dill.loads(dill.dumps(Xi))
assert Defaults is dill.loads(dill.dumps(Defaults))
assert Defaultsi == dill.loads(dill.dumps(Defaultsi))
assert Bad is not dill.loads(dill.dumps(Bad))
assert Bad._fields == dill.loads(dill.dumps(Bad))._fields
assert tuple(Badi) == tuple(dill.loads(dill.dumps(Badi)))
class A:
class B(namedtuple("C", ["one", "two"])):
'''docstring'''
B.__module__ = 'testing'
a = A()
assert dill.copy(a)
assert dill.copy(A.B).__name__ == 'B'
assert dill.copy(A.B).__qualname__.endswith('.<locals>.A.B')
assert dill.copy(A.B).__doc__ == 'docstring'
assert dill.copy(A.B).__module__ == 'testing'
from typing import NamedTuple
def A():
class B(NamedTuple):
x: int
return B
assert type(dill.copy(A()(8))).__qualname__ == type(A()(8)).__qualname__
def test_dtype():
try:
import numpy as np
dti = np.dtype('int')
assert np.dtype == dill.copy(np.dtype)
assert dti == dill.copy(dti)
except ImportError: pass
def test_array_nested():
try:
import numpy as np
x = np.array([1])
y = (x,)
assert y == dill.copy(y)
except ImportError: pass
def test_array_subclass():
try:
import numpy as np
class TestArray(np.ndarray):
def __new__(cls, input_array, color):
obj = np.asarray(input_array).view(cls)
obj.color = color
return obj
def __array_finalize__(self, obj):
if obj is None:
return
if isinstance(obj, type(self)):
self.color = obj.color
def __getnewargs__(self):
return np.asarray(self), self.color
a1 = TestArray(np.zeros(100), color='green')
if not dill._dill.IS_PYPY:
assert dill.pickles(a1)
assert a1.__dict__ == dill.copy(a1).__dict__
a2 = a1[0:9]
if not dill._dill.IS_PYPY:
assert dill.pickles(a2)
assert a2.__dict__ == dill.copy(a2).__dict__
class TestArray2(np.ndarray):
color = 'blue'
a3 = TestArray2([1,2,3,4,5])
a3.color = 'green'
if not dill._dill.IS_PYPY:
assert dill.pickles(a3)
assert a3.__dict__ == dill.copy(a3).__dict__
except ImportError: pass
def test_method_decorator():
class A(object):
@classmethod
def test(cls):
pass
a = A()
res = dill.dumps(a)
new_obj = dill.loads(res)
new_obj.__class__.test()
# test slots
| _meta |
python | doocs__leetcode | solution/2300-2399/2348.Number of Zero-Filled Subarrays/Solution.py | {
"start": 0,
"end": 255
} | class ____:
def zeroFilledSubarray(self, nums: List[int]) -> int:
ans = cnt = 0
for x in nums:
if x == 0:
cnt += 1
ans += cnt
else:
cnt = 0
return ans
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_button15.py | {
"start": 315,
"end": 843
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("button15.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_button("C2", {"description": "Some alternative text"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | tests/models/flava/test_modeling_flava.py | {
"start": 12279,
"end": 16424
} | class ____:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
vocab_size=102,
type_vocab_size=2,
max_position_embeddings=512,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
qkv_bias=True,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.seq_length = seq_length
self.vocab_size = vocab_size
self.type_vocab_size = type_vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.pad_token_id = pad_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask
def get_config(self):
return FlavaTextConfig(
vocab_size=self.vocab_size,
type_vocab_size=self.type_vocab_size,
max_position_embeddings=self.max_position_embeddings,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
initializer_range=self.initializer_range,
layer_norm_eps=self.layer_norm_eps,
pad_token_id=self.pad_token_id,
qkv_bias=self.qkv_bias,
)
def create_and_check_model(self, config, input_ids, token_type_ids, input_mask):
model = FlavaTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| FlavaTextModelTester |
python | fluentpython__example-code | 20-descriptor/bulkfood/bulkfood_v4b.py | {
"start": 1281,
"end": 1897
} | class ____:
__counter = 0
def __init__(self):
cls = self.__class__
prefix = cls.__name__
index = cls.__counter
self.storage_name = '_{}#{}'.format(prefix, index)
cls.__counter += 1
def __get__(self, instance, owner):
if instance is None:
return self # <1>
else:
return getattr(instance, self.storage_name) # <2>
def __set__(self, instance, value):
if value > 0:
setattr(instance, self.storage_name, value)
else:
raise ValueError('value must be > 0')
# END LINEITEM_V4B
| Quantity |
python | getsentry__sentry | src/sentry/seer/anomaly_detection/types.py | {
"start": 2601,
"end": 2756
} | class ____(TypedDict):
external_alert_id: int
timestamp: float
value: float
yhat_lower: float
yhat_upper: float
| AnomalyThresholdDataPoint |
python | celery__celery | celery/bin/logtool.py | {
"start": 666,
"end": 869
} | class ____(list):
@property
def format(self):
return '\n'.join('{}: {}'.format(*i) for i in self)
def task_info(line):
m = RE_TASK_INFO.match(line)
return m.groups()
| _task_counts |
python | spyder-ide__spyder | spyder/plugins/help/confpage.py | {
"start": 444,
"end": 2925
} | class ____(PluginConfigPage):
def setup_page(self):
# Connections group
connections_group = QGroupBox(_("Automatic connections"))
connections_label = QLabel(_("This pane can automatically "
"show an object's help information after "
"a left parenthesis is written next to it. "
"Below you can decide to which plugin "
"you want to connect it to turn on this "
"feature."))
connections_label.setWordWrap(True)
editor_box = self.create_checkbox(_("Editor"), 'connect/editor')
ipython_box = self.create_checkbox(_("IPython Console"),
'connect/ipython_console')
connections_layout = QVBoxLayout()
connections_layout.addWidget(connections_label)
connections_layout.addWidget(editor_box)
connections_layout.addWidget(ipython_box)
connections_group.setLayout(connections_layout)
# Features group
features_group = QGroupBox(_("Additional features"))
math_box = self.create_checkbox(_("Render mathematical equations"),
'math')
# ??? Do we want to increase minimum sphinx requirement for Spyder?
req_sphinx = programs.is_module_installed('sphinx', '>=1.1')
math_box.setEnabled(req_sphinx)
if not req_sphinx:
sphinx_ver = programs.get_module_version('sphinx')
sphinx_tip = _("This feature requires Sphinx 1.1 or superior.")
sphinx_tip += "\n" + _("Sphinx %s is currently installed.") % sphinx_ver
math_box.setToolTip(sphinx_tip)
features_layout = QVBoxLayout()
features_layout.addWidget(math_box)
features_group.setLayout(features_layout)
# Source code group
sourcecode_group = QGroupBox(_("Source code"))
wrap_mode_box = self.create_checkbox(_("Wrap lines"), 'wrap')
sourcecode_layout = QVBoxLayout()
sourcecode_layout.addWidget(wrap_mode_box)
sourcecode_group.setLayout(sourcecode_layout)
# Final layout
vlayout = QVBoxLayout()
vlayout.addWidget(connections_group)
vlayout.addWidget(features_group)
vlayout.addWidget(sourcecode_group)
vlayout.addStretch(1)
self.setLayout(vlayout)
| HelpConfigPage |
python | openai__openai-python | src/openai/types/audio/translation_create_params.py | {
"start": 318,
"end": 1572
} | class ____(TypedDict, total=False):
file: Required[FileTypes]
"""
The audio file object (not file name) translate, in one of these formats: flac,
mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
"""
model: Required[Union[str, AudioModel]]
"""ID of the model to use.
Only `whisper-1` (which is powered by our open source Whisper V2 model) is
currently available.
"""
prompt: str
"""An optional text to guide the model's style or continue a previous audio
segment.
The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
should be in English.
"""
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
"""
The format of the output, in one of these options: `json`, `text`, `srt`,
`verbose_json`, or `vtt`.
"""
temperature: float
"""The sampling temperature, between 0 and 1.
Higher values like 0.8 will make the output more random, while lower values like
0.2 will make it more focused and deterministic. If set to 0, the model will use
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
"""
| TranslationCreateParams |
python | jpadilla__pyjwt | jwt/exceptions.py | {
"start": 1184,
"end": 1299
} | class ____(PyJWTError):
"""Raised when the specified key is not in the proper format"""
pass
| InvalidKeyError |
python | getsentry__sentry-python | sentry_sdk/integrations/asyncio.py | {
"start": 4409,
"end": 4610
} | class ____(Integration):
identifier = "asyncio"
origin = f"auto.function.{identifier}"
@staticmethod
def setup_once():
# type: () -> None
patch_asyncio()
| AsyncioIntegration |
python | wandb__wandb | wandb/sdk/artifacts/_models/base_model.py | {
"start": 287,
"end": 761
} | class ____(JsonableModel, ABC):
# See: https://docs.pydantic.dev/latest/api/config/#pydantic.config.ConfigDict
model_config = ConfigDict(
# Most likely, some fields won't be pydantic types
arbitrary_types_allowed=True,
# Assume instances of the same class have already been validated to save time,
# but validate subclasses in case they override the default behavior.
revalidate_instances="subclass-instances",
)
| ArtifactsBase |
python | PrefectHQ__prefect | src/prefect/client/schemas/sorting.py | {
"start": 1687,
"end": 1925
} | class ____(AutoEnum):
"""Defines artifact sorting options."""
CREATED_DESC = AutoEnum.auto()
UPDATED_DESC = AutoEnum.auto()
ID_DESC = AutoEnum.auto()
KEY_DESC = AutoEnum.auto()
KEY_ASC = AutoEnum.auto()
| ArtifactSort |
python | jazzband__django-simple-history | simple_history/registry_tests/tests.py | {
"start": 6544,
"end": 6839
} | class ____(TestCase):
"""https://github.com/django-commons/django-simple-history/issues/431"""
def test_custom_attr(self):
field = ModelWithCustomAttrForeignKey.history.model._meta.get_field("poll")
self.assertEqual(field.attr_name, "custom_poll")
| TestCustomAttrForeignKey |
python | django__django | tests/delete/models.py | {
"start": 5194,
"end": 5260
} | class ____(Avatar):
class Meta:
proxy = True
| AvatarProxy |
python | python-markdown__markdown | tests/test_syntax/extensions/test_fenced_code.py | {
"start": 1362,
"end": 12255
} | class ____(TestCase):
def testBasicFence(self):
self.assertMarkdownRenders(
self.dedent(
'''
A paragraph before a fenced code block:
```
Fenced code block
```
'''
),
self.dedent(
'''
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
'''
),
extensions=['fenced_code']
)
def testNestedFence(self):
self.assertMarkdownRenders(
self.dedent(
'''
````
```
````
'''
),
self.dedent(
'''
<pre><code>
```
</code></pre>
'''
),
extensions=['fenced_code']
)
def testFencedTildes(self):
self.assertMarkdownRenders(
self.dedent(
'''
~~~
# Arbitrary code
``` # these backticks will not close the block
~~~
'''
),
self.dedent(
'''
<pre><code># Arbitrary code
``` # these backticks will not close the block
</code></pre>
'''
),
extensions=['fenced_code']
)
def testFencedLanguageNoDot(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` python
# Some python code
```
'''
),
self.dedent(
'''
<pre><code class="language-python"># Some python code
</code></pre>
'''
),
extensions=['fenced_code']
)
def testFencedLanguageWithDot(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` .python
# Some python code
```
'''
),
self.dedent(
'''
<pre><code class="language-python"># Some python code
</code></pre>
'''
),
extensions=['fenced_code']
)
def test_fenced_code_in_raw_html(self):
self.assertMarkdownRenders(
self.dedent(
"""
<details>
```
Begone placeholders!
```
</details>
"""
),
self.dedent(
"""
<details>
<pre><code>Begone placeholders!
</code></pre>
</details>
"""
),
extensions=['fenced_code']
)
def testFencedLanguageInAttr(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` {.python}
# Some python code
```
'''
),
self.dedent(
'''
<pre><code class="language-python"># Some python code
</code></pre>
'''
),
extensions=['fenced_code']
)
def testFencedMultipleClassesInAttr(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` {.python .foo .bar}
# Some python code
```
'''
),
self.dedent(
'''
<pre class="foo bar"><code class="language-python"># Some python code
</code></pre>
'''
),
extensions=['fenced_code']
)
def testFencedIdInAttr(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { #foo }
# Some python code
```
'''
),
self.dedent(
'''
<pre id="foo"><code># Some python code
</code></pre>
'''
),
extensions=['fenced_code']
)
def testFencedIdAndLangInAttr(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { .python #foo }
# Some python code
```
'''
),
self.dedent(
'''
<pre id="foo"><code class="language-python"># Some python code
</code></pre>
'''
),
extensions=['fenced_code']
)
def testFencedIdAndLangAndClassInAttr(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { .python #foo .bar }
# Some python code
```
'''
),
self.dedent(
'''
<pre id="foo" class="bar"><code class="language-python"># Some python code
</code></pre>
'''
),
extensions=['fenced_code']
)
def testFencedLanguageIdAndPygmentsDisabledInAttrNoCodehilite(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { .python #foo use_pygments=False }
# Some python code
```
'''
),
self.dedent(
'''
<pre id="foo"><code class="language-python"># Some python code
</code></pre>
'''
),
extensions=['fenced_code']
)
def testFencedLanguageIdAndPygmentsEnabledInAttrNoCodehilite(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { .python #foo use_pygments=True }
# Some python code
```
'''
),
self.dedent(
'''
<pre id="foo"><code class="language-python"># Some python code
</code></pre>
'''
),
extensions=['fenced_code']
)
def testFencedLanguageNoCodehiliteWithAttrList(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { .python foo=bar }
# Some python code
```
'''
),
self.dedent(
'''
<pre><code class="language-python" foo="bar"># Some python code
</code></pre>
'''
),
extensions=['fenced_code', 'attr_list']
)
def testFencedLanguagePygmentsDisabledInAttrNoCodehiliteWithAttrList(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { .python foo=bar use_pygments=False }
# Some python code
```
'''
),
self.dedent(
'''
<pre><code class="language-python" foo="bar"># Some python code
</code></pre>
'''
),
extensions=['fenced_code', 'attr_list']
)
def testFencedLanguagePygmentsEnabledInAttrNoCodehiliteWithAttrList(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { .python foo=bar use_pygments=True }
# Some python code
```
'''
),
self.dedent(
'''
<pre><code class="language-python"># Some python code
</code></pre>
'''
),
extensions=['fenced_code', 'attr_list']
)
def testFencedLanguageNoPrefix(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` python
# Some python code
```
'''
),
self.dedent(
'''
<pre><code class="python"># Some python code
</code></pre>
'''
),
extensions=[markdown.extensions.fenced_code.FencedCodeExtension(lang_prefix='')]
)
def testFencedLanguageAltPrefix(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` python
# Some python code
```
'''
),
self.dedent(
'''
<pre><code class="lang-python"># Some python code
</code></pre>
'''
),
extensions=[markdown.extensions.fenced_code.FencedCodeExtension(lang_prefix='lang-')]
)
def testFencedCodeEscapedAttrs(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { ."weird #"foo bar=">baz }
# Some python code
```
'''
),
self.dedent(
'''
<pre id=""foo"><code class="language-"weird" bar="">baz"># Some python code
</code></pre>
'''
),
extensions=['fenced_code', 'attr_list']
)
def testFencedCodeCurlyInAttrs(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { data-test="{}" }
# Some python code
```
'''
),
self.dedent(
'''
<pre><code data-test="{}"># Some python code
</code></pre>
'''
),
extensions=['fenced_code', 'attr_list']
)
def testFencedCodeMismatchedCurlyInAttrs(self):
self.assertMarkdownRenders(
self.dedent(
'''
``` { data-test="{}" } }
# Some python code
```
```
test
```
'''
),
self.dedent(
'''
<p>``` { data-test="{}" } }</p>
<h1>Some python code</h1>
<pre><code></code></pre>
<p>test
```</p>
'''
),
extensions=['fenced_code', 'attr_list']
)
| TestFencedCode |
python | networkx__networkx | networkx/algorithms/shortest_paths/tests/test_generic.py | {
"start": 518,
"end": 17070
} | class ____:
@classmethod
def setup_class(cls):
from networkx import convert_node_labels_to_integers as cnlti
cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted")
cls.cycle = nx.cycle_graph(7)
cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
cls.neg_weights = nx.DiGraph()
cls.neg_weights.add_edge(0, 1, weight=1)
cls.neg_weights.add_edge(0, 2, weight=3)
cls.neg_weights.add_edge(1, 3, weight=1)
cls.neg_weights.add_edge(2, 3, weight=-2)
def test_sentinel_trick_all_algorithms(self):
def reconstruct_path(pred, source, target):
path = [target]
while path[-1] != source:
path.append(pred[path[-1]])
return list(reversed(path))
# Build the test graph inline
G = nx.Graph()
G.add_edge("A", "B", weight=1)
G.add_edge("B", "C", weight=1)
G.add_edge("C", "D", weight=1)
G.add_edge("D", "E", weight=1)
source = "A"
targets = {"C", "D", "E"}
expected_target = "C" # A-B-C is closest
sentinel = "_sentinel_"
G.add_node(sentinel)
for t in targets:
G.add_edge(t, sentinel, weight=0)
# shortest_path: Dijkstra (default)
path = nx.shortest_path(G, source=source, target=sentinel, weight="weight")
assert path[-2] == expected_target
# shortest_path: Bellman-Ford
path = nx.shortest_path(
G, source=source, target=sentinel, weight="weight", method="bellman-ford"
)
assert path[-2] == expected_target
# shortest_path: Unweighted (BFS)
path = nx.shortest_path(G, source=source, target=sentinel, weight=None)
assert path[-2] == expected_target
# bidirectional_dijkstra
_, path = nx.bidirectional_dijkstra(G, source, sentinel, weight="weight")
assert path[-2] == expected_target
# goldberg_radzik
pred, _ = nx.goldberg_radzik(G, source, weight="weight")
path = reconstruct_path(pred, source, sentinel)
assert path[-2] == expected_target
# astar_path with zero heuristic
path = nx.astar_path(
G, source, sentinel, heuristic=lambda u, v: 0, weight="weight"
)
assert path[-2] == expected_target
# johnson (all-pairs shortest paths)
paths = nx.johnson(G, weight="weight")
assert paths[source][sentinel][-2] == expected_target
# floyd_warshall_predecessor_and_distance
pred, _ = nx.floyd_warshall_predecessor_and_distance(G, weight="weight")
path = reconstruct_path(pred[source], source, sentinel)
assert path[-2] == expected_target
def test_shortest_path(self):
assert nx.shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3]
assert nx.shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4]
validate_grid_path(4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12))
assert nx.shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3]
# now with weights
assert nx.shortest_path(self.cycle, 0, 3, weight="weight") == [0, 1, 2, 3]
assert nx.shortest_path(self.cycle, 0, 4, weight="weight") == [0, 6, 5, 4]
validate_grid_path(
4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12, weight="weight")
)
assert nx.shortest_path(self.directed_cycle, 0, 3, weight="weight") == [
0,
1,
2,
3,
]
# weights and method specified
assert nx.shortest_path(
self.directed_cycle, 0, 3, weight="weight", method="dijkstra"
) == [0, 1, 2, 3]
assert nx.shortest_path(
self.directed_cycle, 0, 3, weight="weight", method="bellman-ford"
) == [0, 1, 2, 3]
# when Dijkstra's will probably (depending on precise implementation)
# incorrectly return [0, 1, 3] instead
assert nx.shortest_path(
self.neg_weights, 0, 3, weight="weight", method="bellman-ford"
) == [0, 2, 3]
# confirm bad method rejection
pytest.raises(ValueError, nx.shortest_path, self.cycle, method="SPAM")
# confirm absent source rejection
pytest.raises(nx.NodeNotFound, nx.shortest_path, self.cycle, 8)
def test_shortest_path_target(self):
answer = {0: [0, 1], 1: [1], 2: [2, 1]}
sp = nx.shortest_path(nx.path_graph(3), target=1)
assert sp == answer
# with weights
sp = nx.shortest_path(nx.path_graph(3), target=1, weight="weight")
assert sp == answer
# weights and method specified
sp = nx.shortest_path(
nx.path_graph(3), target=1, weight="weight", method="dijkstra"
)
assert sp == answer
sp = nx.shortest_path(
nx.path_graph(3), target=1, weight="weight", method="bellman-ford"
)
assert sp == answer
def test_shortest_path_length(self):
assert nx.shortest_path_length(self.cycle, 0, 3) == 3
assert nx.shortest_path_length(self.grid, 1, 12) == 5
assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4
# now with weights
assert nx.shortest_path_length(self.cycle, 0, 3, weight="weight") == 3
assert nx.shortest_path_length(self.grid, 1, 12, weight="weight") == 5
assert nx.shortest_path_length(self.directed_cycle, 0, 4, weight="weight") == 4
# weights and method specified
assert (
nx.shortest_path_length(
self.cycle, 0, 3, weight="weight", method="dijkstra"
)
== 3
)
assert (
nx.shortest_path_length(
self.cycle, 0, 3, weight="weight", method="bellman-ford"
)
== 3
)
# confirm bad method rejection
pytest.raises(ValueError, nx.shortest_path_length, self.cycle, method="SPAM")
# confirm absent source rejection
pytest.raises(nx.NodeNotFound, nx.shortest_path_length, self.cycle, 8)
def test_shortest_path_length_target(self):
answer = {0: 1, 1: 0, 2: 1}
sp = nx.shortest_path_length(nx.path_graph(3), target=1)
assert sp == answer
# with weights
sp = nx.shortest_path_length(nx.path_graph(3), target=1, weight="weight")
assert sp == answer
# weights and method specified
sp = nx.shortest_path_length(
nx.path_graph(3), target=1, weight="weight", method="dijkstra"
)
assert sp == answer
sp = nx.shortest_path_length(
nx.path_graph(3), target=1, weight="weight", method="bellman-ford"
)
assert sp == answer
def test_single_source_shortest_path(self):
p = nx.shortest_path(self.cycle, 0)
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_shortest_path(self.cycle, 0)
p = nx.shortest_path(self.grid, 1)
validate_grid_path(4, 4, 1, 12, p[12])
# now with weights
p = nx.shortest_path(self.cycle, 0, weight="weight")
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_dijkstra_path(self.cycle, 0)
p = nx.shortest_path(self.grid, 1, weight="weight")
validate_grid_path(4, 4, 1, 12, p[12])
# weights and method specified
p = nx.shortest_path(self.cycle, 0, method="dijkstra", weight="weight")
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_shortest_path(self.cycle, 0)
p = nx.shortest_path(self.cycle, 0, method="bellman-ford", weight="weight")
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_shortest_path(self.cycle, 0)
def test_single_source_shortest_path_length(self):
ans = nx.shortest_path_length(self.cycle, 0)
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == nx.single_source_shortest_path_length(self.cycle, 0)
ans = nx.shortest_path_length(self.grid, 1)
assert ans[16] == 6
# now with weights
ans = nx.shortest_path_length(self.cycle, 0, weight="weight")
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == nx.single_source_dijkstra_path_length(self.cycle, 0)
ans = nx.shortest_path_length(self.grid, 1, weight="weight")
assert ans[16] == 6
# weights and method specified
ans = dict(
nx.shortest_path_length(self.cycle, 0, weight="weight", method="dijkstra")
)
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == nx.single_source_dijkstra_path_length(self.cycle, 0)
ans = dict(
nx.shortest_path_length(
self.cycle, 0, weight="weight", method="bellman-ford"
)
)
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == nx.single_source_bellman_ford_path_length(self.cycle, 0)
def test_single_source_all_shortest_paths(self):
cycle_ans = {0: [[0]], 1: [[0, 1]], 2: [[0, 1, 2], [0, 3, 2]], 3: [[0, 3]]}
ans = dict(nx.single_source_all_shortest_paths(nx.cycle_graph(4), 0))
assert sorted(ans[2]) == cycle_ans[2]
ans = dict(nx.single_source_all_shortest_paths(self.grid, 1))
grid_ans = [
[1, 2, 3, 7, 11],
[1, 2, 6, 7, 11],
[1, 2, 6, 10, 11],
[1, 5, 6, 7, 11],
[1, 5, 6, 10, 11],
[1, 5, 9, 10, 11],
]
assert sorted(ans[11]) == grid_ans
ans = dict(
nx.single_source_all_shortest_paths(nx.cycle_graph(4), 0, weight="weight")
)
assert sorted(ans[2]) == cycle_ans[2]
ans = dict(
nx.single_source_all_shortest_paths(
nx.cycle_graph(4), 0, method="bellman-ford", weight="weight"
)
)
assert sorted(ans[2]) == cycle_ans[2]
ans = dict(nx.single_source_all_shortest_paths(self.grid, 1, weight="weight"))
assert sorted(ans[11]) == grid_ans
ans = dict(
nx.single_source_all_shortest_paths(
self.grid, 1, method="bellman-ford", weight="weight"
)
)
assert sorted(ans[11]) == grid_ans
G = nx.cycle_graph(4)
G.add_node(4)
ans = dict(nx.single_source_all_shortest_paths(G, 0))
assert sorted(ans[2]) == [[0, 1, 2], [0, 3, 2]]
ans = dict(nx.single_source_all_shortest_paths(G, 4))
assert sorted(ans[4]) == [[4]]
def test_all_pairs_shortest_path(self):
# shortest_path w/o source and target returns a generator instead of
# a dict beginning in version 3.5. Only the first call needed changed here.
p = dict(nx.shortest_path(self.cycle))
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_shortest_path(self.cycle))
p = dict(nx.shortest_path(self.grid))
validate_grid_path(4, 4, 1, 12, p[1][12])
# now with weights
p = dict(nx.shortest_path(self.cycle, weight="weight"))
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_dijkstra_path(self.cycle))
p = dict(nx.shortest_path(self.grid, weight="weight"))
validate_grid_path(4, 4, 1, 12, p[1][12])
# weights and method specified
p = dict(nx.shortest_path(self.cycle, weight="weight", method="dijkstra"))
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_dijkstra_path(self.cycle))
p = dict(nx.shortest_path(self.cycle, weight="weight", method="bellman-ford"))
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_bellman_ford_path(self.cycle))
def test_all_pairs_shortest_path_length(self):
ans = dict(nx.shortest_path_length(self.cycle))
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_shortest_path_length(self.cycle))
ans = dict(nx.shortest_path_length(self.grid))
assert ans[1][16] == 6
# now with weights
ans = dict(nx.shortest_path_length(self.cycle, weight="weight"))
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle))
ans = dict(nx.shortest_path_length(self.grid, weight="weight"))
assert ans[1][16] == 6
# weights and method specified
ans = dict(
nx.shortest_path_length(self.cycle, weight="weight", method="dijkstra")
)
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle))
ans = dict(
nx.shortest_path_length(self.cycle, weight="weight", method="bellman-ford")
)
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_bellman_ford_path_length(self.cycle))
def test_all_pairs_all_shortest_paths(self):
ans = dict(nx.all_pairs_all_shortest_paths(nx.cycle_graph(4)))
assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]]
ans = dict(nx.all_pairs_all_shortest_paths(nx.cycle_graph(4)), weight="weight")
assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]]
ans = dict(
nx.all_pairs_all_shortest_paths(nx.cycle_graph(4)),
method="bellman-ford",
weight="weight",
)
assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]]
G = nx.cycle_graph(4)
G.add_node(4)
ans = dict(nx.all_pairs_all_shortest_paths(G))
assert sorted(ans[4][4]) == [[4]]
def test_has_path(self):
G = nx.Graph()
nx.add_path(G, range(3))
nx.add_path(G, range(3, 5))
assert nx.has_path(G, 0, 2)
assert not nx.has_path(G, 0, 4)
def test_has_path_singleton(self):
G = nx.empty_graph(1)
assert nx.has_path(G, 0, 0)
def test_all_shortest_paths(self):
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(nx.all_shortest_paths(G, 0, 3))
# with weights
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(
nx.all_shortest_paths(G, 0, 3, weight="weight")
)
# weights and method specified
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(
nx.all_shortest_paths(G, 0, 3, weight="weight", method="dijkstra")
)
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(
nx.all_shortest_paths(G, 0, 3, weight="weight", method="bellman-ford")
)
def test_all_shortest_paths_raise(self):
with pytest.raises(nx.NetworkXNoPath):
G = nx.path_graph(4)
G.add_node(4)
list(nx.all_shortest_paths(G, 0, 4))
def test_bad_method(self):
with pytest.raises(ValueError):
G = nx.path_graph(2)
list(nx.all_shortest_paths(G, 0, 1, weight="weight", method="SPAM"))
def test_single_source_all_shortest_paths_bad_method(self):
with pytest.raises(ValueError):
G = nx.path_graph(2)
dict(
nx.single_source_all_shortest_paths(
G, 0, weight="weight", method="SPAM"
)
)
def test_all_shortest_paths_zero_weight_edge(self):
g = nx.Graph()
nx.add_path(g, [0, 1, 3])
nx.add_path(g, [0, 1, 2, 3])
g.edges[1, 2]["weight"] = 0
paths30d = list(
nx.all_shortest_paths(g, 3, 0, weight="weight", method="dijkstra")
)
paths03d = list(
nx.all_shortest_paths(g, 0, 3, weight="weight", method="dijkstra")
)
paths30b = list(
nx.all_shortest_paths(g, 3, 0, weight="weight", method="bellman-ford")
)
paths03b = list(
nx.all_shortest_paths(g, 0, 3, weight="weight", method="bellman-ford")
)
assert sorted(paths03d) == sorted(p[::-1] for p in paths30d)
assert sorted(paths03d) == sorted(p[::-1] for p in paths30b)
assert sorted(paths03b) == sorted(p[::-1] for p in paths30b)
| TestGenericPath |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dlp.py | {
"start": 2480,
"end": 5731
} | class ____(GoogleCloudBaseOperator):
"""
Starts asynchronous cancellation on a long-running DlpJob.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPCancelDLPJobOperator`
:param dlp_job_id: ID of the DLP job resource to be cancelled.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default project_id
from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dlp_job_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobDetailsLink(),)
def __init__(
self,
*,
dlp_job_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.dlp_job_id = dlp_job_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.cancel_dlp_job(
dlp_job_id=self.dlp_job_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobDetailsLink.persist(
context=context,
project_id=project_id,
job_name=self.dlp_job_id,
)
| CloudDLPCancelDLPJobOperator |
python | kubernetes-client__python | kubernetes/client/models/v1_owner_reference.py | {
"start": 383,
"end": 9564
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'block_owner_deletion': 'bool',
'controller': 'bool',
'kind': 'str',
'name': 'str',
'uid': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'block_owner_deletion': 'blockOwnerDeletion',
'controller': 'controller',
'kind': 'kind',
'name': 'name',
'uid': 'uid'
}
def __init__(self, api_version=None, block_owner_deletion=None, controller=None, kind=None, name=None, uid=None, local_vars_configuration=None): # noqa: E501
"""V1OwnerReference - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._block_owner_deletion = None
self._controller = None
self._kind = None
self._name = None
self._uid = None
self.discriminator = None
self.api_version = api_version
if block_owner_deletion is not None:
self.block_owner_deletion = block_owner_deletion
if controller is not None:
self.controller = controller
self.kind = kind
self.name = name
self.uid = uid
@property
def api_version(self):
"""Gets the api_version of this V1OwnerReference. # noqa: E501
API version of the referent. # noqa: E501
:return: The api_version of this V1OwnerReference. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1OwnerReference.
API version of the referent. # noqa: E501
:param api_version: The api_version of this V1OwnerReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and api_version is None: # noqa: E501
raise ValueError("Invalid value for `api_version`, must not be `None`") # noqa: E501
self._api_version = api_version
@property
def block_owner_deletion(self):
"""Gets the block_owner_deletion of this V1OwnerReference. # noqa: E501
If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. # noqa: E501
:return: The block_owner_deletion of this V1OwnerReference. # noqa: E501
:rtype: bool
"""
return self._block_owner_deletion
@block_owner_deletion.setter
def block_owner_deletion(self, block_owner_deletion):
"""Sets the block_owner_deletion of this V1OwnerReference.
If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. # noqa: E501
:param block_owner_deletion: The block_owner_deletion of this V1OwnerReference. # noqa: E501
:type: bool
"""
self._block_owner_deletion = block_owner_deletion
@property
def controller(self):
"""Gets the controller of this V1OwnerReference. # noqa: E501
If true, this reference points to the managing controller. # noqa: E501
:return: The controller of this V1OwnerReference. # noqa: E501
:rtype: bool
"""
return self._controller
@controller.setter
def controller(self, controller):
"""Sets the controller of this V1OwnerReference.
If true, this reference points to the managing controller. # noqa: E501
:param controller: The controller of this V1OwnerReference. # noqa: E501
:type: bool
"""
self._controller = controller
@property
def kind(self):
"""Gets the kind of this V1OwnerReference. # noqa: E501
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1OwnerReference. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1OwnerReference.
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1OwnerReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kind is None: # noqa: E501
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
self._kind = kind
@property
def name(self):
"""Gets the name of this V1OwnerReference. # noqa: E501
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names # noqa: E501
:return: The name of this V1OwnerReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1OwnerReference.
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names # noqa: E501
:param name: The name of this V1OwnerReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def uid(self):
"""Gets the uid of this V1OwnerReference. # noqa: E501
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids # noqa: E501
:return: The uid of this V1OwnerReference. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1OwnerReference.
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids # noqa: E501
:param uid: The uid of this V1OwnerReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and uid is None: # noqa: E501
raise ValueError("Invalid value for `uid`, must not be `None`") # noqa: E501
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1OwnerReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1OwnerReference):
return True
return self.to_dict() != other.to_dict()
| V1OwnerReference |
python | Pylons__pyramid | src/pyramid/scripts/proutes.py | {
"start": 6861,
"end": 12764
} | class ____:
description = """\
Print all URL dispatch routes used by a Pyramid application in the
order in which they are evaluated. Each route includes the name of the
route, the pattern of the route, and the view callable which will be
invoked when the route is matched.
This command accepts one positional argument named 'config_uri'. It
specifies the PasteDeploy config file to use for the interactive
shell. The format is 'inifile#name'. If the name is left off, 'main'
will be assumed. Example: 'proutes myapp.ini'.
"""
script_name = 'proutes'
bootstrap = staticmethod(bootstrap) # testing
get_config_loader = staticmethod(get_config_loader) # testing
stdout = sys.stdout
parser = argparse.ArgumentParser(
description=textwrap.dedent(description),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-g',
'--glob',
action='store',
dest='glob',
default='',
help='Display routes matching glob pattern',
)
parser.add_argument(
'-f',
'--format',
action='store',
dest='format',
default='',
help=(
'Choose which columns to display, this will '
'override the format key in the [proutes] ini '
'section'
),
)
parser.add_argument(
'config_uri',
nargs='?',
default=None,
help='The URI to the configuration file.',
)
parser.add_argument(
'config_vars',
nargs='*',
default=(),
help=(
"Variables required by the config file. For example, "
"`http_port=%%(http_port)s` would expect `http_port=8080` to be "
"passed here."
),
)
def __init__(self, argv, quiet=False):
self.args = self.parser.parse_args(argv[1:])
self.quiet = quiet
self.available_formats = ['name', 'pattern', 'view', 'method']
self.column_format = self.available_formats
def validate_formats(self, formats):
invalid_formats = []
for fmt in formats:
if fmt not in self.available_formats:
invalid_formats.append(fmt)
msg = 'You provided invalid formats %s. Available formats are %s'
if invalid_formats:
msg = msg % (invalid_formats, self.available_formats)
self.out(msg)
return False
return True
def proutes_file_config(self, loader, global_conf=None):
settings = loader.get_settings('proutes', global_conf)
format = settings.get('format')
if format:
cols = re.split(r'[,|\s\n]+', format)
self.column_format = [x.strip() for x in cols]
def out(self, msg): # pragma: no cover
if not self.quiet:
print(msg)
def _get_mapper(self, registry):
from pyramid.config import Configurator
config = Configurator(registry=registry)
return config.get_routes_mapper()
def run(self, quiet=False):
if not self.args.config_uri:
self.out('requires a config file argument')
return 2
config_uri = self.args.config_uri
config_vars = parse_vars(self.args.config_vars)
config_vars.setdefault('__script__', self.script_name)
loader = self.get_config_loader(config_uri)
loader.setup_logging(config_vars)
self.proutes_file_config(loader, config_vars)
env = self.bootstrap(config_uri, options=config_vars)
registry = env['registry']
mapper = self._get_mapper(registry)
if self.args.format:
columns = self.args.format.split(',')
self.column_format = [x.strip() for x in columns]
is_valid = self.validate_formats(self.column_format)
if is_valid is False:
return 2
if mapper is None:
return 0
max_name = len('Name')
max_pattern = len('Pattern')
max_view = len('View')
max_method = len('Method')
routes = mapper.get_routes(include_static=True)
if len(routes) == 0:
return 0
mapped_routes = [
{
'name': 'Name',
'pattern': 'Pattern',
'view': 'View',
'method': 'Method',
},
{
'name': '----',
'pattern': '-------',
'view': '----',
'method': '------',
},
]
for route in routes:
route_data = get_route_data(route, registry)
for name, pattern, view, method in route_data:
if self.args.glob:
match = fnmatch.fnmatch(
name, self.args.glob
) or fnmatch.fnmatch(pattern, self.args.glob)
if not match:
continue
if len(name) > max_name:
max_name = len(name)
if len(pattern) > max_pattern:
max_pattern = len(pattern)
if len(view) > max_view:
max_view = len(view)
if len(method) > max_method:
max_method = len(method)
mapped_routes.append(
{
'name': name,
'pattern': pattern,
'view': view,
'method': method,
}
)
fmt = _get_print_format(
self.column_format, max_name, max_pattern, max_view, max_method
)
for route in mapped_routes:
self.out(fmt.format(**route))
return 0
if __name__ == '__main__': # pragma: no cover
sys.exit(main() or 0)
| PRoutesCommand |
python | ansible__ansible | test/integration/targets/ssh_agent/action_plugins/ssh_keygen.py | {
"start": 689,
"end": 2316
} | class ____(ActionBase):
def run(self, tmp=None, task_vars=None):
results = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
match self._task.args.get('type'):
case 'ed25519':
private_key = Ed25519PrivateKey.generate()
case 'rsa':
private_key = rsa_generate_private_key(65537, 4096)
case 'dsa':
private_key = dsa_generate_private_key(1024)
case 'ecdsa':
private_key = ecdsa_generate_private_key(SECP384R1())
case _:
return {'failed': True, 'msg': 'not implemented'}
public_key = private_key.public_key()
public_key_msg = PublicKeyMsg.from_public_key(public_key)
if not (passphrase := self._task.args.get('passphrase')):
encryption_algorithm = serialization.NoEncryption()
else:
encryption_algorithm = serialization.BestAvailableEncryption(
to_bytes(passphrase)
)
return {
'changed': True,
'private_key': to_text(private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.OpenSSH,
encryption_algorithm=encryption_algorithm,
)),
'public_key': to_text(public_key.public_bytes(
encoding=serialization.Encoding.OpenSSH,
format=serialization.PublicFormat.OpenSSH,
)),
'fingerprint': f'SHA256:{public_key_msg.fingerprint}',
}
| ActionModule |
python | huggingface__transformers | src/transformers/models/videomae/video_processing_videomae.py | {
"start": 800,
"end": 1597
} | class ____(BaseVideoProcessor):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
rescale_factor = 1 / 255
do_normalize = True
do_convert_rgb = True
do_sample_frames = False # Set to False for backward compatibility with image processor workflows.
model_input_names = ["pixel_values"]
def preprocess(self, videos, **kwargs):
batch = super().preprocess(videos, **kwargs)
batch["pixel_values"] = batch.pop("pixel_values_videos")
return batch
__all__ = ["VideoMAEVideoProcessor"]
| VideoMAEVideoProcessor |
python | pypa__pip | src/pip/_vendor/pkg_resources/__init__.py | {
"start": 36739,
"end": 37357
} | class ____(Dict["Requirement", Tuple[str, ...]]):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req: Requirement, extras: tuple[str, ...] | None = None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
| _ReqExtras |
python | ray-project__ray | rllib/utils/error.py | {
"start": 462,
"end": 5245
} | class ____(Exception):
"""Error if we encounter objects that can't be serialized by ray."""
pass
# -------
# Error messages
# -------
# Message explaining there are no GPUs available for the
# num_gpus=n or num_gpus_per_env_runner=m settings.
ERR_MSG_NO_GPUS = """Found {} GPUs on your machine (GPU devices found: {})! If your
machine does not have any GPUs, you should set the config keys
`num_gpus_per_learner` and `num_gpus_per_env_runner` to 0. They may be set to
1 by default for your particular RL algorithm."""
ERR_MSG_INVALID_ENV_DESCRIPTOR = """The env string you provided ('{}') is:
a) Not a supported or an installed environment.
b) Not a tune-registered environment creator.
c) Not a valid env class string.
Try one of the following:
a) For Atari support: `pip install gymnasium[atari]` and prefix the environment name with `ale_py:`, for example, `"ale_py:ALE/Pong-v5"`.
b) To register your custom env, do `from ray import tune; tune.register_env('[name]', lambda cfg: [return env obj from here using cfg])`.
Then in your config, do `config.environment(env='[name]').
c) Make sure you provide a fully qualified classpath, e.g.:
`ray.rllib.examples.envs.classes.repeat_after_me_env.RepeatAfterMeEnv`
"""
ERR_MSG_OLD_GYM_API = """Your environment ({}) does not abide to the new gymnasium-style API!
From Ray 2.3 on, RLlib only supports the new (gym>=0.26 or gymnasium) Env APIs.
{}
Learn more about the most important changes here:
https://github.com/openai/gym and here: https://github.com/Farama-Foundation/Gymnasium
In order to fix this problem, do the following:
1) Run `pip install gymnasium` on your command line.
2) Change all your import statements in your code from
`import gym` -> `import gymnasium as gym` OR
`from gym.spaces import Discrete` -> `from gymnasium.spaces import Discrete`
For your custom (single agent) gym.Env classes:
3.1) Either wrap your old Env class via the provided `from gymnasium.wrappers import
EnvCompatibility` wrapper class.
3.2) Alternatively to 3.1:
- Change your `reset()` method to have the call signature 'def reset(self, *,
seed=None, options=None)'
- Return an additional info dict (empty dict should be fine) from your `reset()`
method.
- Return an additional `truncated` flag from your `step()` method (between `done` and
`info`). This flag should indicate, whether the episode was terminated prematurely
due to some time constraint or other kind of horizon setting.
For your custom RLlib `MultiAgentEnv` classes:
4.1) Either wrap your old MultiAgentEnv via the provided
`from ray.rllib.env.wrappers.multi_agent_env_compatibility import
MultiAgentEnvCompatibility` wrapper class.
4.2) Alternatively to 4.1:
- Change your `reset()` method to have the call signature
'def reset(self, *, seed=None, options=None)'
- Return an additional per-agent info dict (empty dict should be fine) from your
`reset()` method.
- Rename `dones` into `terminateds` and only set this to True, if the episode is really
done (as opposed to has been terminated prematurely due to some horizon/time-limit
setting).
- Return an additional `truncateds` per-agent dictionary flag from your `step()`
method, including the `__all__` key (100% analogous to your `dones/terminateds`
per-agent dict).
Return this new `truncateds` dict between `dones/terminateds` and `infos`. This
flag should indicate, whether the episode (for some agent or all agents) was
terminated prematurely due to some time constraint or other kind of horizon setting.
""" # noqa
ERR_MSG_TF_POLICY_CANNOT_SAVE_KERAS_MODEL = """Could not save keras model under self[TfPolicy].model.base_model!
This is either due to ..
a) .. this Policy's ModelV2 not having any `base_model` (tf.keras.Model) property
b) .. the ModelV2's `base_model` not being used by the Algorithm and thus its
variables not being properly initialized.
""" # noqa
ERR_MSG_TORCH_POLICY_CANNOT_SAVE_MODEL = """Could not save torch model under self[TorchPolicy].model!
This is most likely due to the fact that you are using an Algorithm that
uses a Catalog-generated TorchModelV2 subclass, which is torch.save() cannot pickle.
""" # noqa
# -------
# HOWTO_ strings can be added to any error/warning/into message
# to eplain to the user, how to actually fix the encountered problem.
# -------
# HOWTO change the RLlib config, depending on how user runs the job.
HOWTO_CHANGE_CONFIG = """
To change the config for `tune.Tuner().fit()` in a script: Modify the python dict
passed to `tune.Tuner(param_space=[...]).fit()`.
To change the config for an RLlib Algorithm instance: Modify the python dict
passed to the Algorithm's constructor, e.g. `PPO(config=[...])`.
"""
| NotSerializable |
python | tensorflow__tensorflow | tensorflow/python/util/tf_export_test.py | {
"start": 957,
"end": 7336
} | class ____(test.TestCase):
"""Tests for tf_export class."""
class MockModule(object):
def __init__(self, name):
self.__name__ = name
def setUp(self):
super().setUp()
self._modules = []
def _test_function(unused_arg=0):
pass
def _test_function2(unused_arg=0):
pass
class TestClassA(object):
pass
class TestClassB(TestClassA):
pass
self._test_function = _test_function
self._test_function2 = _test_function2
self._test_class_a = TestClassA
self._test_class_b = TestClassB
def tearDown(self):
super().tearDown()
for name in self._modules:
del sys.modules[name]
self._modules = []
def _CreateMockModule(self, name):
mock_module = self.MockModule(name)
sys.modules[name] = mock_module
self._modules.append(name)
return mock_module
def testExportSingleFunction(self):
export_decorator = tf_export.tf_export('nameA', 'nameB')
decorated_function = export_decorator(self._test_function)
self.assertEqual(decorated_function, self._test_function)
self.assertEqual(('nameA', 'nameB'), decorated_function._tf_api_names)
self.assertEqual(['nameA', 'nameB'],
tf_export.get_v1_names(decorated_function))
self.assertEqual(['nameA', 'nameB'],
tf_export.get_v2_names(decorated_function))
self.assertEqual(
tf_export.get_symbol_from_name('nameA'), decorated_function)
self.assertEqual(
tf_export.get_symbol_from_name('nameB'), decorated_function)
self.assertEqual(
tf_export.get_symbol_from_name(
tf_export.get_canonical_name_for_symbol(decorated_function)),
decorated_function)
def testExportSingleFunctionV1Only(self):
export_decorator = tf_export.tf_export(v1=['nameA', 'nameB'])
decorated_function = export_decorator(self._test_function)
self.assertEqual(decorated_function, self._test_function)
self.assertAllEqual(('nameA', 'nameB'), decorated_function._tf_api_names_v1)
self.assertAllEqual(['nameA', 'nameB'],
tf_export.get_v1_names(decorated_function))
self.assertEqual([], tf_export.get_v2_names(decorated_function))
self.assertEqual(
tf_export.get_symbol_from_name('compat.v1.nameA'), decorated_function)
self.assertEqual(
tf_export.get_symbol_from_name('compat.v1.nameB'), decorated_function)
self.assertEqual(
tf_export.get_symbol_from_name(
tf_export.get_canonical_name_for_symbol(
decorated_function, add_prefix_to_v1_names=True)),
decorated_function)
def testExportMultipleFunctions(self):
export_decorator1 = tf_export.tf_export('nameA', 'nameB')
export_decorator2 = tf_export.tf_export('nameC', 'nameD')
decorated_function1 = export_decorator1(self._test_function)
decorated_function2 = export_decorator2(self._test_function2)
self.assertEqual(decorated_function1, self._test_function)
self.assertEqual(decorated_function2, self._test_function2)
self.assertEqual(('nameA', 'nameB'), decorated_function1._tf_api_names)
self.assertEqual(('nameC', 'nameD'), decorated_function2._tf_api_names)
self.assertEqual(
tf_export.get_symbol_from_name('nameB'), decorated_function1)
self.assertEqual(
tf_export.get_symbol_from_name('nameD'), decorated_function2)
self.assertEqual(
tf_export.get_symbol_from_name(
tf_export.get_canonical_name_for_symbol(decorated_function1)),
decorated_function1)
self.assertEqual(
tf_export.get_symbol_from_name(
tf_export.get_canonical_name_for_symbol(decorated_function2)),
decorated_function2)
def testExportClasses(self):
export_decorator_a = tf_export.tf_export('TestClassA1')
export_decorator_a(self._test_class_a)
self.assertEqual(('TestClassA1',), self._test_class_a._tf_api_names)
self.assertNotIn('_tf_api_names', self._test_class_b.__dict__)
export_decorator_b = tf_export.tf_export('TestClassB1')
export_decorator_b(self._test_class_b)
self.assertEqual(('TestClassA1',), self._test_class_a._tf_api_names)
self.assertEqual(('TestClassB1',), self._test_class_b._tf_api_names)
self.assertEqual(
['TestClassA1'], tf_export.get_v1_names(self._test_class_a)
)
self.assertEqual(
['TestClassB1'], tf_export.get_v1_names(self._test_class_b)
)
def testExportSingleConstant(self):
module1 = self._CreateMockModule('module1')
export_decorator = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator.export_constant('module1', 'test_constant')
self.assertEqual([(('NAME_A', 'NAME_B'), 'test_constant')],
module1._tf_api_constants)
self.assertEqual([(('NAME_A', 'NAME_B'), 'test_constant')],
tf_export.get_v1_constants(module1))
self.assertEqual([(('NAME_A', 'NAME_B'), 'test_constant')],
tf_export.get_v2_constants(module1))
def testExportMultipleConstants(self):
module1 = self._CreateMockModule('module1')
module2 = self._CreateMockModule('module2')
test_constant1 = 123
test_constant2 = 'abc'
test_constant3 = 0.5
export_decorator1 = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator2 = tf_export.tf_export('NAME_C', 'NAME_D')
export_decorator3 = tf_export.tf_export('NAME_E', 'NAME_F')
export_decorator1.export_constant('module1', test_constant1)
export_decorator2.export_constant('module2', test_constant2)
export_decorator3.export_constant('module2', test_constant3)
self.assertEqual([(('NAME_A', 'NAME_B'), 123)], module1._tf_api_constants)
self.assertEqual([(('NAME_C', 'NAME_D'), 'abc'),
(('NAME_E', 'NAME_F'), 0.5)], module2._tf_api_constants)
def testMultipleDecorators(self):
def get_wrapper(func):
def wrapper(*unused_args, **unused_kwargs):
pass
return tf_decorator.make_decorator(func, wrapper)
decorated_function = get_wrapper(self._test_function)
export_decorator = tf_export.tf_export('nameA', 'nameB')
exported_function = export_decorator(decorated_function)
self.assertEqual(decorated_function, exported_function)
self.assertEqual(('nameA', 'nameB'), self._test_function._tf_api_names)
if __name__ == '__main__':
test.main()
| ValidateExportTest |
python | huggingface__transformers | tests/models/jetmoe/test_modeling_jetmoe.py | {
"start": 3116,
"end": 3768
} | class ____(CausalLMModelTest, unittest.TestCase):
test_mismatched_shapes = False
test_cpu_offload = False
test_disk_offload_bin = False
test_disk_offload_safetensors = False
model_tester_class = JetMoeModelTester
@require_flash_attn
@require_torch_accelerator
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference_equivalence_right_padding(self):
self.skipTest(reason="JetMoe flash attention does not support right padding")
@unittest.skip(reason="JetMoe has no separate base model without a head.")
def test_model_base_model_prefix(self):
pass
@require_torch
| JetMoeModelTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 861244,
"end": 867459
} | class ____(VegaLiteSchema):
"""
PointSelectionConfig schema wrapper.
Parameters
----------
type : Literal['point']
Determines the default event processing and data query for the selection. Vega-Lite
currently supports two selection types:
* ``"point"`` -- to select multiple discrete data values; the first value is
selected on ``click`` and additional values toggled on shift-click.
* ``"interval"`` -- to select a continuous range of data values on ``drag``.
clear : str, bool, dict, :class:`Stream`, :class:`EventStream`, :class:`MergedStream`, :class:`DerivedStream`
Clears the selection, emptying it of all values. This property can be a `Event
Stream <https://vega.github.io/vega/docs/event-streams/>`__ or ``false`` to disable
clear.
**Default value:** ``dblclick``.
**See also:** `clear examples
<https://vega.github.io/vega-lite/docs/selection.html#clear>`__ in the
documentation.
encodings : Sequence[:class:`SingleDefUnitChannel`, Literal['text', 'shape', 'x', 'y', 'xOffset', 'yOffset', 'x2', 'y2', 'longitude', 'latitude', 'longitude2', 'latitude2', 'theta', 'theta2', 'radius', 'radius2', 'time', 'color', 'fill', 'stroke', 'opacity', 'fillOpacity', 'strokeOpacity', 'strokeWidth', 'strokeDash', 'size', 'angle', 'key', 'href', 'url', 'description']]
An array of encoding channels. The corresponding data field values must match for a
data tuple to fall within the selection.
**See also:** The `projection with encodings and fields section
<https://vega.github.io/vega-lite/docs/selection.html#project>`__ in the
documentation.
fields : Sequence[str, :class:`FieldName`]
An array of field names whose values must match for a data tuple to fall within the
selection.
**See also:** The `projection with encodings and fields section
<https://vega.github.io/vega-lite/docs/selection.html#project>`__ in the
documentation.
nearest : bool
When true, an invisible voronoi diagram is computed to accelerate discrete
selection. The data value *nearest* the mouse cursor is added to the selection.
**Default value:** ``false``, which means that data values must be interacted with
directly (e.g., clicked on) to be added to the selection.
**See also:** `nearest examples
<https://vega.github.io/vega-lite/docs/selection.html#nearest>`__ documentation.
on : str, dict, :class:`Stream`, :class:`EventStream`, :class:`MergedStream`, :class:`DerivedStream`
A `Vega event stream <https://vega.github.io/vega/docs/event-streams/>`__ (object or
selector) that triggers the selection. For interval selections, the event stream
must specify a `start and end
<https://vega.github.io/vega/docs/event-streams/#between-filters>`__.
**See also:** `on examples
<https://vega.github.io/vega-lite/docs/selection.html#on>`__ in the documentation.
resolve : :class:`SelectionResolution`, Literal['global', 'union', 'intersect']
With layered and multi-view displays, a strategy that determines how selections'
data queries are resolved when applied in a filter transform, conditional encoding
rule, or scale domain.
One of:
* ``"global"`` -- only one brush exists for the entire SPLOM. When the user begins
to drag, any previous brushes are cleared, and a new one is constructed.
* ``"union"`` -- each cell contains its own brush, and points are highlighted if
they lie within *any* of these individual brushes.
* ``"intersect"`` -- each cell contains its own brush, and points are highlighted
only if they fall within *all* of these individual brushes.
**Default value:** ``global``.
**See also:** `resolve examples
<https://vega.github.io/vega-lite/docs/selection.html#resolve>`__ in the
documentation.
toggle : str, bool
Controls whether data values should be toggled (inserted or removed from a point
selection) or only ever inserted into point selections.
One of:
* ``true`` -- the default behavior, which corresponds to ``"event.shiftKey"``. As a
result, data values are toggled when the user interacts with the shift-key
pressed.
* ``false`` -- disables toggling behaviour; the selection will only ever contain a
single data value corresponding to the most recent interaction.
* A `Vega expression <https://vega.github.io/vega/docs/expressions/>`__ which is
re-evaluated as the user interacts. If the expression evaluates to ``true``, the
data value is toggled into or out of the point selection. If the expression
evaluates to ``false``, the point selection is first cleared, and the data value
is then inserted. For example, setting the value to the Vega expression ``"true"``
will toggle data values without the user pressing the shift-key.
**Default value:** ``true``
**See also:** `toggle examples
<https://vega.github.io/vega-lite/docs/selection.html#toggle>`__ in the
documentation.
"""
_schema = {"$ref": "#/definitions/PointSelectionConfig"}
def __init__(
self,
type: Optional[Literal["point"]] = Undefined,
clear: Optional[str | bool | SchemaBase | Map] = Undefined,
encodings: Optional[Sequence[SchemaBase | SingleDefUnitChannel_T]] = Undefined,
fields: Optional[Sequence[str | SchemaBase]] = Undefined,
nearest: Optional[bool] = Undefined,
on: Optional[str | SchemaBase | Map] = Undefined,
resolve: Optional[SchemaBase | SelectionResolution_T] = Undefined,
toggle: Optional[str | bool] = Undefined,
**kwds,
):
super().__init__(
type=type,
clear=clear,
encodings=encodings,
fields=fields,
nearest=nearest,
on=on,
resolve=resolve,
toggle=toggle,
**kwds,
)
| PointSelectionConfig |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 17145,
"end": 17192
} | class ____(GeomOutputGeoFunc):
pass
| MakeValid |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_regions.py | {
"start": 488,
"end": 4940
} | class ____(APITestCase):
endpoint = "sentry-api-0-user-regions"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
@override_regions(region_config)
def test_get(self) -> None:
self.login_as(user=self.user)
self.create_organization(region="us", owner=self.user)
self.create_organization(region="de", owner=self.user)
self.create_organization(region="acme", owner=self.user)
response = self.get_response("me")
assert response.status_code == 200
assert "regions" in response.data
assert response.data["regions"] == [
st.api_serialize(),
de.api_serialize(),
us.api_serialize(),
]
@override_regions(region_config)
def test_get_only_memberships(self) -> None:
self.login_as(user=self.user)
other = self.create_user()
self.create_organization(region="acme", owner=other)
self.create_organization(region="de", owner=self.user)
response = self.get_response("me")
assert response.status_code == 200
assert "regions" in response.data
assert response.data["regions"] == [de.api_serialize()]
@override_regions(region_config)
def test_get_other_user_error(self) -> None:
self.login_as(user=self.user)
other = self.create_user()
self.create_organization(region="acme", owner=other)
response = self.get_response(other.id)
assert response.status_code == 403
@override_regions(region_config)
def test_allow_superuser_to_query_all(self) -> None:
superuser = self.create_user(is_superuser=True)
self.login_as(user=superuser, superuser=True)
test_user_1 = self.create_user()
self.create_organization(region="us", owner=test_user_1)
self.create_organization(region="de", owner=test_user_1)
self.create_organization(region="acme", owner=test_user_1)
test_user_2 = self.create_user()
response = self.get_response(test_user_1.id)
assert response.status_code == 200
assert "regions" in response.data
assert response.data["regions"] == [
st.api_serialize(),
de.api_serialize(),
us.api_serialize(),
]
response = self.get_response(test_user_2.id)
assert response.status_code == 200
assert "regions" in response.data
assert response.data["regions"] == []
@override_regions(region_config)
def test_get_for_user_with_auth_token(self) -> None:
self.create_organization(region="us", owner=self.user)
self.create_organization(region="de", owner=self.user)
auth_token = self.create_user_auth_token(user=self.user, scope_list=["org:read"])
response = self.get_success_response(
"me", extra_headers={"HTTP_AUTHORIZATION": f"Bearer {auth_token.token}"}
)
assert "regions" in response.data
assert response.data["regions"] == [de.api_serialize(), us.api_serialize()]
@override_regions(region_config)
def test_get_other_user_with_auth_token_error(self) -> None:
other_user = self.create_user()
self.create_organization(region="us", owner=other_user)
self.create_organization(region="de", owner=other_user)
auth_token = self.create_user_auth_token(user=self.user, scope_list=["org:read"])
self.get_error_response(
other_user.id,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {auth_token.token}"},
status_code=403,
)
@override_regions(region_config)
def test_get_for_user_with_wrong_scopes_error(self) -> None:
self.create_organization(region="us", owner=self.user)
self.create_organization(region="de", owner=self.user)
auth_token = self.create_user_auth_token(user=self.user, scope_list=["project:read"])
self.get_error_response(
"me",
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {auth_token.token}"},
status_code=403,
)
@override_regions(region_config)
def test_get_for_user_with_no_auth(self) -> None:
self.create_organization(region="us", owner=self.user)
self.create_organization(region="de", owner=self.user)
self.get_error_response("me", status_code=401)
self.get_error_response(self.user.id, status_code=401)
| UserUserRolesTest |
python | jazzband__prettytable | tests/test_colortable.py | {
"start": 1474,
"end": 2169
} | class ____:
def test_basic(self) -> None:
assert Theme.format_code("31") == "\x1b[31m"
def test_prefix(self) -> None:
assert Theme.format_code("\x1b[35m") == "\x1b[35m"
def test_escapes(self) -> None:
assert Theme.format_code("\033[41m") == "\x1b[41m"
assert Theme.format_code("\u001b[41m") == "\x1b[41m"
def test_empty(self) -> None:
assert Theme.format_code("") == ""
def test_stripped(self) -> None:
assert Theme.format_code("\t\t \t") == ""
def test_multiple(self) -> None:
assert Theme.format_code("30;42") == "\x1b[30;42m"
assert Theme.format_code("\x1b[30;42m") == "\x1b[30;42m"
| TestFormatCode |
python | weaviate__weaviate-python-client | weaviate/backup/backup.py | {
"start": 1083,
"end": 1459
} | class ____(BaseModel):
CPUPercentage: Optional[int] = Field(default=None, alias="cpu_percentage")
def _to_dict(self) -> Dict[str, Any]:
ret = cast(dict, self.model_dump(exclude_none=True))
for key, val in ret.items():
if isinstance(val, _BackupLocationConfig):
ret[key] = val._to_dict()
return ret
| _BackupConfigBase |
python | pandas-dev__pandas | asv_bench/benchmarks/io/sql.py | {
"start": 1533,
"end": 3177
} | class ____:
params = (
["sqlalchemy", "sqlite"],
[
"float",
"float_with_nan",
"string",
"bool",
"int",
"date",
"time",
"datetime",
],
)
param_names = ["connection", "dtype"]
def setup(self, connection, dtype):
N = 10000
con = {
"sqlalchemy": create_engine("sqlite:///:memory:"),
"sqlite": sqlite3.connect(":memory:"),
}
self.table_name = "test_type"
self.query_col = f"SELECT {dtype} FROM {self.table_name}"
self.con = con[connection]
self.df = DataFrame(
{
"float": np.random.randn(N),
"float_with_nan": np.random.randn(N),
"string": ["foo"] * N,
"bool": [True] * N,
"int": np.random.randint(0, N, size=N),
"datetime": date_range("2000-01-01", periods=N, freq="s"),
},
index=Index([f"i-{i}" for i in range(N)], dtype=object),
)
self.df.iloc[1000:3000, 1] = np.nan
self.df["date"] = self.df["datetime"].dt.date
self.df["time"] = self.df["datetime"].dt.time
self.df["datetime_string"] = self.df["datetime"].astype(str)
self.df.to_sql(self.table_name, self.con, if_exists="replace")
def time_to_sql_dataframe_column(self, connection, dtype):
self.df[[dtype]].to_sql("test1", self.con, if_exists="replace")
def time_read_sql_query_select_column(self, connection, dtype):
read_sql_query(self.query_col, self.con)
| WriteSQLDtypes |
python | pypa__warehouse | warehouse/packaging/models.py | {
"start": 4774,
"end": 19337
} | class ____(SitemapMixin, HasEvents, HasObservations, db.Model):
__tablename__ = "projects"
__repr__ = make_repr("name")
# TODO: Cannot update columns that are used in triggers.
name: Mapped[str] = mapped_column(Text)
normalized_name: Mapped[str] = mapped_column(
unique=True,
server_default=FetchedValue(),
server_onupdate=FetchedValue(),
)
created: Mapped[datetime_now | None] = mapped_column(
index=True,
)
has_docs: Mapped[bool | None]
upload_limit: Mapped[int | None]
total_size_limit: Mapped[int | None] = mapped_column(BigInteger)
last_serial: Mapped[int] = mapped_column(server_default=sql.text("0"))
total_size: Mapped[int | None] = mapped_column(
BigInteger, server_default=sql.text("0")
)
lifecycle_status: Mapped[LifecycleStatus | None] = mapped_column(
comment="Lifecycle status can change project visibility and access"
)
lifecycle_status_changed: Mapped[datetime_now | None] = mapped_column(
onupdate=func.now(),
comment="When the lifecycle status was last changed",
)
lifecycle_status_note: Mapped[str | None] = mapped_column(
comment="Note about the lifecycle status"
)
oidc_publishers: Mapped[list[OIDCPublisher]] = orm.relationship(
secondary="oidc_publisher_project_association",
back_populates="projects",
passive_deletes=True,
)
organization: Mapped[Organization] = orm.relationship(
secondary=OrganizationProject.__table__,
back_populates="projects",
uselist=False,
viewonly=True,
)
roles: Mapped[list[Role]] = orm.relationship(
back_populates="project",
passive_deletes=True,
)
invitations: Mapped[list[RoleInvitation]] = orm.relationship(
back_populates="project",
passive_deletes=True,
)
team: Mapped[Team] = orm.relationship(
secondary=TeamProjectRole.__table__,
back_populates="projects",
viewonly=True,
)
team_project_roles: Mapped[list[TeamProjectRole]] = orm.relationship(
back_populates="project",
passive_deletes=True,
)
users: Mapped[list[User]] = orm.relationship(
secondary=Role.__table__, back_populates="projects", viewonly=True
)
releases: Mapped[list[Release]] = orm.relationship(
cascade="all, delete-orphan",
order_by=lambda: Release._pypi_ordering.desc(),
passive_deletes=True,
)
alternate_repositories: Mapped[list[AlternateRepository]] = orm.relationship(
cascade="all, delete-orphan",
back_populates="project",
passive_deletes=True,
)
__table_args__ = (
CheckConstraint(
"name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
name="projects_valid_name",
),
CheckConstraint(
"upload_limit <= 1073741824", # 1.0 GiB == 1073741824 bytes
name="projects_upload_limit_max_value",
),
Index(
"project_name_ultranormalized",
func.ultranormalize_name(name),
),
Index("projects_lifecycle_status_idx", "lifecycle_status"),
)
def __getitem__(self, version):
session = orm_session_from_obj(self)
canonical_version = packaging.utils.canonicalize_version(version)
try:
return (
session.query(Release)
.filter(
Release.project == self,
Release.canonical_version == canonical_version,
)
.one()
)
except MultipleResultsFound:
# There are multiple releases of this project which have the same
# canonical version that were uploaded before we checked for
# canonical version equivalence, so return the exact match instead
try:
return (
session.query(Release)
.filter(Release.project == self, Release.version == version)
.one()
)
except NoResultFound:
# There are multiple releases of this project which have the
# same canonical version, but none that have the exact version
# specified, so just 404
raise KeyError from None
except NoResultFound:
raise KeyError from None
def __acl__(self):
session = orm_session_from_obj(self)
acls = [
# TODO: Similar to `warehouse.accounts.models.User.__acl__`, we express the
# permissions here in terms of the permissions that the user has on
# the project. This is more complex, as add ACL Entries based on other
# criteria, such as the user's role in the project.
(
Allow,
"group:admins",
(
Permissions.AdminDashboardSidebarRead,
Permissions.AdminObservationsRead,
Permissions.AdminObservationsWrite,
Permissions.AdminProhibitedProjectsWrite,
Permissions.AdminProhibitedUsernameWrite,
Permissions.AdminProjectsDelete,
Permissions.AdminProjectsRead,
Permissions.AdminProjectsSetLimit,
Permissions.AdminProjectsWrite,
Permissions.AdminRoleAdd,
Permissions.AdminRoleDelete,
),
),
(
Allow,
"group:moderators",
(
Permissions.AdminDashboardSidebarRead,
Permissions.AdminObservationsRead,
Permissions.AdminObservationsWrite,
Permissions.AdminProjectsRead,
Permissions.AdminProjectsSetLimit,
Permissions.AdminRoleAdd,
Permissions.AdminRoleDelete,
),
),
(Allow, "group:observers", Permissions.APIObservationsAdd),
(Allow, Authenticated, Permissions.SubmitMalwareObservation),
]
if self.lifecycle_status not in [
LifecycleStatus.Archived,
LifecycleStatus.ArchivedNoindex,
]:
# The project has zero or more OIDC publishers registered to it,
# each of which serves as an identity with the ability to upload releases
# (only if the project is not archived)
for publisher in self.oidc_publishers:
acls.append(
(Allow, f"oidc:{publisher.id}", [Permissions.ProjectsUpload])
)
# Get all of the users for this project.
user_query = (
session.query(Role)
.filter(Role.project == self)
.options(orm.lazyload(Role.project), orm.lazyload(Role.user))
)
permissions = {
(role.user_id, "Administer" if role.role_name == "Owner" else "Upload")
for role in user_query.all()
}
# Add all of the team members for this project.
team_query = (
session.query(TeamProjectRole)
.filter(TeamProjectRole.project == self)
.options(
orm.lazyload(TeamProjectRole.project),
orm.lazyload(TeamProjectRole.team),
)
)
for role in team_query.all():
permissions |= {
(user.id, "Administer" if role.role_name.value == "Owner" else "Upload")
for user in role.team.members
}
# Add all organization owners for this project.
if self.organization:
org_query = (
session.query(OrganizationRole)
.filter(
OrganizationRole.organization == self.organization,
OrganizationRole.role_name == OrganizationRoleType.Owner,
)
.options(
orm.lazyload(OrganizationRole.organization),
orm.lazyload(OrganizationRole.user),
)
)
permissions |= {(role.user_id, "Administer") for role in org_query.all()}
for user_id, permission_name in sorted(permissions, key=lambda x: (x[1], x[0])):
# Disallow Write permissions for Projects in quarantine, allow Upload
if self.lifecycle_status == LifecycleStatus.QuarantineEnter:
current_permissions = [
Permissions.ProjectsRead,
Permissions.ProjectsUpload,
]
elif permission_name == "Administer":
current_permissions = [
Permissions.ProjectsRead,
Permissions.ProjectsUpload,
Permissions.ProjectsWrite,
]
else:
current_permissions = [Permissions.ProjectsUpload]
if self.lifecycle_status in [
LifecycleStatus.Archived,
LifecycleStatus.ArchivedNoindex,
]:
# Disallow upload permissions for archived projects
current_permissions.remove(Permissions.ProjectsUpload)
if current_permissions:
acls.append((Allow, f"user:{user_id}", current_permissions))
return acls
@property
def documentation_url(self):
# TODO: Move this into the database and eliminate the use of the
# threadlocal here.
request = get_current_request()
# If the project doesn't have docs, then we'll just return a None here.
if not self.has_docs:
return
return request.route_url("legacy.docs", project=self.name)
@property
def owners(self):
"""Return all users who are owners of the project."""
session = orm_session_from_obj(self)
owner_roles = (
session.query(User.id)
.join(Role.user)
.filter(Role.role_name == "Owner", Role.project == self)
.subquery()
)
return session.query(User).join(owner_roles, User.id == owner_roles.c.id).all()
@property
def maintainers(self):
"""Return all users who are maintainers of the project."""
session = orm_session_from_obj(self)
maintainer_roles = (
session.query(User.id)
.join(Role.user)
.filter(Role.role_name == "Maintainer", Role.project == self)
.subquery()
)
return (
session.query(User)
.join(maintainer_roles, User.id == maintainer_roles.c.id)
.all()
)
@property
def all_versions(self):
session = orm_session_from_obj(self)
return (
session.query(
Release.version,
Release.created,
Release.is_prerelease,
Release.yanked,
Release.yanked_reason,
)
.filter(Release.project == self)
.order_by(Release._pypi_ordering.desc())
.all()
)
@property
def latest_version(self):
session = orm_session_from_obj(self)
return (
session.query(
Release.version, Release.created, Release.is_prerelease, Release.summary
)
.filter(Release.project == self, Release.yanked.is_(False))
.order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())
.first()
)
@property
def active_releases(self):
return (
orm_session_from_obj(self)
.query(Release)
.filter(Release.project == self, Release.yanked.is_(False))
.order_by(Release._pypi_ordering.desc())
.all()
)
@property
def yanked_releases(self):
return (
orm_session_from_obj(self)
.query(Release)
.filter(Release.project == self, Release.yanked.is_(True))
.order_by(Release._pypi_ordering.desc())
.all()
)
@property
def project_status(self) -> ProjectStatusMarker:
"""
Return the PEP 792 project status marker that's equivalent
to this project's lifecycle status.
"""
if self.lifecycle_status == LifecycleStatus.QuarantineEnter:
return ProjectStatusMarker.Quarantined
elif self.lifecycle_status in (
LifecycleStatus.Archived,
LifecycleStatus.ArchivedNoindex,
):
return ProjectStatusMarker.Archived
# PyPI doesn't yet have a deprecated lifecycle status
# and "quarantine-exit" means a return to active.
return ProjectStatusMarker.Active
@property
def upload_limit_size(self) -> int:
"""
Return the effective file size upload limit for this project.
Uses the most generous (highest) limit from:
- System default (MAX_FILESIZE)
- Project-specific limit (if set)
- Organization limit (if project belongs to org and org has limit)
This allows organizations and projects to have higher limits than
the system default, with users benefiting from the most generous
limit available to them.
"""
limits_to_check = [MAX_FILESIZE, self.upload_limit]
if self.organization:
limits_to_check.append(self.organization.upload_limit)
valid_limits = [limit for limit in limits_to_check if limit is not None]
return max(valid_limits)
@property
def total_size_limit_value(self) -> int:
"""
Return the effective total size limit for this project.
Uses the most generous (highest) limit from:
- System default (MAX_PROJECT_SIZE)
- Project-specific limit (if set)
- Organization limit (if project belongs to org and org has limit)
This allows organizations and projects to have higher limits than
the system default, with users benefiting from the most generous
limit available to them.
"""
limits_to_check = [MAX_PROJECT_SIZE, self.total_size_limit]
if self.organization:
limits_to_check.append(self.organization.total_size_limit)
valid_limits = [limit for limit in limits_to_check if limit is not None]
return max(valid_limits)
| Project |
python | fluentpython__example-code | 13-op-overloading/vector_py3_5.py | {
"start": 6903,
"end": 10423
} | class ____:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components)
def __iter__(self):
return iter(self._components)
def __repr__(self):
components = reprlib.repr(self._components)
components = components[components.find('['):-1]
return 'Vector({})'.format(components)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(self._components))
def __eq__(self, other):
if isinstance(other, Vector):
return (len(self) == len(other) and
all(a == b for a, b in zip(self, other)))
else:
return NotImplemented
def __hash__(self):
hashes = (hash(x) for x in self)
return functools.reduce(operator.xor, hashes, 0)
def __abs__(self):
return math.sqrt(sum(x * x for x in self))
def __bool__(self):
return bool(abs(self))
def __len__(self):
return len(self._components)
def __getitem__(self, index):
cls = type(self)
if isinstance(index, slice):
return cls(self._components[index])
elif isinstance(index, int):
return self._components[index]
else:
msg = '{.__name__} indices must be integers'
raise TypeError(msg.format(cls))
shortcut_names = 'xyzt'
def __getattr__(self, name):
cls = type(self)
if len(name) == 1:
pos = cls.shortcut_names.find(name)
if 0 <= pos < len(self._components):
return self._components[pos]
msg = '{.__name__!r} object has no attribute {!r}'
raise AttributeError(msg.format(cls, name))
def angle(self, n):
r = math.sqrt(sum(x * x for x in self[n:]))
a = math.atan2(r, self[n-1])
if (n == len(self) - 1) and (self[-1] < 0):
return math.pi * 2 - a
else:
return a
def angles(self):
return (self.angle(n) for n in range(1, len(self)))
def __format__(self, fmt_spec=''):
if fmt_spec.endswith('h'): # hyperspherical coordinates
fmt_spec = fmt_spec[:-1]
coords = itertools.chain([abs(self)],
self.angles())
outer_fmt = '<{}>'
else:
coords = self
outer_fmt = '({})'
components = (format(c, fmt_spec) for c in coords)
return outer_fmt.format(', '.join(components))
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(memv)
def __add__(self, other):
try:
pairs = itertools.zip_longest(self, other, fillvalue=0.0)
return Vector(a + b for a, b in pairs)
except TypeError:
return NotImplemented
def __radd__(self, other):
return self + other
def __mul__(self, scalar):
if isinstance(scalar, numbers.Real):
return Vector(n * scalar for n in self)
else:
return NotImplemented
def __rmul__(self, scalar):
return self * scalar
def __matmul__(self, other):
try:
return sum(a * b for a, b in zip(self, other))
except TypeError:
return NotImplemented
def __rmatmul__(self, other):
return self @ other # this only works in Python 3.5
| Vector |
python | scipy__scipy | benchmarks/benchmarks/sparse_linalg_expm.py | {
"start": 1049,
"end": 1499
} | class ____(Benchmark):
def setup(self):
self.n = 2000
self.i = 100
self.j = 200
nnz_per_row = 25
self.A = random_sparse_csr(self.n, self.n, nnz_per_row)
def time_expm_multiply(self):
# computing only column', j, 'of expm of the sparse matrix
v = np.zeros(self.n, dtype=float)
v[self.j] = 1
A_expm_col_j = expm_multiply(self.A, v)
A_expm_col_j[self.i]
| ExpmMultiply |
python | huggingface__transformers | src/transformers/models/mt5/modeling_mt5.py | {
"start": 58442,
"end": 65844
} | class ____(MT5PreTrainedModel):
_keys_to_ignore_on_load_unexpected = ["decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight"]
# Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.__init__ with T5->MT5
def __init__(self, config: MT5Config):
super().__init__(config)
self.transformer = MT5Model(config)
self.classification_head = MT5ClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
# Copied from transformers.models.t5.modeling_t5.T5ForSequenceClassification.forward with T5->MT5, t5->mt5
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, Seq2SeqSequenceClassifierOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. MT5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for detail.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look a [MT5 Training](./mt5#training).
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
MT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
To know more on how to prepare `decoder_input_ids` for pretraining take a look at [MT5
Training](./mt5#training).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
# Copied from models.bart.modeling_bart.BartModel.forward different to other models, MT5 automatically creates
# decoder_input_ids from input_ids if no decoder_input_ids are provided
if decoder_input_ids is None and decoder_inputs_embeds is None:
if input_ids is None:
raise ValueError(
"If no `decoder_input_ids` or `decoder_inputs_embeds` are "
"passed, `input_ids` cannot be `None`. Please pass either "
"`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
)
decoder_input_ids = self._shift_right(input_ids)
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
eos_mask = input_ids.eq(self.config.eos_token_id).to(sequence_output.device)
if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
batch_size, _, hidden_size = sequence_output.shape
sentence_representation = sequence_output[eos_mask, :].view(batch_size, -1, hidden_size)[:, -1, :]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.config.num_labels == 1:
self.config.problem_type = "regression"
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.config.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqSequenceClassifierOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@auto_docstring
| MT5ForSequenceClassification |
python | fluentpython__example-code-2e | 12-seq-hacking/vector_v1.py | {
"start": 1752,
"end": 2703
} | class ____:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components) # <1>
def __iter__(self):
return iter(self._components) # <2>
def __repr__(self):
components = reprlib.repr(self._components) # <3>
components = components[components.find('['):-1] # <4>
return f'Vector({components})'
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(self._components)) # <5>
def __eq__(self, other):
return tuple(self) == tuple(other)
def __abs__(self):
return math.hypot(*self) # <6>
def __bool__(self):
return bool(abs(self))
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(memv) # <7>
# end::VECTOR_V1[]
| Vector |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-motherduck/destination_motherduck/destination.py | {
"start": 2586,
"end": 4453
} | class ____:
"""Normalizer that preserves Unicode characters while following LowerCaseNormalizer behavior for ASCII."""
def normalize(self, name: str) -> str:
"""
Normalize name while preserving Unicode characters.
Behavior:
- Converts ASCII letters to lowercase
- Replaces whitespace with underscores
- Preserves Unicode letters and numbers
- Adds underscore prefix if name starts with ASCII digit
- Replaces other special characters with underscores
"""
if not name:
raise exc.AirbyteNameNormalizationError(
"Name cannot be empty after normalization.",
raw_name=name,
normalization_result="",
)
import unicodedata
# Convert to lowercase (handles both ASCII and Unicode)
result = name.lower()
# Replace whitespace with underscores
result = re.sub(r"\s+", "_", result)
# Replace special characters (non-letters, non-digits, non-underscores) with underscores
# But preserve Unicode letters and digits using Unicode-aware regex
result = re.sub(r"[^\w]", "_", result, flags=re.UNICODE)
# Collapse multiple consecutive underscores
result = re.sub(r"_+", "_", result)
# Remove leading/trailing underscores
result = result.strip("_")
# Add underscore prefix if starts with ASCII digit (following LowerCaseNormalizer behavior)
if result and result[0].isdigit():
result = "_" + result
# Final validation
if not result:
raise exc.AirbyteNameNormalizationError(
"Name cannot be empty after normalization.",
raw_name=name,
normalization_result=result,
)
return result
| UnicodeAwareNormalizer |
python | scrapy__scrapy | tests/test_spider.py | {
"start": 5690,
"end": 7694
} | class ____(TestSpider):
spider_class = XMLFeedSpider
def test_register_namespace(self):
body = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns:x="http://www.google.com/schemas/sitemap/0.84"
xmlns:y="http://www.example.com/schemas/extras/1.0">
<url><x:loc>http://www.example.com/Special-Offers.html</x:loc><y:updated>2009-08-16</y:updated>
<other value="bar" y:custom="fuu"/>
</url>
<url><loc>http://www.example.com/</loc><y:updated>2009-08-16</y:updated><other value="foo"/></url>
</urlset>"""
response = XmlResponse(url="http://example.com/sitemap.xml", body=body)
class _XMLSpider(self.spider_class):
itertag = "url"
namespaces = (
("a", "http://www.google.com/schemas/sitemap/0.84"),
("b", "http://www.example.com/schemas/extras/1.0"),
)
def parse_node(self, response, selector):
yield {
"loc": selector.xpath("a:loc/text()").getall(),
"updated": selector.xpath("b:updated/text()").getall(),
"other": selector.xpath("other/@value").getall(),
"custom": selector.xpath("other/@b:custom").getall(),
}
for iterator in ("iternodes", "xml"):
spider = _XMLSpider("example", iterator=iterator)
output = list(spider._parse(response))
assert len(output) == 2, iterator
assert output == [
{
"loc": ["http://www.example.com/Special-Offers.html"],
"updated": ["2009-08-16"],
"custom": ["fuu"],
"other": ["bar"],
},
{
"loc": [],
"updated": ["2009-08-16"],
"other": ["foo"],
"custom": [],
},
], iterator
| TestXMLFeedSpider |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 319237,
"end": 319573
} | class ____:
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1 - 2j, 1 + 2j])
with warnings.catch_warnings():
warnings.simplefilter("error", ComplexWarning)
assert_raises(ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
| TestWarnings |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/operator_resources.py | {
"start": 2969,
"end": 3160
} | class ____(Resource):
"""Represents a disk requirement in an execution environment for an operator."""
def __init__(self, qty):
super().__init__("Disk", "MB", qty)
| DiskResource |
python | wandb__wandb | wandb/sdk/artifacts/_generated/rename_registry.py | {
"start": 373,
"end": 565
} | class ____(GQLResult):
inserted: Optional[bool]
project: Optional[RegistryFragment]
RenameRegistry.model_rebuild()
RenameRegistryRenameProject.model_rebuild()
| RenameRegistryRenameProject |
python | ansible__ansible | lib/ansible/module_utils/facts/other/facter.py | {
"start": 337,
"end": 2742
} | class ____(BaseFactCollector):
name = 'facter'
_fact_ids = set(['facter']) # type: t.Set[str]
def __init__(self, collectors=None, namespace=None):
namespace = PrefixFactNamespace(namespace_name='facter',
prefix='facter_')
super(FacterFactCollector, self).__init__(collectors=collectors,
namespace=namespace)
def find_facter(self, module):
facter_path = module.get_bin_path(
'facter',
opt_dirs=['/opt/puppetlabs/bin']
)
cfacter_path = module.get_bin_path(
'cfacter',
opt_dirs=['/opt/puppetlabs/bin']
)
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
return facter_path
def run_facter(self, module, facter_path):
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
rc, out, err = module.run_command(facter_path + " --puppet --json")
# for some versions of facter, --puppet returns an error if puppet is not present,
# try again w/o it, other errors should still appear and be sent back
if rc != 0:
rc, out, err = module.run_command(facter_path + " --json")
return rc, out, err
def get_facter_output(self, module):
facter_path = self.find_facter(module)
if not facter_path:
return None
rc, out, err = self.run_facter(module, facter_path)
if rc != 0:
return None
return out
def collect(self, module=None, collected_facts=None):
# Note that this mirrors previous facter behavior, where there isn't
# a 'ansible_facter' key in the main fact dict, but instead, 'facter_whatever'
# items are added to the main dict.
facter_dict = {}
if not module:
return facter_dict
facter_output = self.get_facter_output(module)
# TODO: if we fail, should we add a empty facter key or nothing?
if facter_output is None:
return facter_dict
try:
facter_dict = json.loads(facter_output)
except Exception:
module.warn("Failed to parse facter facts")
return facter_dict
| FacterFactCollector |
python | kamyu104__LeetCode-Solutions | Python/find-the-median-of-the-uniqueness-array.py | {
"start": 100,
"end": 1080
} | class ____(object):
def medianOfUniquenessArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def binary_search(left, right, check):
while left <= right:
mid = left + (right-left)//2
if check(mid):
right = mid-1
else:
left = mid+1
return left
def check(k):
result = 0
cnt = collections.Counter()
left = 0
for right in xrange(len(nums)):
cnt[nums[right]] += 1
while len(cnt) == k+1:
cnt[nums[left]] -= 1
if cnt[nums[left]] == 0:
del cnt[nums[left]]
left += 1
result += right-left+1
return result >= total-result
total = (len(nums)+1)*len(nums)//2
return binary_search(1, len(set(nums)), check)
| Solution |
python | xlwings__xlwings | xlwings/pro/_xlremote.py | {
"start": 28624,
"end": 30847
} | class ____(Collection, base_classes.Pictures):
_attr = "pictures"
_wrap = Picture
def append_json_action(self, **kwargs):
self.parent.book.append_json_action(
**{
**kwargs,
**{
"sheet_position": self.parent.index - 1,
},
}
)
def add(
self,
filename,
link_to_file=None,
save_with_document=None,
left=None,
top=None,
width=None,
height=None,
anchor=None,
):
if self.parent.book.api["client"] == "Google Apps Script" and (left or top):
raise ValueError(
"'left' and 'top' are not supported with Google Sheets. "
"Use 'anchor' instead."
)
if anchor is None:
column_index = 0
row_index = 0
else:
column_index = anchor.column - 1
row_index = anchor.row - 1
# Google Sheets allows a max size of 1 million pixels. For matplotlib, you
# can control the pixels like so: fig = plt.figure(figsize=(6, 4), dpi=200)
# This sample has (6 * 200) * (4 * 200) = 960,000 px
# Note that savefig(bbox_inches="tight") crops the image and therefore will
# reduce the number of pixels in a non-deterministic way. Existing figure
# size can be checked via fig.get_size_inches(). pandas accepts figsize also:
# ax = df.plot(figsize=(3,3))
# fig = ax.get_figure()
with open(filename, "rb") as image_file:
encoded_image_string = base64.b64encode(image_file.read()).decode("utf-8")
# TODO: width and height are currently ignored but can be set via obj properties
self.append_json_action(
func="addPicture",
args=[
encoded_image_string,
column_index,
row_index,
left if left else 0,
top if top else 0,
],
)
self.parent._api["pictures"].append(
{"name": "Image", "width": None, "height": None}
)
return Picture(self.parent, len(self.parent.api["pictures"]))
| Pictures |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.