language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_tasks.py | {
"start": 2793,
"end": 3703
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.tasks.CloudTasksHook")
def test_update_queue(self, mock_hook):
mock_hook.return_value.update_queue.return_value = TEST_QUEUE
operator = CloudTasksQueueUpdateOperator(task_queue=Queue(name=FULL_QUEUE_PATH), task_id="id")
result = operator.execute(context=mock.MagicMock())
assert result == {"name": FULL_QUEUE_PATH, "state": 0}
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.update_queue.assert_called_once_with(
task_queue=Queue(name=FULL_QUEUE_PATH),
project_id=None,
location=None,
queue_name=None,
update_mask=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudTasksQueueUpdate |
python | wandb__wandb | wandb/vendor/pygments/lexers/theorem.py | {
"start": 15815,
"end": 18983
} | class ____(RegexLexer):
"""
For the `Lean <https://github.com/leanprover/lean>`_
theorem prover.
.. versionadded:: 2.0
"""
name = 'Lean'
aliases = ['lean']
filenames = ['*.lean']
mimetypes = ['text/x-lean']
flags = re.MULTILINE | re.UNICODE
keywords1 = (
'import', 'abbreviation', 'opaque_hint', 'tactic_hint', 'definition',
'renaming', 'inline', 'hiding', 'exposing', 'parameter', 'parameters',
'conjecture', 'hypothesis', 'lemma', 'corollary', 'variable', 'variables',
'theorem', 'axiom', 'inductive', 'structure', 'universe', 'alias',
'help', 'options', 'precedence', 'postfix', 'prefix', 'calc_trans',
'calc_subst', 'calc_refl', 'infix', 'infixl', 'infixr', 'notation', 'eval',
'check', 'exit', 'coercion', 'end', 'private', 'using', 'namespace',
'including', 'instance', 'section', 'context', 'protected', 'expose',
'export', 'set_option', 'add_rewrite', 'extends', 'open', 'example',
'constant', 'constants', 'print', 'opaque', 'reducible', 'irreducible',
)
keywords2 = (
'forall', 'fun', 'Pi', 'obtain', 'from', 'have', 'show', 'assume',
'take', 'let', 'if', 'else', 'then', 'by', 'in', 'with', 'begin',
'proof', 'qed', 'calc', 'match',
)
keywords3 = (
# Sorts
'Type', 'Prop',
)
operators = (
u'!=', u'#', u'&', u'&&', u'*', u'+', u'-', u'/', u'@', u'!', u'`',
u'-.', u'->', u'.', u'..', u'...', u'::', u':>', u';', u';;', u'<',
u'<-', u'=', u'==', u'>', u'_', u'|', u'||', u'~', u'=>', u'<=', u'>=',
u'/\\', u'\\/', u'∀', u'Π', u'λ', u'↔', u'∧', u'∨', u'≠', u'≤', u'≥',
u'¬', u'⁻¹', u'⬝', u'▸', u'→', u'∃', u'ℕ', u'ℤ', u'≈', u'×', u'⌞',
u'⌟', u'≡', u'⟨', u'⟩',
)
punctuation = (u'(', u')', u':', u'{', u'}', u'[', u']', u'⦃', u'⦄',
u':=', u',')
tokens = {
'root': [
(r'\s+', Text),
(r'/-', Comment, 'comment'),
(r'--.*?$', Comment.Single),
(words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
(words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword),
(words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type),
(words(operators), Name.Builtin.Pseudo),
(words(punctuation), Operator),
(u"[A-Za-z_\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2100-\u214f]"
u"[A-Za-z_'\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2070-\u2079"
u"\u207f-\u2089\u2090-\u209c\u2100-\u214f0-9]*", Name),
(r'\d+', Number.Integer),
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable)
],
'comment': [
# Multiline Comments
(r'[^/-]', Comment.Multiline),
(r'/-', Comment.Multiline, '#push'),
(r'-/', Comment.Multiline, '#pop'),
(r'[/-]', Comment.Multiline)
],
'string': [
(r'[^\\"]+', String.Double),
(r'\\[n"\\]', String.Escape),
('"', String.Double, '#pop'),
],
}
| LeanLexer |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 43151,
"end": 43342
} | class ____(BaseModel, extra="forbid"):
origin: "GeoPoint" = Field(..., description="")
to: str = Field(..., description="Payload field with the destination geo point")
| GeoDistanceParams |
python | huggingface__transformers | src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | {
"start": 65656,
"end": 74124
} | class ____(OmDetTurboPreTrainedModel):
def __init__(self, config: OmDetTurboConfig):
super().__init__(config)
self.vision_backbone = OmDetTurboVisionBackbone(config)
self.language_backbone = OmDetTurboLanguageBackbone(config)
self.encoder = OmDetTurboHybridEncoder(config)
self.decoder = OmDetTurboDecoder(config)
self.num_queries = config.num_queries
self.language_cache_class = OmDetTurboLRUCache(config.cache_size)
self.language_cache_prompt = OmDetTurboLRUCache(config.cache_size)
self.vocab_size = config.text_config.vocab_size
self.post_init()
def get_input_embeddings(self):
return self.language_backbone.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_backbone.model.set_input_embeddings(value)
def resize_token_embeddings(
self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None, mean_resizing: bool = True
) -> nn.Embedding:
model_embeds = self.language_backbone.model.resize_token_embeddings(
new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of, mean_resizing=mean_resizing
)
self.config.text_config.vocab_size = model_embeds.num_embeddings
self.vocab_size = model_embeds.num_embeddings
return model_embeds
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
classes_input_ids: torch.LongTensor,
classes_attention_mask: torch.LongTensor,
tasks_input_ids: torch.LongTensor,
tasks_attention_mask: torch.LongTensor,
classes_structure: torch.LongTensor,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.FloatTensor], OmDetTurboObjectDetectionOutput]:
r"""
classes_input_ids (`torch.LongTensor` of shape `(total_classes (>= batch_size), sequence_length)`):
Indices of input classes sequence tokens in the vocabulary of the language model.
Several classes can be provided for each tasks, thus the tokenized classes are flattened
and the structure of the classes is provided in the `classes_structure` argument.
Indices can be obtained using [`OmDetTurboProcessor`]. See [`OmDetTurboProcessor.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
classes_attention_mask (`torch.BoolTensor` of shape `(total_classes (>= batch_size), num_classes, sequence_length)`):
Attention mask for the classes. This is a binary mask that indicates which tokens should be attended to,
and which should not.
tasks_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input tasks sequence tokens in the vocabulary of the language model.
Indices can be obtained using [`OmDetTurboProcessor`]. See [`OmDetTurboProcessor.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
tasks_attention_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Attention mask for the tasks. This is a binary mask that indicates which tokens should be attended to,
and which should not.
classes_structure (torch.LongTensor of shape `(batch_size)`):
Structure of the classes. This tensor indicates the number of classes for each task.
Examples:
```python
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoProcessor, OmDetTurboForObjectDetection
>>> processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
>>> model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> classes = ["cat", "remote"]
>>> task = "Detect {}.".format(", ".join(classes))
>>> inputs = processor(image, text=classes, task=task, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # convert outputs (bounding boxes and class logits)
>>> results = processor.post_process_grounded_object_detection(
... outputs,
... classes=classes,
... target_sizes=[image.size[::-1]],
... score_threshold=0.3,
... nms_threshold=0.3,
>>> )[0]
>>> for score, class_name, box in zip(results["scores"], results["classes"], results["boxes"]):
... box = [round(i, 1) for i in box.tolist()]
... print(
... f"Detected {class_name} with confidence "
... f"{round(score.item(), 2)} at location {box}"
... )
Detected remote with confidence 0.76 at location [39.9, 71.3, 176.5, 117.9]
Detected cat with confidence 0.72 at location [345.1, 22.5, 639.7, 371.9]
Detected cat with confidence 0.65 at location [12.7, 53.8, 315.5, 475.3]
Detected remote with confidence 0.57 at location [333.4, 75.6, 370.7, 187.0]
```"""
if labels is not None:
raise NotImplementedError("Training is not implemented yet")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
loss = None
image_features = self.vision_backbone(pixel_values)
encoder_outputs = self.encoder(
image_features,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
class_features, task_features, task_mask = self.get_language_embedding(
classes_input_ids,
classes_attention_mask,
tasks_input_ids,
tasks_attention_mask,
classes_structure,
)
encoder_extracted_states = encoder_outputs.extracted_states if return_dict else encoder_outputs[-1]
decoder_outputs = self.decoder(
encoder_extracted_states,
class_features,
task_features,
task_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return tuple(
output
for output in [
loss,
decoder_outputs[3][-1],
decoder_outputs[4][-1],
decoder_outputs[7],
decoder_outputs[8],
decoder_outputs[5],
decoder_outputs[6],
encoder_outputs[-1],
decoder_outputs[1],
decoder_outputs[2],
encoder_outputs[1],
encoder_outputs[2],
classes_structure,
]
if output is not None
)
return OmDetTurboObjectDetectionOutput(
loss=loss,
decoder_coord_logits=decoder_outputs.decoder_coords[-1],
decoder_class_logits=decoder_outputs.decoder_classes[-1],
init_reference_points=decoder_outputs.init_reference_points,
intermediate_reference_points=decoder_outputs.intermediate_reference_points,
encoder_coord_logits=decoder_outputs.encoder_coord_logits,
encoder_class_logits=decoder_outputs.encoder_class_logits,
encoder_extracted_states=encoder_outputs.extracted_states,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
classes_structure=classes_structure,
)
__all__ = ["OmDetTurboForObjectDetection", "OmDetTurboPreTrainedModel"]
| OmDetTurboForObjectDetection |
python | tensorflow__tensorflow | third_party/xla/xla/python/xla_client.py | {
"start": 3106,
"end": 3463
} | class ____:
"""Python representation of a xla.PrecisionConfig protobuf."""
__slots__ = ('operand_precision',)
Precision = ops.PrecisionConfig_Precision # pylint: disable=invalid-name
def __init__(self):
self.operand_precision = []
FftType = ops.FftType
ShapeIndex = ops.ShapeIndex
ResultAccuracyMode = ops.ResultAccuracy_Mode
| PrecisionConfig |
python | realpython__materials | python-copy/rectangle.py | {
"start": 14,
"end": 246
} | class ____:
def __init__(self, top_left, bottom_right):
self.top_left = top_left
self.bottom_right = bottom_right
def __repr__(self):
return f"Rectangle({self.top_left}, {self.bottom_right})"
| Rectangle |
python | sympy__sympy | sympy/physics/mechanics/pathway.py | {
"start": 406,
"end": 3182
} | class ____(ABC):
"""Abstract base class for all pathway classes to inherit from.
Notes
=====
Instances of this class cannot be directly instantiated by users. However,
it can be used to created custom pathway types through subclassing.
"""
def __init__(self, *attachments):
"""Initializer for ``PathwayBase``."""
self.attachments = attachments
@property
def attachments(self):
"""The pair of points defining a pathway's ends."""
return self._attachments
@attachments.setter
def attachments(self, attachments):
if hasattr(self, '_attachments'):
msg = (
f'Can\'t set attribute `attachments` to {repr(attachments)} '
f'as it is immutable.'
)
raise AttributeError(msg)
if len(attachments) != 2:
msg = (
f'Value {repr(attachments)} passed to `attachments` was an '
f'iterable of length {len(attachments)}, must be an iterable '
f'of length 2.'
)
raise ValueError(msg)
for i, point in enumerate(attachments):
if not isinstance(point, Point):
msg = (
f'Value {repr(point)} passed to `attachments` at index '
f'{i} was of type {type(point)}, must be {Point}.'
)
raise TypeError(msg)
self._attachments = tuple(attachments)
@property
@abstractmethod
def length(self):
"""An expression representing the pathway's length."""
pass
@property
@abstractmethod
def extension_velocity(self):
"""An expression representing the pathway's extension velocity."""
pass
@abstractmethod
def to_loads(self, force):
"""Loads required by the equations of motion method classes.
Explanation
===========
``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be
passed to the ``loads`` parameters of its ``kanes_equations`` method
when constructing the equations of motion. This method acts as a
utility to produce the correctly-structred pairs of points and vectors
required so that these can be easily concatenated with other items in
the list of loads and passed to ``KanesMethod.kanes_equations``. These
loads are also in the correct form to also be passed to the other
equations of motion method classes, e.g. ``LagrangesMethod``.
"""
pass
def __repr__(self):
"""Default representation of a pathway."""
attachments = ', '.join(str(a) for a in self.attachments)
return f'{self.__class__.__name__}({attachments})'
| PathwayBase |
python | google__jax | tests/tree_util_test.py | {
"start": 4668,
"end": 4751
} | class ____(dict):
pass
@tree_util.register_static
@dataclasses.dataclass
| StaticDict |
python | tensorflow__tensorflow | tensorflow/compiler/tests/concat_ops_test.py | {
"start": 13222,
"end": 13744
} | class ____(xla_test.XLATestCase):
def testBasic(self):
with self.session():
with self.test_scope():
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
ans = self.evaluate(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
| ConcatOffsetTest |
python | keras-team__keras | keras/src/metrics/metrics_utils.py | {
"start": 858,
"end": 995
} | class ____(Enum):
TRUE_POSITIVES = "tp"
FALSE_POSITIVES = "fp"
TRUE_NEGATIVES = "tn"
FALSE_NEGATIVES = "fn"
| ConfusionMatrix |
python | kamyu104__LeetCode-Solutions | Python/split-array-largest-sum.py | {
"start": 55,
"end": 714
} | class ____(object):
def splitArray(self, nums, m):
"""
:type nums: List[int]
:type m: int
:rtype: int
"""
def check(nums, m, s):
cnt, curr_sum = 1, 0
for num in nums:
curr_sum += num
if curr_sum > s:
curr_sum = num
cnt += 1
return cnt <= m
left, right = max(nums), sum(nums)
while left <= right:
mid = left + (right - left) // 2
if check(nums, m, mid):
right = mid - 1
else:
left = mid + 1
return left
| Solution |
python | kamyu104__LeetCode-Solutions | Python/smallest-divisible-digit-product-ii.py | {
"start": 2232,
"end": 4138
} | class ____(object):
def smallestNumber(self, num, t):
"""
:type num: str
:type t: int
:rtype: str
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
def find_candidates(t, l): # Time: O(logt)
candidates = []
for x in reversed(xrange(2, 9+1)):
while t%x == 0:
t //= x
candidates.append(x)
if len(candidates) > l:
return []
if t == 1:
candidates.reverse()
return candidates
return []
def format(candidates, l):
result = [1]*l
i = len(result)-len(candidates)
for x in candidates:
result[i] = x
i += 1
return "".join(map(str, result))
nums = map(int, num)
candidates = find_candidates(t, float("inf"))
if t != 1 and not candidates:
return "-1"
i = next((i for i in xrange(len(nums)) if not nums[i]), len(nums))
for j in xrange(i, len(nums)):
nums[j] = 1
prefix = [1]*(len(nums)+1)
for i in xrange(len(prefix)-1):
prefix[i+1] = (prefix[i]*nums[i])%t
if not prefix[-1]:
return "".join(map(str, nums))
for i in reversed(xrange(len(nums))):
target = t//gcd(t, prefix[i])
for x in xrange(nums[i]+1, 9+1):
new_target = target//gcd(target, x)
tmp = find_candidates(new_target, len(nums)-1-i)
if new_target != 1 and not tmp:
continue
nums[i] = x
return "".join(map(str, nums[:i+1]))+format(tmp, len(nums)-1-i)
return format(candidates, max(len(nums)+1, len(candidates)))
| Solution2 |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_aliases.py | {
"start": 10356,
"end": 11234
} | class ____(GenerationBase):
"""
Test the ``generate_aliases_global`` signal handler behaviour.
"""
def get_signal_handler(self):
return signal_handlers.generate_aliases_global
def test_no_change(self):
"""
Thumbnails are only generated when the file is modified.
"""
profile = models.Profile(avatar='avatars/test.jpg')
files = self.fake_save(profile)
self.assertEqual(len(files), 1)
def test_changed(self):
"""
When a file is modified, thumbnails are built for all matching and
project-wide aliases.
"""
profile = models.Profile(avatar='avatars/test.jpg')
profile.avatar._committed = False
files = self.fake_save(profile)
# 1 source, 4 specific thumbs, 1 project-wide thumb.
self.assertEqual(len(files), 6)
| GlobalGenerationTest |
python | apache__airflow | helm-tests/tests/helm_tests/redis/test_labels_networkpolicy.py | {
"start": 900,
"end": 4278
} | class ____:
"""Tests redis network policy labels."""
AIRFLOW_EXECUTOR = "CeleryExecutor"
TEMPLATE_FILE = "templates/redis/redis-networkpolicy.yaml"
def test_should_add_global_labels(self):
"""Test adding only .Values.labels."""
docs = render_chart(
values={
"executor": self.AIRFLOW_EXECUTOR,
"redis": {"enabled": True},
"networkPolicies": {"enabled": True},
"labels": {"test_global_label": "test_global_label_value"},
},
show_only=[self.TEMPLATE_FILE],
)
assert "test_global_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_global_label"] == "test_global_label_value"
def test_should_add_component_specific_labels(self):
"""Test adding only .Values.redis.labels."""
docs = render_chart(
values={
"executor": self.AIRFLOW_EXECUTOR,
"redis": {
"enabled": True,
"labels": {"test_component_label": "test_component_label_value"},
},
"networkPolicies": {"enabled": True},
},
show_only=[self.TEMPLATE_FILE],
)
assert "test_component_label" in jmespath.search("metadata.labels", docs[0])
assert (
jmespath.search("metadata.labels", docs[0])["test_component_label"]
== "test_component_label_value"
)
def test_should_merge_global_and_component_specific_labels(self):
"""Test adding both .Values.labels and .Values.redis.labels."""
docs = render_chart(
values={
"executor": self.AIRFLOW_EXECUTOR,
"redis": {
"enabled": True,
"labels": {"test_component_label": "test_component_label_value"},
},
"networkPolicies": {"enabled": True},
"labels": {"test_global_label": "test_global_label_value"},
},
show_only=[self.TEMPLATE_FILE],
)
assert "test_global_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_global_label"] == "test_global_label_value"
assert "test_component_label" in jmespath.search("metadata.labels", docs[0])
assert (
jmespath.search("metadata.labels", docs[0])["test_component_label"]
== "test_component_label_value"
)
def test_component_specific_labels_should_override_global_labels(self):
"""Test that component-specific labels take precedence over global labels with the same key."""
docs = render_chart(
values={
"executor": self.AIRFLOW_EXECUTOR,
"redis": {
"enabled": True,
"labels": {"common_label": "component_value"},
},
"networkPolicies": {"enabled": True},
"labels": {"common_label": "global_value"},
},
show_only=[self.TEMPLATE_FILE],
)
assert "common_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["common_label"] == "component_value"
| TestRedisNetworkPolicy |
python | pytorch__pytorch | test/quantization/eager/test_quantize_eager_qat.py | {
"start": 1744,
"end": 9738
} | class ____(torch.nn.Conv2d, torch.nn.modules.conv._ConvNd):
"""
Conv-BN fusion implemented with explicit folding. Useful
to verify numerical equivalency with non-folded version.
"""
def __init__(
self,
# ConvNd args
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode,
# BatchNormNd args
# num_features: out_channels
eps=1e-05,
momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None,
):
nn.modules.conv._ConvNd.__init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
False,
padding_mode,
)
assert qconfig, "qconfig must be provided for QAT module"
self.qconfig = qconfig
self.eps = eps
self.momentum = momentum
self.freeze_bn = freeze_bn if self.training else True
self.num_features = out_channels
self.gamma = nn.Parameter(torch.empty(out_channels))
self.beta = nn.Parameter(torch.empty(out_channels))
self.affine = True
self.track_running_stats = True
self.running_mean = nn.Buffer(torch.zeros(out_channels))
self.running_var = nn.Buffer(torch.ones(out_channels))
self.num_batches_tracked = nn.Buffer(torch.tensor(0, dtype=torch.long))
self.activation_post_process = self.qconfig.activation()
self.weight_fake_quant = self.qconfig.weight()
if bias:
self.bias = nn.Parameter(torch.empty(out_channels))
else:
self.register_parameter("bias", None)
self.reset_bn_parameters()
def reset_running_stats(self):
self.running_mean.zero_()
self.running_var.fill_(1)
self.num_batches_tracked.zero_()
def reset_bn_parameters(self):
self.reset_running_stats()
init.uniform_(self.gamma)
init.zeros_(self.beta)
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def reset_parameters(self):
super().reset_parameters()
# A hack to avoid resetting on undefined parameters
if hasattr(self, "gamma"):
self.reset_bn_parameters()
def update_bn_stats(self):
self.freeze_bn = False
return self
def freeze_bn_stats(self):
self.freeze_bn = True
return self
def _forward(self, input):
# exponential_average_factor is self.momentum set to
# (when it is available) only so that if gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and not self.freeze_bn and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None:
self.num_batches_tracked += 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
# we use running statistics from the previous batch, so this is an
# approximation of the approach mentioned in the whitepaper, but we only
# need to do one convolution in this case instead of two
running_std = torch.sqrt(self.running_var + self.eps)
scale_factor = self.gamma / running_std
scaled_weight = self.weight * scale_factor.reshape([-1, 1, 1, 1])
if self.bias is not None:
zero_bias = torch.zeros_like(self.bias, dtype=input.dtype)
else:
zero_bias = torch.zeros(
self.out_channels, device=scaled_weight.device, dtype=input.dtype
)
conv = self._conv_forward(
input, self.weight_fake_quant(scaled_weight), zero_bias
)
if self.training and not self.freeze_bn:
# recovering original conv to get original batch_mean and batch_var
if self.bias is not None:
conv_orig = conv / scale_factor.reshape(
[1, -1, 1, 1]
) + self.bias.reshape([1, -1, 1, 1])
else:
conv_orig = conv / scale_factor.reshape([1, -1, 1, 1])
batch_mean = torch.mean(conv_orig, dim=[0, 2, 3])
batch_var = torch.var(conv_orig, dim=[0, 2, 3], unbiased=False)
n = float(conv_orig.numel() / conv_orig.size()[1])
unbiased_batch_var = batch_var * (n / (n - 1))
batch_rstd = torch.ones_like(
batch_var, memory_format=torch.contiguous_format
) / torch.sqrt(batch_var + self.eps)
conv = (self.gamma * batch_rstd).reshape([1, -1, 1, 1]) * conv_orig + (
self.beta - self.gamma * batch_rstd * batch_mean
).reshape([1, -1, 1, 1])
self.running_mean = (
exponential_average_factor * batch_mean.detach()
+ (1 - exponential_average_factor) * self.running_mean
)
self.running_var = (
exponential_average_factor * unbiased_batch_var.detach()
+ (1 - exponential_average_factor) * self.running_var
)
else:
if self.bias is None:
conv = conv + (
self.beta - self.gamma * self.running_mean / running_std
).reshape([1, -1, 1, 1])
else:
conv = conv + (
self.gamma * (self.bias - self.running_mean) / running_std
+ self.beta
).reshape([1, -1, 1, 1])
return conv
def extra_repr(self):
# TODO(jerryzh): extend
return super().extra_repr()
def forward(self, input):
return self.activation_post_process(self._forward(input))
@classmethod
def from_float(cls, mod, qconfig=None):
r"""Create a qat module from a float module or qparams_dict
Args: `mod` a float module, either produced by torch.ao.quantization utilities
or directly from user
"""
assert type(mod) is cls._FLOAT_MODULE, (
"qat."
+ cls.__name__
+ ".from_float only works for "
+ cls._FLOAT_MODULE.__name__
)
if not qconfig:
assert hasattr(mod, "qconfig"), (
"Input float module must have qconfig defined"
)
assert mod.qconfig, "Input float module must have a valid qconfig"
qconfig = mod.qconfig
conv, bn = mod[0], mod[1]
qat_convbn = cls(
conv.in_channels,
conv.out_channels,
conv.kernel_size,
conv.stride,
conv.padding,
conv.dilation,
conv.groups,
conv.bias is not None,
conv.padding_mode,
bn.eps,
bn.momentum,
False,
qconfig,
)
qat_convbn.weight = conv.weight
qat_convbn.bias = conv.bias
qat_convbn.gamma = bn.weight
qat_convbn.beta = bn.bias
qat_convbn.running_mean = bn.running_mean
qat_convbn.running_var = bn.running_var
qat_convbn.num_batches_tracked = bn.num_batches_tracked
return qat_convbn
| _ReferenceConvBnNd |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/repository_definition/repository_data.py | {
"start": 7428,
"end": 22442
} | class ____(RepositoryData):
"""Default implementation of RepositoryData used by the :py:func:`@repository <repository>` decorator."""
_all_jobs: Optional[Sequence[JobDefinition]]
_all_pipelines: Optional[Sequence[JobDefinition]]
def __init__(
self,
jobs: Mapping[str, Union[JobDefinition, Resolvable[JobDefinition]]],
schedules: Mapping[str, Union[ScheduleDefinition, Resolvable[ScheduleDefinition]]],
sensors: Mapping[str, Union[SensorDefinition, Resolvable[SensorDefinition]]],
source_assets_by_key: Mapping[AssetKey, SourceAsset],
assets_defs_by_key: Mapping[AssetKey, "AssetsDefinition"],
asset_checks_defs_by_key: Mapping[AssetCheckKey, "AssetsDefinition"],
top_level_resources: Mapping[str, ResourceDefinition],
utilized_env_vars: Mapping[str, AbstractSet[str]],
unresolved_partitioned_asset_schedules: Mapping[
str, "UnresolvedPartitionedAssetScheduleDefinition"
],
component_tree: Optional["ComponentTree"],
):
"""Constructs a new CachingRepositoryData object.
You may pass pipeline, job, and schedule definitions directly, or you may pass callables
with no arguments that will be invoked to lazily construct definitions when accessed by
name. This can be helpful for performance when there are many definitions in a repository,
or when constructing the definitions is costly.
Note that when lazily constructing a definition, the name of the definition must match its
key in its dictionary index, or a :py:class:`DagsterInvariantViolationError` will be thrown
at retrieval time.
Args:
jobs (Mapping[str, Union[JobDefinition, Callable[[], JobDefinition]]]):
The job definitions belonging to the repository.
schedules (Mapping[str, Union[ScheduleDefinition, Callable[[], ScheduleDefinition]]]):
The schedules belonging to the repository.
sensors (Mapping[str, Union[SensorDefinition, Callable[[], SensorDefinition]]]):
The sensors belonging to a repository.
source_assets_by_key (Mapping[AssetKey, SourceAsset]): The source assets belonging to a repository.
assets_defs_by_key (Mapping[AssetKey, AssetsDefinition]): The assets definitions
belonging to a repository.
asset_checks_defs_by_key (Mapping[AssetKey, AssetChecksDefinition]): The asset checks definitions
belonging to a repository.
top_level_resources (Mapping[str, ResourceDefinition]): A dict of top-level
resource keys to defintions, for resources which should be displayed in the UI.
"""
from dagster._core.definitions import AssetsDefinition
check.mapping_param(jobs, "jobs", key_type=str, value_type=(JobDefinition, FunctionType))
check.mapping_param(
schedules, "schedules", key_type=str, value_type=(ScheduleDefinition, FunctionType)
)
check.mapping_param(
sensors, "sensors", key_type=str, value_type=(SensorDefinition, FunctionType)
)
check.mapping_param(
source_assets_by_key, "source_assets_by_key", key_type=AssetKey, value_type=SourceAsset
)
check.mapping_param(
assets_defs_by_key, "assets_defs_by_key", key_type=AssetKey, value_type=AssetsDefinition
)
check.mapping_param(
asset_checks_defs_by_key,
"assets_checks_defs_by_key",
key_type=AssetCheckKey,
value_type=AssetsDefinition,
)
check.mapping_param(
top_level_resources, "top_level_resources", key_type=str, value_type=ResourceDefinition
)
check.mapping_param(
utilized_env_vars,
"utilized_resources",
key_type=str,
)
self._jobs = CacheingDefinitionIndex(
JobDefinition,
"JobDefinition",
"job",
jobs,
self._validate_job,
)
schedules = {
**schedules,
**{
name: self._resolve_partitioned_asset_schedule_lambda(
unresolved_partitioned_asset_schedule
)
for name, unresolved_partitioned_asset_schedule in unresolved_partitioned_asset_schedules.items()
},
}
self._schedules = CacheingDefinitionIndex(
ScheduleDefinition,
"ScheduleDefinition",
"schedule",
schedules,
self._validate_schedule,
)
# load all schedules to force validation
self._schedules.get_all_definitions()
self._source_assets_by_key = source_assets_by_key
self._assets_defs_by_key = assets_defs_by_key
self._assets_checks_defs_by_key = asset_checks_defs_by_key
self._top_level_resources = top_level_resources
self._utilized_env_vars = utilized_env_vars
self._component_tree = component_tree
self._sensors = CacheingDefinitionIndex(
SensorDefinition,
"SensorDefinition",
"sensor",
sensors,
self._validate_sensor,
)
# load all sensors to force validation
self._sensors.get_all_definitions()
self._all_jobs = None
def _resolve_partitioned_asset_schedule_lambda(
self, unresolved_partitioned_asset_schedule: "UnresolvedPartitionedAssetScheduleDefinition"
) -> Callable[[], ScheduleDefinition]:
def resolve_partitioned_asset_schedule() -> ScheduleDefinition:
job = self.get_job(unresolved_partitioned_asset_schedule.job.name)
return unresolved_partitioned_asset_schedule.resolve(job)
return resolve_partitioned_asset_schedule
@staticmethod
def from_dict(repository_definitions: dict[str, dict[str, Any]]) -> "CachingRepositoryData":
"""Static constructor.
Args:
repository_definition (Dict[str, Dict[str, ...]]): A dict of the form:
{
'jobs': Dict[str, Callable[[], JobDefinition]],
'schedules': Dict[str, Callable[[], ScheduleDefinition]]
}
This form is intended to allow definitions to be created lazily when accessed by name,
which can be helpful for performance when there are many definitions in a repository, or
when constructing the definitions is costly.
"""
from dagster._core.definitions.repository_definition.repository_data_builder import (
build_caching_repository_data_from_dict,
)
return build_caching_repository_data_from_dict(repository_definitions)
@classmethod
def from_list(
cls,
repository_definitions: Sequence[RepositoryElementDefinition],
default_executor_def: Optional[ExecutorDefinition] = None,
default_logger_defs: Optional[Mapping[str, LoggerDefinition]] = None,
top_level_resources: Optional[Mapping[str, ResourceDefinition]] = None,
component_tree: Optional["ComponentTree"] = None,
) -> "CachingRepositoryData":
"""Static constructor.
Args:
repository_definitions (List[Union[JobDefinition, ScheduleDefinition, SensorDefinition, GraphDefinition]]):
Use this constructor when you have no need to lazy load jobs or other definitions.
top_level_resources (Optional[Mapping[str, ResourceDefinition]]): A dict of top-level
resource keys to defintions, for resources which should be displayed in the UI.
"""
from dagster._core.definitions.repository_definition.repository_data_builder import (
build_caching_repository_data_from_list,
)
return build_caching_repository_data_from_list(
repository_definitions=repository_definitions,
default_executor_def=default_executor_def,
default_logger_defs=default_logger_defs,
top_level_resources=top_level_resources,
component_tree=component_tree,
)
def get_env_vars_by_top_level_resource(self) -> Mapping[str, AbstractSet[str]]:
return self._utilized_env_vars
def get_job_names(self) -> Sequence[str]:
"""Get the names of all jobs in the repository.
Returns:
List[str]
"""
return self._jobs.get_definition_names()
def has_job(self, job_name: str) -> bool:
"""Check if a job with a given name is present in the repository.
Args:
job_name (str): The name of the job.
Returns:
bool
"""
check.str_param(job_name, "job_name")
return self._jobs.has_definition(job_name)
def get_top_level_resources(self) -> Mapping[str, ResourceDefinition]:
return self._top_level_resources
def get_all_jobs(self) -> Sequence[JobDefinition]:
"""Return all jobs in the repository as a list.
Note that this will construct any job that has not yet been constructed.
Returns:
List[JobDefinition]: All jobs in the repository.
"""
if self._all_jobs is not None:
return self._all_jobs
self._all_jobs = self._jobs.get_all_definitions()
self._check_node_defs(self._all_jobs)
return self._all_jobs
def get_job(self, job_name: str) -> JobDefinition:
"""Get a job by name.
If this job has not yet been constructed, only this job is constructed, and will
be cached for future calls.
Args:
job_name (str): Name of the job to retrieve.
Returns:
JobDefinition: The job definition corresponding to the given name.
"""
check.str_param(job_name, "job_name")
return self._jobs.get_definition(job_name)
def get_schedule_names(self) -> Sequence[str]:
"""Get the names of all schedules in the repository.
Returns:
List[str]
"""
return self._schedules.get_definition_names()
def get_all_schedules(self) -> Sequence[ScheduleDefinition]:
"""Return all schedules in the repository as a list.
Note that this will construct any schedule that has not yet been constructed.
Returns:
List[ScheduleDefinition]: All schedules in the repository.
"""
return self._schedules.get_all_definitions()
def get_schedule(self, schedule_name: str) -> ScheduleDefinition:
"""Get a schedule by name.
if this schedule has not yet been constructed, only this schedule is constructed, and will
be cached for future calls.
Args:
schedule_name (str): name of the schedule to retrieve.
Returns:
ScheduleDefinition: The schedule definition corresponding to the given name.
"""
check.str_param(schedule_name, "schedule_name")
return self._schedules.get_definition(schedule_name)
def has_schedule(self, schedule_name: str) -> bool:
check.str_param(schedule_name, "schedule_name")
return self._schedules.has_definition(schedule_name)
def get_all_sensors(self) -> Sequence[SensorDefinition]:
return self._sensors.get_all_definitions()
def get_sensor_names(self) -> Sequence[str]:
return self._sensors.get_definition_names()
def get_sensor(self, sensor_name: str) -> SensorDefinition:
return self._sensors.get_definition(sensor_name)
def has_sensor(self, sensor_name: str) -> bool:
return self._sensors.has_definition(sensor_name)
def get_source_assets_by_key(self) -> Mapping[AssetKey, SourceAsset]:
return self._source_assets_by_key
def get_assets_defs_by_key(self) -> Mapping[AssetKey, "AssetsDefinition"]:
return self._assets_defs_by_key
def get_asset_checks_defs_by_key(self) -> Mapping[AssetCheckKey, "AssetChecksDefinition"]:
from dagster._core.definitions.asset_checks.asset_checks_definition import (
AssetChecksDefinition,
)
return {
key: (
ad
if isinstance(ad, AssetChecksDefinition)
# some of the items may be AssetsDefinition objects, but AssetChecksDefinition are
# expected, so convert if necessary
else AssetChecksDefinition(**ad.get_attributes_dict())
)
for key, ad in self._assets_checks_defs_by_key.items()
}
def get_component_tree(self) -> Optional["ComponentTree"]:
return self._component_tree
def _check_node_defs(self, job_defs: Sequence[JobDefinition]) -> None:
node_defs = {}
node_to_job = {}
for job_def in job_defs:
for node_def in [*job_def.all_node_defs, job_def.graph]:
# skip checks for subselected graphs because they don't have their own names
if isinstance(node_def, SubselectedGraphDefinition):
break
if node_def.name not in node_defs:
node_defs[node_def.name] = node_def
node_to_job[node_def.name] = job_def.name
if node_defs[node_def.name] is not node_def:
first_name, second_name = sorted([node_to_job[node_def.name], job_def.name])
raise DagsterInvalidDefinitionError(
f"Conflicting definitions found in repository with name '{node_def.name}'."
" Op/Graph definition names must be unique within a repository."
f" {node_def.__class__.__name__} is defined in"
f" job '{first_name}' and in"
f" job '{second_name}'."
)
def _validate_job(self, job: JobDefinition) -> JobDefinition:
return job
def _validate_schedule(self, schedule: ScheduleDefinition) -> ScheduleDefinition:
job_names = self.get_job_names()
if schedule.job_name not in job_names:
raise DagsterInvalidDefinitionError(
f'ScheduleDefinition "{schedule.name}" targets job "{schedule.job_name}" '
"which was not found in this repository."
)
return schedule
def _validate_sensor(self, sensor: SensorDefinition) -> SensorDefinition:
job_names = self.get_job_names()
if len(sensor.targets) == 0:
# skip validation when the sensor does not target a job
return sensor
for target in sensor.targets:
if target.job_name not in job_names:
raise DagsterInvalidDefinitionError(
f'SensorDefinition "{sensor.name}" targets job "{sensor.job_name}" '
"which was not found in this repository."
)
return sensor
| CachingRepositoryData |
python | getsentry__sentry | tests/sentry/mail/activity/test_release.py | {
"start": 1028,
"end": 12338
} | class ____(ActivityTestCase):
def setUp(self) -> None:
super().setUp()
self.user5_alt_email = "privateEmail@gmail.com"
self.org = self.create_organization(owner=None)
self.org.flags.allow_joinleave = False
self.org.save()
self.team = self.create_team(organization=self.org)
self.team2 = self.create_team(organization=self.org)
self.user1 = self.another_user("user1@example.com", self.team)
self.user2 = self.another_user("user2@example.com")
self.user3 = self.another_user("user3@example.com", self.team)
self.user4 = self.another_user("user4@example.com", self.team)
self.user5 = self.another_user("companyemail@example.com", self.team, self.user5_alt_email)
self.project = self.create_project(organization=self.org, teams=[self.team])
self.project2 = self.create_project(organization=self.org, teams=[self.team2])
self.environment = Environment.objects.create(
name="production", organization_id=self.org.id
)
self.release, self.deploy = self.another_release("a")
repository = Repository.objects.create(organization_id=self.org.id, name=self.project.name)
# The commits are intentionally out of order to test commit `order`.
self.commit4 = self.another_commit(3, "e", self.user5, repository, self.user5_alt_email)
self.commit1 = self.another_commit(0, "a", self.user1, repository)
self.commit2 = self.another_commit(1, "b", self.user2, repository)
self.commit3 = self.another_commit(2, "c", self.user4, repository)
with assume_test_silo_mode(SiloMode.CONTROL):
# added to make sure org default above takes precedent
NotificationSettingOption.objects.create(
scope_type=NotificationScopeEnum.ORGANIZATION.value,
scope_identifier=self.org.id,
user_id=self.user3.id,
type=NotificationSettingEnum.DEPLOY.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
NotificationSettingProvider.objects.create(
scope_type=NotificationScopeEnum.ORGANIZATION.value,
scope_identifier=self.org.id,
user_id=self.user3.id,
type=NotificationSettingEnum.DEPLOY.value,
provider=ExternalProviderEnum.EMAIL.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
NotificationSettingOption.objects.create(
scope_type=NotificationScopeEnum.ORGANIZATION.value,
scope_identifier=self.org.id,
user_id=self.user4.id,
type=NotificationSettingEnum.DEPLOY.value,
value=NotificationSettingsOptionEnum.NEVER.value,
)
NotificationSettingProvider.objects.create(
scope_type=NotificationScopeEnum.ORGANIZATION.value,
scope_identifier=self.org.id,
user_id=self.user4.id,
type=NotificationSettingEnum.DEPLOY.value,
provider=ExternalProviderEnum.EMAIL.value,
value=NotificationSettingsOptionEnum.NEVER.value,
)
# added to make sure org default above takes precedent
NotificationSettingOption.objects.create(
scope_type=NotificationScopeEnum.USER.value,
scope_identifier=self.user4.id,
user_id=self.user4.id,
type=NotificationSettingEnum.DEPLOY.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
NotificationSettingProvider.objects.create(
scope_type=NotificationScopeEnum.USER.value,
scope_identifier=self.user4.id,
user_id=self.user4.id,
type=NotificationSettingEnum.DEPLOY.value,
provider=ExternalProviderEnum.EMAIL.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
def test_simple(self) -> None:
mail.outbox.clear()
email = ReleaseActivityNotification(
Activity(
project=self.project,
user_id=self.user1.id,
type=ActivityType.RELEASE.value,
data={"version": self.release.version, "deploy_id": self.deploy.id},
)
)
# user1 is included because they committed
# user2 committed but isn't in a team associated with the project.
# user3 is included because they oped into all deploy emails
# user4 committed but isn't included because they opted out of all deploy emails
# for that org -- also tests to make sure org overrides default preference
# user5 committed with another email address and is still included.
participants = (
email.get_participants_with_group_subscription_reason().get_participants_by_provider(
ExternalProviders.EMAIL
)
)
assert participants == {
(Actor.from_orm_user(self.user1), GroupSubscriptionReason.committed),
(Actor.from_orm_user(self.user3), GroupSubscriptionReason.deploy_setting),
(Actor.from_orm_user(self.user5), GroupSubscriptionReason.committed),
}
context = email.get_context()
assert context["environment"] == "production"
rpc_user_5 = user_service.get_user(user_id=self.user5.id)
assert rpc_user_5 is not None
assert context["repos"][0]["commits"] == [
(self.commit4, rpc_user_5.by_email(self.user5_alt_email)),
(self.commit3, user_service.get_user(user_id=self.user4.id)),
(self.commit2, user_service.get_user(user_id=self.user2.id)),
(self.commit1, user_service.get_user(user_id=self.user1.id)),
]
user_context = email.get_recipient_context(Actor.from_orm_user(self.user1), {})
# make sure this only includes projects user has access to
assert len(user_context["projects"]) == 1
assert user_context["projects"][0][0] == self.project
with self.tasks():
email.send()
assert len(mail.outbox) == 3
sent_email_addresses = {msg.to[0] for msg in mail.outbox}
assert sent_email_addresses == {
self.user1.email,
self.user3.email,
self.user5.email,
}
def test_prevent_duplicate_projects(self) -> None:
email = ReleaseActivityNotification(
Activity(
project=self.project,
user_id=self.user1.id,
type=ActivityType.RELEASE.value,
data={"version": self.release.version, "deploy_id": self.deploy.id},
)
)
self.team3 = self.create_team(organization=self.org)
self.project.add_team(self.team3)
self.create_team_membership(user=self.user1, team=self.team3)
user_context = email.get_recipient_context(Actor.from_orm_user(self.user1), {})
# This project exists in multiple teams. Make sure we correctly de-dupe these and don't show
# the same project twice
assert len(user_context["projects"]) == 1
assert user_context["projects"][0][0] == self.project
def test_does_not_generate_on_no_release(self) -> None:
email = ReleaseActivityNotification(
Activity(
project=self.project,
user_id=self.user1.id,
type=ActivityType.RELEASE.value,
data={"version": "a", "deploy_id": 5},
)
)
assert email.release is None
def test_no_committers(self) -> None:
mail.outbox.clear()
Release.objects.all().delete()
release, deploy = self.another_release("b")
email = ReleaseActivityNotification(
Activity(
project=self.project,
user_id=self.user1.id,
type=ActivityType.RELEASE.value,
data={"version": release.version, "deploy_id": deploy.id},
)
)
# only user3 is included because they opted into all deploy emails
participants = (
email.get_participants_with_group_subscription_reason().get_participants_by_provider(
ExternalProviders.EMAIL
)
)
assert participants == {
(Actor.from_orm_user(self.user3), GroupSubscriptionReason.deploy_setting)
}
context = email.get_context()
assert context["environment"] == "production"
assert context["repos"] == []
user_context = email.get_recipient_context(Actor.from_orm_user(self.user1), {})
# make sure this only includes projects user has access to
assert len(user_context["projects"]) == 1
assert user_context["projects"][0][0] == self.project
with self.tasks():
email.send()
assert len(mail.outbox) == 1
sent_email_addresses = {msg.to[0] for msg in mail.outbox}
assert sent_email_addresses == {self.user3.email}
def test_uses_default(self) -> None:
user6 = self.create_user()
self.create_member(user=user6, organization=self.org, teams=[self.team])
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingOption.objects.create(
scope_type=NotificationScopeEnum.USER.value,
scope_identifier=user6.id,
user_id=user6.id,
type=NotificationSettingEnum.DEPLOY.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
release, deploy = self.another_release("b")
email = ReleaseActivityNotification(
Activity(
project=self.project,
user_id=self.user1.id,
type=ActivityType.RELEASE.value,
data={"version": release.version, "deploy_id": deploy.id},
)
)
mail.outbox.clear()
# user3 and user 6 are included because they oped into all deploy emails
# (one on an org level, one as their default)
participants = (
email.get_participants_with_group_subscription_reason().get_participants_by_provider(
ExternalProviders.EMAIL
)
)
assert len(participants) == 2
assert participants == {
(Actor.from_orm_user(user6), GroupSubscriptionReason.deploy_setting),
(Actor.from_orm_user(self.user3), GroupSubscriptionReason.deploy_setting),
}
context = email.get_context()
assert context["environment"] == "production"
assert context["repos"] == []
user_context = email.get_recipient_context(Actor.from_orm_user(user6), {})
# make sure this only includes projects user has access to
assert len(user_context["projects"]) == 1
assert user_context["projects"][0][0] == self.project
with self.tasks():
email.send()
assert len(mail.outbox) == 2
sent_email_addresses = {msg.to[0] for msg in mail.outbox}
assert sent_email_addresses == {self.user3.email, user6.email}
| ReleaseTestCase |
python | astropy__astropy | astropy/utils/masked/tests/test_masked.py | {
"start": 26928,
"end": 33050
} | class ____(MaskedArraySetup):
@pytest.mark.parametrize("op", (operator.add, operator.sub))
def test_add_subtract(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = self.ma.mask | self.mb.mask
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_equality(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = self.ma.mask | self.mb.mask
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_not_implemented(self):
with pytest.raises(TypeError):
self.ma > "abc" # noqa: B015
@pytest.mark.parametrize("different_names", [False, True])
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_structured_equality(self, op, different_names):
msb = self.msb
if different_names:
msb = msb.astype(
[(f"different_{name}", dt) for name, dt in msb.dtype.fields.items()]
)
mapmb = op(self.msa, self.msb)
# Expected is a bit tricky here: only unmasked fields count
expected_data = np.ones(mapmb.shape, bool)
expected_mask = np.ones(mapmb.shape, bool)
for field in self.sdt.names:
fa, mfa = self.sa[field], self.mask_sa[field]
fb, mfb = self.sb[field], self.mask_sb[field]
mfequal = mfa | mfb
fequal = (fa == fb) | mfequal
expected_data &= fequal
expected_mask &= mfequal
if op is operator.ne:
expected_data = ~expected_data
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_matmul(self):
result = self.ma.T @ self.ma
assert_array_equal(result.unmasked, self.a.T @ self.a)
mask1 = np.any(self.mask_a, axis=0)
expected_mask = np.logical_or.outer(mask1, mask1)
assert_array_equal(result.mask, expected_mask)
result2 = self.ma.T @ self.a
assert_array_equal(result2.unmasked, self.a.T @ self.a)
expected_mask2 = np.logical_or.outer(mask1, np.zeros(3, bool))
assert_array_equal(result2.mask, expected_mask2)
result3 = self.a.T @ self.ma
assert_array_equal(result3.unmasked, self.a.T @ self.a)
expected_mask3 = np.logical_or.outer(np.zeros(3, bool), mask1)
assert_array_equal(result3.mask, expected_mask3)
def test_matmul_axes(self):
m1 = Masked(np.arange(27.0).reshape(3, 3, 3))
m2 = Masked(np.arange(-27.0, 0.0).reshape(3, 3, 3))
mxm1 = np.matmul(m1, m2)
exp = np.matmul(m1.unmasked, m2.unmasked)
assert_array_equal(mxm1.unmasked, exp)
assert_array_equal(mxm1.mask, False)
m1.mask[0, 1, 2] = True
m2.mask[0, 2, 0] = True
axes = [(0, 2), (-2, -1), (0, 1)]
mxm2 = np.matmul(m1, m2, axes=axes)
exp2 = np.matmul(m1.unmasked, m2.unmasked, axes=axes)
# Any unmasked result will have all elements contributing unity,
# while masked entries mean the total will be lower.
mask2 = (
np.matmul(
(~m1.mask).astype(int),
(~m2.mask).astype(int),
axes=axes,
)
!= m1.shape[axes[0][1]]
)
assert_array_equal(mxm2.unmasked, exp2)
assert_array_equal(mxm2.mask, mask2)
def test_matmul_matvec(self):
result = self.ma @ self.mb
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.a @ self.b)
# Just using the masked vector still has all elements masked.
result2 = self.a @ self.mb
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.a @ self.b)
new_ma = self.ma.copy()
new_ma.mask[0, 0] = False
result3 = new_ma @ self.b
assert_array_equal(result3.unmasked, self.a @ self.b)
assert_array_equal(result3.mask, new_ma.mask.any(-1))
def test_matmul_vecmat(self):
result = self.mb @ self.ma.T
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.b @ self.a.T)
result2 = self.b @ self.ma.T
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.b @ self.a.T)
new_ma = self.ma.T.copy()
new_ma.mask[0, 0] = False
result3 = self.b @ new_ma
assert_array_equal(result3.unmasked, self.b @ self.a.T)
assert_array_equal(result3.mask, new_ma.mask.any(0))
def test_matmul_vecvec(self):
result = self.mb @ self.mb
assert result.shape == ()
assert result.mask
assert result.unmasked == self.b @ self.b
mb_no_mask = Masked(self.b, False)
result2 = mb_no_mask @ mb_no_mask
assert not result2.mask
@pytest.mark.skipif(
NUMPY_LT_2_3,
reason="np.matvec and np.vecmat are new in NumPy 2.3",
)
def test_matvec_vecmat(self):
vec = Masked(np.arange(3, like=self.a), [True, False, False])
mat_mask = np.zeros((3, 3), bool)
mat_mask[0, 0] = True
mat = Masked(
np.array([[1.0, -1.0, 2.0], [0.0, 3.0, -1.0], [-1.0, -1.0, 1.0]]),
mat_mask,
)
ref_matvec = (vec * mat).sum(-1)
res_matvec = np.matvec(mat, vec)
assert_masked_equal(res_matvec, ref_matvec)
ref_vecmat = (vec * mat.T).sum(-1)
res_vecmat = np.vecmat(vec, mat)
assert_masked_equal(res_vecmat, ref_vecmat)
| MaskedOperatorTests |
python | kamyu104__LeetCode-Solutions | Python/subtree-removal-game-with-fibonacci-tree.py | {
"start": 29,
"end": 1072
} | class ____(object):
def findGameWinner(self, n):
"""
:type n: int
:rtype: bool
"""
# a pattern appears every 6 grundy numbers in binary forms:
# 0000, (0000)01, (0000)11, ((0000)^(0000+1))10, (0000)11, (0000)11
# 0000, (0000+1)01, (0000+1)11, ((0000+1)^((0000+1)+1))10, (0000+1)11, (0000+1)11
# 0000, ((0000+1)+1)01, ((0000+1)+1)11, (((0000+1)+1)^(((0000+1)+1)+1))10, ((0000+1)+1)11, ((0000+1)+1)11
# ...
# 0000, (XXXX)01, (XXXX)11, ((XXXX)^(XXXX+1))10, (XXXX)11, (XXXX)11
# 0000, (XXXX+1)01, (XXXX+1)11, ((XXXX+1)^((XXXX+1)+1))10, (XXXX+1)11, (XXXX+1)11
# => grundy[6k+1] = 0
# grundy[6k+2] = 4k+1
# grundy[6k+3] = 4k+3
# grundy[6k+4] = 4(k^(k+1))+2
# grundy[6k+5] = 4k+3
# grundy[6k+6] = 4k+3
return n%6 != 1
# Time: O(n)
# Space: O(1)
| Solution |
python | ray-project__ray | release/nightly_tests/stress_tests/test_threaded_actors.py | {
"start": 445,
"end": 5216
} | class ____:
def __init__(self, metadata):
# -- Read only variables --
self.metadata = metadata
self.sample_batch = 1000000
# -- Variables that are accessed by mulitple threads --
self.lock = threading.Lock()
self.result_queue = Queue()
self.is_running = False
def ready(self):
pass
def run_compute(self):
self.is_running = True
sample_cnt = 0
while self.is_running:
# Compute pi
xs = np.random.uniform(low=-1.0, high=1.0, size=self.sample_batch)
ys = np.random.uniform(low=-1.0, high=1.0, size=self.sample_batch)
xys = np.stack((xs, ys), axis=-1)
inside = xs * xs + ys * ys <= 1.0
xys_inside = xys[inside]
in_circle = xys_inside.shape[0]
approx_pi = 4.0 * in_circle / self.sample_batch
# Put the result to the queue.
sample_cnt += self.sample_batch
with self.lock:
self.result_queue.put(PiResult(samples=sample_cnt, pi=approx_pi))
def stop(self):
self.is_running = False
def get_metadata(self):
return self.metadata
def get_pi(self):
result = None
while not result:
with self.lock:
if not self.result_queue.empty():
result = self.result_queue.get(block=False)
time.sleep(1)
return result
def start_actors(total_num_actors, num_nodes):
"""Create actors and run the computation loop."""
total_num_actors = int(total_num_actors)
actors_per_node = int(total_num_actors / num_nodes)
start = time.time()
nodes = []
# Place an actor per node in round-robin.
# It is added here to simulate the real user workload.
while len(nodes) < num_nodes:
nodes = [
next((r for r in n["Resources"] if "node" in r), None)
for n in ray.nodes()
if n["Alive"]
]
nodes = [n for n in nodes if n is not None]
pi_actors = [
PiCalculator.options(resources={n: 0.01}, max_concurrency=10).remote(
{"meta": 1}
)
for n in nodes
for _ in range(actors_per_node)
]
ray.get([actor.ready.remote() for actor in pi_actors])
print(f"Took {time.time() - start} to create {total_num_actors} actors")
# Start the computation loop.
for actor in pi_actors:
actor.run_compute.remote()
return pi_actors
def parse_script_args():
parser = argparse.ArgumentParser()
parser.add_argument("--kill-interval_s", type=float, default=60)
parser.add_argument("--test-runtime", type=float, default=3000)
return parser.parse_known_args()
def main():
"""The test simulates the workload with many threaded actors.
Test is doing 4 things for 1 hour.
- It first creates actors as many as num_cpus with max_concurrency=10
- Each actor computes pi and put the result to the queue.
- Driver keeps getting result & metadata from the actor.
- Every X seconds, it kills all actors and restarts them.
"""
ray.init(address="auto")
args, unknown = parse_script_args()
num_cpus = ray.cluster_resources()["CPU"]
num_nodes = sum(1 for n in ray.nodes() if n["Alive"])
print(f"Total number of actors: {num_cpus}, nodes: {num_nodes}")
monitor_actor = monitor_memory_usage()
start = time.time()
while time.time() - start < args.test_runtime:
# Step 1: Create actors and start computation loop.
print("Create actors.")
actors = start_actors(num_cpus, num_nodes)
# Step 2: Get the pi result from actors.
compute_start = time.time()
print("Start computation.")
while time.time() - compute_start < args.kill_interval_s:
# Get the metadata.
ray.get([actor.get_metadata.remote() for actor in actors])
# Get the result.
pb = ProgressBar("Computing Pi", num_cpus, "actor")
results = [actor.get_pi.remote() for actor in actors]
pb.fetch_until_complete(results)
pb.close()
# Step 3: Kill actors.
print("Kill all actors.")
for actor in actors:
ray.kill(actor)
# Report the result.
print("PASSED.")
used_gb, usage = ray.get(monitor_actor.get_peak_memory_info.remote())
print("Memory usage with failures.")
print(f"Peak memory usage: {round(used_gb, 2)}GB")
print(f"Peak memory usage per processes:\n {usage}")
# Report the result.
ray.get(monitor_actor.stop_run.remote())
result = {}
with open(os.environ["TEST_OUTPUT_JSON"], "w") as f:
f.write(json.dumps(result))
if __name__ == "__main__":
main()
| PiCalculator |
python | walkccc__LeetCode | solutions/1235. Maximum Profit in Job Scheduling/1235-3.py | {
"start": 0,
"end": 647
} | class ____:
def jobScheduling(
self,
startTime: list[int],
endTime: list[int],
profit: list[int],
) -> int:
maxProfit = 0
jobs = sorted([(s, e, p) for s, e, p in zip(startTime, endTime, profit)])
minHeap = [] # (endTime, profit)
# Will use binary search to find the first available startTime
for i in range(len(startTime)):
startTime[i] = jobs[i][0]
for s, e, p in jobs:
while minHeap and s >= minHeap[0][0]:
maxProfit = max(maxProfit, heapq.heappop(minHeap)[1])
heapq.heappush(minHeap, (e, p + maxProfit))
return max(maxProfit, max(p for _, p in minHeap))
| Solution |
python | getsentry__sentry | tests/acceptance/test_trace_view_waterfall.py | {
"start": 401,
"end": 3542
} | class ____(AcceptanceTestCase, TraceTestCase, SnubaTestCase):
viewname = "sentry-api-0-organization-trace"
FEATURES = [
"organizations:visibility-explore-view",
"organizations:performance-view",
"organizations:trace-spans-format",
]
def setUp(self) -> None:
super().setUp()
self.snuba_eventstream = SnubaEventStream()
self.start = self.day_ago = before_now(days=1).replace(
hour=10, minute=0, second=0, microsecond=0
)
self.start_minus_two_minutes = self.start - timedelta(minutes=2)
self.organization = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(
organization=self.organization, name="Mariachi Band", members=[self.user]
)
self.project = self.create_project(
organization=self.organization, teams=[self.team], name="Bengal"
)
self.login_as(self.user)
self.page = TraceViewWaterfallPage(self.browser, self.client)
self.dismiss_assistant()
@patch("django.utils.timezone.now")
def test_trace_view_waterfall_loads(self, mock_now: MagicMock) -> None:
mock_now.return_value = self.start
assert (
self.browser.driver.get_window_size().get("width") == 1680
) # This test makes assertions based on the current default window size.
with self.feature(self.FEATURES):
self.create_event(
trace_id=self.trace_id,
transaction="root",
start_timestamp=self.start_minus_two_minutes,
spans=[
{
"same_process_as_parent": True,
"op": "http.server",
"description": f"GET gen1-{root_span_id}",
"span_id": root_span_id,
"trace_id": self.trace_id,
}
for i, root_span_id in enumerate(self.root_span_ids)
],
parent_span_id=None,
project_id=self.project.id,
milliseconds=3000,
is_eap=True,
)
# Visit the trace view and wait till waterfall loads
self.page.visit_trace_view(self.organization.slug, self.trace_id)
# Check root span row exists and has the correct text
root_span_row = self.page.get_trace_span_row("http.server", "root")
assert root_span_row is not None
normalized_text = self.page.normalize_span_row_text(root_span_row.text)
assert normalized_text == f"{len(self.root_span_ids)} http.server - root"
# Check child span rows exist and have the correct text
for span_id in self.root_span_ids:
span_row = self.page.get_trace_span_row("http.server", f"GET gen1-{span_id}")
assert span_row is not None
normalized_text = self.page.normalize_span_row_text(span_row.text)
assert normalized_text == f"http.server - GET gen1-{span_id}"
| TraceViewWaterfallTest |
python | pytorch__pytorch | torch/fx/experimental/unification/variable.py | {
"start": 223,
"end": 2057
} | class ____:
"""Logic Variable"""
_id = 1
def __new__(cls, *token):
if len(token) == 0:
token = f"_{Var._id}" # type: ignore[assignment]
Var._id += 1
elif len(token) == 1:
token = token[0]
obj = object.__new__(cls)
obj.token = token # type: ignore[attr-defined]
return obj
def __str__(self):
return "~" + str(self.token) # type: ignore[attr-defined]
__repr__ = __str__
def __eq__(self, other):
return type(self) is type(other) and self.token == other.token # type: ignore[attr-defined]
def __hash__(self):
return hash((type(self), self.token)) # type: ignore[attr-defined]
def var():
return lambda *args: Var(*args)
def vars():
return lambda n: [var() for i in range(n)]
@dispatch(Var)
def isvar(v):
return True
isvar
@dispatch(object) # type: ignore[no-redef]
def isvar(o):
return _glv and hashable(o) and o in _glv
@contextmanager
def variables(*variables):
"""
Context manager for logic variables
Example:
>>> # xdoctest: +SKIP("undefined vars")
>>> from __future__ import with_statement
>>> with variables(1):
... print(isvar(1))
True
>>> print(isvar(1))
False
>>> # Normal approach
>>> from unification import unify
>>> x = var("x")
>>> unify(x, 1)
{~x: 1}
>>> # Context Manager approach
>>> with variables("x"):
... print(unify("x", 1))
{'x': 1}
"""
old_global_logic_variables = _global_logic_variables.copy()
_global_logic_variables.update(set(variables))
try:
yield
finally:
_global_logic_variables.clear()
_global_logic_variables.update(old_global_logic_variables)
| Var |
python | ray-project__ray | rllib/algorithms/cql/cql.py | {
"start": 10152,
"end": 14430
} | class ____(SAC):
"""CQL (derived from SAC)."""
@classmethod
@override(SAC)
def get_default_config(cls) -> CQLConfig:
return CQLConfig()
@classmethod
@override(SAC)
def get_default_policy_class(
cls, config: AlgorithmConfig
) -> Optional[Type[Policy]]:
if config["framework"] == "torch":
return CQLTorchPolicy
else:
return CQLTFPolicy
@override(SAC)
def training_step(self) -> None:
# Old API stack (Policy, RolloutWorker, Connector).
if not self.config.enable_env_runner_and_connector_v2:
return self._training_step_old_api_stack()
# Sampling from offline data.
with self.metrics.log_time((TIMERS, OFFLINE_SAMPLING_TIMER)):
# If we should use an iterator in the learner(s). Note, in case of
# multiple learners we must always return a list of iterators.
return_iterator = return_iterator = (
self.config.num_learners > 0
or self.config.dataset_num_iters_per_learner != 1
)
# Return an iterator in case we are using remote learners.
batch_or_iterator = self.offline_data.sample(
num_samples=self.config.train_batch_size_per_learner,
num_shards=self.config.num_learners,
# Return an iterator, if a `Learner` should update
# multiple times per RLlib iteration.
return_iterator=return_iterator,
)
# Updating the policy.
with self.metrics.log_time((TIMERS, LEARNER_UPDATE_TIMER)):
learner_results = self.learner_group.update(
data_iterators=batch_or_iterator,
minibatch_size=self.config.train_batch_size_per_learner,
num_iters=self.config.dataset_num_iters_per_learner,
)
# Log training results.
self.metrics.aggregate(learner_results, key=LEARNER_RESULTS)
@OldAPIStack
def _training_step_old_api_stack(self) -> ResultDict:
# Collect SampleBatches from sample workers.
with self._timers[SAMPLE_TIMER]:
train_batch = synchronous_parallel_sample(worker_set=self.env_runner_group)
train_batch = train_batch.as_multi_agent()
self._counters[NUM_AGENT_STEPS_SAMPLED] += train_batch.agent_steps()
self._counters[NUM_ENV_STEPS_SAMPLED] += train_batch.env_steps()
# Postprocess batch before we learn on it.
post_fn = self.config.get("before_learn_on_batch") or (lambda b, *a: b)
train_batch = post_fn(train_batch, self.env_runner_group, self.config)
# Learn on training batch.
# Use simple optimizer (only for multi-agent or tf-eager; all other
# cases should use the multi-GPU optimizer, even if only using 1 GPU)
if self.config.get("simple_optimizer") is True:
train_results = train_one_step(self, train_batch)
else:
train_results = multi_gpu_train_one_step(self, train_batch)
# Update target network every `target_network_update_freq` training steps.
cur_ts = self._counters[
NUM_AGENT_STEPS_TRAINED
if self.config.count_steps_by == "agent_steps"
else NUM_ENV_STEPS_TRAINED
]
last_update = self._counters[LAST_TARGET_UPDATE_TS]
if cur_ts - last_update >= self.config.target_network_update_freq:
with self._timers[TARGET_NET_UPDATE_TIMER]:
to_update = self.env_runner.get_policies_to_train()
self.env_runner.foreach_policy_to_train(
lambda p, pid: pid in to_update and p.update_target()
)
self._counters[NUM_TARGET_UPDATES] += 1
self._counters[LAST_TARGET_UPDATE_TS] = cur_ts
# Update remote workers's weights after learning on local worker
# (only those policies that were actually trained).
if self.env_runner_group.num_remote_workers() > 0:
with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:
self.env_runner_group.sync_weights(policies=list(train_results.keys()))
# Return all collected metrics for the iteration.
return train_results
| CQL |
python | PyCQA__pylint | tests/functional/a/arguments_differ.py | {
"start": 7261,
"end": 7398
} | class ____(BaseClass):
def method(self, arg, param1=42, *, param2=42):
print(arg, param1, param2)
| DerivedClassWithoutAnnotation |
python | getsentry__sentry | src/sentry/api/endpoints/api_token_details.py | {
"start": 998,
"end": 3370
} | class ____(Endpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
"DELETE": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.SECURITY
permission_classes = (SentryIsAuthenticated,)
@method_decorator(never_cache)
def get(self, request: Request, token_id: int) -> Response:
user_id = get_appropriate_user_id(request=request)
try:
instance = ApiToken.objects.get(id=token_id, application__isnull=True, user_id=user_id)
except ApiToken.DoesNotExist:
raise ResourceDoesNotExist(detail="Invalid token ID")
return Response(serialize(instance, request.user, include_token=False))
@method_decorator(never_cache)
def put(self, request: Request, token_id: int) -> Response:
keys = list(request.data.keys())
if any(key not in ALLOWED_FIELDS for key in keys):
return Response(
{"error": "Only auth token name can be edited after creation"}, status=403
)
serializer = ApiTokenNameSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.validated_data
user_id = get_appropriate_user_id(request=request)
try:
token_to_rename = ApiToken.objects.get(
id=token_id, application__isnull=True, user_id=user_id
)
except ApiToken.DoesNotExist:
raise ResourceDoesNotExist(detail="Invalid token ID")
token_to_rename.name = result.get("name")
token_to_rename.save()
return Response(serialize(token_to_rename, request.user, include_token=False), status=200)
@method_decorator(never_cache)
def delete(self, request: Request, token_id: int) -> Response:
user_id = get_appropriate_user_id(request=request)
try:
token_to_delete = ApiToken.objects.get(
id=token_id, application__isnull=True, user_id=user_id
)
except ApiToken.DoesNotExist:
raise ResourceDoesNotExist(detail="Invalid token ID")
token_to_delete.delete()
analytics.record(
ApiTokenDeleted(
user_id=user_id,
)
)
return Response(status=204)
| ApiTokenDetailsEndpoint |
python | realpython__materials | document-python-code-with-chatgpt/circle.py | {
"start": 81,
"end": 268
} | class ____:
def __init__(self, radius):
self.radius = radius
def calculate_area(self):
return round(math.pi * self.radius ** 2, 2)
"""
import math
# Output:
| Circle |
python | kamyu104__LeetCode-Solutions | Python/fair-distribution-of-cookies.py | {
"start": 63,
"end": 808
} | class ____(object):
def distributeCookies(self, cookies, k):
"""
:type cookies: List[int]
:type k: int
:rtype: int
"""
total = [0]*(1<<len(cookies))
for mask in xrange(1<<len(cookies)):
total[mask] = sum(cookies[i] for i in xrange(len(cookies)) if mask&(1<<i))
dp = [[float("inf")]*(1<<len(cookies)) for _ in xrange(2)]
dp[0][0] = 0
for i in xrange(k):
for mask in xrange(1<<len(cookies)):
submask = mask
while submask:
dp[(i+1)%2][mask] = min(dp[(i+1)%2][mask], max(total[submask], dp[i%2][mask^submask]))
submask = (submask-1)&mask
return dp[k%2][-1]
| Solution |
python | sqlalchemy__sqlalchemy | test/ext/test_associationproxy.py | {
"start": 115727,
"end": 123886
} | class ____(fixtures.TestBase):
"""test issues related to #8880, #8878, #8876"""
def test_straight_decl_usage(self, decl_base):
"""test use of assoc prox as the default descriptor for a
dataclasses.field.
"""
class User(decl_base):
__allow_unmapped__ = True
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
user_keyword_associations: Mapped[List[UserKeywordAssociation]] = (
relationship(
back_populates="user",
cascade="all, delete-orphan",
)
)
keywords: AssociationProxy[list[str]] = association_proxy(
"user_keyword_associations", "keyword"
)
UserKeywordAssociation, Keyword = self._keyword_mapping(
User, decl_base
)
self._assert_keyword_assoc_mapping(
User, UserKeywordAssociation, Keyword, init=True
)
@testing.variation("embed_in_field", [True, False])
@testing.combinations(
{},
{"repr": False},
{"repr": True},
{"kw_only": True},
{"init": False},
{"default_factory": True},
argnames="field_kw",
)
def test_dc_decl_usage(self, dc_decl_base, embed_in_field, field_kw):
"""test use of assoc prox as the default descriptor for a
dataclasses.field.
This exercises #8880
"""
if field_kw.pop("default_factory", False) and not embed_in_field:
has_default_factory = True
field_kw["default_factory"] = lambda: [
Keyword("l1"),
Keyword("l2"),
Keyword("l3"),
]
else:
has_default_factory = False
class User(dc_decl_base):
__allow_unmapped__ = True
__tablename__ = "user"
id: Mapped[int] = mapped_column(
primary_key=True, repr=True, init=False
)
user_keyword_associations: Mapped[List[UserKeywordAssociation]] = (
relationship(
back_populates="user",
cascade="all, delete-orphan",
init=False,
)
)
if embed_in_field:
# this is an incorrect form to use with
# MappedAsDataclass. However, we want to make sure it
# works as kind of a test to ensure we are being as well
# behaved as possible with an explicit dataclasses.field(),
# by testing that it uses its normal descriptor-as-default
# behavior
keywords: AssociationProxy[list[str]] = dataclasses.field(
default=association_proxy(
"user_keyword_associations", "keyword"
),
**field_kw,
)
else:
keywords: AssociationProxy[list[str]] = association_proxy(
"user_keyword_associations", "keyword", **field_kw
)
UserKeywordAssociation, Keyword = self._dc_keyword_mapping(
User, dc_decl_base
)
# simplify __qualname__ so we can test repr() more easily
User.__qualname__ = "mod.User"
UserKeywordAssociation.__qualname__ = "mod.UserKeywordAssociation"
Keyword.__qualname__ = "mod.Keyword"
init = field_kw.get("init", True)
u1 = self._assert_keyword_assoc_mapping(
User,
UserKeywordAssociation,
Keyword,
init=init,
has_default_factory=has_default_factory,
)
if field_kw.get("repr", True):
eq_(
repr(u1),
"mod.User(id=None, user_keyword_associations=["
"mod.UserKeywordAssociation(user_id=None, keyword_id=None, "
"keyword=mod.Keyword(id=None, keyword='k1'), user=...), "
"mod.UserKeywordAssociation(user_id=None, keyword_id=None, "
"keyword=mod.Keyword(id=None, keyword='k2'), user=...), "
"mod.UserKeywordAssociation(user_id=None, keyword_id=None, "
"keyword=mod.Keyword(id=None, keyword='k3'), user=...)], "
"keywords=[mod.Keyword(id=None, keyword='k1'), "
"mod.Keyword(id=None, keyword='k2'), "
"mod.Keyword(id=None, keyword='k3')])",
)
else:
eq_(
repr(u1),
"mod.User(id=None, user_keyword_associations=["
"mod.UserKeywordAssociation(user_id=None, keyword_id=None, "
"keyword=mod.Keyword(id=None, keyword='k1'), user=...), "
"mod.UserKeywordAssociation(user_id=None, keyword_id=None, "
"keyword=mod.Keyword(id=None, keyword='k2'), user=...), "
"mod.UserKeywordAssociation(user_id=None, keyword_id=None, "
"keyword=mod.Keyword(id=None, keyword='k3'), user=...)])",
)
def _assert_keyword_assoc_mapping(
self,
User,
UserKeywordAssociation,
Keyword,
*,
init,
has_default_factory=False,
):
if not init:
with expect_raises_message(
TypeError, r"got an unexpected keyword argument 'keywords'"
):
User(keywords=[Keyword("k1"), Keyword("k2"), Keyword("k3")])
if has_default_factory:
u1 = User()
eq_(u1.keywords, [Keyword("l1"), Keyword("l2"), Keyword("l3")])
eq_(
[ka.keyword.keyword for ka in u1.user_keyword_associations],
["l1", "l2", "l3"],
)
if init:
u1 = User(keywords=[Keyword("k1"), Keyword("k2"), Keyword("k3")])
else:
u1 = User()
u1.keywords = [Keyword("k1"), Keyword("k2"), Keyword("k3")]
eq_(u1.keywords, [Keyword("k1"), Keyword("k2"), Keyword("k3")])
eq_(
[ka.keyword.keyword for ka in u1.user_keyword_associations],
["k1", "k2", "k3"],
)
return u1
def _keyword_mapping(self, User, decl_base):
class UserKeywordAssociation(decl_base):
__tablename__ = "user_keyword"
user_id: Mapped[int] = mapped_column(
ForeignKey("user.id"), primary_key=True
)
keyword_id: Mapped[int] = mapped_column(
ForeignKey("keyword.id"), primary_key=True
)
user: Mapped[User] = relationship(
back_populates="user_keyword_associations",
)
keyword: Mapped[Keyword] = relationship()
def __init__(self, keyword=None, user=None):
self.user = user
self.keyword = keyword
class Keyword(ComparableMixin, decl_base):
__tablename__ = "keyword"
id: Mapped[int] = mapped_column(primary_key=True)
keyword: Mapped[str] = mapped_column()
def __init__(self, keyword):
self.keyword = keyword
return UserKeywordAssociation, Keyword
def _dc_keyword_mapping(self, User, dc_decl_base):
class UserKeywordAssociation(dc_decl_base):
__tablename__ = "user_keyword"
user_id: Mapped[int] = mapped_column(
ForeignKey("user.id"), primary_key=True, init=False
)
keyword_id: Mapped[int] = mapped_column(
ForeignKey("keyword.id"), primary_key=True, init=False
)
keyword: Mapped[Keyword] = relationship(default=None)
user: Mapped[User] = relationship(
back_populates="user_keyword_associations", default=None
)
class Keyword(dc_decl_base):
__tablename__ = "keyword"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
keyword: Mapped[str] = mapped_column(init=True)
return UserKeywordAssociation, Keyword
| DeclOrmForms |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataproc_metastore.py | {
"start": 2599,
"end": 3152
} | class ____(BaseGoogleLink):
"""Helper class for constructing Dataproc Metastore resource link."""
name = "Dataproc Metastore"
key = "conf"
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
conf = self.get_config(operator, ti_key)
if not conf:
return ""
return conf["url"].format(
region=conf["region"],
service_id=conf["service_id"],
project_id=conf["project_id"],
)
| DataprocMetastoreLink |
python | plotly__plotly.py | plotly/graph_objs/waterfall/_increasing.py | {
"start": 233,
"end": 2458
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "waterfall"
_path_str = "waterfall.increasing"
_valid_props = {"marker"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.waterfall.increasing.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.waterfall.increasing.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.waterfall.increasing.Marke
r` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, **kwargs):
"""
Construct a new Increasing object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.waterfall.Increasing`
marker
:class:`plotly.graph_objects.waterfall.increasing.Marke
r` instance or dict with compatible properties
Returns
-------
Increasing
"""
super().__init__("increasing")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.waterfall.Increasing
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.Increasing`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Increasing |
python | google__jax | jax/_src/interpreters/pxla.py | {
"start": 131777,
"end": 141481
} | class ____(stages.Executable):
__slots__ = [
"xla_executable", "_unsafe_call", "build_unsafe_call", "in_avals",
"out_avals", "_in_shardings", "_out_shardings", "_auto_spmd_lowering",
"_kept_var_idx", "_xla_in_layouts", "_dispatch_in_layouts",
"_xla_out_layouts", "_mut", "_all_args_info", "_unloaded_executable",
]
def __init__(self, xla_executable, build_unsafe_call, in_avals, out_avals,
in_shardings, out_shardings, auto_spmd_lowering, kept_var_idx,
xla_in_layouts, dispatch_in_layouts, xla_out_layouts, mut,
all_args_info: AllArgsInfo | None = None,
unloaded_executable=None):
self.xla_executable = xla_executable
self.build_unsafe_call = build_unsafe_call
# in_avals is a list of global and local avals. Aval is global if input
# is a GDA or jax.Array else local.
self.in_avals = in_avals # includes the const_args
self.out_avals = out_avals
self._unsafe_call = None
self._in_shardings = in_shardings
self._out_shardings = out_shardings
self._auto_spmd_lowering = auto_spmd_lowering
self._kept_var_idx = kept_var_idx
self._xla_in_layouts = xla_in_layouts
self._dispatch_in_layouts = dispatch_in_layouts
self._xla_out_layouts = xla_out_layouts
self._mut = mut
self._all_args_info = all_args_info
self._unloaded_executable = unloaded_executable
@property
def unsafe_call(self) -> Callable[..., Any]:
if self._unsafe_call is None:
self._unsafe_call = self.build_unsafe_call()
return self._unsafe_call # type: ignore
# -- stages.Executable overrides
def xla_extension_executable(self):
return self.xla_executable
def call(self, *args):
args_after_dce = [a for i, a in enumerate(args) if i in self._kept_var_idx]
if (self._all_args_info is not None and
self._all_args_info.debug_info.arg_names is not None):
arg_names_after_dce = [
n for i, n in enumerate(self._all_args_info.debug_info.arg_names)
if i in self._kept_var_idx]
else:
arg_names_after_dce = ("",) * len(args_after_dce)
if self._all_args_info is not None:
# We check all args before DCE
check_arg_avals_for_call(self._all_args_info.in_avals,
map(core.shaped_abstractify, args),
self._all_args_info.debug_info)
else:
# We can only check the args after DCE
check_arg_avals_for_call(self.in_avals,
map(core.shaped_abstractify, args_after_dce),
core.DebugInfo("MeshExecutable", "<unknown>",
arg_names_after_dce, None))
if not self._mut:
check_array_xla_sharding_layout_match(
args_after_dce, self._in_shardings, self._xla_in_layouts,
arg_names_after_dce)
else:
args_after_dce = [*args_after_dce, *self._mut.in_mut]
arg_names_after_dce += (("",) * len(self._mut.in_mut))
check_array_xla_sharding_layout_match(
args_after_dce, self._in_shardings, self._xla_in_layouts,
arg_names_after_dce)
return self.unsafe_call(*args) # pylint: disable=not-callable
def create_cpp_call(self, params: stages.CompiledCallParams):
if not (isinstance(self.unsafe_call, ExecuteReplicated) and
not self.unsafe_call.has_unordered_effects and
not self.unsafe_call.has_host_callbacks):
return None
def aot_cache_miss(*args, **kwargs):
# args do not include the const args.
# See https://docs.jax.dev/en/latest/internals/constants.html.
outs, out_flat, args_flat = stages.Compiled.call(params, *args, **kwargs)
if not params.is_high:
out_flat, out_tree_dispatch = reflatten_outputs_for_dispatch(
params.out_tree, out_flat)
use_fastpath = (all(isinstance(x, xc.ArrayImpl) for x in out_flat)
and not self._mut)
else:
use_fastpath = False
if use_fastpath:
out_avals = [o.aval for o in out_flat]
out_committed = [o._committed for o in out_flat]
kept_var_bitvec = [i in self._kept_var_idx
for i in range(len(params.const_args) + len(args_flat))]
in_shardings = [
sharding_impls.physical_sharding(a, s)
if a is not core.abstract_token and dtypes.issubdtype(a.dtype, dtypes.extended)
else s
for s, a in zip(self._in_shardings, self.in_avals)
]
fastpath_data = MeshExecutableFastpathData(
self.xla_executable, out_tree_dispatch, in_shardings,
self._out_shardings, out_avals, out_committed, kept_var_bitvec,
self._dispatch_in_layouts, params.const_args)
else:
fastpath_data = None
return outs, fastpath_data, False # Do not remove cache entry
return xc._xla.pjit(
self.unsafe_call.name, None, aot_cache_miss, [], [],
JitGlobalCppCacheKeys(), tree_util.dispatch_registry, cc_shard_arg)
def cc_shard_arg(x, sharding, layout):
return shard_args([sharding], [layout], [xc.ArrayCopySemantics.REUSE_INPUT],
[x])[0]
def check_arg_avals_for_call(ref_avals, arg_avals,
jaxpr_debug_info: core.DebugInfo):
if len(ref_avals) != len(arg_avals):
raise TypeError(
f"Computation compiled for {len(ref_avals)} inputs "
f"but called with {len(arg_avals)}")
arg_names = [f"'{name}'" for name in jaxpr_debug_info.safe_arg_names(len(ref_avals))]
errors = []
for ref_aval, arg_aval, name in safe_zip(ref_avals, arg_avals, arg_names):
# Don't compare shardings of avals because you can lower with
# numpy arrays + in_shardings and call compiled executable with
# sharded arrays. We also have sharding checks downstream.
if (ref_aval.shape, ref_aval.dtype) != (arg_aval.shape, arg_aval.dtype):
errors.append(
f"Argument {name} compiled with {ref_aval.str_short()} and called "
f"with {arg_aval.str_short()}")
if errors:
max_num_errors = 5
str_errors = "\n".join(errors[:max_num_errors])
if len(errors) >= max_num_errors:
num_mismatch_str = f"The first {max_num_errors} of {len(errors)}"
else:
num_mismatch_str = "The"
raise TypeError(
"Argument types differ from the types for which this computation was "
"compiled. Perhaps you are calling the compiled executable with a "
"different enable_x64 mode than when it was AOT compiled? "
f"{num_mismatch_str} mismatches are:\n{str_errors}")
def _get_metadata_jit_pmap(local_devices, num_in_shardings, num_out_shardings):
# Create replicated shardings for jit(pmap) path with local devices
# because multihost jit(pmap) is not allowed.
gs = sharding_impls.GSPMDSharding.get_replicated(local_devices)
in_shardings = [gs] * num_in_shardings
out_shardings = [gs] * num_out_shardings
# jit(pmap) will generate Arrays with multi-device sharding.
# It is unsupported for these shardings to be uncommitted, so force
# the outputs to be committed.
committed = True
return (in_shardings, out_shardings, committed,
_create_device_list(tuple(local_devices)))
def check_device_backend_on_shardings(shardings) -> bool:
for i in shardings:
if isinstance(i, (UnspecifiedValue, AUTO)):
continue
if getattr(i, '_device_backend', False):
return True
return False
def check_array_xla_sharding_layout_match(
args,
in_shardings: Sequence[JSharding],
in_layouts: Sequence[Layout],
arg_names: Sequence[str]
) -> None:
errors = []
num_errors = 5
for arg, xs, xl, name in zip(args, in_shardings, in_layouts, arg_names):
if not isinstance(arg, array.ArrayImpl):
continue
if isinstance(xs, (UnspecifiedValue, AUTO)):
continue
db_xs = check_device_backend_on_shardings([xs])
if (not db_xs and arg._committed and
not arg.sharding.is_equivalent_to(xs, arg.ndim)):
errors.append((
f"Argument {name} with shape {arg.aval.str_short()}:\n"
f" Passed sharding: {arg.sharding}\n"
f" Required sharding: {xs}",
"sharding"))
if (not db_xs and arg._committed and
arg.format.layout is not None and xl is not None and
arg.format.layout != xl):
errors.append((
f"Argument {name} with shape {arg.aval.str_short()}:\n"
f" Passed layout: {arg.format.layout}\n"
f" Required layout: {xl}",
"layout"))
if errors:
first_errors, error_kinds = unzip2(errors[:num_errors])
str_errors = '\n'.join(first_errors)
if all(k == 'sharding' for k in error_kinds):
kind_str = r'shardings'
elif all(k == 'layout' for k in error_kinds):
kind_str = 'layouts'
else:
kind_str = 'shardings and layouts'
num_mismatch_str = (
f"the {len(errors)} mismatches" if len(errors) < num_errors else
f"{num_errors} mismatches out of {len(errors)}")
raise ValueError(
f"Computation was compiled for input {kind_str} that disagree with the "
f"{kind_str} of arguments passed to it. "
f"Here are {num_mismatch_str}:\n{str_errors}")
def batch_spec(spec, dim, val):
too_short = dim - len(spec)
if too_short > 0:
spec += (None,) * too_short
new_partitions = tuple_insert(spec, dim, val) # type: ignore
return PartitionSpec(*new_partitions)
def get_array_mapping(pspec: PartitionSpec) -> ArrayMappingOrAutoOrUnspecified:
pspec = sharding_impls.prepare_axis_resources(pspec, "pspec to array_mapping")
return _get_array_mapping(pspec)
| MeshExecutable |
python | spack__spack | lib/spack/spack/test/util/package_hash.py | {
"start": 7446,
"end": 9074
} | class ____:
for variant in ["+foo", "+bar", "+baz"]:
conflicts("quux" + variant)
for variant in ["+foo", "+bar", "+baz"]:
# logic in the loop prevents our dumb analyzer from having it removed. This
# is uncommon so we don't (yet?) implement logic to detect that spec is unused.
print("oops can't remove this.")
conflicts("quux" + variant)
# Hard to make a while loop that makes sense, so ignore the infinite loop here.
# Likely nobody uses while instead of for, but we test it just in case.
while x <= 10:
depends_on("garply@%d.0" % x)
# all of these should go away, as they only contain directives
with when("@10.0"):
depends_on("foo")
with when("+bar"):
depends_on("bar")
with when("+baz"):
depends_on("baz")
# this whole statement should disappear
if sys.platform == "linux":
conflicts("baz@9.0")
# the else block here should disappear
if sys.platform == "linux":
print("foo")
else:
conflicts("foo@9.0")
# both blocks of this statement should disappear
if sys.platform == "darwin":
conflicts("baz@10.0")
else:
conflicts("bar@10.0")
# This one is complicated as the body goes away but the else block doesn't.
# Again, this could be optimized, but we're just testing removal logic here.
if sys.platform() == "darwin":
conflicts("baz@10.0")
else:
print("oops can't remove this.")
conflicts("bar@10.0")
"""
complex_package_logic_filtered = """\
| ComplexPackageLogic |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 63945,
"end": 64642
} | class ____(GeneratedAirbyteDestination):
@public
def __init__(self, name: str, destination_path: str):
"""Airbyte Destination for Csv.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/csv
Args:
name (str): The name of the destination.
destination_path (str): Path to the directory where csv files will be written. The destination uses the local mount "/local" and any data files will be placed inside that local mount. For more information check out our docs
"""
self.destination_path = check.str_param(destination_path, "destination_path")
super().__init__("Csv", name)
| CsvDestination |
python | PyCQA__pylint | tests/functional/r/regression_02/regression_no_member_7631.py | {
"start": 250,
"end": 343
} | class ____(Parent):
attr = 2
def __init__(self):
self.attr = self.attr | 4
| Child |
python | lazyprogrammer__machine_learning_examples | unsupervised_class2/vanishing.py | {
"start": 960,
"end": 3936
} | class ____(object):
def __init__(self, hidden_layer_sizes):
self.hidden_layer_sizes = hidden_layer_sizes
def fit(self, X, Y, learning_rate=0.01, mu=0.99, epochs=30, batch_sz=100):
# cast to float32
learning_rate = np.float32(learning_rate)
mu = np.float32(mu)
N, D = X.shape
K = len(set(Y))
self.hidden_layers = []
mi = D
for mo in self.hidden_layer_sizes:
h = HiddenLayer(mi, mo)
self.hidden_layers.append(h)
mi = mo
# initialize logistic regression layer
W = init_weights((mo, K))
b = np.zeros(K, dtype=np.float32)
self.W = theano.shared(W)
self.b = theano.shared(b)
self.params = [self.W, self.b]
self.allWs = []
for h in self.hidden_layers:
self.params += h.params
self.allWs.append(h.W)
self.allWs.append(self.W)
X_in = T.matrix('X_in')
targets = T.ivector('Targets')
pY = self.forward(X_in)
cost = -T.mean( T.log(pY[T.arange(pY.shape[0]), targets]) )
prediction = self.predict(X_in)
updates = momentum_updates(cost, self.params, mu, learning_rate)
train_op = theano.function(
inputs=[X_in, targets],
outputs=[cost, prediction],
updates=updates,
)
n_batches = N // batch_sz
costs = []
lastWs = [W.get_value() for W in self.allWs]
W_changes = []
print("supervised training...")
for i in range(epochs):
print("epoch:", i)
X, Y = shuffle(X, Y)
for j in range(n_batches):
Xbatch = X[j*batch_sz:(j*batch_sz + batch_sz)]
Ybatch = Y[j*batch_sz:(j*batch_sz + batch_sz)]
c, p = train_op(Xbatch, Ybatch)
if j % 100 == 0:
print("j / n_batches:", j, "/", n_batches, "cost:", c, "error:", error_rate(p, Ybatch))
costs.append(c)
# log changes in all Ws
W_change = [np.abs(W.get_value() - lastW).mean() for W, lastW in zip(self.allWs, lastWs)]
W_changes.append(W_change)
lastWs = [W.get_value() for W in self.allWs]
W_changes = np.array(W_changes)
plt.subplot(2,1,1)
for i in range(W_changes.shape[1]):
plt.plot(W_changes[:,i], label='layer %s' % i)
plt.legend()
# plt.show()
plt.subplot(2,1,2)
plt.plot(costs)
plt.show()
def predict(self, X):
return T.argmax(self.forward(X), axis=1)
def forward(self, X):
Z = X
for h in self.hidden_layers:
Z = h.forward(Z)
Y = T.nnet.softmax(Z.dot(self.W) + self.b)
return Y
def main():
Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
dnn = ANN([1000, 750, 500])
dnn.fit(Xtrain, Ytrain)
if __name__ == '__main__':
main()
| ANN |
python | walkccc__LeetCode | solutions/2053. Kth Distinct String in an Array/2053.py | {
"start": 0,
"end": 220
} | class ____:
def kthDistinct(self, arr: list[str], k: int) -> str:
count = collections.Counter(arr)
for a in arr:
if count[a] == 1:
k -= 1
if k == 0:
return a
return ''
| Solution |
python | django__django | tests/cache/tests.py | {
"start": 46129,
"end": 50657
} | class ____(BaseCacheTests, TransactionTestCase):
available_apps = ["cache"]
def setUp(self):
# The super calls needs to happen first for the settings override.
super().setUp()
self.create_table()
self.addCleanup(self.drop_table)
def create_table(self):
management.call_command("createcachetable", verbosity=0)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name("test cache table")
cursor.execute("DROP TABLE %s" % table_name)
def test_get_many_num_queries(self):
cache.set_many({"a": 1, "b": 2})
cache.set("expired", "expired", 0.01)
with self.assertNumQueries(1):
self.assertEqual(cache.get_many(["a", "b"]), {"a": 1, "b": 2})
time.sleep(0.02)
with self.assertNumQueries(2):
self.assertEqual(cache.get_many(["a", "b", "expired"]), {"a": 1, "b": 2})
def test_delete_many_num_queries(self):
cache.set_many({"a": 1, "b": 2, "c": 3})
with self.assertNumQueries(1):
cache.delete_many(["a", "b", "c"])
def test_cull_queries(self):
old_max_entries = cache._max_entries
# Force _cull to delete on first cached record.
cache._max_entries = -1
with CaptureQueriesContext(connection) as captured_queries:
try:
cache.set("force_cull", "value", 1000)
finally:
cache._max_entries = old_max_entries
num_count_queries = sum("COUNT" in query["sql"] for query in captured_queries)
self.assertEqual(num_count_queries, 1)
# Column names are quoted.
for query in captured_queries:
sql = query["sql"]
if "expires" in sql:
self.assertIn(connection.ops.quote_name("expires"), sql)
if "cache_key" in sql:
self.assertIn(connection.ops.quote_name("cache_key"), sql)
def test_delete_cursor_rowcount(self):
"""
The rowcount attribute should not be checked on a closed cursor.
"""
class MockedCursorWrapper(CursorWrapper):
is_closed = False
def close(self):
self.cursor.close()
self.is_closed = True
@property
def rowcount(self):
if self.is_closed:
raise Exception("Cursor is closed.")
return self.cursor.rowcount
cache.set_many({"a": 1, "b": 2})
with mock.patch("django.db.backends.utils.CursorWrapper", MockedCursorWrapper):
self.assertIs(cache.delete("a"), True)
def test_zero_cull(self):
self._perform_cull_test("zero_cull", 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command("createcachetable", stdout=out)
self.assertEqual(
out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES),
)
@override_settings(
CACHES=caches_setting_for_tests(
BACKEND="django.core.cache.backends.db.DatabaseCache",
# Use another table name to avoid the 'table already exists'
# message.
LOCATION="createcachetable_dry_run_mode",
)
)
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command("createcachetable", dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
"createcachetable",
"test cache table",
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
def test_has_key_query_columns_quoted(self):
with CaptureQueriesContext(connection) as captured_queries:
cache.has_key("key")
self.assertEqual(len(captured_queries), 1)
sql = captured_queries[0]["sql"]
# Column names are quoted.
self.assertIn(connection.ops.quote_name("expires"), sql)
self.assertIn(connection.ops.quote_name("cache_key"), sql)
@override_settings(USE_TZ=True)
| DBCacheTests |
python | charliermarsh__ruff | scripts/ty_benchmark/src/benchmark/__init__.py | {
"start": 476,
"end": 2331
} | class ____(NamedTuple):
name: str
"""The benchmark to run."""
commands: list[Command]
"""The commands to benchmark."""
warmup: int
"""The number of warmup runs to perform."""
min_runs: int
"""The minimum number of runs to perform."""
verbose: bool
"""Whether to print verbose output."""
json: bool
"""Whether to export results to JSON."""
def run(self, *, cwd: Path | None = None, env: Mapping[str, str]) -> None:
"""Run the benchmark using `hyperfine`."""
args = [
"hyperfine",
# Ignore any warning/error diagnostics but fail if there are any fatal errors, incorrect configuration, etc.
# mypy exit codes: https://github.com/python/mypy/issues/14615#issuecomment-1420163253
# pyright exit codes: https://docs.basedpyright.com/v1.31.6/configuration/command-line/#pyright-exit-codes
# pyrefly exit codes: Not documented
# ty: https://docs.astral.sh/ty/reference/exit-codes/
"--ignore-failure=1",
]
# Export to JSON.
if self.json:
args.extend(["--export-json", f"{self.name}.json"])
# Preamble: benchmark-wide setup.
if self.verbose:
args.append("--show-output")
args.extend(["--warmup", str(self.warmup), "--min-runs", str(self.min_runs)])
# Add all command names,
for command in self.commands:
args.extend(["--command-name", command.name])
# Add all prepare statements.
for command in self.commands:
args.extend(["--prepare", command.prepare or ""])
# Add all commands.
for command in self.commands:
args.append(shlex.join(command.command))
logging.info(f"Running {args}")
subprocess.run(args, cwd=cwd, env=env)
| Hyperfine |
python | getsentry__sentry | tests/snuba/metrics/test_units.py | {
"start": 165,
"end": 1826
class ____(TestCase):
    def test_format_value_using_unit(self) -> None:
        """Spot-check human-readable formatting for every supported unit."""
        # (raw value, source unit, expected rendered string)
        cases = [
            (543200, "nanosecond", "0.54 ms"),
            (54320, "microsecond", "54.32 ms"),
            (123456, "millisecond", "2.06 m"),
            (1, "second", "1 s"),
            (0.55823414213, "second", "558.23 ms"),
            (45, "minute", "45 m"),
            (24, "hour", "1 d"),
            (3, "day", "3 d"),
            (1, "week", "1 wk"),
            (600, "byte", "600 B"),
            (2048, "kibibyte", "1.95 MB"),
            (3072, "mebibyte", "2.86 GB"),
            (3072, "gibibyte", "2.79 TB"),
            (4096, "tebibyte", "3.64 PB"),
            (51, "pebibyte", "45.30 PB"),
            (1, "exbibyte", "888.18 PB"),
            (4096, "kilobyte", "4.00 MB"),
            (3145728, "megabyte", "3.00 TB"),
            (3072, "megabyte", "3.00 GB"),
            (4096, "gigabyte", "4.00 TB"),
            (5120, "terabyte", "5.00 PB"),
            (6144, "petabyte", "6.00 EB"),
            (7168, "exabyte", "7.00 ZB"),
        ]
        for value, unit, expected in cases:
            assert format_value_using_unit(value, unit) == expected
| TestUnitsUtils |
python | ethereum__web3.py | web3/_utils/encoding.py | {
"start": 8739,
"end": 9563
class ____(json.JSONEncoder):
    """``json.JSONEncoder`` that serializes web3-specific value types:
    ``AttributeDict``, raw/``HexBytes`` bytes, and ``BaseModel`` instances
    (presumably pydantic — it exposes ``model_dump``; confirm upstream).
    Anything else falls through to the stock encoder, which raises
    ``TypeError`` for unknown types."""
    def default(self, obj: Any) -> dict[Any, Any] | HexStr:
        if isinstance(obj, AttributeDict):
            # Serialize the underlying mapping of the attribute-style dict.
            return obj.__dict__
        elif isinstance(obj, (HexBytes, bytes)):
            # Bytes are rendered via to_hex as a hex string.
            return to_hex(obj)
        elif isinstance(obj, BaseModel):
            # TODO: For now we can assume all BaseModel objects behave this way, but
            # internally we will start to use the CamelModel from eth-utils. Perhaps
            # we should check for that type instead.
            return obj.model_dump(by_alias=True)
        return json.JSONEncoder.default(self, obj)
def to_json(obj: dict[Any, Any]) -> str:
    """
    Convert a complex object (like a transaction object) to a JSON string.

    Encoding is delegated to ``FriendlyJsonSerde`` with ``Web3JsonEncoder``,
    so web3-specific values (AttributeDict, bytes, models) serialize cleanly.
    """
    return FriendlyJsonSerde().json_encode(obj, cls=Web3JsonEncoder)
| Web3JsonEncoder |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 5782,
"end": 6068
class ____:
    """ASV benchmark: time ``Series.mode`` across sizes and dtypes."""

    params = [[10**3, 10**4, 10**5], ["int", "uint", "float", "object"]]
    param_names = ["N", "dtype"]

    def setup(self, N, dtype):
        # 10*N draws from [0, N) guarantee duplicates, so mode() has real work.
        values = np.random.randint(0, N, size=10 * N)
        self.s = Series(values).astype(dtype)

    def time_mode(self, N, dtype):
        self.s.mode()
| Mode |
python | pytorch__pytorch | test/inductor/test_smoke.py | {
"start": 637,
"end": 1836
class ____(TestCase):
    """Minimal end-to-end smoke tests for ``torch.compile`` / inductor."""
    @unittest.skipIf(not HAS_GPU, "Triton is not available")
    def test_mlp(self):
        # Enable verbose logging so a failure here surfaces the full
        # dynamo/inductor/aot traces.
        torch._logging.set_logs(
            dynamo=logging.DEBUG, inductor=logging.DEBUG, aot=logging.DEBUG
        )
        mlp = torch.compile(MLP().to(GPU_TYPE))
        # Run a few iterations to exercise both compile and cached paths.
        for _ in range(3):
            mlp(torch.randn(1, device=GPU_TYPE))
        # set back to defaults
        torch._logging.set_logs()
    @unittest.skipIf(not HAS_GPU, "Triton is not available")
    def test_compile_decorator(self):
        # Both decorator forms: bare @torch.compile and with a mode argument.
        @torch.compile
        def foo(x):
            return torch.sin(x) + x.min()
        @torch.compile(mode="reduce-overhead")
        def bar(x):
            return x * x
        for _ in range(3):
            foo(torch.full((3, 4), 0.7, device=GPU_TYPE))
            bar(torch.rand((2, 2), device=GPU_TYPE))
    def test_compile_invalid_options(self):
        # An unknown mode string must be rejected eagerly at compile() time.
        with self.assertRaises(RuntimeError):
            torch.compile(_test_f, mode="ha")
if __name__ == "__main__":
    from torch._inductor.test_case import run_tests
    # NOTE(review): as written, the suite runs only on Linux+GPU boxes where
    # CUDA/Triton is unavailable or the device is compute capability <= 5 —
    # confirm this inverted-looking gate is intended.
    if IS_LINUX and HAS_GPU:
        if (not HAS_CUDA_AND_TRITON) or torch.cuda.get_device_properties(0).major <= 5:
            run_tests()
| SmokeTest |
python | sympy__sympy | sympy/physics/quantum/hilbert.py | {
"start": 970,
"end": 2856
class ____(Basic):
    """An abstract Hilbert space for quantum mechanics.

    In short, a Hilbert space is an abstract vector space that is complete
    with inner products defined [1]_.

    Examples
    ========

    >>> from sympy.physics.quantum.hilbert import HilbertSpace
    >>> hs = HilbertSpace()
    >>> hs
    H

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Hilbert_space
    """

    def __new__(cls):
        return Basic.__new__(cls)

    @property
    def dimension(self):
        """Return the Hilbert dimension of the space."""
        raise NotImplementedError('This Hilbert space has no dimension.')

    # Arithmetic on spaces builds composite spaces: + is direct sum,
    # * is tensor product, ** is tensor power.
    def __add__(self, other):
        return DirectSumHilbertSpace(self, other)

    def __radd__(self, other):
        return DirectSumHilbertSpace(other, self)

    def __mul__(self, other):
        return TensorProductHilbertSpace(self, other)

    def __rmul__(self, other):
        return TensorProductHilbertSpace(other, self)

    def __pow__(self, other, mod=None):
        if mod is not None:
            raise ValueError('The third argument to __pow__ is not supported \
for Hilbert spaces.')
        return TensorPowerHilbertSpace(self, other)

    def __contains__(self, other):
        """Is the operator or state in this Hilbert space.

        Membership compares the *classes* of the Hilbert spaces, not the
        instances, so spaces with symbolic dimensions still match.
        """
        return other.hilbert_space.__class__ == self.__class__

    def _sympystr(self, printer, *args):
        return 'H'

    def _pretty(self, printer, *args):
        return prettyForm('\N{LATIN CAPITAL LETTER H}')

    def _latex(self, printer, *args):
        return r'\mathcal{H}'
| HilbertSpace |
python | django__django | django/db/migrations/serializer.py | {
"start": 7733,
"end": 8286
class ____(BaseSerializer):
    def serialize(self):
        """Serialize each element of the iterable and render it as a tuple
        literal, aggregating every element's required imports."""
        imports = set()
        strings = []
        for element in self.value:
            element_string, element_imports = serializer_factory(element).serialize()
            imports.update(element_imports)
            strings.append(element_string)
        # A one-element tuple needs the trailing comma ("(x,)"); the empty
        # tuple must be "()" because "(,)" is invalid Python syntax.
        if len(strings) == 1:
            template = "(%s,)"
        else:
            template = "(%s)"
        return template % (", ".join(strings)), imports
| IterableSerializer |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/objects.py | {
"start": 2203,
"end": 2591
class ____(Enum):
    """Classifies where a step-execution error originated."""
    # An error that occurs while executing framework code
    FRAMEWORK_ERROR = "FRAMEWORK_ERROR"
    # An error that occurs while executing user code
    USER_CODE_ERROR = "USER_CODE_ERROR"
    # An error occurred at an unexpected time
    UNEXPECTED_ERROR = "UNEXPECTED_ERROR"
    # Execution was interrupted
    INTERRUPT = "INTERRUPT"
@whitelist_for_serdes
| ErrorSource |
python | ray-project__ray | python/ray/util/multiprocessing/pool.py | {
"start": 5276,
"end": 12770
class ____(threading.Thread):
    """Thread that collects results from distributed actors.

    It winds down when either:
    - A pre-specified number of objects has been processed
    - When the END_SENTINEL (submitted through self.add_object_ref())
      has been received and all objects received before that have been
      processed.

    Initialize the thread with total_object_refs = float('inf') to wait for the
    END_SENTINEL.

    Args:
        object_refs (List[RayActorObjectRefs]): ObjectRefs to Ray Actor calls.
            Thread tracks whether they are ready. More ObjectRefs may be added
            with add_object_ref (or _add_object_ref internally) until the object
            count reaches total_object_refs.
        single_result: Should be True if the thread is managing function
            with a single result (like apply_async). False if the thread is
            managing a function with a List of results.
        callback: called only once at the end of the thread
            if no results were errors. If single_result=True, and result is
            not an error, callback is invoked with the result as the only
            argument. If single_result=False, callback is invoked with
            a list of all the results as the only argument.
        error_callback: called only once on the first result
            that errors. Should take an Exception as the only argument.
            If no result errors, this callback is not called.
        total_object_refs: Number of ObjectRefs that this thread
            expects to be ready. May be more than len(object_refs) since
            more ObjectRefs can be submitted after the thread starts.
            If None, defaults to len(object_refs). If float("inf"), thread runs
            until END_SENTINEL (submitted through self.add_object_ref())
            has been received and all objects received before that have
            been processed.
    """
    # Marker object pushed through add_object_ref() to signal "no more work".
    END_SENTINEL = None
    def __init__(
        self,
        object_refs: list,
        single_result: bool = False,
        callback: callable = None,
        error_callback: callable = None,
        total_object_refs: Optional[int] = None,
    ):
        # daemon=True: this collector must not keep the process alive.
        threading.Thread.__init__(self, daemon=True)
        self._got_error = False
        self._object_refs = []
        self._num_ready = 0
        # One slot per submitted ObjectRef; filled with the result batch.
        self._results = []
        # Indices of ready results, consumed by next_ready_index().
        self._ready_index_queue = queue.Queue()
        self._single_result = single_result
        self._callback = callback
        self._error_callback = error_callback
        self._total_object_refs = total_object_refs or len(object_refs)
        # Maps ObjectRef -> its position in self._object_refs/self._results.
        self._indices = {}
        # Thread-safe queue used to add ObjectRefs to fetch after creating
        # this thread (used to lazily submit for imap and imap_unordered).
        self._new_object_refs = queue.Queue()
        for object_ref in object_refs:
            self._add_object_ref(object_ref)
    def _add_object_ref(self, object_ref):
        # Register a ref and reserve its result slot. Not thread-safe; only
        # called from __init__ and from run() itself.
        self._indices[object_ref] = len(self._object_refs)
        self._object_refs.append(object_ref)
        self._results.append(None)
    def add_object_ref(self, object_ref):
        # Thread-safe entry point for submitting more work (or END_SENTINEL).
        self._new_object_refs.put(object_ref)
    def run(self):
        unready = copy.copy(self._object_refs)
        aggregated_batch_results = []
        # Run for a specific number of objects if self._total_object_refs is finite.
        # Otherwise, process all objects received prior to the stop signal, given by
        # self.add_object(END_SENTINEL).
        while self._num_ready < self._total_object_refs:
            # Get as many new IDs from the queue as possible without blocking,
            # unless we have no IDs to wait on, in which case we block.
            ready_id = None
            while ready_id is None:
                try:
                    block = len(unready) == 0
                    new_object_ref = self._new_object_refs.get(block=block)
                    if new_object_ref is self.END_SENTINEL:
                        # Receiving the END_SENTINEL object is the signal to stop.
                        # Store the total number of objects.
                        self._total_object_refs = len(self._object_refs)
                    else:
                        self._add_object_ref(new_object_ref)
                        unready.append(new_object_ref)
                except queue.Empty:
                    # queue.Empty means no result was retrieved if block=False.
                    pass
                # Check if any of the available IDs are done. The timeout is required
                # here to periodically check for new IDs from self._new_object_refs.
                # NOTE(edoakes): the choice of a 100ms timeout here is arbitrary. Too
                # low of a timeout would cause higher overhead from busy spinning and
                # too high would cause higher tail latency to fetch the first result in
                # some cases.
                ready, unready = ray.wait(unready, num_returns=1, timeout=0.1)
                if len(ready) > 0:
                    ready_id = ready[0]
            try:
                batch = ray.get(ready_id)
            except ray.exceptions.RayError as e:
                # Surface the failure as a result so the error path below runs.
                batch = [e]
            # The exception callback is called only once on the first result
            # that errors. If no result errors, it is never called.
            if not self._got_error:
                for result in batch:
                    if isinstance(result, Exception):
                        self._got_error = True
                        if self._error_callback is not None:
                            self._error_callback(result)
                        break
                    else:
                        aggregated_batch_results.append(result)
            self._num_ready += 1
            self._results[self._indices[ready_id]] = batch
            self._ready_index_queue.put(self._indices[ready_id])
        # The regular callback is called only once on the entire List of
        # results as long as none of the results were errors. If any results
        # were errors, the regular callback is never called; instead, the
        # exception callback is called on the first erroring result.
        #
        # This callback is called outside the while loop to ensure that it's
        # called on the entire list of results– not just a single batch.
        if not self._got_error and self._callback is not None:
            if not self._single_result:
                self._callback(aggregated_batch_results)
            else:
                # On a thread handling a function with a single result
                # (e.g. apply_async), we call the callback on just that result
                # instead of on a list encaspulating that result
                self._callback(aggregated_batch_results[0])
    def got_error(self):
        # Should only be called after the thread finishes.
        return self._got_error
    def result(self, index):
        # Should only be called on results that are ready.
        return self._results[index]
    def results(self):
        # Should only be called after the thread finishes.
        return self._results
    def next_ready_index(self, timeout=None):
        # Block until some result is ready (or timeout) and return its index.
        try:
            return self._ready_index_queue.get(timeout=timeout)
        except queue.Empty:
            # queue.Queue signals a timeout by raising queue.Empty.
            raise TimeoutError
| ResultThread |
python | numba__numba | numba/core/byteflow.py | {
"start": 12158,
"end": 69087
} | class ____(object):
"""Trace runner contains the states for the trace and the opcode dispatch.
"""
def __init__(self, debug_filename):
self.debug_filename = debug_filename
self.pending = deque()
self.finished = set()
def get_debug_loc(self, lineno):
return Loc(self.debug_filename, lineno)
def dispatch(self, state):
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
if state._blockstack:
state: State
while state._blockstack:
topblk = state._blockstack[-1]
blk_end = topblk['end']
if blk_end is not None and blk_end <= state.pc_initial:
state._blockstack.pop()
else:
break
elif PYVERSION in ((3, 10),):
pass
else:
raise NotImplementedError(PYVERSION)
inst = state.get_inst()
if inst.opname != "CACHE":
_logger.debug("dispatch pc=%s, inst=%s", state._pc, inst)
_logger.debug("stack %s", state._stack)
fn = getattr(self, "op_{}".format(inst.opname), None)
if fn is not None:
fn(state, inst)
else:
msg = "Use of unsupported opcode (%s) found" % inst.opname
raise UnsupportedBytecodeError(msg,
loc=self.get_debug_loc(inst.lineno))
def _adjust_except_stack(self, state):
"""
Adjust stack when entering an exception handler to match expectation
by the bytecode.
"""
tryblk = state.get_top_block('TRY')
state.pop_block_and_above(tryblk)
nstack = state.stack_depth
kwargs = {}
expected_depth = tryblk['stack_depth']
if nstack > expected_depth:
# Pop extra item in the stack
kwargs['npop'] = nstack - expected_depth
# Set extra stack itemcount due to the exception values.
extra_stack = 1
if tryblk['push_lasti']:
extra_stack += 1
kwargs['npush'] = extra_stack
state.fork(pc=tryblk['end'], **kwargs)
def op_NOP(self, state, inst):
state.append(inst)
if PYVERSION in ((3,14), ):
# New in 3.14
op_NOT_TAKEN = op_NOP
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
pass
else:
raise NotImplementedError(PYVERSION)
def op_RESUME(self, state, inst):
state.append(inst)
def op_CACHE(self, state, inst):
state.append(inst)
def op_PRECALL(self, state, inst):
state.append(inst)
def op_PUSH_NULL(self, state, inst):
state.push(state.make_null())
state.append(inst)
def op_RETURN_GENERATOR(self, state, inst):
# This impl doesn't follow what CPython does. CPython is hacking
# the frame stack in the interpreter. From usage, it always
# has a POP_TOP after it so we push a dummy value to the stack.
#
# Example bytecode:
# > 0 NOP(arg=None, lineno=80)
# 2 RETURN_GENERATOR(arg=None, lineno=80)
# 4 POP_TOP(arg=None, lineno=80)
# 6 RESUME(arg=0, lineno=80)
state.push(state.make_temp())
state.append(inst)
if PYVERSION in ((3, 13), (3, 14)):
def op_FORMAT_SIMPLE(self, state, inst):
value = state.pop()
strvar = state.make_temp()
res = state.make_temp()
state.append(inst, value=value, res=res, strvar=strvar)
state.push(res)
elif PYVERSION in ((3, 10), (3, 11), (3, 12)):
pass
else:
raise NotImplementedError(PYVERSION)
def op_FORMAT_VALUE(self, state, inst):
"""
FORMAT_VALUE(flags): flags argument specifies format spec which is
not supported yet. Currently, we just call str() on the value.
Pops a value from stack and pushes results back.
Required for supporting f-strings.
https://docs.python.org/3/library/dis.html#opcode-FORMAT_VALUE
"""
if inst.arg != 0:
msg = "format spec in f-strings not supported yet"
raise UnsupportedBytecodeError(msg,
loc=self.get_debug_loc(inst.lineno))
value = state.pop()
strvar = state.make_temp()
res = state.make_temp()
state.append(inst, value=value, res=res, strvar=strvar)
state.push(res)
def op_BUILD_STRING(self, state, inst):
"""
BUILD_STRING(count): Concatenates count strings from the stack and
pushes the resulting string onto the stack.
Required for supporting f-strings.
https://docs.python.org/3/library/dis.html#opcode-BUILD_STRING
"""
count = inst.arg
strings = list(reversed([state.pop() for _ in range(count)]))
# corner case: f""
if count == 0:
tmps = [state.make_temp()]
else:
tmps = [state.make_temp() for _ in range(count - 1)]
state.append(inst, strings=strings, tmps=tmps)
state.push(tmps[-1])
def op_POP_TOP(self, state, inst):
state.pop()
if PYVERSION in ((3, 14), ):
# New in 3.14
op_POP_ITER = op_POP_TOP
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
pass
else:
raise NotImplementedError(PYVERSION)
if PYVERSION in ((3, 13), (3,14)):
def op_TO_BOOL(self, state, inst):
res = state.make_temp()
tos = state.pop()
state.append(inst, val=tos, res=res)
state.push(res)
elif PYVERSION in ((3, 10), (3, 11), (3, 12)):
pass
else:
raise NotImplementedError(PYVERSION)
if PYVERSION in ((3, 13), (3, 14)):
def op_LOAD_GLOBAL(self, state, inst):
# Ordering of the global value and NULL is swapped in Py3.13
res = state.make_temp()
idx = inst.arg >> 1
state.append(inst, idx=idx, res=res)
state.push(res)
# ignoring the NULL
if inst.arg & 1:
state.push(state.make_null())
elif PYVERSION in ((3, 11), (3, 12)):
def op_LOAD_GLOBAL(self, state, inst):
res = state.make_temp()
idx = inst.arg >> 1
state.append(inst, idx=idx, res=res)
# ignoring the NULL
if inst.arg & 1:
state.push(state.make_null())
state.push(res)
elif PYVERSION in ((3, 10),):
def op_LOAD_GLOBAL(self, state, inst):
res = state.make_temp()
state.append(inst, res=res)
state.push(res)
else:
raise NotImplementedError(PYVERSION)
def op_COPY_FREE_VARS(self, state, inst):
state.append(inst)
def op_MAKE_CELL(self, state, inst):
state.append(inst)
def op_LOAD_DEREF(self, state, inst):
res = state.make_temp()
state.append(inst, res=res)
state.push(res)
def op_LOAD_CONST(self, state, inst):
# append const index for interpreter to read the const value
res = state.make_temp("const") + f".{inst.arg}"
state.push(res)
state.append(inst, res=res)
if PYVERSION in ((3, 14), ):
# New in 3.14
def op_LOAD_SMALL_INT(self, state, inst):
assert 0 <= inst.arg < 256
res = state.make_temp("const") + f".{inst.arg}"
state.push(res)
state.append(inst, res=res)
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
pass
else:
raise NotImplementedError(PYVERSION)
def op_LOAD_ATTR(self, state, inst):
item = state.pop()
res = state.make_temp()
if PYVERSION in ((3, 13), (3, 14)):
state.push(res) # the attr
if inst.arg & 1:
state.push(state.make_null())
elif PYVERSION in ((3, 12),):
if inst.arg & 1:
state.push(state.make_null())
state.push(res)
elif PYVERSION in ((3, 10), (3, 11)):
state.push(res)
else:
raise NotImplementedError(PYVERSION)
state.append(inst, item=item, res=res)
def op_LOAD_FAST(self, state, inst):
if PYVERSION in ((3, 13), (3, 14)):
try:
name = state.get_varname(inst)
except IndexError: # oparg is out of range
# Handle this like a LOAD_DEREF
# Assume MAKE_CELL and COPY_FREE_VARS has correctly setup the
# states.
# According to https://github.com/python/cpython/blob/9ac606080a0074cdf7589d9b7c9413a73e0ddf37/Objects/codeobject.c#L730C9-L759 # noqa E501
# localsplus is locals + cells + freevars
bc = state._bytecode
num_varnames = len(bc.co_varnames)
num_freevars = len(bc.co_freevars)
num_cellvars = len(bc.co_cellvars)
max_fast_local = num_cellvars + num_freevars
assert 0 <= inst.arg - num_varnames < max_fast_local
res = state.make_temp()
state.append(inst, res=res, as_load_deref=True)
state.push(res)
return
elif PYVERSION in ((3, 10), (3, 11), (3, 12)):
name = state.get_varname(inst)
else:
raise NotImplementedError(PYVERSION)
res = state.make_temp(name)
state.append(inst, res=res)
state.push(res)
if PYVERSION in ((3, 13), (3, 14)):
def op_LOAD_FAST_LOAD_FAST(self, state, inst):
oparg = inst.arg
oparg1 = oparg >> 4
oparg2 = oparg & 15
name1 = state.get_varname_by_arg(oparg1)
name2 = state.get_varname_by_arg(oparg2)
res1 = state.make_temp(name1)
res2 = state.make_temp(name2)
state.append(inst, res1=res1, res2=res2)
state.push(res1)
state.push(res2)
def op_STORE_FAST_LOAD_FAST(self, state, inst):
oparg = inst.arg
# oparg1 = oparg >> 4 # not needed
oparg2 = oparg & 15
store_value = state.pop()
load_name = state.get_varname_by_arg(oparg2)
load_res = state.make_temp(load_name)
state.append(inst, store_value=store_value, load_res=load_res)
state.push(load_res)
def op_STORE_FAST_STORE_FAST(self, state, inst):
value1 = state.pop()
value2 = state.pop()
state.append(inst, value1=value1, value2=value2)
elif PYVERSION in ((3, 10), (3, 11), (3, 12)):
pass
else:
raise NotImplementedError(PYVERSION)
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
op_LOAD_FAST_CHECK = op_LOAD_FAST
op_LOAD_FAST_AND_CLEAR = op_LOAD_FAST
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
if PYVERSION in ((3, 14),):
# New in 3.14.
op_LOAD_FAST_BORROW = op_LOAD_FAST
op_LOAD_FAST_BORROW_LOAD_FAST_BORROW = op_LOAD_FAST_LOAD_FAST
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
pass
else:
raise NotImplementedError(PYVERSION)
def op_DELETE_FAST(self, state, inst):
state.append(inst)
def op_DELETE_ATTR(self, state, inst):
target = state.pop()
state.append(inst, target=target)
def op_STORE_ATTR(self, state, inst):
target = state.pop()
value = state.pop()
state.append(inst, target=target, value=value)
def op_STORE_DEREF(self, state, inst):
value = state.pop()
state.append(inst, value=value)
def op_STORE_FAST(self, state, inst):
value = state.pop()
state.append(inst, value=value)
def op_SLICE_1(self, state, inst):
"""
TOS = TOS1[TOS:]
"""
tos = state.pop()
tos1 = state.pop()
res = state.make_temp()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
start=tos,
res=res,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
state.push(res)
def op_SLICE_2(self, state, inst):
"""
TOS = TOS1[:TOS]
"""
tos = state.pop()
tos1 = state.pop()
res = state.make_temp()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
stop=tos,
res=res,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
state.push(res)
def op_SLICE_3(self, state, inst):
"""
TOS = TOS2[TOS1:TOS]
"""
tos = state.pop()
tos1 = state.pop()
tos2 = state.pop()
res = state.make_temp()
slicevar = state.make_temp()
indexvar = state.make_temp()
state.append(
inst,
base=tos2,
start=tos1,
stop=tos,
res=res,
slicevar=slicevar,
indexvar=indexvar,
)
state.push(res)
def op_STORE_SLICE_0(self, state, inst):
"""
TOS[:] = TOS1
"""
tos = state.pop()
value = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos,
value=value,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
def op_STORE_SLICE_1(self, state, inst):
"""
TOS1[TOS:] = TOS2
"""
tos = state.pop()
tos1 = state.pop()
value = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
start=tos,
slicevar=slicevar,
value=value,
indexvar=indexvar,
nonevar=nonevar,
)
def op_STORE_SLICE_2(self, state, inst):
"""
TOS1[:TOS] = TOS2
"""
tos = state.pop()
tos1 = state.pop()
value = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
stop=tos,
value=value,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
def op_STORE_SLICE_3(self, state, inst):
"""
TOS2[TOS1:TOS] = TOS3
"""
tos = state.pop()
tos1 = state.pop()
tos2 = state.pop()
value = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
state.append(
inst,
base=tos2,
start=tos1,
stop=tos,
value=value,
slicevar=slicevar,
indexvar=indexvar,
)
def op_DELETE_SLICE_0(self, state, inst):
"""
del TOS[:]
"""
tos = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst, base=tos, slicevar=slicevar, indexvar=indexvar,
nonevar=nonevar,
)
def op_DELETE_SLICE_1(self, state, inst):
"""
del TOS1[TOS:]
"""
tos = state.pop()
tos1 = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
start=tos,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
def op_DELETE_SLICE_2(self, state, inst):
"""
del TOS1[:TOS]
"""
tos = state.pop()
tos1 = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
stop=tos,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
def op_DELETE_SLICE_3(self, state, inst):
"""
del TOS2[TOS1:TOS]
"""
tos = state.pop()
tos1 = state.pop()
tos2 = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
state.append(
inst, base=tos2, start=tos1, stop=tos, slicevar=slicevar,
indexvar=indexvar
)
def op_BUILD_SLICE(self, state, inst):
"""
slice(TOS1, TOS) or slice(TOS2, TOS1, TOS)
"""
argc = inst.arg
if argc == 2:
tos = state.pop()
tos1 = state.pop()
start = tos1
stop = tos
step = None
elif argc == 3:
tos = state.pop()
tos1 = state.pop()
tos2 = state.pop()
start = tos2
stop = tos1
step = tos
else:
raise Exception("unreachable")
slicevar = state.make_temp()
res = state.make_temp()
state.append(
inst, start=start, stop=stop, step=step, res=res, slicevar=slicevar
)
state.push(res)
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
def op_BINARY_SLICE(self, state, inst):
end = state.pop()
start = state.pop()
container = state.pop()
temp_res = state.make_temp()
res = state.make_temp()
slicevar = state.make_temp()
state.append(
inst, start=start, end=end, container=container, res=res,
slicevar=slicevar, temp_res=temp_res
)
state.push(res)
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
def op_STORE_SLICE(self, state, inst):
end = state.pop()
start = state.pop()
container = state.pop()
value = state.pop()
slicevar = state.make_temp()
res = state.make_temp()
state.append(
inst, start=start, end=end, container=container, value=value,
res=res, slicevar=slicevar,
)
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
def _op_POP_JUMP_IF(self, state, inst):
pred = state.pop()
state.append(inst, pred=pred)
target_inst = inst.get_jump_target()
next_inst = inst.next
# if the next inst and the jump target are the same location, issue one
# fork else issue a fork for the next and the target.
state.fork(pc=next_inst)
if target_inst != next_inst:
state.fork(pc=target_inst)
op_POP_JUMP_IF_TRUE = _op_POP_JUMP_IF
op_POP_JUMP_IF_FALSE = _op_POP_JUMP_IF
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
op_POP_JUMP_IF_NONE = _op_POP_JUMP_IF
op_POP_JUMP_IF_NOT_NONE = _op_POP_JUMP_IF
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
def _op_JUMP_IF_OR_POP(self, state, inst):
pred = state.get_tos()
state.append(inst, pred=pred)
state.fork(pc=inst.next, npop=1)
state.fork(pc=inst.get_jump_target())
op_JUMP_IF_FALSE_OR_POP = _op_JUMP_IF_OR_POP
op_JUMP_IF_TRUE_OR_POP = _op_JUMP_IF_OR_POP
def op_POP_JUMP_FORWARD_IF_NONE(self, state, inst):
self._op_POP_JUMP_IF(state, inst)
def op_POP_JUMP_FORWARD_IF_NOT_NONE(self, state, inst):
self._op_POP_JUMP_IF(state, inst)
def op_POP_JUMP_BACKWARD_IF_NONE(self, state, inst):
self._op_POP_JUMP_IF(state, inst)
def op_POP_JUMP_BACKWARD_IF_NOT_NONE(self, state, inst):
self._op_POP_JUMP_IF(state, inst)
def op_POP_JUMP_FORWARD_IF_FALSE(self, state, inst):
self._op_POP_JUMP_IF(state, inst)
def op_POP_JUMP_FORWARD_IF_TRUE(self, state, inst):
self._op_POP_JUMP_IF(state, inst)
def op_POP_JUMP_BACKWARD_IF_FALSE(self, state, inst):
self._op_POP_JUMP_IF(state, inst)
def op_POP_JUMP_BACKWARD_IF_TRUE(self, state, inst):
self._op_POP_JUMP_IF(state, inst)
def op_JUMP_FORWARD(self, state, inst):
state.append(inst)
state.fork(pc=inst.get_jump_target())
def op_JUMP_BACKWARD(self, state, inst):
state.append(inst)
state.fork(pc=inst.get_jump_target())
op_JUMP_BACKWARD_NO_INTERRUPT = op_JUMP_BACKWARD
def op_JUMP_ABSOLUTE(self, state, inst):
state.append(inst)
state.fork(pc=inst.get_jump_target())
def op_BREAK_LOOP(self, state, inst):
# NOTE: bytecode removed since py3.8
end = state.get_top_block('LOOP')['end']
state.append(inst, end=end)
state.pop_block()
state.fork(pc=end)
def op_RETURN_VALUE(self, state, inst):
state.append(inst, retval=state.pop(), castval=state.make_temp())
state.terminate()
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
def op_RETURN_CONST(self, state, inst):
res = state.make_temp("const")
state.append(inst, retval=res, castval=state.make_temp())
state.terminate()
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
def op_YIELD_VALUE(self, state, inst):
val = state.pop()
res = state.make_temp()
state.append(inst, value=val, res=res)
state.push(res)
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
def op_RAISE_VARARGS(self, state, inst):
if inst.arg == 0:
exc = None
# No re-raising within a try-except block.
# But we allow bare reraise.
if state.has_active_try():
raise UnsupportedBytecodeError(
"The re-raising of an exception is not yet supported.",
loc=self.get_debug_loc(inst.lineno),
)
elif inst.arg == 1:
exc = state.pop()
else:
raise ValueError("Multiple argument raise is not supported.")
state.append(inst, exc=exc)
if state.has_active_try():
self._adjust_except_stack(state)
else:
state.terminate()
elif PYVERSION in ((3, 10),):
def op_RAISE_VARARGS(self, state, inst):
in_exc_block = any([
state.get_top_block("EXCEPT") is not None,
state.get_top_block("FINALLY") is not None
])
if inst.arg == 0:
exc = None
if in_exc_block:
raise UnsupportedBytecodeError(
"The re-raising of an exception is not yet supported.",
loc=self.get_debug_loc(inst.lineno),
)
elif inst.arg == 1:
exc = state.pop()
else:
raise ValueError("Multiple argument raise is not supported.")
state.append(inst, exc=exc)
state.terminate()
else:
raise NotImplementedError(PYVERSION)
def op_BEGIN_FINALLY(self, state, inst):
temps = []
for i in range(_EXCEPT_STACK_OFFSET):
tmp = state.make_temp()
temps.append(tmp)
state.push(tmp)
state.append(inst, temps=temps)
def op_END_FINALLY(self, state, inst):
blk = state.pop_block()
state.reset_stack(blk['entry_stack'])
if PYVERSION in ((3, 13), (3, 14)):
def op_END_FOR(self, state, inst):
state.pop()
elif PYVERSION in ((3, 12),):
def op_END_FOR(self, state, inst):
state.pop()
state.pop()
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
def op_POP_FINALLY(self, state, inst):
# we don't emulate the exact stack behavior
if inst.arg != 0:
msg = ('Unsupported use of a bytecode related to try..finally'
' or a with-context')
raise UnsupportedBytecodeError(msg,
loc=self.get_debug_loc(inst.lineno))
def op_CALL_FINALLY(self, state, inst):
pass
def op_WITH_EXCEPT_START(self, state, inst):
state.terminate() # do not support
def op_WITH_CLEANUP_START(self, state, inst):
# we don't emulate the exact stack behavior
state.append(inst)
def op_WITH_CLEANUP_FINISH(self, state, inst):
# we don't emulate the exact stack behavior
state.append(inst)
def op_SETUP_LOOP(self, state, inst):
    """Open a LOOP block ending at the loop's jump target.

    NOTE: bytecode removed since py3.8.
    """
    state.push_block(
        state.make_block(
            kind='LOOP',
            end=inst.get_jump_target(),
        )
    )

if PYVERSION in ((3, 14), ):
    # Replaced by LOAD_SPECIAL in 3.14 (see op_LOAD_SPECIAL).
    pass
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
    def op_BEFORE_WITH(self, state, inst):
        """Enter a with-statement body.

        Almost the same as py3.10 SETUP_WITH just lacking the finally
        block.  Pushes the __exit__ temp and the temp for the value
        yielded by __enter__, then opens a WITH block whose end is
        derived from the exception table.
        """
        cm = state.pop()  # the context-manager
        yielded = state.make_temp()
        exitfn = state.make_temp(prefix='setup_with_exitfn')
        state.push(exitfn)
        state.push(yielded)
        # Gather all exception entries for this WITH. There maybe multiple
        # entries; esp. for nested WITHs.
        bc = state._bytecode
        ehhead = bc.find_exception_entry(inst.next)
        ehrelated = [ehhead]
        for eh in bc.exception_entries:
            if eh.target == ehhead.target:
                ehrelated.append(eh)
        # The with-block extends to the furthest related handler end.
        end = max(eh.end for eh in ehrelated)
        state.append(inst, contextmanager=cm, exitfn=exitfn, end=end)
        state.push_block(
            state.make_block(
                kind='WITH',
                end=end,
            )
        )
        # Forces a new block
        state.fork(pc=inst.next)
else:
    raise NotImplementedError(PYVERSION)

def op_SETUP_WITH(self, state, inst):
    """py3.10 with-statement entry: push the __exit__ temp and the
    __enter__ result temp, then open a WITH block ending at the jump
    target."""
    cm = state.pop()  # the context-manager
    yielded = state.make_temp()
    exitfn = state.make_temp(prefix='setup_with_exitfn')
    state.append(inst, contextmanager=cm, exitfn=exitfn)
    state.push(exitfn)
    state.push(yielded)
    state.push_block(
        state.make_block(
            kind='WITH',
            end=inst.get_jump_target(),
        )
    )
    # Forces a new block
    state.fork(pc=inst.next)
def _setup_try(self, kind, state, next, end):
    """Open a try-region: make a handler block of *kind* and fork to
    *next* carrying a TRY block that points at that handler."""
    # Forces a new block
    # Fork to the body of the finally
    handler_block = state.make_block(
        kind=kind,
        end=None,
        reset_stack=False,
    )
    # Forces a new block
    # Fork to the body of the finally
    state.fork(
        pc=next,
        extra_block=state.make_block(
            kind='TRY',
            end=end,
            reset_stack=False,
            handler=handler_block,
        )
    )

def op_PUSH_EXC_INFO(self, state, inst):
    # Insert a temp holding the active exception just beneath the
    # original top-of-stack.
    tos = state.pop()
    state.push(state.make_temp("exception"))
    state.push(tos)

def op_SETUP_FINALLY(self, state, inst):
    """Open a FINALLY try-region whose handler starts at the jump
    target."""
    state.append(inst)
    self._setup_try(
        'FINALLY', state, next=inst.next, end=inst.get_jump_target(),
    )
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
    def op_POP_EXCEPT(self, state, inst):
        # 3.11+ block-less exception handling: just drop the exception
        # value.
        state.pop()
elif PYVERSION in ((3, 10),):
    def op_POP_EXCEPT(self, state, inst):
        # 3.10: pop the EXCEPT/FINALLY block plus the three
        # exception-state values it pushed.
        blk = state.pop_block()
        if blk['kind'] not in {BlockKind('EXCEPT'), BlockKind('FINALLY')}:
            raise UnsupportedBytecodeError(
                f"POP_EXCEPT got an unexpected block: {blk['kind']}",
                loc=self.get_debug_loc(inst.lineno),
            )
        state.pop()
        state.pop()
        state.pop()
        # Forces a new block
        state.fork(pc=inst.next)
else:
    raise NotImplementedError(PYVERSION)

def op_POP_BLOCK(self, state, inst):
    """Close the innermost block; TRY and WITH pops are recorded so
    the interpreter can finalize them."""
    blk = state.pop_block()
    if blk['kind'] == BlockKind('TRY'):
        state.append(inst, kind='try')
    elif blk['kind'] == BlockKind('WITH'):
        state.append(inst, kind='with')
    state.fork(pc=inst.next)

if PYVERSION in ((3, 14),):
    # Removed in 3.14 -- replaced with BINARY_OP and []
    pass
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
    def op_BINARY_SUBSCR(self, state, inst):
        # target[index] -> res
        index = state.pop()
        target = state.pop()
        res = state.make_temp()
        state.append(inst, index=index, target=target, res=res)
        state.push(res)
else:
    raise NotImplementedError(PYVERSION)
def op_STORE_SUBSCR(self, state, inst):
    """Record ``target[index] = value``; operands are popped
    top-down as index, target, value."""
    idx, tgt, val = state.pop(), state.pop(), state.pop()
    state.append(inst, target=tgt, index=idx, value=val)

def op_DELETE_SUBSCR(self, state, inst):
    """Record ``del target[index]``; operands are popped top-down
    as index, target."""
    idx, tgt = state.pop(), state.pop()
    state.append(inst, target=tgt, index=idx)
def op_CALL(self, state, inst):
    """3.11+ CALL: pop the arguments, the self/NULL slot and the
    callable; record the call and push the result temp."""
    narg = inst.arg
    args = list(reversed([state.pop() for _ in range(narg)]))
    if PYVERSION in ((3, 13), (3, 14)):
        null_or_self = state.pop()
        # position of the callable is fixed
        callable = state.pop()
        if not _is_null_temp_reg(null_or_self):
            # Bound-method call: the extra slot is the implicit first
            # argument.
            args = [null_or_self, *args]
        kw_names = None
    elif PYVERSION < (3, 13):
        callable_or_firstarg = state.pop()
        null_or_callable = state.pop()
        if _is_null_temp_reg(null_or_callable):
            callable = callable_or_firstarg
        else:
            # Method call: lower slot is the callable, upper slot the
            # implicit first argument.
            callable = null_or_callable
            args = [callable_or_firstarg, *args]
        # Keyword names stashed by a preceding KW_NAMES (3.11/3.12).
        kw_names = state.pop_kw_names()
    res = state.make_temp()
    state.append(inst, func=callable, args=args, kw_names=kw_names, res=res)
    state.push(res)
def op_KW_NAMES(self, state, inst):
    # Stash the keyword-name tuple index for the following CALL
    # (3.11/3.12 only; consumed by pop_kw_names()).
    state.set_kw_names(inst.arg)

def op_CALL_FUNCTION(self, state, inst):
    """Positional-only call: pop narg arguments then the callable,
    push the result temp."""
    narg = inst.arg
    args = list(reversed([state.pop() for _ in range(narg)]))
    func = state.pop()
    res = state.make_temp()
    state.append(inst, func=func, args=args, res=res)
    state.push(res)

def op_CALL_FUNCTION_KW(self, state, inst):
    """Keyword call: TOS is the tuple of keyword names, below it the
    argument values, then the callable."""
    narg = inst.arg
    names = state.pop()  # tuple of names
    args = list(reversed([state.pop() for _ in range(narg)]))
    func = state.pop()
    res = state.make_temp()
    state.append(inst, func=func, args=args, names=names, res=res)
    state.push(res)
if PYVERSION in ((3, 13), (3, 14)):
    def op_CALL_KW(self, state, inst):
        """3.13+ keyword call: TOS is the kw-names tuple, then the
        arguments, the self/NULL slot and the callable."""
        narg = inst.arg
        kw_names = state.pop()
        args = list(reversed([state.pop() for _ in range(narg)]))
        null_or_firstarg = state.pop()
        callable = state.pop()
        if not _is_null_temp_reg(null_or_firstarg):
            # Bound-method call: prepend the implicit first argument.
            args = [null_or_firstarg, *args]
        res = state.make_temp()
        state.append(inst, func=callable, args=args, kw_names=kw_names,
                     res=res)
        state.push(res)
elif PYVERSION in ((3, 10), (3, 11), (3, 12)):
    # CALL_KW does not exist before 3.13.
    pass
else:
    raise NotImplementedError(PYVERSION)

if PYVERSION in ((3, 14), ):
    def op_CALL_FUNCTION_EX(self, state, inst):
        # (func, unused, callargs, kwargs -- result))
        # In 3.14 CALL_FUNCTION_EX always take a kwargs argument
        # https://github.com/python/cpython/pull/129226
        varkwarg = state.pop()
        if _is_null_temp_reg(varkwarg):
            # NULL kwargs slot means "no **kwargs".
            varkwarg = None
        vararg = state.pop()
        state.pop()  # unused
        func = state.pop()

        res = state.make_temp()
        state.append(inst, func=func, vararg=vararg, varkwarg=varkwarg,
                     res=res)
        state.push(res)
elif PYVERSION in ((3, 13), ):
    def op_CALL_FUNCTION_EX(self, state, inst):
        # (func, unused, callargs, kwargs if (oparg & 1) -- result))
        if inst.arg & 1:
            varkwarg = state.pop()
        else:
            varkwarg = None
        vararg = state.pop()
        state.pop()  # unused
        func = state.pop()

        res = state.make_temp()
        state.append(inst, func=func, vararg=vararg, varkwarg=varkwarg,
                     res=res)
        state.push(res)
elif PYVERSION in ((3, 10), (3, 11), (3, 12)):
    def op_CALL_FUNCTION_EX(self, state, inst):
        # kwargs is present only when bit 0 of the oparg is set.
        if inst.arg & 1:
            varkwarg = state.pop()
        else:
            varkwarg = None
        vararg = state.pop()
        func = state.pop()

        if PYVERSION in ((3, 11), (3, 12)):
            # Discard the NULL pushed beneath the callable, if present.
            if _is_null_temp_reg(state.peek(1)):
                state.pop()  # pop NULL, it's not used
        elif PYVERSION in ((3, 10),):
            pass
        else:
            raise NotImplementedError(PYVERSION)

        res = state.make_temp()
        state.append(inst, func=func, vararg=vararg, varkwarg=varkwarg,
                     res=res)
        state.push(res)
else:
    raise NotImplementedError(PYVERSION)
def _dup_topx(self, state, inst, count):
    """Duplicate the top *count* stack values.

    Pops the values, records the duplication, then re-pushes the
    originals followed by fresh temps standing in for the copies.
    """
    orig = [state.pop() for _ in range(count)]
    orig.reverse()
    # We need to actually create new temporaries if we want the
    # IR optimization pass to work correctly (see issue #580)
    duped = [state.make_temp() for _ in range(count)]
    state.append(inst, orig=orig, duped=duped)
    for val in orig:
        state.push(val)
    for val in duped:
        state.push(val)
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
    def op_CALL_INTRINSIC_1(self, state, inst):
        """Dispatch on the CALL_INTRINSIC_1 operand; only a small
        subset of intrinsics is supported."""
        # See https://github.com/python/cpython/blob/v3.12.0rc2/Include/
        # internal/pycore_intrinsics.h#L3-L17C36
        try:
            operand = CALL_INTRINSIC_1_Operand(inst.arg)
        except TypeError:
            # Unknown intrinsic number.
            msg = f"op_CALL_INTRINSIC_1({inst.arg})"
            loc = self.get_debug_loc(inst.lineno)
            raise UnsupportedBytecodeError(msg, loc=loc)

        if operand == ci1op.INTRINSIC_STOPITERATION_ERROR:
            # Terminates this dataflow path.
            state.append(inst, operand=operand)
            state.terminate()
            return
        elif operand == ci1op.UNARY_POSITIVE:
            val = state.pop()
            res = state.make_temp()
            state.append(inst, operand=operand,
                         value=val, res=res)
            state.push(res)
            return
        elif operand == ci1op.INTRINSIC_LIST_TO_TUPLE:
            tos = state.pop()
            res = state.make_temp()
            state.append(inst, operand=operand,
                         const_list=tos, res=res)
            state.push(res)
            return
        else:
            raise NotImplementedError(operand)
elif PYVERSION in ((3, 10), (3, 11)):
    # CALL_INTRINSIC_1 does not exist before 3.12.
    pass
else:
    raise NotImplementedError(PYVERSION)
def op_DUP_TOPX(self, state, inst):
    """Duplicate the top inst.arg (1..5) stack values."""
    count = inst.arg
    assert 1 <= count <= 5, "Invalid DUP_TOPX count"
    self._dup_topx(state, inst, count)

def op_DUP_TOP(self, state, inst):
    # Duplicate the top-of-stack value.
    self._dup_topx(state, inst, count=1)

def op_DUP_TOP_TWO(self, state, inst):
    # Duplicate the two top-of-stack values, preserving their order.
    self._dup_topx(state, inst, count=2)

def op_COPY(self, state, inst):
    # 3.11+: push a copy of the inst.arg-th value from the top.
    state.push(state.peek(inst.arg))

def op_SWAP(self, state, inst):
    # 3.11+: swap TOS with the inst.arg-th value from the top.
    state.swap(inst.arg)
def op_ROT_TWO(self, state, inst):
    """Swap the two top-of-stack values."""
    top, below = state.pop(), state.pop()
    for value in (top, below):
        state.push(value)

def op_ROT_THREE(self, state, inst):
    """Rotate the top three values: TOS moves to third place, the
    other two shift up one slot."""
    top = state.pop()
    second = state.pop()
    third = state.pop()
    for value in (top, third, second):
        state.push(value)

def op_ROT_FOUR(self, state, inst):
    """Rotate the top four values: TOS moves to fourth place, the
    other three shift up one slot."""
    top = state.pop()
    second = state.pop()
    third = state.pop()
    fourth = state.pop()
    for value in (top, fourth, third, second):
        state.push(value)
def op_UNPACK_SEQUENCE(self, state, inst):
    """Unpack TOS into ``count`` temps, pushed so the first element
    ends up on top."""
    count = inst.arg
    iterable = state.pop()
    stores = [state.make_temp() for _ in range(count)]
    tupleobj = state.make_temp()
    state.append(inst, iterable=iterable, stores=stores, tupleobj=tupleobj)
    for st in reversed(stores):
        state.push(st)

def op_BUILD_TUPLE(self, state, inst):
    """Pop ``count`` values and push a temp for the built tuple."""
    count = inst.arg
    items = list(reversed([state.pop() for _ in range(count)]))
    tup = state.make_temp()
    state.append(inst, items=items, res=tup)
    state.push(tup)

def _build_tuple_unpack(self, state, inst):
    # Builds tuple from other tuples on the stack
    tuples = list(reversed([state.pop() for _ in range(inst.arg)]))
    # One temp per pairwise concatenation step.
    temps = [state.make_temp() for _ in range(len(tuples) - 1)]

    # if the unpack is assign-like, e.g. x = (*y,), it needs handling
    # differently.
    is_assign = len(tuples) == 1
    if is_assign:
        temps = [state.make_temp(),]

    state.append(inst, tuples=tuples, temps=temps, is_assign=is_assign)
    # The result is in the last temp var
    state.push(temps[-1])

def op_BUILD_TUPLE_UNPACK_WITH_CALL(self, state, inst):
    # just unpack the input tuple, call inst will be handled afterwards
    self._build_tuple_unpack(state, inst)

def op_BUILD_TUPLE_UNPACK(self, state, inst):
    self._build_tuple_unpack(state, inst)

def op_LIST_TO_TUPLE(self, state, inst):
    # "Pops a list from the stack and pushes a tuple containing the same
    # values."
    tos = state.pop()
    res = state.make_temp()  # new tuple var
    state.append(inst, const_list=tos, res=res)
    state.push(res)
def op_BUILD_CONST_KEY_MAP(self, state, inst):
    """Build a dict from a keys tuple at TOS and ``inst.arg`` values
    below it."""
    keys = state.pop()
    vals = list(reversed([state.pop() for _ in range(inst.arg)]))
    # One temp per key extracted from the keys tuple.
    keytmps = [state.make_temp() for _ in range(inst.arg)]
    res = state.make_temp()
    state.append(inst, keys=keys, keytmps=keytmps, values=vals, res=res)
    state.push(res)

def op_BUILD_LIST(self, state, inst):
    """Pop ``count`` values and push a temp for the built list."""
    count = inst.arg
    items = list(reversed([state.pop() for _ in range(count)]))
    lst = state.make_temp()
    state.append(inst, items=items, res=lst)
    state.push(lst)

def op_LIST_APPEND(self, state, inst):
    # Comprehension helper: append the popped value to the list
    # located inst.arg entries below TOS.  No result is pushed.
    value = state.pop()
    index = inst.arg
    target = state.peek(index)
    appendvar = state.make_temp()
    res = state.make_temp()
    state.append(inst, target=target, value=value, appendvar=appendvar,
                 res=res)

def op_LIST_EXTEND(self, state, inst):
    # Extend the list inst.arg entries below TOS with the popped
    # iterable.  No result is pushed.
    value = state.pop()
    index = inst.arg
    target = state.peek(index)
    extendvar = state.make_temp()
    res = state.make_temp()
    state.append(inst, target=target, value=value, extendvar=extendvar,
                 res=res)

def op_BUILD_MAP(self, state, inst):
    """Pop ``count`` key/value pairs and push a temp for the dict."""
    dct = state.make_temp()
    count = inst.arg
    items = []
    # In 3.5+, BUILD_MAP takes <count> pairs from the stack
    for i in range(count):
        v, k = state.pop(), state.pop()
        items.append((k, v))
    state.append(inst, items=items[::-1], size=count, res=dct)
    state.push(dct)
def op_MAP_ADD(self, state, inst):
    # Dict-comprehension helper: pop value (TOS) and key (TOS1) and
    # record a setitem on the dict inst.arg entries below.
    TOS = state.pop()
    TOS1 = state.pop()
    key, value = (TOS1, TOS)
    index = inst.arg
    target = state.peek(index)
    setitemvar = state.make_temp()
    res = state.make_temp()
    state.append(inst, target=target, key=key, value=value,
                 setitemvar=setitemvar, res=res)

def op_BUILD_SET(self, state, inst):
    """Pop ``count`` values and push a temp for the built set."""
    count = inst.arg
    # Note: related python bug http://bugs.python.org/issue26020
    items = list(reversed([state.pop() for _ in range(count)]))
    res = state.make_temp()
    state.append(inst, items=items, res=res)
    state.push(res)

def op_SET_ADD(self, state, inst):
    # Set-comprehension helper: target set is at TOS once the value
    # has been popped.  No result is pushed.
    value = state.pop()
    target = state.get_tos()
    addvar = state.make_temp()
    res = state.make_temp()
    state.append(inst, value=value, target=target, addvar=addvar, res=res)

def op_SET_UPDATE(self, state, inst):
    # Update the set inst.arg entries below TOS with the popped
    # iterable.  No result is pushed.
    value = state.pop()
    index = inst.arg
    target = state.peek(index)
    updatevar = state.make_temp()
    res = state.make_temp()
    state.append(inst, target=target, value=value, updatevar=updatevar,
                 res=res)

def op_DICT_UPDATE(self, state, inst):
    # Update the dict inst.arg entries below TOS with the popped
    # mapping.  No result is pushed.
    value = state.pop()
    index = inst.arg
    target = state.peek(index)
    updatevar = state.make_temp()
    res = state.make_temp()
    state.append(inst, target=target, value=value, updatevar=updatevar,
                 res=res)
def op_GET_ITER(self, state, inst):
    """Pop an iterable and push a temp for its iterator."""
    value = state.pop()
    res = state.make_temp()
    state.append(inst, value=value, res=res)
    state.push(res)

def op_FOR_ITER(self, state, inst):
    """Advance the iterator at TOS: push the next-value temp and fork
    to both the loop body and the loop-exit jump target."""
    iterator = state.get_tos()
    pair = state.make_temp()
    indval = state.make_temp()
    pred = state.make_temp()
    state.append(inst, iterator=iterator, pair=pair, indval=indval,
                 pred=pred)
    state.push(indval)
    end = inst.get_jump_target()

    if PYVERSION in ((3, 12), (3, 13), (3, 14)):
        # Changed in version 3.12: Up until 3.11 the iterator was
        # popped when it was exhausted. Now this is handled using END_FOR
        # op code.
        state.fork(pc=end)
    elif PYVERSION in ((3, 10), (3, 11)):
        # Exhausted path pops the iterator and the pushed value.
        state.fork(pc=end, npop=2)
    else:
        raise NotImplementedError(PYVERSION)

    state.fork(pc=inst.next)

def op_GEN_START(self, state, inst):
    """Pops TOS. If TOS was not None, raises an exception. The kind
    operand corresponds to the type of generator or coroutine and
    determines the error message. The legal kinds are 0 for generator,
    1 for coroutine, and 2 for async generator.

    New in version 3.10.
    """
    # no-op in Numba
    pass
def op_BINARY_OP(self, state, inst):
    """3.11+ unified binary/in-place operator, dispatched via
    ``dis._nb_ops``."""
    op = dis._nb_ops[inst.arg][1]
    rhs = state.pop()
    lhs = state.pop()
    if op == '[]':
        # Special case 3.14 -- body of BINARY_SUBSCR now here
        assert PYVERSION == (3, 14)
        res = state.make_temp()
        state.append(inst, op=op, lhs=lhs, rhs=rhs, res=res)
        state.push(res)
    else:
        op_name = ALL_BINOPS_TO_OPERATORS[op].__name__
        res = state.make_temp(prefix=f"binop_{op_name}")
        state.append(inst, op=op, lhs=lhs, rhs=rhs, res=res)
        state.push(res)
def _unaryop(self, state, inst):
    """Generic unary operation: pop the operand and push a result
    temp."""
    val = state.pop()
    res = state.make_temp()
    state.append(inst, value=val, res=res)
    state.push(res)

op_UNARY_NEGATIVE = _unaryop
op_UNARY_POSITIVE = _unaryop
op_UNARY_NOT = _unaryop
op_UNARY_INVERT = _unaryop

def _binaryop(self, state, inst):
    """Generic binary operation: pop rhs then lhs and push a result
    temp."""
    rhs = state.pop()
    lhs = state.pop()
    res = state.make_temp()
    state.append(inst, lhs=lhs, rhs=rhs, res=res)
    state.push(res)

op_COMPARE_OP = _binaryop
op_IS_OP = _binaryop
op_CONTAINS_OP = _binaryop

op_INPLACE_ADD = _binaryop
op_INPLACE_SUBTRACT = _binaryop
op_INPLACE_MULTIPLY = _binaryop
op_INPLACE_DIVIDE = _binaryop
op_INPLACE_TRUE_DIVIDE = _binaryop
op_INPLACE_FLOOR_DIVIDE = _binaryop
op_INPLACE_MODULO = _binaryop
op_INPLACE_POWER = _binaryop
op_INPLACE_MATRIX_MULTIPLY = _binaryop
op_INPLACE_LSHIFT = _binaryop
op_INPLACE_RSHIFT = _binaryop
op_INPLACE_AND = _binaryop
op_INPLACE_OR = _binaryop
op_INPLACE_XOR = _binaryop

op_BINARY_ADD = _binaryop
op_BINARY_SUBTRACT = _binaryop
op_BINARY_MULTIPLY = _binaryop
op_BINARY_DIVIDE = _binaryop
op_BINARY_TRUE_DIVIDE = _binaryop
op_BINARY_FLOOR_DIVIDE = _binaryop
op_BINARY_MODULO = _binaryop
op_BINARY_POWER = _binaryop
op_BINARY_MATRIX_MULTIPLY = _binaryop
op_BINARY_LSHIFT = _binaryop
op_BINARY_RSHIFT = _binaryop
op_BINARY_AND = _binaryop
op_BINARY_OR = _binaryop
op_BINARY_XOR = _binaryop
def op_MAKE_FUNCTION(self, state, inst, MAKE_CLOSURE=False):
    """Create a function object from a code object plus the
    flag-selected operands (defaults, kwdefaults, annotations,
    closure); on 3.13+ those attributes arrive later via
    SET_FUNCTION_ATTRIBUTE."""
    if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
        # https://github.com/python/cpython/commit/2f180ce
        # name set via co_qualname
        name = None
    elif PYVERSION in ((3, 10),):
        name = state.pop()
    else:
        raise NotImplementedError(PYVERSION)
    code = state.pop()
    closure = annotations = annotate = kwdefaults = defaults = None
    if PYVERSION in ((3, 13), (3, 14)):
        assert inst.arg is None
        # SET_FUNCTION_ATTRIBUTE is responsible for setting
        # closure, annotations, annotate, kwdefaults and defaults.
    else:
        if inst.arg & 0x8:
            closure = state.pop()
        if inst.arg & 0x4:
            annotations = state.pop()
        if inst.arg & 0x2:
            kwdefaults = state.pop()
        if inst.arg & 0x1:
            defaults = state.pop()
    res = state.make_temp()
    state.append(
        inst,
        name=name,
        code=code,
        closure=closure,
        annotations=annotations,
        annotate=annotate,
        kwdefaults=kwdefaults,
        defaults=defaults,
        res=res,
    )
    state.push(res)
def op_SET_FUNCTION_ATTRIBUTE(self, state, inst):
    """3.13+: attach one flag-selected attribute to the function
    object produced by MAKE_FUNCTION, then re-push the function."""
    assert PYVERSION in ((3, 13), (3, 14))
    make_func_stack = state.pop()
    data = state.pop()
    if inst.arg & 0x01:
        # 0x01 a tuple of default values for positional-only and
        # positional-or-keyword parameters in positional order
        state.set_function_attribute(make_func_stack, defaults=data)
    elif inst.arg & 0x02:
        # 0x02 a dictionary of keyword-only parameters' default values
        state.set_function_attribute(make_func_stack, kwdefaults=data)
    elif inst.arg & 0x04:
        # 0x04 a tuple of strings containing parameters' annotations
        state.set_function_attribute(make_func_stack, annotations=data)
    elif inst.arg & 0x08:
        # 0x08 a tuple containing cells for free variables, making a closure
        state.set_function_attribute(make_func_stack, closure=data)
    elif inst.arg & 0x10:
        # In 3.14 a new flag was added it has the value 0x10/16: the
        # deferred-annotations callable.
        # Numba report: https://github.com/numba/numba/issues/10319
        state.set_function_attribute(make_func_stack, annotate=data)
    else:
        raise AssertionError("unreachable")
    state.push(make_func_stack)
def op_MAKE_CLOSURE(self, state, inst):
    # Legacy opcode; same handling as MAKE_FUNCTION.
    self.op_MAKE_FUNCTION(state, inst, MAKE_CLOSURE=True)

def op_LOAD_CLOSURE(self, state, inst):
    # Push a temp standing in for the cell variable.
    res = state.make_temp()
    state.append(inst, res=res)
    state.push(res)

if PYVERSION in ((3, 14), ):
    # Removed in 3.14 (AssertionError arrives via LOAD_COMMON_CONSTANT).
    pass
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
    def op_LOAD_ASSERTION_ERROR(self, state, inst):
        # Push a temp for the AssertionError type.
        res = state.make_temp("assertion_error")
        state.append(inst, res=res)
        state.push(res)
else:
    raise NotImplementedError(PYVERSION)
def op_CHECK_EXC_MATCH(self, state, inst):
    """3.11+: test whether TOS1 matches the exception in TOS and push
    a predicate temp (TOS is popped; TOS1 stays)."""
    pred = state.make_temp("predicate")
    tos = state.pop()
    tos1 = state.get_tos()
    state.append(inst, pred=pred, tos=tos, tos1=tos1)
    state.push(pred)

def op_JUMP_IF_NOT_EXC_MATCH(self, state, inst):
    # Tests whether the second value on the stack is an exception matching
    # TOS, and jumps if it is not. Pops two values from the stack.
    pred = state.make_temp("predicate")
    tos = state.pop()
    tos1 = state.pop()
    state.append(inst, pred=pred, tos=tos, tos1=tos1)
    state.fork(pc=inst.next)
    state.fork(pc=inst.get_jump_target())

if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
    def op_RERAISE(self, state, inst):
        # This isn't handled, but the state is set up anyway
        exc = state.pop()
        if inst.arg != 0:
            state.pop()  # lasti
        state.append(inst, exc=exc)
        # Re-dispatch to an enclosing handler if one is active,
        # otherwise this dataflow path ends here.
        if state.has_active_try():
            self._adjust_except_stack(state)
        else:
            state.terminate()
elif PYVERSION in ((3, 10),):
    def op_RERAISE(self, state, inst):
        # This isn't handled, but the state is set up anyway
        exc = state.pop()
        state.append(inst, exc=exc)
        state.terminate()
else:
    raise NotImplementedError(PYVERSION)
# NOTE: Please see notes in `interpreter.py` surrounding the implementation
# of LOAD_METHOD and CALL_METHOD.

if PYVERSION in ((3, 12), (3, 13), (3, 14)):
    # LOAD_METHOD has become a pseudo-instruction in 3.12
    pass
elif PYVERSION in ((3, 11), ):
    def op_LOAD_METHOD(self, state, inst):
        # 3.11: push a NULL placeholder beneath the method temp so the
        # following CALL sees the expected two slots.
        item = state.pop()
        extra = state.make_null()
        state.push(extra)
        res = state.make_temp()
        state.append(inst, item=item, res=res)
        state.push(res)
elif PYVERSION in ((3, 10),):
    def op_LOAD_METHOD(self, state, inst):
        # 3.10: same dataflow as an attribute load.
        self.op_LOAD_ATTR(state, inst)
else:
    raise NotImplementedError(PYVERSION)

def op_CALL_METHOD(self, state, inst):
    # Same dataflow as a plain call.
    self.op_CALL_FUNCTION(state, inst)
if PYVERSION in ((3, 14), ):
    # New in 3.14, replaces BEFORE_WITH.
    def op_LOAD_SPECIAL(self, state, inst):
        """Handle ``with``-block entry on 3.14 by consuming the whole
        LOAD_SPECIAL ... CALL ... POP_TOP pattern in one handler (see
        the long note below)."""
        # The "special" methods mapping for LOAD_SPECIAL is:
        #
        # special_methods = {
        #     0: '__enter__',
        #     1: '__exit__',
        #     2: '__aenter__',
        #     3: '__aexit__',
        # }
        if (dis._special_method_names[inst.arg]
                not in ['__enter__', '__exit__']):
            raise NotImplementedError(
                "async special methods not supported")
        # This implementation of LOAD_SPECIAL is somewhat unusual for a
        # Numba bytecode handler.
        #
        # Essentially this comes from the insight, that with Python 3.14
        # the bytecode `BEFORE_WITH` is replaced by `LOAD_SPECIAL`. But
        # this is not a simple replacement. In fact, the single
        # `BEFORE_WITH` is replaced by the following sequence of bytecodes:
        #
        # COPY(arg=1, lineno=X)
        # LOAD_SPECIAL(arg=1, lineno=X)  -- loading __exit__ --
        # SWAP(arg=2, lineno=X)
        # SWAP(arg=3, lineno=X)
        # LOAD_SPECIAL(arg=0, lineno=X)  -- loading __enter__ --
        # CALL(arg=0, lineno=X)          -- calling __enter__ --
        # POP_TOP(arg=None, lineno=X)
        #
        # Basically, after seeing the first `LOAD_SPECIAL` we can consume
        # all the bytecodes up until the `POP_TOP` and leave the stack
        # with a single copy of current TOS + the temp var to denote the
        # `__exit__` function. We can also reject syntax of the form:
        #
        # with context as c:
        #     pass
        #
        # Using the fact that the `CALL` to `__enter__` must be followed by
        # as POP_TOP, otherwise it's using `with as`.

        # Pop top of stack once for first LOAD_SPECIAL
        tos = state.pop()
        # Pop top of stack second time for second LOAD_SPECIAL
        _ = state.pop()
        # Fake an exit method, will not be called and removed from Numba IR
        # before final processing is complete. Only need this such that the
        # `CALL` op when closing the with-block can be simulated as a stack
        # effect.
        method = state.make_temp(prefix='setup_with_exitfn')
        # Cache current instruction (the LOAD_SPECIAL).
        old_inst = inst
        # Now we need to consume the instructions in the known sequence.
        for i, a in (('SWAP', 2),
                     ('SWAP', 3),
                     ('LOAD_SPECIAL', 0),
                     ('CALL', 0),
                     ):
            state.advance_pc()
            inst = state.get_inst()
            if (inst.opname != i or inst.arg != a):
                raise UnsupportedBytecodeError(
                    "Unsupported bytecode pattern for 'LOAD_SPECIAL'.")
        # POP_TOP
        state.advance_pc()
        inst = state.get_inst()
        # Special case, the `CALL` must be followed by a `POP_TOP`.
        # Otherwise this is an unsupported construct.
        #
        # See: `_guard_with_as` for how this is handled for 3.13 and below.
        if inst.opname != 'POP_TOP':
            msg = ("The 'with (context manager) as "
                   "(variable):' construct is not "
                   "supported.")
            raise UnsupportedBytecodeError(msg)
        assert inst.arg is None
        # Finished consuming bytecode pattern.
        # Find the end of the with-block using the exception tables using
        # the instruction offset of the `POP_TOP` instruction.
        bc = state._bytecode
        ehhead = bc.find_exception_entry(inst.offset)
        ehrelated = [ehhead]
        for eh in bc.exception_entries:
            if eh.target == ehhead.target:
                ehrelated.append(eh)
        end = max(eh.end for eh in ehrelated)
        # Push the `__exit__` method (or null) to the stack,
        # followed by the original tos (which is the instantiated context
        # manager). This is such that the `CALL` after the with-block can
        # be simulated.
        state.push(method)
        state.push(tos)
        # Record the instruction.
        state.append(old_inst, contextmanager=tos, exit_method=method,
                     block_end=end)
        # Insert WITH-block.
        state.push_block(
            state.make_block(
                kind='WITH',
                end=end,
            )
        )
        # And fork to force a new block.
        state.fork(pc=inst.next)
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
    # LOAD_SPECIAL does not exist before 3.14 (see op_BEFORE_WITH /
    # op_SETUP_WITH).
    pass
else:
    raise NotImplementedError(PYVERSION)
if PYVERSION in ((3, 14), ):
    def op_LOAD_COMMON_CONSTANT(self, state, inst):
        """3.14: push one of the interpreter's common constants;
        only AssertionError is supported here."""
        oparg = inst.arg
        if dis._common_constants[oparg] == AssertionError:
            name = 'assertion_error'
        else:
            raise NotImplementedError
        res = state.make_temp(name)
        state.append(inst, res=res, idx=oparg)
        state.push(res)
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
    # LOAD_COMMON_CONSTANT does not exist before 3.14 (see
    # op_LOAD_ASSERTION_ERROR).
    pass
else:
    raise NotImplementedError(PYVERSION)
@total_ordering
| TraceRunner |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-chroma/unit_tests/test_destination.py | {
"start": 319,
"end": 3665
} | class ____(unittest.TestCase):
def setUp(self):
self.config = {
"processing": {"text_fields": ["str_col"], "metadata_fields": [], "chunk_size": 1000},
"embedding": {"mode": "openai", "openai_key": "mykey"},
"indexing": {
"auth_method": {"mode": "persistent_client", "path": "./path"},
"collection_name": "test2",
},
}
self.config_model = ConfigModel.parse_obj(self.config)
self.logger = logging.getLogger("airbyte")
@patch("destination_chroma.destination.ChromaIndexer")
@patch("destination_chroma.destination.create_from_config")
def test_check(self, MockedEmbedder, MockedChromaIndexer):
mock_embedder = Mock()
mock_indexer = Mock()
MockedChromaIndexer.return_value = mock_indexer
MockedEmbedder.return_value = mock_embedder
mock_embedder.check.return_value = None
mock_indexer.check.return_value = None
destination = DestinationChroma()
result = destination.check(self.logger, self.config)
self.assertEqual(result.status, Status.SUCCEEDED)
mock_embedder.check.assert_called_once()
mock_indexer.check.assert_called_once()
@patch("destination_chroma.destination.ChromaIndexer")
@patch("destination_chroma.destination.create_from_config")
def test_check_with_errors(self, MockedEmbedder, MockedChromaIndexer):
mock_embedder = Mock()
mock_indexer = Mock()
MockedChromaIndexer.return_value = mock_indexer
MockedEmbedder.return_value = mock_embedder
embedder_error_message = "Embedder Error"
indexer_error_message = "Indexer Error"
mock_embedder.check.return_value = embedder_error_message
mock_indexer.check.return_value = indexer_error_message
destination = DestinationChroma()
result = destination.check(self.logger, self.config)
self.assertEqual(result.status, Status.FAILED)
self.assertEqual(result.message, f"{embedder_error_message}\n{indexer_error_message}")
mock_embedder.check.assert_called_once()
mock_indexer.check.assert_called_once()
@patch("destination_chroma.destination.Writer")
@patch("destination_chroma.destination.ChromaIndexer")
@patch("destination_chroma.destination.create_from_config")
def test_write(self, MockedEmbedder, MockedChromaIndexer, MockedWriter):
mock_embedder = Mock()
mock_indexer = Mock()
mock_writer = Mock()
MockedChromaIndexer.return_value = mock_indexer
MockedWriter.return_value = mock_writer
MockedEmbedder.return_value = mock_embedder
mock_writer.write.return_value = []
configured_catalog = MagicMock()
input_messages = []
destination = DestinationChroma()
list(destination.write(self.config, configured_catalog, input_messages))
MockedWriter.assert_called_once_with(self.config_model.processing, mock_indexer, mock_embedder, batch_size=128, omit_raw_text=False)
mock_writer.write.assert_called_once_with(configured_catalog, input_messages)
def test_spec(self):
destination = DestinationChroma()
result = destination.spec()
self.assertIsInstance(result, ConnectorSpecification)
| TestDestinationChroma |
python | fluentpython__example-code | 05-1class-func/bingocall.py | {
"start": 168,
"end": 544
} | class ____:
def __init__(self, items):
self._items = list(items) # <1>
random.shuffle(self._items) # <2>
def pick(self): # <3>
try:
return self._items.pop()
except IndexError:
raise LookupError('pick from empty BingoCage') # <4>
def __call__(self): # <5>
return self.pick()
# END BINGO
| BingoCage |
python | ray-project__ray | python/ray/serve/tests/unit/test_schema.py | {
"start": 13770,
"end": 22714
} | class ____:
def get_valid_serve_application_schema(self):
return {
"import_path": "module.graph",
"runtime_env": {},
"deployments": [
{
"name": "shallow",
"num_replicas": 2,
"route_prefix": "/shallow",
"max_ongoing_requests": 32,
"user_config": None,
"autoscaling_config": None,
"graceful_shutdown_wait_loop_s": 17,
"graceful_shutdown_timeout_s": 49,
"health_check_period_s": 11,
"health_check_timeout_s": 11,
"ray_actor_options": {
"runtime_env": {
"working_dir": TEST_MODULE_PINNED_URI,
"py_modules": [TEST_DEPLOY_GROUP_PINNED_URI],
},
"num_cpus": 3,
"num_gpus": 4.2,
"memory": 5,
"resources": {"custom_asic": 8},
"accelerator_type": NVIDIA_TESLA_P4,
},
},
{
"name": "deep",
},
],
}
def test_valid_serve_application_schema(self):
# Ensure a valid ServeApplicationSchema can be generated
serve_application_schema = self.get_valid_serve_application_schema()
ServeApplicationSchema.parse_obj(serve_application_schema)
def test_extra_fields_invalid_serve_application_schema(self):
# Undefined fields should be forbidden in the schema
serve_application_schema = self.get_valid_serve_application_schema()
# Schema should be createable with valid fields
ServeApplicationSchema.parse_obj(serve_application_schema)
# Schema should NOT raise error when extra field is included
serve_application_schema["extra_field"] = None
ServeApplicationSchema.parse_obj(serve_application_schema)
@pytest.mark.parametrize("env", get_valid_runtime_envs())
def test_serve_application_valid_runtime_env(self, env):
# Test valid runtime_env configurations
serve_application_schema = self.get_valid_serve_application_schema()
serve_application_schema["runtime_env"] = env
original_runtime_env = copy.deepcopy(env)
schema = ServeApplicationSchema.parse_obj(serve_application_schema)
# Make sure runtime environment is unchanged by the validation
assert schema.runtime_env == original_runtime_env
@pytest.mark.parametrize("env", get_invalid_runtime_envs())
def test_serve_application_invalid_runtime_env(self, env):
# Test invalid runtime_env configurations
serve_application_schema = self.get_valid_serve_application_schema()
serve_application_schema["runtime_env"] = env
with pytest.raises(ValueError):
ServeApplicationSchema.parse_obj(serve_application_schema)
# By default, runtime_envs with local URIs should be rejected.
with pytest.raises(ValueError):
ServeApplicationSchema.parse_obj(serve_application_schema)
@pytest.mark.parametrize("path", get_valid_import_paths())
def test_serve_application_valid_import_path(self, path):
# Test valid import path formats
serve_application_schema = self.get_valid_serve_application_schema()
serve_application_schema["import_path"] = path
ServeApplicationSchema.parse_obj(serve_application_schema)
@pytest.mark.parametrize("path", get_invalid_import_paths())
def test_serve_application_invalid_import_path(self, path):
# Test invalid import path formats
serve_application_schema = self.get_valid_serve_application_schema()
serve_application_schema["import_path"] = path
with pytest.raises(ValidationError):
ServeApplicationSchema.parse_obj(serve_application_schema)
def test_serve_application_import_path_required(self):
# If no import path is specified, this should not parse successfully
with pytest.raises(ValidationError):
ServeApplicationSchema.parse_obj({"host": "127.0.0.1", "port": 8000})
def test_external_scaler_enabled_defaults_to_false(self):
# Ensure external_scaler_enabled defaults to False
serve_application_schema = self.get_valid_serve_application_schema()
schema = ServeApplicationSchema.parse_obj(serve_application_schema)
assert schema.external_scaler_enabled is False
def test_external_scaler_enabled_with_fixed_replicas(self):
# external_scaler_enabled=True should work with fixed num_replicas
serve_application_schema = self.get_valid_serve_application_schema()
serve_application_schema["external_scaler_enabled"] = True
serve_application_schema["deployments"] = [
{
"name": "deployment1",
"num_replicas": 5,
},
{
"name": "deployment2",
"num_replicas": 3,
},
]
# This should parse successfully
schema = ServeApplicationSchema.parse_obj(serve_application_schema)
assert schema.external_scaler_enabled is True
def test_external_scaler_enabled_conflicts_with_autoscaling(self):
# external_scaler_enabled=True should conflict with autoscaling_config
serve_application_schema = self.get_valid_serve_application_schema()
serve_application_schema["external_scaler_enabled"] = True
serve_application_schema["deployments"] = [
{
"name": "deployment1",
"num_replicas": None,
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 10,
"target_ongoing_requests": 5,
},
},
]
# This should raise a validation error
with pytest.raises(ValueError) as exc_info:
ServeApplicationSchema.parse_obj(serve_application_schema)
error_message = str(exc_info.value)
assert "external_scaler_enabled is set to True" in error_message
assert "deployment1" in error_message
def test_external_scaler_enabled_conflicts_with_multiple_deployments(self):
# Test that validation catches multiple deployments with autoscaling
serve_application_schema = self.get_valid_serve_application_schema()
serve_application_schema["external_scaler_enabled"] = True
serve_application_schema["deployments"] = [
{
"name": "deployment1",
"num_replicas": 5, # Fixed replicas - OK
},
{
"name": "deployment2",
"num_replicas": None,
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 10,
"target_ongoing_requests": 5,
},
},
{
"name": "deployment3",
"autoscaling_config": {
"min_replicas": 2,
"max_replicas": 20,
},
},
]
# This should raise a validation error mentioning both problematic deployments
with pytest.raises(ValueError) as exc_info:
ServeApplicationSchema.parse_obj(serve_application_schema)
error_message = str(exc_info.value)
assert "external_scaler_enabled is set to True" in error_message
assert "deployment2" in error_message
assert "deployment3" in error_message
# deployment1 should not be mentioned since it doesn't have autoscaling
assert (
"deployment1" not in error_message or '"deployment1"' not in error_message
)
def test_external_scaler_enabled_with_num_replicas_auto(self):
# external_scaler_enabled=True with num_replicas="auto" should conflict
# since "auto" implies autoscaling
serve_application_schema = self.get_valid_serve_application_schema()
serve_application_schema["external_scaler_enabled"] = True
serve_application_schema["deployments"] = [
{
"name": "deployment1",
"num_replicas": "auto",
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 10,
},
},
]
# This should raise a validation error
with pytest.raises(ValueError) as exc_info:
ServeApplicationSchema.parse_obj(serve_application_schema)
error_message = str(exc_info.value)
assert "external_scaler_enabled is set to True" in error_message
assert "deployment1" in error_message
| TestServeApplicationSchema |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/gcs.py | {
"start": 12260,
"end": 18984
} | class ____(GCSPrefixBlobTrigger):
"""
Return Trigger Event if the inactivity period has passed with no increase in the number of objects.
:param bucket: The Google Cloud Storage bucket where the objects are expected.
:param prefix: The name of the prefix to check in the Google cloud storage bucket.
:param poke_interval: polling period in seconds to check
:param inactivity_period: The total seconds of inactivity to designate
an upload session is over. Note, this mechanism is not real time and
this operator may not return until a interval after this period
has passed with no additional objects sensed.
:param min_objects: The minimum number of objects needed for upload session
to be considered valid.
:param previous_objects: The set of object ids found during the last poke.
:param allow_delete: Should this sensor consider objects being deleted
between intervals valid behavior. If true a warning message will be logged
when this happens. If false an error will be raised.
:param google_cloud_conn_id: The connection ID to use when connecting
to Google Cloud Storage.
"""
def __init__(
self,
bucket: str,
prefix: str,
poke_interval: float,
google_cloud_conn_id: str,
hook_params: dict[str, Any],
inactivity_period: float = 60 * 60,
min_objects: int = 1,
previous_objects: set[str] | None = None,
allow_delete: bool = True,
):
super().__init__(
bucket=bucket,
prefix=prefix,
poke_interval=poke_interval,
google_cloud_conn_id=google_cloud_conn_id,
hook_params=hook_params,
)
self.inactivity_period = inactivity_period
self.min_objects = min_objects
self.previous_objects = previous_objects or set()
self.inactivity_seconds = 0.0
self.allow_delete = allow_delete
self.last_activity_time: datetime | None = None
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize GCSUploadSessionTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.gcs.GCSUploadSessionTrigger",
{
"bucket": self.bucket,
"prefix": self.prefix,
"poke_interval": self.poke_interval,
"google_cloud_conn_id": self.google_cloud_conn_id,
"hook_params": self.hook_params,
"inactivity_period": self.inactivity_period,
"min_objects": self.min_objects,
"previous_objects": self.previous_objects,
"allow_delete": self.allow_delete,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Loop until no new files or deleted files in list blob for the inactivity_period."""
try:
hook = self._get_async_hook()
while True:
list_blobs = await self._list_blobs_with_prefix(
hook=hook, bucket_name=self.bucket, prefix=self.prefix
)
res = self._is_bucket_updated(set(list_blobs))
if res["status"] in ("success", "error"):
yield TriggerEvent(res)
return
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
def _get_time(self) -> datetime:
"""
Get current local date and time.
This is just a wrapper of datetime.datetime.now to simplify mocking in the unittests.
"""
return datetime.now()
def _is_bucket_updated(self, current_objects: set[str]) -> dict[str, str]:
"""
Check whether new objects have been uploaded and the inactivity_period has passed; update the state.
:param current_objects: set of object ids in bucket during last check.
"""
current_num_objects = len(current_objects)
if current_objects > self.previous_objects:
# When new objects arrived, reset the inactivity_seconds
# and update previous_objects for the next check interval.
self.log.info(
"New objects found at %s resetting last_activity_time.",
os.path.join(self.bucket, self.prefix),
)
self.log.debug("New objects: %s", "\n".join(current_objects - self.previous_objects))
self.last_activity_time = self._get_time()
self.inactivity_seconds = 0
self.previous_objects = current_objects
return {"status": "pending"}
if self.previous_objects - current_objects:
# During the last interval check objects were deleted.
if self.allow_delete:
self.previous_objects = current_objects
self.last_activity_time = self._get_time()
self.log.warning(
"%s Objects were deleted during the last interval."
" Updating the file counter and resetting last_activity_time.",
self.previous_objects - current_objects,
)
return {"status": "pending"}
return {
"status": "error",
"message": "Illegal behavior: objects were deleted in between check intervals",
}
if self.last_activity_time:
self.inactivity_seconds = (self._get_time() - self.last_activity_time).total_seconds()
else:
# Handles the first check where last inactivity time is None.
self.last_activity_time = self._get_time()
self.inactivity_seconds = 0
if self.inactivity_seconds >= self.inactivity_period:
path = os.path.join(self.bucket, self.prefix)
if current_num_objects >= self.min_objects:
success_message = (
"SUCCESS: Sensor found %s objects at %s. Waited at least %s "
"seconds, with no new objects dropped."
)
self.log.info(success_message, current_num_objects, path, self.inactivity_seconds)
return {
"status": "success",
"message": success_message % (current_num_objects, path, self.inactivity_seconds),
}
error_message = "FAILURE: Inactivity Period passed, not enough objects found in %s"
self.log.error(error_message, path)
return {"status": "error", "message": error_message % path}
return {"status": "pending"}
| GCSUploadSessionTrigger |
python | huggingface__transformers | tests/models/visual_bert/test_modeling_visual_bert.py | {
"start": 11500,
"end": 23578
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
VisualBertModel,
VisualBertForMultipleChoice,
VisualBertForVisualReasoning,
VisualBertForRegionToPhraseAlignment,
VisualBertForQuestionAnswering,
VisualBertForPreTraining,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = {"feature-extraction": VisualBertModel} if is_torch_available() else {}
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if model_class == VisualBertForMultipleChoice:
for key in inputs_dict:
value = inputs_dict[key]
if isinstance(value, torch.Tensor) and value.ndim > 1:
if key != "visual_embeds":
inputs_dict[key] = (
inputs_dict[key].unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
)
else:
inputs_dict[key] = (
inputs_dict[key]
.unsqueeze(1)
.expand(-1, self.model_tester.num_choices, -1, self.model_tester.visual_embedding_dim)
.contiguous()
)
elif model_class == VisualBertForRegionToPhraseAlignment:
total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length
batch_size = self.model_tester.batch_size
inputs_dict["region_to_phrase_position"] = torch.zeros(
(batch_size, total_length),
dtype=torch.long,
device=torch_device,
)
if return_labels:
if model_class == VisualBertForMultipleChoice:
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class == VisualBertForPreTraining:
total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length
batch_size = self.model_tester.batch_size
inputs_dict["labels"] = torch.zeros(
(batch_size, total_length),
dtype=torch.long,
device=torch_device,
)
inputs_dict["sentence_image_labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
# Flickr expects float labels
elif model_class == VisualBertForRegionToPhraseAlignment:
batch_size = self.model_tester.batch_size
total_length = self.model_tester.seq_length + self.model_tester.visual_seq_length
inputs_dict["labels"] = torch.ones(
(
batch_size,
total_length,
self.model_tester.visual_seq_length,
),
dtype=torch.float,
device=torch_device,
)
# VQA expects float labels
elif model_class == VisualBertForQuestionAnswering:
inputs_dict["labels"] = torch.ones(
(self.model_tester.batch_size, self.model_tester.num_labels),
dtype=torch.float,
device=torch_device,
)
elif model_class == VisualBertForVisualReasoning:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size), dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = VisualBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=VisualBertConfig, hidden_size=37)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
visual_seq_len = getattr(self.model_tester, "visual_seq_length", None)
encoder_seq_length = (seq_len if seq_len is not None else 0) + (
visual_seq_len if visual_seq_len is not None else 0
)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
chunk_length = getattr(self.model_tester, "chunk_length", None)
if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
if chunk_length is not None:
self.assertListEqual(
list(self_attentions[0].shape[-4:]),
[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
)
else:
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
seq_length = seq_length * self.model_tester.chunk_length
else:
seq_length = self.model_tester.seq_length + self.model_tester.visual_seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_pretraining()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_model_for_vqa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_vqa()
self.model_tester.create_and_check_for_vqa(*config_and_inputs)
def test_model_for_nlvr(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_nlvr()
self.model_tester.create_and_check_for_nlvr(*config_and_inputs)
def test_model_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_multiple_choice()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_model_for_flickr(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_flickr()
self.model_tester.create_and_check_for_flickr(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "uclanlp/visualbert-vqa"
model = VisualBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@require_torch
| VisualBertModelTest |
python | FactoryBoy__factory_boy | tests/test_django.py | {
"start": 1244,
"end": 1408
} | class ____(factory.django.DjangoModelFactory):
class Meta:
model = models.StandardModel
foo = factory.Sequence(lambda n: "foo%d" % n)
| StandardFactory |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/via_attribute_name.py | {
"start": 262,
"end": 839
} | class ____:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def test_tito_attribute_x():
c = TitoAttributes(**_test_source())
_test_sink(c.x)
def test_tito_attribute_y():
c = TitoAttributes(**_test_source())
_test_sink(c.y)
def test_tito_attribute_z_with_tag():
c = TitoAttributes(**_test_source())
_test_sink(c.z)
def test_tito_attribute_join():
c = TitoAttributes(**_test_source())
foo = c.x
if 1:
foo = c.y
elif 2:
foo = c.z
_test_sink(foo)
@dataclass
| TitoAttributes |
python | PyCQA__pylint | tests/functional/u/undefined/undefined_variable_py312.py | {
"start": 378,
"end": 440
} | class ____[T](Parent[T, S]): # [undefined-variable]
...
| Child |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 26292,
"end": 26407
} | class ____(Hostname):
platform = 'Linux'
distribution = 'Neon'
strategy_class = FileStrategy
| NeonHostname |
python | pypa__pipenv | pipenv/vendor/tomlkit/items.py | {
"start": 27284,
"end": 28162
} | class ____:
__slots__ = ("value", "indent", "comma", "comment")
def __init__(
self,
value: Item | None = None,
indent: Whitespace | None = None,
comma: Whitespace | None = None,
comment: Comment | None = None,
) -> None:
self.value = value
self.indent = indent
self.comma = comma
self.comment = comment
def __iter__(self) -> Iterator[Item]:
return filter(
lambda x: x is not None, (self.indent, self.value, self.comma, self.comment)
)
def __repr__(self) -> str:
return repr(tuple(self))
def is_whitespace(self) -> bool:
return self.value is None and self.comment is None
def __bool__(self) -> bool:
try:
next(iter(self))
except StopIteration:
return False
return True
| _ArrayItemGroup |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 7804,
"end": 12603
} | class ____(greentest.TestCase):
def test_minimal_id(self):
g = gevent.spawn(lambda: 1)
self.assertGreaterEqual(g.minimal_ident, 0)
self.assertGreaterEqual(g.parent.minimal_ident, 0)
g.join() # don't leave dangling, breaks the leak checks
def test_wait_noerrors(self):
x = gevent.spawn(lambda: 1)
y = gevent.spawn(lambda: 2)
z = gevent.spawn(lambda: 3)
gevent.joinall([x, y, z], raise_error=True)
self.assertEqual([x.value, y.value, z.value], [1, 2, 3])
e = AsyncResult()
x.link(e)
self.assertEqual(e.get(), 1)
x.unlink(e)
e = AsyncResult()
x.link(e)
self.assertEqual(e.get(), 1)
@ignores_leakcheck
def test_wait_error(self):
def x():
sleep(DELAY)
return 1
x = gevent.spawn(x)
y = gevent.spawn(lambda: getcurrent().throw(ExpectedError('test_wait_error')))
self.assertRaises(ExpectedError, gevent.joinall, [x, y], raise_error=True)
self.assertRaises(ExpectedError, gevent.joinall, [y], raise_error=True)
x.join()
@ignores_leakcheck
def test_joinall_exception_order(self):
# if there're several exceptions raised, the earliest one must be raised by joinall
def first():
sleep(0.1)
raise ExpectedError('first')
a = gevent.spawn(first)
b = gevent.spawn(lambda: getcurrent().throw(ExpectedError('second')))
with self.assertRaisesRegex(ExpectedError, 'second'):
gevent.joinall([a, b], raise_error=True)
gevent.joinall([a, b])
def test_joinall_count_raise_error(self):
# When joinall is asked not to raise an error, the 'count' param still
# works.
def raises_but_ignored():
raise ExpectedError("count")
def sleep_forever():
while True:
sleep(0.1)
sleeper = gevent.spawn(sleep_forever)
raiser = gevent.spawn(raises_but_ignored)
gevent.joinall([sleeper, raiser], raise_error=False, count=1)
self.assert_greenlet_ready(raiser)
self.assert_greenlet_not_ready(sleeper)
# Clean up our mess
sleeper.kill()
self.assert_greenlet_ready(sleeper)
def test_multiple_listeners_error(self):
# if there was an error while calling a callback
# it should not prevent the other listeners from being called
# also, all of the errors should be logged, check the output
# manually that they are
p = gevent.spawn(lambda: 5)
results = []
def listener1(*_args):
results.append(10)
raise ExpectedError('listener1')
def listener2(*_args):
results.append(20)
raise ExpectedError('listener2')
def listener3(*_args):
raise ExpectedError('listener3')
p.link(listener1)
p.link(listener2)
p.link(listener3)
sleep(DELAY * 10)
self.assertIn(results, [[10, 20], [20, 10]])
p = gevent.spawn(lambda: getcurrent().throw(ExpectedError('test_multiple_listeners_error')))
results = []
p.link(listener1)
p.link(listener2)
p.link(listener3)
sleep(DELAY * 10)
self.assertIn(results, [[10, 20], [20, 10]])
class Results(object):
def __init__(self):
self.results = []
def listener1(self, p):
p.unlink(self.listener2)
self.results.append(5)
raise ExpectedError('listener1')
def listener2(self, p):
p.unlink(self.listener1)
self.results.append(5)
raise ExpectedError('listener2')
def listener3(self, _p):
raise ExpectedError('listener3')
def _test_multiple_listeners_error_unlink(self, _p, link):
# notification must not happen after unlink even
# though notification process has been already started
results = self.Results()
link(results.listener1)
link(results.listener2)
link(results.listener3)
sleep(DELAY * 10)
self.assertEqual([5], results.results)
def test_multiple_listeners_error_unlink_Greenlet_link(self):
p = gevent.spawn(lambda: 5)
self._test_multiple_listeners_error_unlink(p, p.link)
p.kill()
def test_multiple_listeners_error_unlink_Greenlet_rawlink(self):
p = gevent.spawn(lambda: 5)
self._test_multiple_listeners_error_unlink(p, p.rawlink)
def test_multiple_listeners_error_unlink_AsyncResult_rawlink(self):
e = AsyncResult()
gevent.spawn(e.set, 6)
self._test_multiple_listeners_error_unlink(e, e.rawlink)
def dummy_test_func(*_args):
pass
| TestStuff |
python | plotly__plotly.py | plotly/missing_anywidget.py | {
"start": 40,
"end": 512
} | class ____(BaseFigure):
"""
FigureWidget stand-in for use when anywidget is not installed. The only purpose
of this class is to provide something to import as
`plotly.graph_objs.FigureWidget` when anywidget is not installed. This class
simply raises an informative error message when the constructor is called
"""
def __init__(self, *args, **kwargs):
raise ImportError("Please install anywidget to use the FigureWidget class")
| FigureWidget |
python | pydata__xarray | xarray/tests/test_conventions.py | {
"start": 4135,
"end": 10732
} | class ____:
def test_incompatible_attributes(self) -> None:
invalid_vars = [
Variable(
["t"], pd.date_range("2000-01-01", periods=3), {"units": "foobar"}
),
Variable(["t"], pd.to_timedelta(["1 day"]), {"units": "foobar"}), # type: ignore[arg-type, unused-ignore]
Variable(["t"], [0, 1, 2], {"add_offset": 0}, {"add_offset": 2}),
Variable(["t"], [0, 1, 2], {"_FillValue": 0}, {"_FillValue": 2}),
]
for var in invalid_vars:
with pytest.raises(ValueError):
conventions.encode_cf_variable(var)
def test_missing_fillvalue(self) -> None:
v = Variable(["x"], np.array([np.nan, 1, 2, 3]))
v.encoding = {"dtype": "int16"}
# Expect both the SerializationWarning and the RuntimeWarning from numpy
with pytest.warns(Warning) as record:
conventions.encode_cf_variable(v)
# Check we got the expected warnings
warning_messages = [str(w.message) for w in record]
assert any(
"floating point data as an integer" in msg for msg in warning_messages
)
assert any(
"invalid value encountered in cast" in msg for msg in warning_messages
)
def test_multidimensional_coordinates(self) -> None:
# regression test for GH1763
# Set up test case with coordinates that have overlapping (but not
# identical) dimensions.
zeros1 = np.zeros((1, 5, 3))
zeros2 = np.zeros((1, 6, 3))
zeros3 = np.zeros((1, 5, 4))
orig = Dataset(
{
"lon1": (["x1", "y1"], zeros1.squeeze(0), {}),
"lon2": (["x2", "y1"], zeros2.squeeze(0), {}),
"lon3": (["x1", "y2"], zeros3.squeeze(0), {}),
"lat1": (["x1", "y1"], zeros1.squeeze(0), {}),
"lat2": (["x2", "y1"], zeros2.squeeze(0), {}),
"lat3": (["x1", "y2"], zeros3.squeeze(0), {}),
"foo1": (["time", "x1", "y1"], zeros1, {"coordinates": "lon1 lat1"}),
"foo2": (["time", "x2", "y1"], zeros2, {"coordinates": "lon2 lat2"}),
"foo3": (["time", "x1", "y2"], zeros3, {"coordinates": "lon3 lat3"}),
"time": ("time", [0.0], {"units": "hours since 2017-01-01"}),
}
)
orig = conventions.decode_cf(orig)
# Encode the coordinates, as they would be in a netCDF output file.
enc, attrs = conventions.encode_dataset_coordinates(orig)
# Make sure we have the right coordinates for each variable.
foo1_coords = enc["foo1"].attrs.get("coordinates", "")
foo2_coords = enc["foo2"].attrs.get("coordinates", "")
foo3_coords = enc["foo3"].attrs.get("coordinates", "")
assert foo1_coords == "lon1 lat1"
assert foo2_coords == "lon2 lat2"
assert foo3_coords == "lon3 lat3"
# Should not have any global coordinates.
assert "coordinates" not in attrs
def test_var_with_coord_attr(self) -> None:
# regression test for GH6310
# don't overwrite user-defined "coordinates" attributes
orig = Dataset(
{"values": ("time", np.zeros(2), {"coordinates": "time lon lat"})},
coords={
"time": ("time", np.zeros(2)),
"lat": ("time", np.zeros(2)),
"lon": ("time", np.zeros(2)),
},
)
# Encode the coordinates, as they would be in a netCDF output file.
enc, attrs = conventions.encode_dataset_coordinates(orig)
# Make sure we have the right coordinates for each variable.
values_coords = enc["values"].attrs.get("coordinates", "")
assert values_coords == "time lon lat"
# Should not have any global coordinates.
assert "coordinates" not in attrs
def test_do_not_overwrite_user_coordinates(self) -> None:
# don't overwrite user-defined "coordinates" encoding
orig = Dataset(
coords={"x": [0, 1, 2], "y": ("x", [5, 6, 7]), "z": ("x", [8, 9, 10])},
data_vars={"a": ("x", [1, 2, 3]), "b": ("x", [3, 5, 6])},
)
orig["a"].encoding["coordinates"] = "y"
orig["b"].encoding["coordinates"] = "z"
enc, _ = conventions.encode_dataset_coordinates(orig)
assert enc["a"].attrs["coordinates"] == "y"
assert enc["b"].attrs["coordinates"] == "z"
orig["a"].attrs["coordinates"] = "foo"
with pytest.raises(ValueError, match=r"'coordinates' found in both attrs"):
conventions.encode_dataset_coordinates(orig)
def test_deterministic_coords_encoding(self) -> None:
# the coordinates attribute is sorted when set by xarray.conventions ...
# ... on a variable's coordinates attribute
ds = Dataset({"foo": 0}, coords={"baz": 0, "bar": 0})
vars, attrs = conventions.encode_dataset_coordinates(ds)
assert vars["foo"].attrs["coordinates"] == "bar baz"
assert attrs.get("coordinates") is None
# ... on the global coordinates attribute
ds = ds.drop_vars("foo")
vars, attrs = conventions.encode_dataset_coordinates(ds)
assert attrs["coordinates"] == "bar baz"
def test_emit_coordinates_attribute_in_attrs(self) -> None:
orig = Dataset(
{"a": 1, "b": 1},
coords={"t": np.array("2004-11-01T00:00:00", dtype=np.datetime64)},
)
orig["a"].attrs["coordinates"] = None
enc, _ = conventions.encode_dataset_coordinates(orig)
# check coordinate attribute emitted for 'a'
assert "coordinates" not in enc["a"].attrs
assert "coordinates" not in enc["a"].encoding
# check coordinate attribute not emitted for 'b'
assert enc["b"].attrs.get("coordinates") == "t"
assert "coordinates" not in enc["b"].encoding
def test_emit_coordinates_attribute_in_encoding(self) -> None:
orig = Dataset(
{"a": 1, "b": 1},
coords={"t": np.array("2004-11-01T00:00:00", dtype=np.datetime64)},
)
orig["a"].encoding["coordinates"] = None
enc, _ = conventions.encode_dataset_coordinates(orig)
# check coordinate attribute emitted for 'a'
assert "coordinates" not in enc["a"].attrs
assert "coordinates" not in enc["a"].encoding
# check coordinate attribute not emitted for 'b'
assert enc["b"].attrs.get("coordinates") == "t"
assert "coordinates" not in enc["b"].encoding
@requires_cftime
| TestEncodeCFVariable |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/plugins/databricks_workflow.py | {
"start": 18980,
"end": 20020
} | class ____(AirflowPlugin):
"""
Databricks Workflows plugin for Airflow.
.. seealso::
For more information on how to use this plugin, take a look at the guide:
:ref:`howto/plugin:DatabricksWorkflowPlugin`
"""
name = "databricks_workflow"
# Conditionally set operator_extra_links based on Airflow version
if AIRFLOW_V_3_0_PLUS:
# In Airflow 3, disable the links for repair functionality until it is figured out it can be supported
operator_extra_links = [
WorkflowJobRunLink(),
]
else:
# In Airflow 2.x, keep all links including repair all failed tasks
operator_extra_links = [
WorkflowJobRepairAllFailedLink(),
WorkflowJobRepairSingleTaskLink(),
WorkflowJobRunLink(),
]
repair_databricks_view = RepairDatabricksTasks()
repair_databricks_package = {
"view": repair_databricks_view,
}
appbuilder_views = [repair_databricks_package]
| DatabricksWorkflowPlugin |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chartsheet01.py | {
"start": 315,
"end": 1429
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chartsheet01.xlsx")
def test_create_file(self):
"""Test the worksheet properties of an XlsxWriter chartsheet file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [79858304, 79860096]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chartsheet.set_chart(chart)
chartsheet.activate()
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/options/store.py | {
"start": 1699,
"end": 12698
} | class ____:
"""
Abstraction for the Option storage logic that should be driven
by the OptionsManager.
OptionsStore is gooey and raw. It provides no protection over
what goes into the store. It only knows that it's reading/writing
to the right place. If using the OptionsStore directly, it's your
job to do validation of the data. You should probably go through
OptionsManager instead, unless you need raw access to something.
"""
def __init__(self, cache=None, ttl=None):
self.cache = cache
self.ttl = ttl
self.flush_local_cache()
@property
def model(self):
return self.model_cls()
@classmethod
def model_cls(cls):
from sentry.models.options import ControlOption, Option
from sentry.silo.base import SiloMode
if SiloMode.get_current_mode() == SiloMode.CONTROL:
return ControlOption
return Option
def get(self, key, silent=False):
"""
Fetches a value from the options store.
"""
result = self.get_cache(key, silent=silent)
if result is not None:
return result
should_log = random() < LOGGING_SAMPLE_RATE
if should_log:
# Log some percentage of our cache misses for option retrieval to
# help triage excessive queries against the store.
logger.info(
"sentry_options_store.cache_miss",
extra={"key": key.name, "cache_configured": self.cache is not None},
)
result = self.get_store(key, silent=silent)
if result is not None:
return result
# As a last ditch effort, let's hope we have a key
# in local cache that's possibly stale
return self.get_local_cache(key, force_grace=True)
def get_cache(self, key, silent=False):
"""
First check against our local in-process cache, falling
back to the network cache.
"""
assert (
self.cache is not None
), f"Option '{key.name}' requested before cache initialization, which could result in excessive store queries"
value = self.get_local_cache(key)
if value is not None:
return value
if self.cache is None:
return None
cache_key = key.cache_key
try:
value = self.cache.get(cache_key)
except Exception:
if not silent:
logger.warning(CACHE_FETCH_ERR, key.name, extra={"key": key.name}, exc_info=True)
value = None
if value is not None and key.ttl > 0:
self._local_cache[cache_key] = _make_cache_value(key, value)
return value
def get_local_cache(self, key, force_grace=False):
    """
    Fetch *key* from the in-process cache.

    An entry past its expiry is only returned when ``force_grace`` is
    true and it is still inside its grace window; this lets the store
    paper over network hiccups with a stale value. Entries beyond the
    grace window are evicted and never returned.
    """
    cache_key = key.cache_key
    entry = self._local_cache.get(cache_key)
    if entry is None:
        return None
    value, expires, grace = entry

    now = int(time())
    if now < expires:
        # Still within the normal expiry window.
        return value
    if force_grace and now < grace:
        # Expired, but acceptable in a disaster scenario.
        return value
    if now > grace:
        # Beyond grace: evict. Another thread may race us and delete
        # (or refresh) the key first; a miss here only costs one extra
        # network hop, so we deliberately avoid locking — pop() absorbs
        # the race.
        self._local_cache.pop(cache_key, None)
    # Outside the grace window (or grace not requested): value is bad.
    return None
def get_store(self, key, silent=False):
    """
    Attempt to fetch value from the database. If successful,
    also set it back in the cache.
    Returns None in both cases, if the key doesn't actually exist,
    or if we errored fetching it.
    NOTE: This behavior should probably be improved to differentiate
    between a miss vs error, but not worth it now since the value
    is limited at the moment.
    """
    try:
        # NOTE: To greatly reduce test bugs due to cache leakage, we don't enforce cross db constraints
        # because in practice the option query is consistent with the process level silo mode.
        # If you do change the way the option class model is picked, keep in mind it may not be deeply
        # tested due to the core assumption it should be stable per process in practice.
        with in_test_hide_transaction_boundary():
            value = self.model.objects.get(key=key.name).value
    # Schema-level DB failures (ProgrammingError/OperationalError, e.g.
    # during migrations) are treated the same as a missing row: the
    # option simply reads as unset.
    except (self.model.DoesNotExist, ProgrammingError, OperationalError):
        value = None
    except Exception:
        # Any other failure: complain loudly when configured to, else
        # log (unless silenced) and fall back to "unset".
        if settings.SENTRY_OPTIONS_COMPLAIN_ON_ERRORS:
            raise
        elif not silent:
            logger.exception("option.failed-lookup", extra={"key": key.name})
        value = None
    else:
        # we only attempt to populate the cache if we were previously
        # able to successfully talk to the backend
        # NOTE: There is definitely a race condition here between updating
        # the store and the cache
        try:
            self.set_cache(key, value)
        except Exception:
            if not silent:
                logger.warning(
                    CACHE_UPDATE_ERR, key.name, extra={"key": key.name}, exc_info=True
                )
    return value
def get_last_update_channel(self, key) -> UpdateChannel | None:
    """
    Return the channel that last wrote *key* (used to detect drift),
    or None when the option has no stored row.
    """
    try:
        row = self.model.objects.get(key=key.name)
    except self.model.DoesNotExist:
        return None
    return UpdateChannel(row.last_updated_by)
def set(self, key, value, channel: UpdateChannel):
    """
    Persist *value* for *key*, recording *channel* as the writer.

    The database write happens first and must succeed (anything raised
    there propagates); cache failures are tolerated silently since
    sync_options repairs them later. Returns whether the network cache
    was updated successfully.
    """
    assert self.cache is not None, "cache must be configured before mutating options"
    self.set_store(key, value, channel)
    return self.set_cache(key, value)
def set_store(self, key, value, channel: UpdateChannel):
    """Upsert the option row, stamping the update time and channel."""
    defaults = {
        "value": value,
        "last_updated": timezone.now(),
        "last_updated_by": channel.value,
    }
    self.model.objects.update_or_create(key=key.name, defaults=defaults)
def set_cache(self, key, value):
    """
    Write *value* into the local and network caches.

    Returns True/False for network-cache success, or None when no
    network cache is configured.
    """
    if self.cache is None:
        return None

    cache_key = key.cache_key
    # Only keys with a positive TTL participate in the local cache.
    if key.ttl > 0:
        self._local_cache[cache_key] = _make_cache_value(key, value)

    try:
        self.cache.set(cache_key, value, self.ttl)
    except Exception:
        logger.warning(CACHE_UPDATE_ERR, key.name, extra={"key": key.name}, exc_info=True)
        return False
    return True
def delete(self, key):
    """
    Remove *key* from the option stores.

    The database delete runs first and must succeed (exceptions
    propagate); cache deletion failures are silent. Returns whether the
    network-cache deletion succeeded.
    """
    assert self.cache is not None, "cache must be configured before mutating options"
    self.delete_store(key)
    return self.delete_cache(key)
def delete_store(self, key):
    """Delete the option row from the database (no-op if absent)."""
    rows = self.model.objects.filter(key=key.name)
    rows.delete()
def delete_cache(self, key):
    """
    Evict *key* from the local and network caches; returns True on
    network-cache success, False (after logging) on failure.
    """
    cache_key = key.cache_key
    # Local eviction tolerates a concurrent delete by another thread.
    self._local_cache.pop(cache_key, None)
    try:
        self.cache.delete(cache_key)
    except Exception:
        logger.warning(CACHE_UPDATE_ERR, key.name, extra={"key": key.name}, exc_info=True)
        return False
    return True
def clean_local_cache(self):
    """
    Sweep the local cache, dropping entries whose grace window has
    passed.
    """
    now = int(time())
    expired = []
    try:
        for cache_key, (_, _, grace) in self._local_cache.items():
            if now > grace:
                expired.append(cache_key)
    except RuntimeError:
        # Another thread mutated the dict mid-iteration. Rare enough
        # that bailing out (a later sweep will retry) beats paying for
        # a copy on every call.
        return
    for cache_key in expired:
        # pop() absorbs the race where another thread already deleted
        # the key.
        self._local_cache.pop(cache_key, None)
def flush_local_cache(self):
    """Discard every entry in the in-process cache."""
    self._local_cache = {}
def maybe_clean_local_cache(self, **kwargs):
    """
    Occasionally (about 25% of calls) run a full local-cache sweep.

    Purely a memory/garbage-collection aid: expired entries are already
    evicted lazily on fetch, so this only reclaims entries that are
    never fetched again. Not required for consistency.
    """
    if not self._local_cache:
        return
    if random() < 0.25:
        self.clean_local_cache()
def close(self) -> None:
    """Run a final expiry sweep when the store is shut down."""
    self.clean_local_cache()
def set_cache_impl(self, cache) -> None:
    """Swap in the backing network-cache implementation."""
    self.cache = cache
| OptionsStore |
python | pymupdf__PyMuPDF | pipcl.py | {
"start": 1312,
"end": 93264
} | class ____:
'''
Our constructor takes a definition of a Python package similar to that
passed to `distutils.core.setup()` or `setuptools.setup()` (name, version,
summary etc) plus callbacks for building, getting a list of sdist
filenames, and cleaning.
We provide methods that can be used to implement a Python package's
`setup.py` supporting PEP-517.
We also support basic command line handling for use
with a legacy (pre-PEP-517) pip, as implemented
by legacy distutils/setuptools and described in:
https://pip.pypa.io/en/stable/reference/build-system/setup-py/
The file pyproject.toml must exist; this is checked if/when fn_build() is
called.
Here is a `doctest` example of using pipcl to create a SWIG extension
module. Requires `swig`.
Create an empty test directory:
>>> import os
>>> import shutil
>>> shutil.rmtree('pipcl_test', ignore_errors=1)
>>> os.mkdir('pipcl_test')
Create a `setup.py` which uses `pipcl` to define an extension module.
>>> import textwrap
>>> with open('pipcl_test/setup.py', 'w') as f:
... _ = f.write(textwrap.dedent("""
... import sys
... import pipcl
...
... def build():
... so_leaf = pipcl.build_extension(
... name = 'foo',
... path_i = 'foo.i',
... outdir = 'build',
... )
... return [
... ('build/foo.py', 'foo/__init__.py'),
... ('cli.py', 'foo/__main__.py'),
... (f'build/{so_leaf}', f'foo/'),
... ('README', '$dist-info/'),
... (b'Hello world', 'foo/hw.txt'),
... ]
...
... def sdist():
... return [
... 'foo.i',
... 'bar.i',
... 'setup.py',
... 'pipcl.py',
... 'wdev.py',
... 'README',
... (b'Hello word2', 'hw2.txt'),
... ]
...
... p = pipcl.Package(
... name = 'foo',
... version = '1.2.3',
... fn_build = build,
... fn_sdist = sdist,
... entry_points = (
... { 'console_scripts': [
... 'foo_cli = foo.__main__:main',
... ],
... }),
... )
...
... build_wheel = p.build_wheel
... build_sdist = p.build_sdist
...
... # Handle old-style setup.py command-line usage:
... if __name__ == '__main__':
... p.handle_argv(sys.argv)
... """))
Create the files required by the above `setup.py` - the SWIG `.i` input
file, the README file, and copies of `pipcl.py` and `wdev.py`.
>>> with open('pipcl_test/foo.i', 'w') as f:
... _ = f.write(textwrap.dedent("""
... %include bar.i
... %{
... #include <stdio.h>
... #include <string.h>
... int bar(const char* text)
... {
... printf("bar(): text: %s\\\\n", text);
... int len = (int) strlen(text);
... printf("bar(): len=%i\\\\n", len);
... fflush(stdout);
... return len;
... }
... %}
... int bar(const char* text);
... """))
>>> with open('pipcl_test/bar.i', 'w') as f:
... _ = f.write( '\\n')
>>> with open('pipcl_test/README', 'w') as f:
... _ = f.write(textwrap.dedent("""
... This is Foo.
... """))
>>> with open('pipcl_test/cli.py', 'w') as f:
... _ = f.write(textwrap.dedent("""
... def main():
... print('pipcl_test:main().')
... if __name__ == '__main__':
... main()
... """))
>>> root = os.path.dirname(__file__)
>>> _ = shutil.copy2(f'{root}/pipcl.py', 'pipcl_test/pipcl.py')
>>> _ = shutil.copy2(f'{root}/wdev.py', 'pipcl_test/wdev.py')
Use `setup.py`'s command-line interface to build and install the extension
module into root `pipcl_test/install`.
>>> _ = subprocess.run(
... f'cd pipcl_test && {sys.executable} setup.py --root install install',
... shell=1, check=1)
The actual install directory depends on `sysconfig.get_path('platlib')`:
>>> if windows():
... install_dir = 'pipcl_test/install'
... else:
... install_dir = f'pipcl_test/install/{sysconfig.get_path("platlib").lstrip(os.sep)}'
>>> assert os.path.isfile( f'{install_dir}/foo/__init__.py')
Create a test script which asserts that Python function call `foo.bar(s)`
returns the length of `s`, and run it with `PYTHONPATH` set to the install
directory:
>>> with open('pipcl_test/test.py', 'w') as f:
... _ = f.write(textwrap.dedent("""
... import sys
... import foo
... text = 'hello'
... print(f'test.py: calling foo.bar() with text={text!r}')
... sys.stdout.flush()
... l = foo.bar(text)
... print(f'test.py: foo.bar() returned: {l}')
... assert l == len(text)
... """))
>>> r = subprocess.run(
... f'{sys.executable} pipcl_test/test.py',
... shell=1, check=1, text=1,
... stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
... env=os.environ | dict(PYTHONPATH=install_dir),
... )
>>> print(r.stdout)
test.py: calling foo.bar() with text='hello'
bar(): text: hello
bar(): len=5
test.py: foo.bar() returned: 5
<BLANKLINE>
Check that building sdist and wheel succeeds. For now we don't attempt to
check that the sdist and wheel actually work.
>>> _ = subprocess.run(
... f'cd pipcl_test && {sys.executable} setup.py sdist',
... shell=1, check=1)
>>> _ = subprocess.run(
... f'cd pipcl_test && {sys.executable} setup.py bdist_wheel',
... shell=1, check=1)
Check that rebuild does nothing.
>>> t0 = os.path.getmtime('pipcl_test/build/foo.py')
>>> _ = subprocess.run(
... f'cd pipcl_test && {sys.executable} setup.py bdist_wheel',
... shell=1, check=1)
>>> t = os.path.getmtime('pipcl_test/build/foo.py')
>>> assert t == t0
Check that touching bar.i forces rebuild.
>>> os.utime('pipcl_test/bar.i')
>>> _ = subprocess.run(
... f'cd pipcl_test && {sys.executable} setup.py bdist_wheel',
... shell=1, check=1)
>>> t = os.path.getmtime('pipcl_test/build/foo.py')
>>> assert t > t0
Check that touching foo.i.cpp does not run swig, but does recompile/link.
>>> t0 = time.time()
>>> os.utime('pipcl_test/build/foo.i.cpp')
>>> _ = subprocess.run(
... f'cd pipcl_test && {sys.executable} setup.py bdist_wheel',
... shell=1, check=1)
>>> assert os.path.getmtime('pipcl_test/build/foo.py') <= t0
>>> so = glob.glob('pipcl_test/build/*.so')
>>> assert len(so) == 1
>>> so = so[0]
>>> assert os.path.getmtime(so) > t0
Check `entry_points` causes creation of command `foo_cli` when we install
from our wheel using pip. [As of 2024-02-24 using pipcl's CLI interface
directly with `setup.py install` does not support entry points.]
>>> print('Creating venv.', file=sys.stderr)
>>> _ = subprocess.run(
... f'cd pipcl_test && {sys.executable} -m venv pylocal',
... shell=1, check=1)
>>> print('Installing from wheel into venv using pip.', file=sys.stderr)
>>> _ = subprocess.run(
... f'. pipcl_test/pylocal/bin/activate && pip install pipcl_test/dist/*.whl',
... shell=1, check=1)
>>> print('Running foo_cli.', file=sys.stderr)
>>> _ = subprocess.run(
... f'. pipcl_test/pylocal/bin/activate && foo_cli',
... shell=1, check=1)
Wheels and sdists
Wheels:
We generate wheels according to:
https://packaging.python.org/specifications/binary-distribution-format/
* `{name}-{version}.dist-info/RECORD` uses sha256 hashes.
* We do not generate other `RECORD*` files such as
`RECORD.jws` or `RECORD.p7s`.
* `{name}-{version}.dist-info/WHEEL` has:
* `Wheel-Version: 1.0`
* `Root-Is-Purelib: false`
* No support for signed wheels.
Sdists:
We generate sdist's according to:
https://packaging.python.org/specifications/source-distribution-format/
'''
def __init__(self,
        name,
        version,
        *,
        platform = None,
        supported_platform = None,
        summary = None,
        description = None,
        description_content_type = None,
        keywords = None,
        home_page = None,
        download_url = None,
        author = None,
        author_email = None,
        maintainer = None,
        maintainer_email = None,
        license = None,
        classifier = None,
        requires_dist = None,
        requires_python = None,
        requires_external = None,
        project_url = None,
        provides_extra = None,
        entry_points = None,
        root = None,
        fn_build = None,
        fn_clean = None,
        fn_sdist = None,
        tag_python = None,
        tag_abi = None,
        tag_platform = None,
        py_limited_api = None,
        wheel_compression = zipfile.ZIP_DEFLATED,
        wheel_compresslevel = None,
        ):
    '''
    The initial args before `entry_points` define the
    package metadata and closely follow the definitions in:
    https://packaging.python.org/specifications/core-metadata/

    Args:
        name:
            Used for metadata `Name`.
            A string, the name of the Python package.
        version:
            Used for metadata `Version`.
            A string, the version of the Python package. Also see PEP-440
            `Version Identification and Dependency Specification`.
        platform:
            Used for metadata `Platform`.
            A string or list of strings.
        supported_platform:
            Used for metadata `Supported-Platform`.
            A string or list of strings.
        summary:
            Used for metadata `Summary`.
            A string, short description of the package.
        description:
            Used for metadata `Description`.
            A string. If contains newlines, a detailed description of the
            package. Otherwise the path of a file containing the detailed
            description of the package.
        description_content_type:
            Used for metadata `Description-Content-Type`.
            A string describing markup of `description` arg. For example
            `text/markdown; variant=GFM`.
        keywords:
            Used for metadata `Keywords`.
            A string containing comma-separated keywords.
        home_page:
            Used for metadata `Home-page`.
            URL of home page.
        download_url:
            Used for metadata `Download-URL`.
            Where this version can be downloaded from.
        author:
            Used for metadata `Author`.
            Author.
        author_email:
            Used for metadata `Author-email`.
            Author email.
        maintainer:
            Used for metadata `Maintainer`.
            Maintainer.
        maintainer_email:
            Used for metadata `Maintainer-email`.
            Maintainer email.
        license:
            Used for metadata `License`.
            A string containing the license text. Written into metadata
            file `COPYING`. Is also written into metadata itself if not
            multi-line.
        classifier:
            Used for metadata `Classifier`.
            A string or list of strings. Also see:
            * https://pypi.org/pypi?%3Aaction=list_classifiers
            * https://pypi.org/classifiers/
        requires_dist:
            Used for metadata `Requires-Dist`.
            A string or list of strings, Python packages required
            at runtime. None items are ignored.
        requires_python:
            Used for metadata `Requires-Python`.
            A string or list of strings.
        requires_external:
            Used for metadata `Requires-External`.
            A string or list of strings.
        project_url:
            Used for metadata `Project-URL`.
            A string or list of strings, each of the form: `{name},
            {url}`.
        provides_extra:
            Used for metadata `Provides-Extra`.
            A string or list of strings.
        entry_points:
            String or dict specifying *.dist-info/entry_points.txt, for
            example:
            ```
            [console_scripts]
            foo_cli = foo.__main__:main
            ```
            or:
            { 'console_scripts': [
                'foo_cli = foo.__main__:main',
                ],
            }
            See: https://packaging.python.org/en/latest/specifications/entry-points/
        root:
            Root of package, defaults to current directory.
        fn_build:
            A function taking no args, or a single `config_settings` dict
            arg (as described in PEP-517), that builds the package.
            Should return a list of items; each item should be a tuple
            `(from_, to_)`, or a single string `path` which is treated as
            the tuple `(path, path)`.
            `from_` can be a string or a `bytes`. If a string it should
            be the path to a file; a relative path is treated as relative
            to `root`. If a `bytes` it is the contents of the file to be
            added.
            `to_` identifies what the file should be called within a wheel
            or when installing. If `to_` is empty or `/` we set it to the
            leaf of `from_` (`from_` must not be a `bytes`) - i.e. we place
            the file in the root directory of the wheel; otherwise if
            `to_` ends with `/` the leaf of `from_` is appended to it (and
            `from_` must not be a `bytes`).
            Initial `$dist-info/` in `_to` is replaced by
            `{name}-{version}.dist-info/`; this is useful for license files
            etc.
            Initial `$data/` in `_to` is replaced by
            `{name}-{version}.data/`. We do not enforce particular
            subdirectories, instead it is up to `fn_build()` to specify
            specific subdirectories such as `purelib`, `headers`,
            `scripts`, `data` etc.
            If we are building a wheel (e.g. `python setup.py bdist_wheel`,
            or PEP-517 pip calls `self.build_wheel()`), we add file `from_`
            to the wheel archive with name `to_`.
            If we are installing (e.g. `install` command in
            the argv passed to `self.handle_argv()`), then
            we copy `from_` to `{sitepackages}/{to_}`, where
            `sitepackages` is the installation directory, the
            default being `sysconfig.get_path('platlib')` e.g.
            `myvenv/lib/python3.9/site-packages/`.
            When calling this function, we assert that the file
            pyproject.toml exists in the current directory. (We do this
            here rather than in pipcl.Package's constructor, as otherwise
            importing setup.py from non-package-related code could fail.)
        fn_clean:
            A function taking a single arg `all_` that cleans generated
            files. `all_` is true iff `--all` is in argv.
            For safety and convenience, can also returns a list of
            files/directory paths to be deleted. Relative paths are
            interpreted as relative to `root`. All paths are asserted to be
            within `root`.
        fn_sdist:
            A function taking no args, or a single `config_settings` dict
            arg (as described in PEP517), that returns a list of items to
            be copied into the sdist. The list should be in the same format
            as returned by `fn_build`.
            It can be convenient to use `pipcl.git_items()`.
            The specification for sdists requires that the list contains
            `pyproject.toml`; we enforce this with a Python assert.
        tag_python:
            First element of wheel tag defined in PEP-425. If None we use
            `cp{version}`.
            For example if code works with any Python version, one can use
            'py3'.
        tag_abi:
            Second element of wheel tag defined in PEP-425. If None we use
            `none`.
        tag_platform:
            Third element of wheel tag defined in PEP-425. Default
            is `os.environ('AUDITWHEEL_PLAT')` if set, otherwise
            derived from `sysconfig.get_platform()` (was
            `setuptools.distutils.util.get_platform(), before that
            `distutils.util.get_platform()` as specified in the PEP), e.g.
            `openbsd_7_0_amd64`.
            For pure python packages use: `tag_platform=any`
        py_limited_api:
            If true we build wheels that use the Python Limited API. We use
            the version of `sys.executable` to define `Py_LIMITED_API` when
            compiling extensions, and use ABI tag `abi3` in the wheel name
            if argument `tag_abi` is None.
        wheel_compression:
            Used as `zipfile.ZipFile()`'s `compression` parameter when
            creating wheels.
        wheel_compresslevel:
            Used as `zipfile.ZipFile()`'s `compresslevel` parameter when
            creating wheels.

    Occurrences of `None` in lists are ignored.
    '''
    assert name
    assert version
    # Validation helpers: accept None or the stated type(s).
    def assert_str( v):
        if v is not None:
            assert isinstance( v, str), f'Not a string: {v!r}'
    def assert_str_or_multi( v):
        if v is not None:
            assert isinstance( v, (str, tuple, list)), f'Not a string, tuple or list: {v!r}'
    assert_str( name)
    assert_str( version)
    assert_str_or_multi( platform)
    assert_str_or_multi( supported_platform)
    assert_str( summary)
    assert_str( description)
    assert_str( description_content_type)
    assert_str( keywords)
    assert_str( home_page)
    assert_str( download_url)
    assert_str( author)
    assert_str( author_email)
    assert_str( maintainer)
    assert_str( maintainer_email)
    assert_str( license)
    assert_str_or_multi( classifier)
    assert_str_or_multi( requires_dist)
    assert_str( requires_python)
    assert_str_or_multi( requires_external)
    assert_str_or_multi( project_url)
    assert_str_or_multi( provides_extra)
    # Name must satisfy the name-normalization spec
    # (https://packaging.python.org/en/latest/specifications/name-normalization/),
    # which is the same pattern required by the core-metadata spec
    # (https://packaging.python.org/en/latest/specifications/core-metadata/).
    # `\Z` (unlike `$`) also rejects a trailing newline, so this single
    # check subsumes the previous second, `$`-anchored, check.
    assert re.match('^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])\\Z', name, re.IGNORECASE), (
            f'Invalid package name'
            f' (https://packaging.python.org/en/latest/specifications/name-normalization/)'
            f': {name!r}'
            )
    _assert_version_pep_440(version)
    # Individual tag elements must not contain `-`, which is the tag
    # separator in wheel names; see
    # https://packaging.python.org/en/latest/specifications/binary-distribution-format/
    if tag_python:
        assert '-' not in tag_python
    if tag_abi:
        assert '-' not in tag_abi
    if tag_platform:
        assert '-' not in tag_platform

    self.name = name
    self.version = version
    self.platform = platform
    self.supported_platform = supported_platform
    self.summary = summary
    self.description = description
    self.description_content_type = description_content_type
    self.keywords = keywords
    self.home_page = home_page
    self.download_url = download_url
    self.author = author
    self.author_email = author_email
    self.maintainer = maintainer
    self.maintainer_email = maintainer_email
    self.license = license
    self.classifier = classifier
    self.requires_dist = requires_dist
    self.requires_python = requires_python
    self.requires_external = requires_external
    self.project_url = project_url
    self.provides_extra = provides_extra
    self.entry_points = entry_points
    self.root = os.path.abspath(root if root else os.getcwd())
    self.fn_build = fn_build
    self.fn_clean = fn_clean
    self.fn_sdist = fn_sdist
    self.tag_python_ = tag_python
    self.tag_abi_ = tag_abi
    self.tag_platform_ = tag_platform
    self.py_limited_api = py_limited_api
    self.wheel_compression = wheel_compression
    self.wheel_compresslevel = wheel_compresslevel

    # If true and we are building for graal, we set PIPCL_PYTHON_CONFIG to
    # a command that will print includes/libs from graal_py's sysconfig.
    #
    self.graal_legacy_python_config = True
def build_wheel(self,
        wheel_directory,
        config_settings=None,
        metadata_directory=None,
        ):
    '''
    A PEP-517 `build_wheel()` function.
    Also called by `handle_argv()` to handle the `bdist_wheel` command.
    Returns leafname of generated wheel within `wheel_directory`.
    '''
    log2(
            f' wheel_directory={wheel_directory!r}'
            f' config_settings={config_settings!r}'
            f' metadata_directory={metadata_directory!r}'
            )
    if os.environ.get('CIBUILDWHEEL') == '1':
        # Don't special-case graal builds when running under cibuildwheel.
        pass
    elif sys.implementation.name == 'graalpy':
        # We build for Graal by building a native Python wheel with Graal
        # Python's include paths and library directory. We then rename the
        # wheel to contain graal's tag etc.
        #
        log0(f'### Graal build: deferring to cpython.')
        # PIPCL_GRAAL_PYTHON must point at a native CPython executable
        # that will perform the actual build.
        python_native = os.environ.get('PIPCL_GRAAL_PYTHON')
        assert python_native, f'Graal build requires that PIPCL_GRAAL_PYTHON is set.'
        # Pass Graal's include/lib locations to the native build via the
        # environment.
        env_extra = dict(
                PIPCL_SYSCONFIG_PATH_include = sysconfig.get_path('include'),
                PIPCL_SYSCONFIG_PATH_platinclude = sysconfig.get_path('platinclude'),
                PIPCL_SYSCONFIG_CONFIG_VAR_LIBDIR = sysconfig.get_config_var('LIBDIR'),
                )
        # Tell native build to run pipcl.py itself to get python-config
        # information about include paths etc.
        if self.graal_legacy_python_config:
            env_extra['PIPCL_PYTHON_CONFIG'] = f'{python_native} {os.path.abspath(__file__)} --graal-legacy-python-config'
        # Create venv (or reuse one named by PIPCL_GRAAL_NATIVE_VENV).
        venv_name = os.environ.get('PIPCL_GRAAL_NATIVE_VENV')
        if venv_name:
            log1(f'Graal using pre-existing {venv_name=}')
        else:
            venv_name = 'venv-pipcl-graal-native'
            run(f'{shlex.quote(python_native)} -m venv {venv_name}')
        log1(f'Graal using {venv_name=}')
        # NewFiles presumably tracks which *.whl files are newly created
        # by the sub-build — TODO confirm against its definition.
        newfiles = NewFiles(f'{wheel_directory}/*.whl')
        run(
                f'. {venv_name}/bin/activate && python setup.py --dist-dir {shlex.quote(wheel_directory)} bdist_wheel',
                env_extra = env_extra,
                prefix = f'pipcl.py graal {python_native}: ',
                )
        wheel = newfiles.get_one()
        wheel_leaf = os.path.basename(wheel)
        # Ask the native Python for its `<major><minor>` version digits so
        # we can locate the `cp<NN>-abi3` tag in the generated wheel name.
        python_major_minor = run(f'{shlex.quote(python_native)} -c "import platform; import sys; sys.stdout.write(str().join(platform.python_version_tuple()[:2]))"', capture=1)
        cpabi = f'cp{python_major_minor}-abi3'
        assert cpabi in wheel_leaf, f'Expected wheel to be for {cpabi=}, but {wheel=}.'
        # Derive the graalpy wheel tag from Graal's extension suffix,
        # e.g. `.graalpy<gpver>-<cpver>...`.
        graalpy_ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        log1(f'{graalpy_ext_suffix=}')
        m = re.match(r'\.graalpy(\d+[^\-]*)-(\d+)', graalpy_ext_suffix)
        gpver = m[1]
        cpver = m[2]
        graalpy_wheel_tag = f'graalpy{cpver}-graalpy{gpver}_{cpver}_native'
        name = wheel_leaf.replace(cpabi, graalpy_wheel_tag)
        destination = f'{wheel_directory}/{name}'
        log0(f'### Graal build: copying {wheel=} to {destination=}')
        # Copying results in two wheels which appears to confuse pip, showing:
        # Found multiple .whl files; unspecified behaviour. Will call build_wheel.
        # So rename instead of copy.
        os.rename(wheel, destination)
        log1(f'Returning {name=}.')
        return name

    # Normal (non-graal) build.
    wheel_name = self.wheel_name()
    path = f'{wheel_directory}/{wheel_name}'

    # Do a build and get list of files to copy into the wheel.
    #
    items = list()
    if self.fn_build:
        items = self._call_fn_build(config_settings)

    log2(f'Creating wheel: {path}')
    os.makedirs(wheel_directory, exist_ok=True)
    record = _Record()
    with zipfile.ZipFile(path, 'w', self.wheel_compression, self.wheel_compresslevel) as z:

        # Every addition goes through add()/add_str() so that the RECORD
        # hashes stay in sync with the archive contents.
        def add(from_, to_):
            if isinstance(from_, str):
                z.write(from_, to_)
                record.add_file(from_, to_)
            elif isinstance(from_, bytes):
                z.writestr(to_, from_)
                record.add_content(from_, to_)
            else:
                assert 0
        def add_str(content, to_):
            add(content.encode('utf8'), to_)

        dist_info_dir = self._dist_info_dir()

        # Add the files returned by fn_build().
        #
        for item in items:
            from_, (to_abs, to_rel) = self._fromto(item)
            add(from_, to_rel)

        # Add <name>-<version>.dist-info/WHEEL.
        #
        add_str(
                f'Wheel-Version: 1.0\n'
                f'Generator: pipcl\n'
                f'Root-Is-Purelib: false\n'
                f'Tag: {self.wheel_tag_string()}\n'
                ,
                f'{dist_info_dir}/WHEEL',
                )
        # Add <name>-<version>.dist-info/METADATA.
        #
        add_str(self._metainfo(), f'{dist_info_dir}/METADATA')

        # Add <name>-<version>.dist-info/COPYING.
        if self.license:
            add_str(self.license, f'{dist_info_dir}/COPYING')

        # Add <name>-<version>.dist-info/entry_points.txt.
        entry_points_text = self._entry_points_text()
        if entry_points_text:
            add_str(entry_points_text, f'{dist_info_dir}/entry_points.txt')

        # Update <name>-<version>.dist-info/RECORD. This must be last.
        #
        z.writestr(f'{dist_info_dir}/RECORD', record.get(f'{dist_info_dir}/RECORD'))

    st = os.stat(path)
    log1( f'Have created wheel size={st.st_size:,}: {path}')
    if g_verbose >= 2:
        with zipfile.ZipFile(path, compression=self.wheel_compression) as z:
            log2(f'Contents are:')
            for zi in sorted(z.infolist(), key=lambda z: z.filename):
                log2(f'    {zi.file_size: 10,d} {zi.filename}')

    return os.path.basename(path)
def build_sdist(self,
        sdist_directory,
        formats,
        config_settings=None,
        ):
    '''
    A PEP-517 `build_sdist()` function.
    Also called by `handle_argv()` to handle the `sdist` command.
    Returns leafname of generated archive within `sdist_directory`.
    '''
    assert self.fn_sdist, f'fn_sdist() not provided.'
    log2(
            f' sdist_directory={sdist_directory!r}'
            f' formats={formats!r}'
            f' config_settings={config_settings!r}'
            )
    if formats and formats != 'gztar':
        raise Exception( f'Unsupported: formats={formats}')
    # fn_sdist() may optionally accept a single config_settings arg;
    # call it with whichever signature it declares.
    items = list()
    if inspect.signature(self.fn_sdist).parameters:
        items = self.fn_sdist(config_settings)
    else:
        items = self.fn_sdist()

    # Sdist archive members all live under `<name>-<version>/`.
    prefix = f'{_normalise2(self.name)}-{self.version}'
    os.makedirs(sdist_directory, exist_ok=True)
    tarpath = f'{sdist_directory}/{prefix}.tar.gz'
    log2(f'Creating sdist: {tarpath}')

    with tarfile.open(tarpath, 'w:gz') as tar:
        names_in_tar = list()
        # Duplicate names in the archive are a caller error; fail loudly.
        def check_name(name):
            if name in names_in_tar:
                raise Exception(f'Name specified twice: {name}')
            names_in_tar.append(name)
        # `from_` is a path (str) or literal file contents (bytes).
        def add(from_, name):
            check_name(name)
            if isinstance(from_, str):
                log2( f'Adding file: {os.path.relpath(from_)} => {name}')
                tar.add( from_, f'{prefix}/{name}', recursive=False)
            elif isinstance(from_, bytes):
                log2( f'Adding: {name}')
                ti = tarfile.TarInfo(f'{prefix}/{name}')
                ti.size = len(from_)
                ti.mtime = time.time()
                tar.addfile(ti, io.BytesIO(from_))
            else:
                assert 0
        def add_string(text, name):
            textb = text.encode('utf8')
            return add(textb, name)

        found_pyproject_toml = False
        for item in items:
            from_, (to_abs, to_rel) = self._fromto(item)
            if isinstance(from_, bytes):
                add(from_, to_rel)
            else:
                if from_.startswith(f'{os.path.abspath(sdist_directory)}/'):
                    # Source files should not be inside <sdist_directory>.
                    assert 0, f'Path is inside sdist_directory={sdist_directory}: {from_!r}'
                assert os.path.exists(from_), f'Path does not exist: {from_!r}'
                assert os.path.isfile(from_), f'Path is not a file: {from_!r}'
                add(from_, to_rel)
                if to_rel == 'pyproject.toml':
                    found_pyproject_toml = True
        # The sdist specification requires pyproject.toml to be present.
        assert found_pyproject_toml, f'Cannot create sdist because file not specified: pyproject.toml'

        # Always add a PKG-INFO file.
        add_string(self._metainfo(), 'PKG-INFO')

        if self.license:
            if 'COPYING' in names_in_tar:
                log2(f'Not writing .license because file already in sdist: COPYING')
            else:
                add_string(self.license, 'COPYING')

    log1( f'Have created sdist: {tarpath}')
    return os.path.basename(tarpath)
def wheel_tag_string(self):
    '''
    The PEP-425 compatibility tag: <tag_python>-<tag_abi>-<tag_platform>.
    '''
    parts = (self.tag_python(), self.tag_abi(), self.tag_platform())
    return '-'.join(parts)
def tag_python(self):
    '''
    Python tag for the wheel name: the explicit constructor override if
    given, else e.g. 'cp38' for python-3.8.6.
    '''
    if self.tag_python_:
        tag = self.tag_python_
    else:
        major, minor = platform.python_version_tuple()[:2]
        tag = f'cp{major}{minor}'
    # `-` is the tag separator in wheel names, so must not appear here.
    assert '-' not in tag
    return tag
def tag_abi(self):
    '''
    ABI tag for the wheel name: the explicit constructor override if
    given, 'abi3' for limited-API builds, otherwise 'none'.
    '''
    if self.tag_abi_:
        return self.tag_abi_
    return 'abi3' if self.py_limited_api else 'none'
def tag_platform(self):
    '''
    Find platform tag used in wheel filename.

    Resolution order: the explicit `tag_platform` constructor arg, then
    the AUDITWHEEL_PLAT environment variable, then a normalised
    `sysconfig.get_platform()` value.
    '''
    ret = self.tag_platform_
    log0(f'From self.tag_platform_: {ret=}.')
    if not ret:
        # Prefer this to PEP-425. Appears to be undocumented,
        # but set in manylinux docker images and appears
        # to be used by cibuildwheel and auditwheel, e.g.
        # https://github.com/rapidsai/shared-action-workflows/issues/80
        ret = os.environ.get( 'AUDITWHEEL_PLAT')
        log0(f'From AUDITWHEEL_PLAT: {ret=}.')
    if not ret:
        # Notes:
        #
        # PEP-425. On Linux gives `linux_x86_64` which is rejected by
        # pypi.org.
        #
        # On local MacOS/arm64 mac-mini have seen sysconfig.get_platform()
        # unhelpfully return `macosx-10.9-universal2` if `python3` is the
        # system Python /usr/bin/python3; this happens if we source `.
        # /etc/profile`.
        #
        ret = sysconfig.get_platform()
        # Platform tags use lower-case with underscores only.
        ret = ret.replace('-', '_').replace('.', '_').lower()
        log0(f'From sysconfig.get_platform(): {ret=}.')
    ret = _macos_fixup_platform_tag(ret)
    log0( f'tag_platform(): returning {ret=}.')
    # `-` is the tag separator in wheel names, so must not appear here.
    assert '-' not in ret
    return ret
def wheel_name(self):
    '''Leafname of the wheel for this package on this platform.'''
    ret = '-'.join((
            _normalise2(self.name),
            self.version,
            self.tag_python(),
            self.tag_abi(),
            self.tag_platform(),
            )) + '.whl'
    assert ret.count('-') == 4, f'Expected 4 dash characters in {ret=}.'
    return ret
    def wheel_name_match(self, wheel):
        '''
        Returns true if `wheel` matches our wheel. We basically require the
        name to be the same, except that we accept platform tags that contain
        extra items (see pep-0600/), for example we return true with:
            self:  foo-cp38-none-manylinux2014_x86_64.whl
            wheel: foo-cp38-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl
        '''
        log2(f'{wheel=}')
        assert wheel.endswith('.whl')
        wheel2 = wheel[:-len('.whl')]
        # Wheel filenames always have exactly five dash-separated fields.
        name, version, tag_python, tag_abi, tag_platform = wheel2.split('-')
        py_limited_api_compatible = False
        if self.py_limited_api and tag_abi == 'abi3':
            # Allow lower tag_python number: a stable-ABI wheel built for an
            # older Python works on newer Pythons too.
            m = re.match('cp([0-9]+)', tag_python)
            tag_python_int = int(m.group(1))
            m = re.match('cp([0-9]+)', self.tag_python())
            tag_python_int_self = int(m.group(1))
            if tag_python_int <= tag_python_int_self:
                # This wheel uses Python stable ABI same or older than ours, so
                # we can use it.
                log2(f'py_limited_api; {tag_python=} compatible with {self.tag_python()=}.')
                py_limited_api_compatible = True
        log2(f'{_normalise2(self.name) == name=}')
        log2(f'{self.version == version=}')
        log2(f'{self.tag_python() == tag_python=} {self.tag_python()=} {tag_python=}')
        log2(f'{py_limited_api_compatible=}')
        log2(f'{self.tag_abi() == tag_abi=}')
        log2(f'{self.tag_platform() in tag_platform.split(".")=}')
        log2(f'{self.tag_platform()=}')
        log2(f'{tag_platform.split(".")=}')
        # PEP-600: a wheel's platform tag may be a '.'-separated list of
        # compatible platforms; ours must be one of them.
        ret = (1
                and _normalise2(self.name) == name
                and self.version == version
                and (self.tag_python() == tag_python or py_limited_api_compatible)
                and self.tag_abi() == tag_abi
                and self.tag_platform() in tag_platform.split('.')
                )
        log2(f'Returning {ret=}.')
        return ret
def _entry_points_text(self):
if self.entry_points:
if isinstance(self.entry_points, str):
return self.entry_points
ret = ''
for key, values in self.entry_points.items():
ret += f'[{key}]\n'
for value in values:
ret += f'{value}\n'
return ret
    def _call_fn_build( self, config_settings=None):
        '''
        Runs `self.fn_build` and returns its list/tuple of build items after
        sanity-checking. Raises AssertionError if `pyproject.toml` is missing,
        the callback's return value is not a list/tuple, or any built
        extension's py_limited_api flag disagrees with ours.
        '''
        assert self.fn_build
        assert os.path.isfile('pyproject.toml'), (
                'Cannot create package because file does not exist: pyproject.toml'
                )
        log2(f'calling self.fn_build={self.fn_build}')
        # Only pass config_settings if the callback declares parameters.
        if inspect.signature(self.fn_build).parameters:
            ret = self.fn_build(config_settings)
        else:
            ret = self.fn_build()
        assert isinstance( ret, (list, tuple)), \
                f'Expected list/tuple from {self.fn_build} but got: {ret!r}'
        # Check that any extensions that we have built, have same
        # py_limited_api value. If package is marked with py_limited_api=True
        # then non-py_limited_api extensions seem to fail at runtime on
        # Windows.
        #
        # (We could possibly allow package py_limited_api=False and extensions
        # py_limited_api=True, but haven't tested this, and it seems simpler to
        # be strict.)
        for item in ret:
            from_, (to_abs, to_rel) = self._fromto(item)
            from_abs = os.path.abspath(from_)
            # `build_extension()` records each extension it builds here.
            is_py_limited_api = _extensions_to_py_limited_api.get(from_abs)
            if is_py_limited_api is not None:
                assert bool(self.py_limited_api) == bool(is_py_limited_api), (
                        f'Extension was built with'
                        f' py_limited_api={is_py_limited_api} but pipcl.Package'
                        f' name={self.name!r} has'
                        f' py_limited_api={self.py_limited_api}:'
                        f' {from_abs!r}'
                        )
        return ret
def _argv_clean(self, all_):
'''
Called by `handle_argv()`.
'''
if not self.fn_clean:
return
paths = self.fn_clean(all_)
if paths:
if isinstance(paths, str):
paths = paths,
for path in paths:
if not os.path.isabs(path):
path = ps.path.join(self.root, path)
path = os.path.abspath(path)
assert path.startswith(self.root+os.sep), \
f'path={path!r} does not start with root={self.root+os.sep!r}'
log2(f'Removing: {path}')
shutil.rmtree(path, ignore_errors=True)
    def install(self, record_path=None, root=None):
        '''
        Called by `handle_argv()` to handle `install` command..

        Builds (via `self.fn_build` if set) and copies the resulting items
        into the install directory, writing METADATA, optional COPYING and
        entry_points.txt, and finally a RECORD file listing everything.

        Args:
            record_path:
                Path of RECORD file to write; defaults to
                `<root>/<dist-info-dir>/RECORD`.
            root:
                Base install directory; resolved via `install_dir()`.
        '''
        log2( f'{record_path=} {root=}')

        # Do a build and get list of files to install.
        #
        items = list()
        if self.fn_build:
            items = self._call_fn_build( dict())

        root2 = install_dir(root)
        log2( f'{root2=}')
        log1( f'Installing into: {root2!r}')
        dist_info_dir = self._dist_info_dir()

        if not record_path:
            record_path = f'{root2}/{dist_info_dir}/RECORD'
        record = _Record()

        # Copies a file (or writes bytes content) to `to_abs` and records it
        # in RECORD under `to_rel`.
        def add_file(from_, to_abs, to_rel):
            os.makedirs( os.path.dirname( to_abs), exist_ok=True)
            if isinstance(from_, bytes):
                log2(f'Copying content into {to_abs}.')
                with open(to_abs, 'wb') as f:
                    f.write(from_)
                record.add_content(from_, to_rel)
            else:
                log0(f'{from_=}')
                log2(f'Copying from {os.path.relpath(from_, self.root)} to {to_abs}')
                shutil.copy2( from_, to_abs)
                record.add_file(from_, to_rel)

        # Writes text content to `to_abs` and records it under `to_rel`.
        def add_str(content, to_abs, to_rel):
            log2( f'Writing to: {to_abs}')
            os.makedirs( os.path.dirname( to_abs), exist_ok=True)
            with open( to_abs, 'w') as f:
                f.write( content)
            record.add_content(content, to_rel)

        for item in items:
            from_, (to_abs, to_rel) = self._fromto(item)
            log0(f'{from_=} {to_abs=} {to_rel=}')
            to_abs2 = f'{root2}/{to_rel}'
            add_file( from_, to_abs2, to_rel)

        add_str( self._metainfo(), f'{root2}/{dist_info_dir}/METADATA', f'{dist_info_dir}/METADATA')

        if self.license:
            add_str( self.license, f'{root2}/{dist_info_dir}/COPYING', f'{dist_info_dir}/COPYING')

        entry_points_text = self._entry_points_text()
        if entry_points_text:
            add_str(
                    entry_points_text,
                    f'{root2}/{dist_info_dir}/entry_points.txt',
                    f'{dist_info_dir}/entry_points.txt',
                    )

        log2( f'Writing to: {record_path}')
        with open(record_path, 'w') as f:
            f.write(record.get())

        log2(f'Finished.')
def _argv_dist_info(self, root):
'''
Called by `handle_argv()`. There doesn't seem to be any documentation
for `setup.py dist_info`, but it appears to be like `egg_info` except
it writes to a slightly different directory.
'''
if root is None:
root = f'{normalise2(self.name)}-{self.version}.dist-info'
self._write_info(f'{root}/METADATA')
if self.license:
with open( f'{root}/COPYING', 'w') as f:
f.write( self.license)
def _argv_egg_info(self, egg_base):
'''
Called by `handle_argv()`.
'''
if egg_base is None:
egg_base = '.'
self._write_info(f'{egg_base}/.egg-info')
def _write_info(self, dirpath=None):
'''
Writes egg/dist info to files in directory `dirpath` or `self.root` if
`None`.
'''
if dirpath is None:
dirpath = self.root
log2(f'Creating files in directory {dirpath}')
os.makedirs(dirpath, exist_ok=True)
with open(os.path.join(dirpath, 'PKG-INFO'), 'w') as f:
f.write(self._metainfo())
# These don't seem to be required?
#
#with open(os.path.join(dirpath, 'SOURCES.txt', 'w') as f:
# pass
#with open(os.path.join(dirpath, 'dependency_links.txt', 'w') as f:
# pass
#with open(os.path.join(dirpath, 'top_level.txt', 'w') as f:
# f.write(f'{self.name}\n')
#with open(os.path.join(dirpath, 'METADATA', 'w') as f:
# f.write(self._metainfo())
def handle_argv(self, argv):
'''
Attempt to handles old-style (pre PEP-517) command line passed by
old releases of pip to a `setup.py` script, and manual running of
`setup.py`.
This is partial support at best.
'''
global g_verbose
#log2(f'argv: {argv}')
class ArgsRaise:
pass
class Args:
'''
Iterates over argv items.
'''
def __init__( self, argv):
self.items = iter( argv)
def next( self, eof=ArgsRaise):
'''
Returns next arg. If no more args, we return <eof> or raise an
exception if <eof> is ArgsRaise.
'''
try:
return next( self.items)
except StopIteration:
if eof is ArgsRaise:
raise Exception('Not enough args')
return eof
command = None
opt_all = None
opt_dist_dir = 'dist'
opt_egg_base = None
opt_formats = None
opt_install_headers = None
opt_record = None
opt_root = None
args = Args(argv[1:])
while 1:
arg = args.next(None)
if arg is None:
break
elif arg in ('-h', '--help', '--help-commands'):
log0(textwrap.dedent('''
Usage:
[<options>...] <command> [<options>...]
Commands:
bdist_wheel
Creates a wheel called
<dist-dir>/<name>-<version>-<details>.whl, where
<dist-dir> is "dist" or as specified by --dist-dir,
and <details> encodes ABI and platform etc.
clean
Cleans build files.
dist_info
Creates files in <name>-<version>.dist-info/ or
directory specified by --egg-base.
egg_info
Creates files in .egg-info/ or directory
directory specified by --egg-base.
install
Builds and installs. Writes installation
information to <record> if --record was
specified.
sdist
Make a source distribution:
<dist-dir>/<name>-<version>.tar.gz
Options:
--all
Used by "clean".
--compile
Ignored.
--dist-dir | -d <dist-dir>
Default is "dist".
--egg-base <egg-base>
Used by "egg_info".
--formats <formats>
Used by "sdist".
--install-headers <directory>
Ignored.
--python-tag <python-tag>
Ignored.
--record <record>
Used by "install".
--root <path>
Used by "install".
--single-version-externally-managed
Ignored.
--verbose -v
Extra diagnostics.
Other:
windows-vs [-y <year>] [-v <version>] [-g <grade] [--verbose]
Windows only; looks for matching Visual Studio.
windows-python [-v <version>] [--verbose]
Windows only; looks for matching Python.
'''))
return
elif arg in ('bdist_wheel', 'clean', 'dist_info', 'egg_info', 'install', 'sdist'):
assert command is None, 'Two commands specified: {command} and {arg}.'
command = arg
elif arg in ('windows-vs', 'windows-python', 'show-sysconfig'):
assert command is None, 'Two commands specified: {command} and {arg}.'
command = arg
elif arg == '--all': opt_all = True
elif arg == '--compile': pass
elif arg == '--dist-dir' or arg == '-d': opt_dist_dir = args.next()
elif arg == '--egg-base': opt_egg_base = args.next()
elif arg == '--formats': opt_formats = args.next()
elif arg == '--install-headers': opt_install_headers = args.next()
elif arg == '--python-tag': pass
elif arg == '--record': opt_record = args.next()
elif arg == '--root': opt_root = args.next()
elif arg == '--single-version-externally-managed': pass
elif arg == '--verbose' or arg == '-v': g_verbose += 1
else:
raise Exception(f'Unrecognised arg: {arg}')
assert command, 'No command specified'
log1(f'Handling command={command}')
if 0: pass
elif command == 'bdist_wheel': self.build_wheel(opt_dist_dir)
elif command == 'clean': self._argv_clean(opt_all)
elif command == 'dist_info': self._argv_dist_info(opt_egg_base)
elif command == 'egg_info': self._argv_egg_info(opt_egg_base)
elif command == 'install': self.install(opt_record, opt_root)
elif command == 'sdist': self.build_sdist(opt_dist_dir, opt_formats)
elif command == 'windows-python':
version = None
while 1:
arg = args.next(None)
if arg is None:
break
elif arg == '-v':
version = args.next()
elif arg == '--verbose':
g_verbose += 1
else:
assert 0, f'Unrecognised {arg=}'
python = wdev.WindowsPython(version=version)
print(f'Python is:\n{python.description_ml(" ")}')
elif command == 'windows-vs':
grade = None
version = None
year = None
while 1:
arg = args.next(None)
if arg is None:
break
elif arg == '-g':
grade = args.next()
elif arg == '-v':
version = args.next()
elif arg == '-y':
year = args.next()
elif arg == '--verbose':
g_verbose += 1
else:
assert 0, f'Unrecognised {arg=}'
vs = wdev.WindowsVS(year=year, grade=grade, version=version)
print(f'Visual Studio is:\n{vs.description_ml(" ")}')
elif command == 'show-sysconfig':
show_sysconfig()
for mod in platform, sys:
log0(f'{mod.__name__}:')
for n in dir(mod):
if n.startswith('_'):
continue
log0(f'{mod.__name__}.{n}')
if mod is platform and n == 'uname':
continue
if mod is platform and n == 'pdb':
continue
if mod is sys and n in ('breakpointhook', 'exit'):
# We don't want to call these.
continue
v = getattr(mod, n)
if callable(v):
try:
v = v()
except Exception:
pass
else:
#print(f'{n=}', flush=1)
try:
print(f' {mod.__name__}.{n}()={v!r}')
except Exception:
print(f' Failed to print value of {mod.__name__}.{n}().')
else:
try:
print(f' {mod.__name__}.{n}={v!r}')
except Exception:
print(f' Failed to print value of {mod.__name__}.{n}.')
else:
assert 0, f'Unrecognised command: {command}'
log2(f'Finished handling command: {command}')
def __str__(self):
return ('{'
f'name={self.name!r}'
f' version={self.version!r}'
f' platform={self.platform!r}'
f' supported_platform={self.supported_platform!r}'
f' summary={self.summary!r}'
f' description={self.description!r}'
f' description_content_type={self.description_content_type!r}'
f' keywords={self.keywords!r}'
f' home_page={self.home_page!r}'
f' download_url={self.download_url!r}'
f' author={self.author!r}'
f' author_email={self.author_email!r}'
f' maintainer={self.maintainer!r}'
f' maintainer_email={self.maintainer_email!r}'
f' license={self.license!r}'
f' classifier={self.classifier!r}'
f' requires_dist={self.requires_dist!r}'
f' requires_python={self.requires_python!r}'
f' requires_external={self.requires_external!r}'
f' project_url={self.project_url!r}'
f' provides_extra={self.provides_extra!r}'
f' root={self.root!r}'
f' fn_build={self.fn_build!r}'
f' fn_sdist={self.fn_sdist!r}'
f' fn_clean={self.fn_clean!r}'
f' tag_python={self.tag_python_!r}'
f' tag_abi={self.tag_abi_!r}'
f' tag_platform={self.tag_platform_!r}'
'}'
)
def _dist_info_dir( self):
return f'{_normalise2(self.name)}-{self.version}.dist-info'
def _metainfo(self):
'''
Returns text for `.egg-info/PKG-INFO` file, or `PKG-INFO` in an sdist
`.tar.gz` file, or `...dist-info/METADATA` in a wheel.
'''
# 2021-04-30: Have been unable to get multiline content working on
# test.pypi.org so we currently put the description as the body after
# all the other headers.
#
ret = ['']
def add(key, value):
if value is None:
return
if isinstance( value, (tuple, list)):
for v in value:
if v is not None:
add( key, v)
return
if key == 'License' and '\n' in value:
# This is ok because we write `self.license` into
# *.dist-info/COPYING.
#
log1( f'Omitting license because contains newline(s).')
return
assert '\n' not in value, f'key={key} value contains newline: {value!r}'
if key == 'Project-URL':
assert value.count(',') == 1, f'For {key=}, should have one comma in {value!r}.'
ret[0] += f'{key}: {value}\n'
#add('Description', self.description)
add('Metadata-Version', '2.1')
# These names are from:
# https://packaging.python.org/specifications/core-metadata/
#
for name in (
'Name',
'Version',
'Platform',
'Supported-Platform',
'Summary',
'Description-Content-Type',
'Keywords',
'Home-page',
'Download-URL',
'Author',
'Author-email',
'Maintainer',
'Maintainer-email',
'License',
'Classifier',
'Requires-Dist',
'Requires-Python',
'Requires-External',
'Project-URL',
'Provides-Extra',
):
identifier = name.lower().replace( '-', '_')
add( name, getattr( self, identifier))
ret = ret[0]
# Append description as the body
if self.description:
if '\n' in self.description:
description_text = self.description.strip()
else:
with open(self.description) as f:
description_text = f.read()
ret += '\n' # Empty line separates headers from body.
ret += description_text
ret += '\n'
return ret
def _path_relative_to_root(self, path, assert_within_root=True):
'''
Returns `(path_abs, path_rel)`, where `path_abs` is absolute path and
`path_rel` is relative to `self.root`.
Interprets `path` as relative to `self.root` if not absolute.
We use `os.path.realpath()` to resolve any links.
if `assert_within_root` is true, assert-fails if `path` is not within
`self.root`.
'''
if os.path.isabs(path):
p = path
else:
p = os.path.join(self.root, path)
p = os.path.realpath(os.path.abspath(p))
if assert_within_root:
assert p.startswith(self.root+os.sep) or p == self.root, \
f'Path not within root={self.root+os.sep!r}: {path=} {p=}'
p_rel = os.path.relpath(p, self.root)
return p, p_rel
    def _fromto(self, p):
        '''
        Returns `(from_, (to_abs, to_rel))`.

        If `p` is a string we convert to `(p, p)`. Otherwise we assert that
        `p` is a tuple `(from_, to_)` where `from_` is str/bytes and `to_` is
        str. If `from_` is a bytes it is contents of file to add, otherwise the
        path of an existing file; non-absolute paths are assumed to be relative
        to `self.root`.

        If `to_` is empty or `/` we set it to the leaf of `from_` (which must
        be a str) - i.e. we place the file in the root directory of the wheel;
        otherwise if `to_` ends with `/` we append the leaf of `from_` (which
        must be a str).

        If `to_` starts with `$dist-info/`, we replace this with
        `self._dist_info_dir()`.

        If `to_` starts with `$data/`, we replace this with
        `{self.name}-{self.version}.data/`.

        We assert that `to_abs` is `within self.root`.
        `to_rel` is derived from the `to_abs` and is relative to self.root`.
        '''
        ret = None
        if isinstance(p, str):
            p = p, p
        assert isinstance(p, tuple) and len(p) == 2
        from_, to_ = p
        assert isinstance(from_, (str, bytes))
        assert isinstance(to_, str)
        # Empty/`/` destination means "wheel root"; trailing `/` means
        # "directory, use from_'s leafname".
        if to_ == '/' or to_ == '':
            to_ = os.path.basename(from_)
        elif to_.endswith('/'):
            to_ += os.path.basename(from_)
        # Expand the `$dist-info/` and `$data/` destination prefixes.
        prefix = '$dist-info/'
        if to_.startswith( prefix):
            to_ = f'{self._dist_info_dir()}/{to_[ len(prefix):]}'
        prefix = '$data/'
        if to_.startswith( prefix):
            to_ = f'{_normalise2(self.name)}-{self.version}.data/{to_[ len(prefix):]}'
        if isinstance(from_, str):
            # Source path may be outside self.root (e.g. a generated file).
            from_, _ = self._path_relative_to_root( from_, assert_within_root=False)
        to_ = self._path_relative_to_root(to_)
        assert isinstance(from_, (str, bytes))
        log2(f'returning {from_=} {to_=}')
        return from_, to_
# Maps absolute path of each extension built by `build_extension()` to the
# `py_limited_api` flag it was built with; checked by
# `Package._call_fn_build()` to ensure package and extensions agree.
_extensions_to_py_limited_api = dict()
def build_extension(
        name,
        path_i,
        outdir,
        *,
        builddir=None,
        includes=None,
        defines=None,
        libpaths=None,
        libs=None,
        optimise=True,
        debug=False,
        compiler_extra='',
        linker_extra='',
        swig=None,
        cpp=True,
        source_extra=None,
        prerequisites_swig=None,
        prerequisites_compile=None,
        prerequisites_link=None,
        infer_swig_includes=True,
        py_limited_api=False,
        ):
    '''
    Builds a Python extension module using SWIG. Works on Windows, Linux, MacOS
    and OpenBSD.

    On Unix, sets rpath when linking shared libraries.

    Args:
        name:
            Name of generated extension module.
        path_i:
            Path of input SWIG `.i` file. Internally we use swig to generate a
            corresponding `.c` or `.cpp` file.
        outdir:
            Output directory for generated files:
                * `{outdir}/{name}.py`
                * `{outdir}/_{name}.so`     # Unix
                * `{outdir}/_{name}.*.pyd`  # Windows
            We return the leafname of the `.so` or `.pyd` file.
        builddir:
            Where to put intermediate files, for example the .cpp file
            generated by swig and `.d` dependency files. Default is `outdir`.
        includes:
            A string, or a sequence of extra include directories to be prefixed
            with `-I`.
        defines:
            A string, or a sequence of extra preprocessor defines to be
            prefixed with `-D`.
        libpaths
            A string, or a sequence of library paths to be prefixed with
            `/LIBPATH:` on Windows or `-L` on Unix.
        libs
            A string, or a sequence of library names. Each item is prefixed
            with `-l` on non-Windows.
        optimise:
            Whether to use compiler optimisations and define NDEBUG.
        debug:
            Whether to build with debug symbols.
        compiler_extra:
            Extra compiler flags. Can be None.
        linker_extra:
            Extra linker flags. Can be None.
        swig:
            Swig command; if false we use 'swig'.
        cpp:
            If true we tell SWIG to generate C++ code instead of C.
        source_extra:
            Extra source files to build into the shared library,
        prerequisites_swig:
        prerequisites_compile:
        prerequisites_link:
            [These are mainly for use on Windows. On other systems we
            automatically generate dynamic dependencies using swig/compile/link
            commands' `-MD` and `-MF` args.]

            Sequences of extra input files/directories that should force
            running of swig, compile or link commands if they are newer than
            any existing generated SWIG `.i` file, compiled object file or
            shared library file.

            If present, the first occurrence of `True` or `False` forces re-run
            or no re-run. Any occurrence of None is ignored. If an item is a
            directory path we look for newest file within the directory tree.

            If not a sequence, we convert into a single-item list.

            prerequisites_swig
                We use swig's -MD and -MF args to generate dynamic dependencies
                automatically, so this is not usually required.
            prerequisites_compile
            prerequisites_link
                On non-Windows we use cc's -MF and -MF args to generate dynamic
                dependencies so this is not usually required.
        infer_swig_includes:
            If true, we extract `-I<path>` and `-I <path>` args from
            `compile_extra` (also `/I` on windows) and use them with swig so
            that it can see the same header files as C/C++. This is useful
            when using enviromment variables such as `CC` and `CXX` to set
            `compile_extra`.
        py_limited_api:
            If true we build for current Python's limited API / stable ABI.
            Note that we will assert false if this extension is added to a
            pipcl.Package that has a different <py_limited_api>, because
            on Windows importing a non-py_limited_api extension inside a
            py_limited=True package fails.

    Returns the leafname of the generated library file within `outdir`, e.g.
    `_{name}.so` on Unix or `_{name}.cp311-win_amd64.pyd` on Windows.
    '''
    # Normalise optional args so later code can assume concrete values.
    if compiler_extra is None:
        compiler_extra = ''
    if linker_extra is None:
        linker_extra = ''
    if builddir is None:
        builddir = outdir
    if not swig:
        swig = 'swig'
    if source_extra is None:
        source_extra = list()
    if isinstance(source_extra, str):
        source_extra = [source_extra]
    includes_text = _flags( includes, '-I')
    defines_text = _flags( defines, '-D')
    libpaths_text = _flags( libpaths, '/LIBPATH:', '"') if windows() else _flags( libpaths, '-L')
    libs_text = _flags( libs, '' if windows() else '-l')
    path_cpp = f'{builddir}/{os.path.basename(path_i)}'
    path_cpp += '.cpp' if cpp else '.c'
    os.makedirs( outdir, exist_ok=True)

    # Run SWIG.
    #
    if infer_swig_includes:
        # Extract include flags from `compiler_extra`.
        swig_includes_extra = ''
        compiler_extra_items = shlex.split(compiler_extra)
        i = 0
        while i < len(compiler_extra_items):
            item = compiler_extra_items[i]
            # Swig doesn't seem to like a space after `I`.
            if item == '-I' or (windows() and item == '/I'):
                # `-I <path>` form: consume the following item as the path.
                swig_includes_extra += f' -I{compiler_extra_items[i+1]}'
                i += 1
            elif item.startswith('-I') or (windows() and item.startswith('/I')):
                swig_includes_extra += f' -I{compiler_extra_items[i][2:]}'
            i += 1
        swig_includes_extra = swig_includes_extra.strip()
    deps_path = f'{path_cpp}.d'
    prerequisites_swig2 = _get_prerequisites( deps_path)
    run_if(
            f'''
            {swig}
                -Wall
                {"-c++" if cpp else ""}
                -python
                -module {name}
                -outdir {outdir}
                -o {path_cpp}
                -MD -MF {deps_path}
                {includes_text}
                {swig_includes_extra}
                {path_i}
            '''
            ,
            path_cpp,
            path_i,
            prerequisites_swig,
            prerequisites_swig2,
            )

    if pyodide():
        so_suffix = '.so'
        log0(f'pyodide: PEP-3149 suffix untested, so omitting. {_so_suffix()=}.')
    else:
        # Stable-ABI builds use a plain, unversioned suffix.
        so_suffix = _so_suffix(use_so_versioning = not py_limited_api)
    path_so_leaf = f'_{name}{so_suffix}'
    path_so = f'{outdir}/{path_so_leaf}'

    py_limited_api2 = current_py_limited_api() if py_limited_api else None

    compiler_command, pythonflags = base_compiler(cpp=cpp)
    linker_command, _ = base_linker(cpp=cpp)

    # setuptools on Linux seems to use slightly different compile flags:
    #
    # -fwrapv -O3 -Wall -O2 -g0 -DPY_CALL_TRAMPOLINE
    #
    general_flags = ''
    if windows():
        permissive = '/permissive-'
        EHsc = '/EHsc'
        T = '/Tp' if cpp else '/Tc'
        optimise2 = '/DNDEBUG /O2' if optimise else '/D_DEBUG'
        debug2 = '/Zi' if debug else ''
        py_limited_api3 = f'/DPy_LIMITED_API={py_limited_api2}' if py_limited_api2 else ''
    else:
        if debug:
            general_flags += '/Zi' if windows() else ' -g'
        if optimise:
            general_flags += ' /DNDEBUG /O2' if windows() else ' -O2 -DNDEBUG'
        py_limited_api3 = f'-DPy_LIMITED_API={py_limited_api2}' if py_limited_api2 else ''

    if windows():
        pass
    elif darwin():
        # MacOS's linker does not like `-z origin`.
        rpath_flag = "-Wl,-rpath,@loader_path/"
        # Avoid `Undefined symbols for ... "_PyArg_UnpackTuple" ...'.
        general_flags += ' -undefined dynamic_lookup'
    elif pyodide():
        # Setting `-Wl,-rpath,'$ORIGIN',-z,origin` gives:
        # emcc: warning: ignoring unsupported linker flag: `-rpath` [-Wlinkflags]
        # wasm-ld: error: unknown -z value: origin
        #
        rpath_flag = "-Wl,-rpath,'$ORIGIN'"
    else:
        rpath_flag = "-Wl,-rpath,'$ORIGIN',-z,origin"

    # Fun fact - on Linux, if the -L and -l options are before '{path_cpp}'
    # they seem to be ignored...
    #
    path_os = list()
    for path_source in [path_cpp] + source_extra:
        path_o = f'{path_source}.obj' if windows() else f'{path_source}.o'
        path_os.append(f' {path_o}')
        prerequisites_path = f'{path_o}.d'
        if windows():
            compiler_command2 = f'''
                    {compiler_command}
                    # General:
                    /c                          # Compiles without linking.
                    {EHsc}                      # Enable "Standard C++ exception handling".
                    #/MD                        # Creates a multithreaded DLL using MSVCRT.lib.
                    {'/MDd' if debug else '/MD'}
                    # Input/output files:
                    {T}{path_source}            # /Tp specifies C++ source file.
                    /Fo{path_o}                 # Output file. codespell:ignore
                    # Include paths:
                    {includes_text}
                    {pythonflags.includes}      # Include path for Python headers.
                    # Code generation:
                    {optimise2}
                    {debug2}
                    {permissive}                # Set standard-conformance mode.
                    # Diagnostics:
                    #/FC                        # Display full path of source code files passed to cl.exe in diagnostic text.
                    /W3                         # Sets which warning level to output. /W3 is IDE default.
                    /diagnostics:caret          # Controls the format of diagnostic messages.
                    /nologo                     #
                    {defines_text}
                    {compiler_extra}
                    {py_limited_api3}
                    '''
        else:
            compiler_command2 = f'''
                    {compiler_command}
                    -fPIC
                    {general_flags.strip()}
                    {pythonflags.includes}
                    {includes_text}
                    {defines_text}
                    -MD -MF {prerequisites_path}
                    -c {path_source}
                    -o {path_o}
                    {compiler_extra}
                    {py_limited_api3}
                    '''
        run_if(
                compiler_command2,
                path_o,
                path_source,
                [path_source] + _get_prerequisites(prerequisites_path),
                )

    # Link
    prerequisites_path = f'{path_so}.d'
    if windows():
        debug2 = '/DEBUG' if debug else ''
        base, _ = os.path.splitext(path_so_leaf)
        command2 = f'''
                {linker_command}
                /DLL                    # Builds a DLL.
                /EXPORT:PyInit__{name}  # Exports a function.
                /IMPLIB:{base}.lib      # Overrides the default import library name.
                {libpaths_text}
                {pythonflags.ldflags}
                /OUT:{path_so}          # Specifies the output file name.
                {debug2}
                /nologo
                {libs_text}
                {' '.join(path_os)}
                {linker_extra}
                '''
    elif pyodide():
        command2 = f'''
                {linker_command}
                -MD -MF {prerequisites_path}
                -o {path_so}
                {' '.join(path_os)}
                {libpaths_text}
                {libs_text}
                {linker_extra}
                {pythonflags.ldflags}
                {rpath_flag}
                '''
    else:
        command2 = f'''
                {linker_command}
                -shared
                {general_flags.strip()}
                -MD -MF {prerequisites_path}
                -o {path_so}
                {' '.join(path_os)}
                {libpaths_text}
                {libs_text}
                {linker_extra}
                {pythonflags.ldflags}
                {rpath_flag}
                {py_limited_api3}
                '''
    link_was_run = run_if(
            command2,
            path_so,
            path_cpp,
            *path_os,
            *_get_prerequisites(f'{path_so}.d'),
            )

    if link_was_run and darwin():
        # We need to patch up references to shared libraries in `libs`.
        sublibraries = list()
        for lib in () if libs is None else libs:
            for libpath in libpaths:
                found = list()
                for suffix in '.so', '.dylib':
                    path = f'{libpath}/lib{os.path.basename(lib)}{suffix}'
                    if os.path.exists( path):
                        found.append( path)
                if found:
                    assert len(found) == 1, f'More than one file matches lib={lib!r}: {found}'
                    sublibraries.append( found[0])
                    break
            else:
                log2(f'Warning: can not find path of lib={lib!r} in libpaths={libpaths}')
        macos_patch( path_so, *sublibraries)

    #run(f'ls -l {path_so}', check=0)
    #run(f'file {path_so}', check=0)

    # Record stable-ABI flag so Package._call_fn_build() can cross-check.
    _extensions_to_py_limited_api[os.path.abspath(path_so)] = py_limited_api
    return path_so_leaf
# Functions that might be useful.
#
def base_compiler(vs=None, pythonflags=None, cpp=False, use_env=True):
    '''
    Returns basic compiler command and PythonFlags.

    Args:
        vs:
            Windows only. A `wdev.WindowsVS` instance or None to use default
            `wdev.WindowsVS` instance.
        pythonflags:
            A `pipcl.PythonFlags` instance or None to use default
            `pipcl.PythonFlags` instance.
        cpp:
            If true we return C++ compiler command instead of C. On Windows
            this has no effect - we always return `cl.exe`.
        use_env:
            If true we return '$CC' or '$CXX' if the corresponding
            environmental variable is set (without evaluating with `getenv()`
            or `os.environ`).

    Returns `(cc, pythonflags)`:
        cc:
            C or C++ command. On Windows this is of the form
            `{vs.vcvars}&&{vs.cl}`; otherwise it is typically `cc` or `c++`.
        pythonflags:
            The `pythonflags` arg or a new `pipcl.PythonFlags` instance.
    '''
    if not pythonflags:
        pythonflags = PythonFlags()
    cc = None
    if use_env:
        # Honour $CXX/$CC if set, returning an unevaluated reference.
        env_name = 'CXX' if cpp else 'CC'
        if os.environ.get( env_name):
            cc = f'${env_name}'
    if not cc:
        if windows():
            vs = vs or wdev.WindowsVS()
            cc = f'"{vs.vcvars}"&&"{vs.cl}"'
        elif wasm():
            cc = 'em++' if cpp else 'emcc'
        else:
            cc = 'c++' if cpp else 'cc'
            cc = macos_add_cross_flags( cc)
    return cc, pythonflags
def base_linker(vs=None, pythonflags=None, cpp=False, use_env=True):
    '''
    Returns basic linker command.

    Args:
        vs:
            Windows only. A `wdev.WindowsVS` instance or None to use default
            `wdev.WindowsVS` instance.
        pythonflags:
            A `pipcl.PythonFlags` instance or None to use default
            `pipcl.PythonFlags` instance.
        cpp:
            If true we return C++ linker command instead of C. On Windows this
            has no effect - we always return `link.exe`.
        use_env:
            If true we use `os.environ['LD']` if set.

    Returns `(linker, pythonflags)`:
        linker:
            Linker command. On Windows this is of the form
            `{vs.vcvars}&&{vs.link}`; otherwise it is typically `cc` or `c++`.
        pythonflags:
            The `pythonflags` arg or a new `pipcl.PythonFlags` instance.
    '''
    if not pythonflags:
        pythonflags = PythonFlags()
    linker = None
    # Honour $LD if set, returning an unevaluated reference.
    if use_env and os.environ.get( 'LD'):
        linker = '$LD'
    if not linker:
        if windows():
            vs = vs or wdev.WindowsVS()
            linker = f'"{vs.vcvars}"&&"{vs.link}"'
        elif wasm():
            linker = 'em++' if cpp else 'emcc'
        else:
            linker = 'c++' if cpp else 'cc'
            linker = macos_add_cross_flags( linker)
    return linker, pythonflags
def git_info( directory):
    '''
    Returns `(sha, comment, diff, branch)`, all items are str or None if not
    available.

    directory:
        Root of git checkout.
    '''
    sha = comment = diff = branch = None
    # First line of `git show` is `<sha> <comment>`; the rest is the diff.
    e, out = run(
            f'cd {directory} && (PAGER= git show --pretty=oneline|head -n 1 && git diff)',
            capture=1,
            check=0
            )
    if not e:
        sha, remainder = out.split(' ', 1)
        comment, diff = remainder.split('\n', 1)
    e, out = run(
            f'cd {directory} && git rev-parse --abbrev-ref HEAD',
            capture=1,
            check=0
            )
    if not e:
        branch = out.strip()
    log1(f'git_info(): directory={directory!r} returning branch={branch!r} sha={sha!r} comment={comment!r}')
    return sha, comment, diff, branch
def git_items( directory, submodules=False):
    '''
    Returns list of paths for all files known to git within a `directory`.

    Args:
        directory:
            Must be somewhere within a git checkout.
        submodules:
            If true we also include git submodules.

    Returns:
        A list of paths for all files known to git within `directory`. Each
        path is relative to `directory`. `directory` must be somewhere within a
        git checkout.

    We run a `git ls-files` command internally.

    This function can be useful for the `fn_sdist()` callback.
    '''
    command = 'cd ' + directory + ' && git ls-files'
    if submodules:
        command += ' --recurse-submodules'
    log1(f'Running {command=}')
    listing = subprocess.check_output( command, shell=True)
    items = []
    for leaf in listing.decode('utf8').strip().split( '\n'):
        joined = os.path.join(directory, leaf)
        # Sometimes git ls-files seems to list empty/non-existent directories
        # within submodules.
        #
        if not os.path.exists(joined):
            log2(f'Ignoring git ls-files item that does not exist: {joined}')
        elif os.path.isdir(joined):
            log2(f'Ignoring git ls-files item that is actually a directory: {joined}')
        else:
            items.append(leaf)
    return items
def git_get(
        local,
        *,
        remote=None,
        branch=None,
        tag=None,
        text=None,
        depth=1,
        env_extra=None,
        update=True,
        submodules=True,
        ):
    '''
    Creates/updates local checkout <local> of remote repository and returns
    absolute path of <local>.

    If <text> is set but does not start with 'git:', it is assumed to be an up
    to date local checkout, and we return absolute path of <text> without doing
    any git operations.

    Args:
        local:
            Local directory. Created and/or updated using `git clone` and `git
            fetch` etc.
        remote:
            Remote git repostitory, for example
            'https://github.com/ArtifexSoftware/mupdf.git'. Can be overridden
            by <text>.
        branch:
            Branch to use; can be overridden by <text>.
        tag:
            Tag to use; can be overridden by <text>.
        text:
            If None or empty:
                Ignored.
            If starts with 'git:':
                The remaining text should be a command-line
                style string containing some or all of these args:
                    --branch <branch>
                    --tag <tag>
                    <remote>
                These overrides <branch>, <tag> and <remote>.
            Otherwise:
                <text> is assumed to be a local directory, and we simply return
                it as an absolute path without doing any git operations.

            For example these all clone/update/branch master of https://foo.bar/qwerty.git to local
            checkout 'foo-local':

                git_get('foo-local', remote='https://foo.bar/qwerty.git', branch='master')
                git_get('foo-local', text='git:--branch master https://foo.bar/qwerty.git')
                git_get('foo-local', text='git:--branch master', remote='https://foo.bar/qwerty.git')
                git_get('foo-local', text='git:', branch='master', remote='https://foo.bar/qwerty.git')
        depth:
            Depth of local checkout when cloning and fetching, or None.
        env_extra:
            Dict of extra name=value environment variables to use whenever we
            run git.
        update:
            If false we do not update existing repository. Might be useful if
            testing without network access.
        submodules:
            If true, we clone with `--recursive --shallow-submodules` and run
            `git submodule update --init --recursive` before returning.
    '''
    log0(f'{remote=} {local=} {branch=} {tag=} {text=}')
    if text:
        if text.startswith('git:'):
            # Parse command-line-style overrides for remote/branch/tag.
            args = iter(shlex.split(text[len('git:'):]))
            while 1:
                try:
                    arg = next(args)
                except StopIteration:
                    break
                if arg == '--branch':
                    branch = next(args)
                    tag = None
                elif arg == '--tag':
                    tag = next(args)
                    branch = None
                else:
                    remote = arg
            assert remote, f'<remote> unset and no remote specified in {text=}.'
            assert branch or tag, f'<branch> and <tag> unset and no branch/tag specified in {text=}.'
        else:
            log0(f'Using local directory {text!r}.')
            return os.path.abspath(text)
    assert (branch and not tag) or (not branch and tag), f'Must specify exactly one of <branch> and <tag>; {branch=} {tag=}.'
    depth_arg = f' --depth {depth}' if depth else ''
    def do_update():
        # This seems to pull in the entire repository.
        log0(f'do_update(): attempting to update {local=}.')
        # Remove any local changes.
        run(f'cd {local} && git reset --hard', env_extra=env_extra)
        if tag:
            # `-u` avoids `fatal: Refusing to fetch into current branch`.
            # Using '+' and `revs/tags/` prefix seems to avoid errors like:
            #   error: cannot update ref 'refs/heads/v3.16.44':
            #   trying to write non-commit object
            #   06c4ae5fe39a03b37a25a8b95214d9f8f8a867b8 to branch
            #   'refs/heads/v3.16.44'
            #
            run(f'cd {local} && git fetch -fuv{depth_arg} {remote} +refs/tags/{tag}:refs/tags/{tag}', env_extra=env_extra)
            run(f'cd {local} && git checkout {tag}', env_extra=env_extra)
        if branch:
            # `-u` avoids `fatal: Refusing to fetch into current branch`.
            run(f'cd {local} && git fetch -fuv{depth_arg} {remote} {branch}:{branch}', env_extra=env_extra)
            run(f'cd {local} && git checkout {branch}', env_extra=env_extra)
    do_clone = True
    if os.path.isdir(f'{local}/.git'):
        if update:
            # Try to update existing checkout; fall back to a fresh clone on
            # any failure.
            try:
                do_update()
                do_clone = False
            except Exception as e:
                log0(f'Failed to update existing checkout {local}: {e}')
        else:
            do_clone = False
    if do_clone:
        # No existing git checkout, so do a fresh clone.
        #_fs_remove(local)
        log0(f'Cloning to: {local}')
        command = f'git clone --config core.longpaths=true{depth_arg}'
        if submodules:
            command += f' --recursive --shallow-submodules'
        if branch:
            command += f' -b {branch}'
        if tag:
            command += f' -b {tag}'
        command += f' {remote} {local}'
        run(command, env_extra=env_extra)
        do_update()
    if submodules:
        run(f'cd {local} && git submodule update --init --recursive', env_extra=env_extra)
    # Show sha of checkout.
    run( f'cd {local} && git show --pretty=oneline|head -n 1', check=False)
    return os.path.abspath(local)
def run(
command,
*,
capture=False,
check=1,
verbose=1,
env=None,
env_extra=None,
timeout=None,
caller=1,
prefix=None,
encoding=None, # System default.
errors='backslashreplace',
):
'''
Runs a command using `subprocess.run()`.
Args:
command:
A string, the command to run.
Multiple lines in `command` are treated as a single command.
* If a line starts with `#` it is discarded.
* If a line contains ` #`, the trailing text is discarded.
When running the command on Windows, newlines are replaced by
spaces; otherwise each line is terminated by a backslash character.
capture:
If true, we include the command's output in our return value.
check:
If true we raise an exception on error; otherwise we include the
command's returncode in our return value.
verbose:
If true we show the command.
env:
None or dict to use instead of <os.environ>.
env_extra:
None or dict to add to <os.environ> or <env>.
timeout:
If not None, timeout in seconds; passed directly to
subprocess.run(). Note that on MacOS subprocess.run() seems to
leave processes running if timeout expires.
prefix:
String prefix for each line of output.
If true:
* We run command with stdout=subprocess.PIPE and
stderr=subprocess.STDOUT, repetaedly reading the command's output
and writing it to stdout with <prefix>.
* We do not support <timeout>, which must be None.
Returns:
check capture Return
--------------------------
false false returncode
false true (returncode, output)
true false None or raise exception
true true output or raise exception
'''
if env is None:
env = os.environ
if env_extra:
env = env.copy()
if env_extra:
env.update(env_extra)
lines = _command_lines( command)
if verbose:
text = f'Running:'
nl = '\n '
text += f' {nl.join(lines)}'
if env_extra:
text += f'\nwith:\n'
for k in sorted(env_extra.keys()):
text += f' {k}={shlex.quote(env_extra[k])}\n'
log1(text, caller=caller+1)
sep = ' ' if windows() else ' \\\n'
command2 = sep.join( lines)
if prefix:
assert not timeout, f'Timeout not supported with prefix.'
child = subprocess.Popen(
command2,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding=encoding,
errors=errors,
env=env,
)
if capture:
capture_text = ''
decoder = codecs.getincrementaldecoder(child.stdout.encoding)(errors)
line_start = True
while 1:
raw = os.read( child.stdout.fileno(), 10000)
text = decoder.decode(raw, final=not raw)
if capture:
capture_text += text
lines = text.split('\n')
for i, line in enumerate(lines):
if line_start:
sys.stdout.write(prefix)
line_start = False
sys.stdout.write(line)
if i < len(lines) - 1:
sys.stdout.write('\n')
line_start = True
sys.stdout.flush()
if not raw:
break
if not line_start:
sys.stdout.write('\n')
e = child.wait()
if check and e:
raise subprocess.CalledProcessError(e, command2, capture_text if capture else None)
if check:
return capture_text if capture else None
else:
return (e, capture_text) if capture else e
else:
cp = subprocess.run(
command2,
shell=True,
stdout=subprocess.PIPE if capture else None,
stderr=subprocess.STDOUT if capture else None,
check=check,
encoding=encoding,
errors=errors,
env=env,
timeout=timeout,
)
if check:
return cp.stdout if capture else None
else:
return (cp.returncode, cp.stdout) if capture else cp.returncode
def darwin():
return sys.platform.startswith( 'darwin')
def windows():
return platform.system() == 'Windows'
def wasm():
return os.environ.get( 'OS') in ('wasm', 'wasm-mt')
def pyodide():
return os.environ.get( 'PYODIDE') == '1'
def linux():
return platform.system() == 'Linux'
def openbsd():
return platform.system() == 'OpenBSD'
def show_system():
'''
Show useful information about the system plus argv and environ.
Omits os.environ if $PIPCL_SHOW_ENV is '0'.
'''
def log(text):
log0(text, caller=3)
#log(f'{__file__=}')
#log(f'{__name__=}')
log(f'{os.getcwd()=}')
log(f'{platform.machine()=}')
log(f'{platform.platform()=}')
log(f'{platform.python_implementation()=}')
log(f'{platform.python_version()=}')
log(f'{platform.system()=}')
if sys.implementation.name != 'graalpy':
log(f'{platform.uname()=}')
log(f'{sys.executable=}')
log(f'{sys.version=}')
log(f'{sys.version_info=}')
log(f'{list(sys.version_info)=}')
log(f'{sysconfig.get_config_var("Py_GIL_DISABLED")=}')
try:
log(f'{sys._is_gil_enabled()=}')
except AttributeError:
log(f'sys._is_gil_enabled() => AttributeError')
log(f'CPU bits: {cpu_bits()}')
log(f'sys.argv ({len(sys.argv)}):')
for i, arg in enumerate(sys.argv):
log(f' {i}: {arg!r}')
PIPCL_SHOW_ENV = os.environ.get('PIPCL_SHOW_ENV')
if PIPCL_SHOW_ENV == '0':
log(f'[Not showing os.environ because {PIPCL_SHOW_ENV=}.]')
else:
log(f'os.environ ({len(os.environ)}):')
for k in sorted( os.environ.keys()):
v = os.environ[ k]
if 'BEGIN OPENSSH PRIVATE KEY' in v:
# Don't show private keys.
log(f' {k} ****')
else:
log( f' {k}: {v!r}')
| Package |
python | getsentry__sentry | src/sentry/release_health/base.py | {
"start": 3309,
"end": 3427
} | class ____(TypedDict):
sessions_lower_bound: FormattedIsoTime
sessions_upper_bound: FormattedIsoTime
| _TimeBounds |
python | langchain-ai__langchain | libs/langchain/tests/unit_tests/callbacks/test_stdout.py | {
"start": 254,
"end": 1336
} | class ____(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: list[str] = ["foo"]
the_output_keys: list[str] = ["bar"]
@property
def input_keys(self) -> list[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> list[str]:
"""Output key of bar."""
return self.the_output_keys
@override
def _call(
self,
inputs: dict[str, str],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, str]:
return {"bar": "bar"}
def test_stdoutcallback(capsys: pytest.CaptureFixture) -> Any:
"""Test the stdout callback handler."""
chain_test = FakeChain(callbacks=[StdOutCallbackHandler(color="red")])
chain_test.invoke({"foo": "bar"})
# Capture the output
captured = capsys.readouterr()
# Assert the output is as expected
assert captured.out == (
"\n\n\x1b[1m> Entering new FakeChain "
"chain...\x1b[0m\n\n\x1b[1m> Finished chain.\x1b[0m\n"
)
| FakeChain |
python | ray-project__ray | python/ray/data/_internal/execution/operators/zip_operator.py | {
"start": 784,
"end": 12753
} | class ____(InternalQueueOperatorMixin, NAryOperator):
"""An operator that zips its inputs together.
NOTE: the implementation is bulk for now, which materializes all its inputs in
object store, before starting execution. Should re-implement it as a streaming
operator in the future.
"""
def __init__(
self,
data_context: DataContext,
*input_ops: PhysicalOperator,
):
"""Create a ZipOperator.
Args:
input_ops: Operators generating input data for this operator to zip.
"""
assert len(input_ops) >= 2
self._input_buffers: List[collections.deque[RefBundle]] = [
collections.deque() for _ in range(len(input_ops))
]
self._output_buffer: collections.deque[RefBundle] = collections.deque()
self._stats: StatsDict = {}
super().__init__(
data_context,
*input_ops,
)
def num_outputs_total(self) -> Optional[int]:
num_outputs = None
for input_op in self.input_dependencies:
input_num_outputs = input_op.num_outputs_total()
if input_num_outputs is None:
continue
if num_outputs is None:
num_outputs = input_num_outputs
else:
num_outputs = max(num_outputs, input_num_outputs)
return num_outputs
def num_output_rows_total(self) -> Optional[int]:
num_rows = None
for input_op in self.input_dependencies:
input_num_rows = input_op.num_output_rows_total()
if input_num_rows is None:
continue
if num_rows is None:
num_rows = input_num_rows
else:
num_rows = max(num_rows, input_num_rows)
return num_rows
def internal_input_queue_num_blocks(self) -> int:
return sum(
len(bundle.block_refs) for buf in self._input_buffers for bundle in buf
)
def internal_input_queue_num_bytes(self) -> int:
return sum(bundle.size_bytes() for buf in self._input_buffers for bundle in buf)
def internal_output_queue_num_blocks(self) -> int:
return sum(len(bundle.block_refs) for bundle in self._output_buffer)
def internal_output_queue_num_bytes(self) -> int:
return sum(bundle.size_bytes() for bundle in self._output_buffer)
def clear_internal_input_queue(self) -> None:
"""Clear internal input queues."""
for input_buffer in self._input_buffers:
while input_buffer:
bundle = input_buffer.popleft()
self._metrics.on_input_dequeued(bundle)
def clear_internal_output_queue(self) -> None:
"""Clear internal output queue."""
while self._output_buffer:
bundle = self._output_buffer.popleft()
self._metrics.on_output_dequeued(bundle)
def _add_input_inner(self, refs: RefBundle, input_index: int) -> None:
assert not self.completed()
assert 0 <= input_index <= len(self._input_dependencies), input_index
self._input_buffers[input_index].append(refs)
self._metrics.on_input_queued(refs)
def all_inputs_done(self) -> None:
assert len(self._output_buffer) == 0, len(self._output_buffer)
# Start with the first input buffer
while self._input_buffers[0]:
refs = self._input_buffers[0].popleft()
self._output_buffer.append(refs)
self._metrics.on_input_dequeued(refs)
# Process each additional input buffer
for input_buffer in self._input_buffers[1:]:
self._output_buffer, self._stats = self._zip(
self._output_buffer, input_buffer
)
# Clear the input buffer AFTER using it in _zip
while input_buffer:
refs = input_buffer.popleft()
self._metrics.on_input_dequeued(refs)
# Mark outputs as ready
for ref in self._output_buffer:
self._metrics.on_output_queued(ref)
super().all_inputs_done()
def has_next(self) -> bool:
return len(self._output_buffer) > 0
def _get_next_inner(self) -> RefBundle:
refs = self._output_buffer.popleft()
self._metrics.on_output_dequeued(refs)
return refs
def get_stats(self) -> StatsDict:
return self._stats
def implements_accurate_memory_accounting(self):
return True
def _zip(
self,
left_input: collections.deque[RefBundle],
right_input: collections.deque[RefBundle],
) -> Tuple[collections.deque[RefBundle], StatsDict]:
"""Zip the RefBundles from `left_input` and `right_input` together.
Zip is done in 2 steps: aligning blocks, and zipping blocks from
both sides.
Aligning blocks (optional): check the blocks from `left_input` and
`right_input` are aligned or not, i.e. if having different number of blocks, or
having different number of rows in some blocks. If not aligned, repartition the
smaller input with `_split_at_indices` to align with larger input.
Zipping blocks: after blocks from both sides are aligned, zip
blocks from both sides together in parallel.
"""
left_blocks_with_metadata = []
for bundle in left_input:
for block, meta in bundle.blocks:
left_blocks_with_metadata.append((block, meta))
right_blocks_with_metadata = []
for bundle in right_input:
for block, meta in bundle.blocks:
right_blocks_with_metadata.append((block, meta))
left_block_rows, left_block_bytes = self._calculate_blocks_rows_and_bytes(
left_blocks_with_metadata
)
right_block_rows, right_block_bytes = self._calculate_blocks_rows_and_bytes(
right_blocks_with_metadata
)
# Check that both sides have the same number of rows.
# TODO(Clark): Support different number of rows via user-directed
# dropping/padding.
total_left_rows = sum(left_block_rows)
total_right_rows = sum(right_block_rows)
if total_left_rows != total_right_rows:
raise ValueError(
"Cannot zip datasets of different number of rows: "
f"{total_left_rows}, {total_right_rows}"
)
# Whether the left and right input sides are inverted
input_side_inverted = False
if sum(right_block_bytes) > sum(left_block_bytes):
# Make sure that right side is smaller, so we minimize splitting
# work when aligning both sides.
# TODO(Clark): Improve this heuristic for minimizing splitting work,
# e.g. by generating the splitting plans for each route (via
# _generate_per_block_split_indices) and choosing the plan that splits
# the least cumulative bytes.
left_blocks_with_metadata, right_blocks_with_metadata = (
right_blocks_with_metadata,
left_blocks_with_metadata,
)
left_block_rows, right_block_rows = right_block_rows, left_block_rows
input_side_inverted = True
# Get the split indices that will align both sides.
indices = list(itertools.accumulate(left_block_rows))
indices.pop(-1)
# Split other at the alignment indices, such that for every block from
# left side, we have a list of blocks from right side that have the same
# cumulative number of rows as that left block.
# NOTE: _split_at_indices has a no-op fastpath if the blocks are already
# aligned.
aligned_right_blocks_with_metadata = _split_at_indices(
right_blocks_with_metadata,
indices,
block_rows=right_block_rows,
)
del right_blocks_with_metadata
left_blocks = [b for b, _ in left_blocks_with_metadata]
right_blocks_list = aligned_right_blocks_with_metadata[0]
del left_blocks_with_metadata, aligned_right_blocks_with_metadata
zip_one_block = cached_remote_fn(_zip_one_block, num_returns=2)
output_blocks = []
output_metadata_schema = []
for left_block, right_blocks in zip(left_blocks, right_blocks_list):
# For each block from left side, zip it together with 1 or more blocks from
# right side. We're guaranteed to have that left_block has the same number
# of rows as right_blocks has cumulatively.
res, meta_with_schema = zip_one_block.remote(
left_block, *right_blocks, inverted=input_side_inverted
)
output_blocks.append(res)
output_metadata_schema.append(meta_with_schema)
# Early release memory.
del left_blocks, right_blocks_list
# TODO(ekl) it might be nice to have a progress bar here.
output_metadata_schema: List[BlockMetadataWithSchema] = ray.get(
output_metadata_schema
)
output_refs: collections.deque[RefBundle] = collections.deque()
input_owned = all(b.owns_blocks for b in left_input)
for block, meta_with_schema in zip(output_blocks, output_metadata_schema):
output_refs.append(
RefBundle(
[
(
block,
meta_with_schema.metadata,
)
],
owns_blocks=input_owned,
schema=meta_with_schema.schema,
)
)
stats = {self._name: to_stats(output_metadata_schema)}
# Clean up inputs.
for ref in left_input:
ref.destroy_if_owned()
for ref in right_input:
ref.destroy_if_owned()
return output_refs, stats
def _calculate_blocks_rows_and_bytes(
self,
blocks_with_metadata: BlockPartition,
) -> Tuple[List[int], List[int]]:
"""Calculate the number of rows and size in bytes for a list of blocks with
metadata.
"""
get_num_rows_and_bytes = cached_remote_fn(_get_num_rows_and_bytes)
block_rows = []
block_bytes = []
for block, metadata in blocks_with_metadata:
if metadata.num_rows is None or metadata.size_bytes is None:
# Need to fetch number of rows or size in bytes, so just fetch both.
num_rows, size_bytes = ray.get(get_num_rows_and_bytes.remote(block))
# Cache on the block metadata.
metadata.num_rows = num_rows
metadata.size_bytes = size_bytes
block_rows.append(metadata.num_rows)
block_bytes.append(metadata.size_bytes)
return block_rows, block_bytes
def _zip_one_block(
block: Block, *other_blocks: Block, inverted: bool = False
) -> Tuple[Block, "BlockMetadataWithSchema"]:
"""Zip together `block` with `other_blocks`."""
stats = BlockExecStats.builder()
# Concatenate other blocks.
# TODO(Clark): Extend BlockAccessor.zip() to work with N other blocks,
# so we don't need to do this concatenation.
builder = DelegatingBlockBuilder()
for other_block in other_blocks:
builder.add_block(other_block)
other_block = builder.build()
if inverted:
# Swap blocks if ordering was inverted during block alignment splitting.
block, other_block = other_block, block
# Zip block and other blocks.
result = BlockAccessor.for_block(block).zip(other_block)
from ray.data.block import BlockMetadataWithSchema
return result, BlockMetadataWithSchema.from_block(result, stats=stats.build())
def _get_num_rows_and_bytes(block: Block) -> Tuple[int, int]:
block = BlockAccessor.for_block(block)
return block.num_rows(), block.size_bytes()
| ZipOperator |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 102128,
"end": 102357
} | class ____:
xlAbsRowRelColumn = 2 # from enum XlReferenceType
xlAbsolute = 1 # from enum XlReferenceType
xlRelRowAbsColumn = 3 # from enum XlReferenceType
xlRelative = 4 # from enum XlReferenceType
| ReferenceType |
python | huggingface__transformers | src/transformers/models/audioflamingo3/processing_audioflamingo3.py | {
"start": 1643,
"end": 13037
} | class ____(ProcessorMixin):
r"""
Constructs an AudioFlamingo3 processor which wraps an AudioFlamingo3 feature extractor and an AudioFlamingo3
tokenizer into a single processor.
[`AudioFlamingo3Processor`] offers all the functionalities of [`WhisperFeatureExtractor`] and
[`Qwen2TokenizerFast`]. See the [`~AudioFlamingo3Processor.__call__`] for more information.
Args:
feature_extractor ([`WhisperFeatureExtractor`]):
The feature extractor is a required input.
tokenizer ([`Qwen2TokenizerFast`]):
The tokenizer is a required input.
chat_template (`Optional[str]`, *optional*):
The Jinja template to use for formatting the conversation. If not provided, the tokenizer's default chat
template will be used.
audio_token (`Optional[str]`, *optional*, defaults to `"<sound>"`):
Special token used to represent audio inputs in the chat template.
"""
attributes = ["feature_extractor", "tokenizer"]
feature_extractor_class = "WhisperFeatureExtractor"
tokenizer_class = "Qwen2TokenizerFast"
def __init__(
self,
feature_extractor,
tokenizer,
chat_template=None,
audio_token="<sound>",
):
self.audio_token = audio_token
self.audio_token_id = tokenizer.convert_tokens_to_ids(audio_token)
super().__init__(feature_extractor, tokenizer, chat_template=chat_template)
def __call__(
self,
text: Union[TextInput, list[TextInput]],
audio: Optional[AudioInput] = None,
output_labels: Optional[bool] = False,
**kwargs: Unpack[AudioFlamingo3ProcessorKwargs],
) -> BatchFeature:
r"""
Main method to prepare one or several text sequence(s) and audio waveform(s) for the model. This
method expands `<sound>` placeholders in the text based on the post-pool frame counts of the
audio windows, then tokenizes the provided strings as-is, and extracts log-mel features
with [`WhisperFeatureExtractor`]. If `audio` is `None`, no audio processing is performed and
the text is tokenized as-is (LM-only behavior).
Args:
text (`str` or `list[str]`):
Input sequence or batch of sequences.
audio (`np.ndarray` or `list[np.ndarray]`):
Input audio or batch of audios as NumPy arrays. If provided, there must be as many `text` inputs as
`audio` inputs.
output_labels (bool, *optional*, default=False):
Whether to return labels for training.
Returns:
[`BatchFeature`]: A dictionary with tokenized text (`input_ids`, `attention_mask`) and
audio features (`input_features`, `input_features_mask`).
"""
# Merge defaults with user kwargs
call_kwargs = self._merge_kwargs(
AudioFlamingo3ProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
text_kwargs = call_kwargs["text_kwargs"]
audio_kwargs = call_kwargs["audio_kwargs"]
return_tensors = text_kwargs.get("return_tensors")
if return_tensors != "pt":
raise ValueError(f"{self.__class__.__name__} only supports `return_tensors='pt'`.")
if isinstance(text, str):
text = [text]
elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
audio_inputs = {}
if audio is not None:
audio = make_list_of_audio(audio)
if len(text) != len(audio):
raise ValueError(f"Got {len(text)} text but {len(audio)} audios; they must match 1:1.")
# Determine number of chunks per sample, and flatten
window_size = int(audio_kwargs["sampling_rate"] * audio_kwargs["chunk_length"])
max_windows = int(MAX_AUDIO_LEN // audio_kwargs["chunk_length"])
per_sample_windows: list[int] = []
flat_chunks: list[np.ndarray] = []
for audio_el in audio:
n_samples = int(audio_el.shape[0])
n_win = max(1, (n_samples + window_size - 1) // window_size)
if n_win > max_windows:
logger.warning(
f"Audio duration ({n_samples / audio_kwargs['sampling_rate']:.1f}s) exceeds {MAX_AUDIO_LEN}s; truncating to first {MAX_AUDIO_LEN}s."
)
n_win = max_windows
per_sample_windows.append(n_win)
time_cap = min(n_samples, n_win * window_size)
for i in range(n_win):
start = i * window_size
end = min((i + 1) * window_size, time_cap)
flat_chunks.append(audio_el[start:end])
# Feature extraction
audio_inputs = self.feature_extractor(flat_chunks, **audio_kwargs)
padding_mask = audio_inputs.pop("attention_mask")
audio_inputs["input_features_mask"] = padding_mask
# Compute sequence lengths token counting
audio_lengths = torch.stack([s.sum() for s in torch.split(padding_mask.sum(-1), per_sample_windows)])
conv_output_lengths = (audio_lengths - 1) // 2 + 1 # After conv2 downsampling
audio_tokens_lengths = (conv_output_lengths - 2) // 2 + 1 # After avg pooling
# expand audio tokens in text
for i, audio_length in enumerate(audio_tokens_lengths):
expanded = re.sub(re.escape(self.audio_token), self.audio_token * audio_length, text[i])
text[i] = expanded
# Tokenize
text_inputs = self.tokenizer(text, **text_kwargs)
data = {**text_inputs, **audio_inputs}
if output_labels:
labels = data["input_ids"].clone()
labels[labels == self.audio_token_id] = -100
labels[labels == self.tokenizer.pad_token_id] = -100
data["labels"] = labels
return BatchFeature(data=data, tensor_type=return_tensors)
@property
def model_input_names(self) -> list[str]:
tok_names = self.tokenizer.model_input_names
fea_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(tok_names + fea_names + ["input_features_mask"]))
def apply_transcription_request(
self,
audio: Union[str, list[str], AudioInput],
prompt: Optional[Union[str, list[str]]] = None,
**kwargs: Unpack[AudioFlamingo3ProcessorKwargs],
) -> BatchFeature:
"""
Prepare inputs for automatic speech recognition without manually writing the default transcription prompt.
Args:
audio (`str`, `list[str]`, `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
Audio to transcribe. Strings are interpreted as local paths or URLs and will be loaded automatically by
the chat template loader; NumPy arrays and PyTorch tensors are forwarded directly.
prompt (`str` or `list[str]`, *optional*):
Custom prompt(s) to include in the user turn. A list must be the same length as the batch. When `None`,
each sample uses `"Transcribe the input speech."`.
**kwargs:
Additional keyword arguments forwarded to [`~AudioFlamingo3Processor.apply_chat_template`] (for example
`text_kwargs`, `audio_kwargs`, ...).
Returns:
[`BatchFeature`]: Processor outputs ready to be passed to [`AudioFlamingo3ForConditionalGeneration.generate`].
"""
if isinstance(audio, str):
audio_items: list[Union[str, np.ndarray]] = [audio]
elif isinstance(audio, (list, tuple)) and audio and all(isinstance(el, str) for el in audio):
audio_items = list(audio)
else:
audio_items = list(make_list_of_audio(audio))
if is_torch_available():
audio_items = [el.detach().cpu().numpy() if isinstance(el, torch.Tensor) else el for el in audio_items]
batch_size = len(audio_items)
if batch_size == 0:
raise ValueError("`audio` must contain at least one sample.")
if prompt is None:
prompts = [DEFAULT_TRANSCRIPTION_PROMPT] * batch_size
elif isinstance(prompt, str):
prompts = [prompt] * batch_size
elif isinstance(prompt, (list, tuple)):
if len(prompt) != batch_size:
raise ValueError(
f"Received {len(prompt)} prompt(s) for {batch_size} audio sample(s); counts must match."
)
prompts = []
for item in prompt:
if item is None:
prompts.append(DEFAULT_TRANSCRIPTION_PROMPT)
elif isinstance(item, str):
prompts.append(item)
else:
raise TypeError("Each prompt must be a string or `None`.")
else:
raise TypeError("`prompt` must be a string, a sequence of strings, or `None`.")
conversations = [
[
{
"role": "user",
"content": [
{"type": "text", "text": prompt_text},
{"type": "audio", "path": audio_item}
if isinstance(audio_item, str)
else {"type": "audio", "audio": audio_item},
],
}
]
for prompt_text, audio_item in zip(prompts, audio_items)
]
return self.apply_chat_template(
conversations,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
**kwargs,
)
def batch_decode(self, *args, strip_prefix=False, **kwargs):
"""
Forward arguments to [`~PreTrainedTokenizer.batch_decode`] and optionally remove the assistant framing the model
was trained to produce.
AF3 transcription requests respond with sentences such as `"The spoken content of the audio is \"...\"."`.
Setting `strip_prefix=True` trims the fixed prefix for just the transcription text.
"""
decoded = self.tokenizer.batch_decode(*args, **kwargs)
if strip_prefix:
decoded = [self._strip_assistant_prefix_and_quotes(text) for text in decoded]
return decoded
def _strip_assistant_prefix_and_quotes(self, text: str) -> str:
"""
Remove the assistant prefix and surrounding quotes from a decoded transcription string.
"""
stripped = text.strip()
for prefix in (
"The spoken content of the audio is",
"The transcription of the audio is",
):
if stripped.startswith(prefix):
stripped = stripped[len(prefix) :].strip()
break
if stripped.endswith("."):
stripped = stripped[:-1].strip()
if len(stripped) >= 2 and stripped[0] == stripped[-1] and stripped[0] in {"'", '"'}:
stripped = stripped[1:-1].strip()
return stripped
__all__ = ["AudioFlamingo3Processor"]
| AudioFlamingo3Processor |
python | pypa__warehouse | tests/common/db/organizations.py | {
"start": 6231,
"end": 6755
} | class ____(WarehouseFactory):
class Meta:
model = OrganizationOIDCIssuer
organization_id = factory.SelfAttribute("organization.id")
organization = factory.SubFactory(OrganizationFactory)
issuer_type = factory.LazyFunction(
lambda: fake.random_element(elements=[e.value for e in OIDCIssuerType])
)
issuer_url = factory.Faker("url", schemes=["https"])
created_by_id = factory.SelfAttribute("created_by.id")
created_by = factory.SubFactory(UserFactory)
| OrganizationOIDCIssuerFactory |
python | rushter__MLAlgorithms | mla/neuralnet/optimizers.py | {
"start": 1967,
"end": 3163
} | class ____(Optimizer):
def __init__(self, learning_rate=0.01, momentum=0.9, decay=0.0, nesterov=False):
self.nesterov = nesterov
self.decay = decay
self.momentum = momentum
self.lr = learning_rate
self.iteration = 0
self.velocity = None
def update(self, network):
lr = self.lr * (1.0 / (1.0 + self.decay * self.iteration))
for i, layer in enumerate(network.parametric_layers):
for n in layer.parameters.keys():
# Get gradient values
grad = layer.parameters.grad[n]
update = self.momentum * self.velocity[i][n] - lr * grad
self.velocity[i][n] = update
if self.nesterov:
# Adjust using updated velocity
update = self.momentum * self.velocity[i][n] - lr * grad
layer.parameters.step(n, update)
self.iteration += 1
def setup(self, network):
self.velocity = defaultdict(dict)
for i, layer in enumerate(network.parametric_layers):
for n in layer.parameters.keys():
self.velocity[i][n] = np.zeros_like(layer.parameters[n])
| SGD |
python | coleifer__peewee | playhouse/psycopg3_ext.py | {
"start": 1186,
"end": 1623
} | class ____(_Psycopg3JsonLookupBase):
def __getitem__(self, value):
return JsonLookup(self.node, self.parts + [value], self._as_json)
def __sql__(self, ctx):
ctx.sql(self.node)
for part in self.parts[:-1]:
ctx.literal('->').sql(part)
if self.parts:
(ctx
.literal('->' if self._as_json else '->>')
.sql(self.parts[-1]))
return ctx
| JsonLookup |
python | kamyu104__LeetCode-Solutions | Python/plus-one-linked-list.py | {
"start": 832,
"end": 1524
} | class ____(object):
def plusOne(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
def reverseList(head):
dummy = ListNode(0)
curr = head
while curr:
dummy.next, curr.next, curr = curr, dummy.next, curr.next
return dummy.next
rev_head = reverseList(head)
curr, carry = rev_head, 1
while curr and carry:
curr.val += carry
carry = curr.val / 10
curr.val %= 10
if carry and curr.next is None:
curr.next = ListNode(0)
curr = curr.next
return reverseList(rev_head)
| Solution2 |
python | hynek__structlog | src/structlog/testing.py | {
"start": 854,
"end": 2973
} | class ____:
"""
Class for capturing log messages in its entries list.
Generally you should use `structlog.testing.capture_logs`,
but you can use this class if you want to capture logs with other patterns.
:ivar List[structlog.typing.EventDict] entries: The captured log entries.
.. versionadded:: 20.1.0
.. versionchanged:: 24.3.0
Added mapping from "exception" to "error"
Added mapping from "warn" to "warning"
"""
entries: list[EventDict]
def __init__(self) -> None:
self.entries = []
def __call__(
self, _: WrappedLogger, method_name: str, event_dict: EventDict
) -> NoReturn:
event_dict["log_level"] = map_method_name(method_name)
self.entries.append(event_dict)
raise DropEvent
@contextmanager
def capture_logs(
processors: Iterable[Processor] = (),
) -> Generator[list[EventDict], None, None]:
"""
Context manager that appends all logging statements to its yielded list
while it is active. Disables all configured processors for the duration
of the context manager.
Attention: this is **not** thread-safe!
Args:
processors: Processors to apply before the logs are captured.
.. versionadded:: 20.1.0
.. versionadded:: 25.5.0 *processors* parameter
"""
cap = LogCapture()
# Modify `_Configuration.default_processors` set via `configure` but always
# keep the list instance intact to not break references held by bound
# loggers.
configured_processors = get_config()["processors"]
old_processors = configured_processors.copy()
try:
# clear processors list and use LogCapture for testing
configured_processors.clear()
configured_processors.extend(processors)
configured_processors.append(cap)
configure(processors=configured_processors)
yield cap.entries
finally:
# remove LogCapture and restore original processors
configured_processors.clear()
configured_processors.extend(old_processors)
configure(processors=configured_processors)
| LogCapture |
python | huggingface__transformers | src/transformers/models/clipseg/modeling_clipseg.py | {
"start": 49880,
"end": 57913
} | class ____(CLIPSegPreTrainedModel):
config: CLIPSegConfig
def __init__(self, config: CLIPSegConfig):
super().__init__(config)
self.config = config
self.clip = CLIPSegModel(config)
self.extract_layers = config.extract_layers
self.decoder = CLIPSegDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_conditional_embeddings(
self,
batch_size: Optional[int] = None,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
conditional_pixel_values: Optional[torch.Tensor] = None,
):
if input_ids is not None:
# compute conditional embeddings from texts
if len(input_ids) != batch_size:
raise ValueError("Make sure to pass as many prompt texts as there are query images")
with torch.no_grad():
conditional_embeddings = self.clip.get_text_features(
input_ids, attention_mask=attention_mask, position_ids=position_ids
)
elif conditional_pixel_values is not None:
# compute conditional embeddings from images
if len(conditional_pixel_values) != batch_size:
raise ValueError("Make sure to pass as many prompt images as there are query images")
with torch.no_grad():
conditional_embeddings = self.clip.get_image_features(conditional_pixel_values)
else:
raise ValueError(
"Invalid conditional, should be either provided as `input_ids` or `conditional_pixel_values`"
)
return conditional_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.FloatTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
conditional_pixel_values: Optional[torch.FloatTensor] = None,
conditional_embeddings: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = True,
return_dict: Optional[bool] = None,
) -> Union[tuple, CLIPSegOutput]:
r"""
conditional_pixel_values (`torch.FloatTensor`, *optional*):
The pixel values of the conditional images.
conditional_embeddings (`torch.FloatTensor` of shape `(batch_size, config.projection_dim)`, *optional*):
The conditional embeddings for the query images. If provided, the model will use this instead of computing
the embeddings from the conditional_pixel_values.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, CLIPSegForImageSegmentation
>>> from transformers.image_utils import load_image
>>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> texts = ["a cat", "a remote", "a blanket"]
>>> inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt")
>>> with torch.inference_mode():
... outputs = model(**inputs)
>>> logits = outputs.logits
>>> print(logits.shape)
torch.Size([3, 352, 352])
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# step 1: forward the query images through the frozen CLIP vision encoder
with torch.no_grad():
vision_outputs = self.clip.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=True, # we need the intermediate hidden states
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
pooled_output = self.clip.visual_projection(vision_outputs[1])
hidden_states = vision_outputs.hidden_states if return_dict else vision_outputs[2]
# we add +1 here as the hidden states also include the initial embeddings
activations = [hidden_states[i + 1] for i in self.extract_layers]
# update vision_outputs
if return_dict:
vision_outputs = BaseModelOutputWithPooling(
last_hidden_state=vision_outputs.last_hidden_state,
pooler_output=vision_outputs.pooler_output,
hidden_states=vision_outputs.hidden_states if output_hidden_states else None,
attentions=vision_outputs.attentions,
)
else:
vision_outputs = (
vision_outputs[:2] + vision_outputs[3:] if not output_hidden_states else vision_outputs
)
# step 2: compute conditional embeddings, either from text, images or an own provided embedding
if conditional_embeddings is None:
conditional_embeddings = self.get_conditional_embeddings(
batch_size=pixel_values.shape[0],
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
conditional_pixel_values=conditional_pixel_values,
)
else:
if conditional_embeddings.shape[0] != pixel_values.shape[0]:
raise ValueError(
"Make sure to pass as many conditional embeddings as there are query images in the batch"
)
if conditional_embeddings.shape[1] != self.config.projection_dim:
raise ValueError(
"Make sure that the feature dimension of the conditional embeddings matches"
" `config.projection_dim`."
)
# step 3: forward both the pooled output and the activations through the lightweight decoder to predict masks
decoder_outputs = self.decoder(
activations,
conditional_embeddings,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
loss = None
if labels is not None:
# move labels to the correct device to enable PP
labels = labels.to(logits.device)
loss_fn = nn.BCEWithLogitsLoss()
loss = loss_fn(logits, labels)
if not return_dict:
output = (logits, conditional_embeddings, pooled_output, vision_outputs, decoder_outputs)
return ((loss,) + output) if loss is not None else output
return CLIPSegImageSegmentationOutput(
loss=loss,
logits=logits,
conditional_embeddings=conditional_embeddings,
pooled_output=pooled_output,
vision_model_output=vision_outputs,
decoder_output=decoder_outputs,
)
__all__ = [
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
| CLIPSegForImageSegmentation |
python | walkccc__LeetCode | solutions/3439. Reschedule Meetings for Maximum Free Time I/3439.py | {
"start": 0,
"end": 482
} | class ____:
def maxFreeTime(
self,
eventTime: int,
k: int,
startTime: list[int],
endTime: list[int]
) -> int:
gaps = ([startTime[0]] +
[startTime[i] - endTime[i - 1] for i in range(1, len(startTime))] +
[eventTime - endTime[-1]])
windowSum = sum(gaps[:k + 1])
ans = windowSum
for i in range(k + 1, len(gaps)):
windowSum += gaps[i] - gaps[i - k - 1]
ans = max(ans, windowSum)
return ans
| Solution |
python | Textualize__textual | docs/examples/app/event01.py | {
"start": 57,
"end": 574
} | class ____(App):
COLORS = [
"white",
"maroon",
"red",
"purple",
"fuchsia",
"olive",
"yellow",
"navy",
"teal",
"aqua",
]
def on_mount(self) -> None:
self.screen.styles.background = "darkblue"
def on_key(self, event: events.Key) -> None:
if event.key.isdecimal():
self.screen.styles.background = self.COLORS[int(event.key)]
if __name__ == "__main__":
app = EventApp()
app.run()
| EventApp |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 18233,
"end": 18372
} | class ____(IRule):
def eval(self) -> Expr:
a, b, x = self.a, self.b, self.variable
return li(a*x + b)/a
@dataclass
| LiRule |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 590670,
"end": 591125
} | class ____(sgqlc.types.Type):
"""A User who is a member of an enterprise through one or more
organizations.
"""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("EnterpriseMember", graphql_name="node")
"""The item at the end of the edge."""
| EnterpriseMemberEdge |
python | run-llama__llama_index | llama-index-integrations/evaluation/llama-index-evaluation-tonic-validate/llama_index/evaluation/tonic_validate/answer_consistency.py | {
"start": 358,
"end": 1985
} | class ____(BaseEvaluator):
"""
Tonic Validate's answer consistency metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerConsistencyMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
| AnswerConsistencyEvaluator |
python | langchain-ai__langchain | libs/core/langchain_core/output_parsers/base.py | {
"start": 732,
"end": 2113
} | class ____(ABC, Generic[T]):
"""Abstract base class for parsing the outputs of a model."""
@abstractmethod
def parse_result(self, result: list[Generation], *, partial: bool = False) -> T:
"""Parse a list of candidate model `Generation` objects into a specific format.
Args:
result: A list of `Generation` to be parsed. The `Generation` objects are
assumed to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results.
Returns:
Structured output.
"""
async def aparse_result(
self, result: list[Generation], *, partial: bool = False
) -> T:
"""Async parse a list of candidate model `Generation` objects into a specific format.
Args:
result: A list of `Generation` to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
partial: Whether to parse the output as a partial result. This is useful
for parsers that can parse partial results.
Returns:
Structured output.
""" # noqa: E501
return await run_in_executor(None, self.parse_result, result, partial=partial)
| BaseLLMOutputParser |
python | scikit-learn__scikit-learn | sklearn/utils/_metadata_requests.py | {
"start": 11617,
"end": 19381
} | class ____:
"""Container for metadata requests associated with a single method.
Instances of this class get used within a :class:`MetadataRequest` - one per each
public method (`fit`, `transform`, ...) that its owning consumer has.
.. versionadded:: 1.3
Parameters
----------
owner : object
The object owning these requests.
method : str
The name of the method to which these requests belong.
requests : dict of {str: bool, None or str}, default=None
The initial requests for this method.
"""
def __init__(self, owner, method, requests=None):
self._requests = requests or dict()
self.owner = owner
self.method = method
@property
def requests(self):
"""Dictionary of the form: ``{key: alias}``."""
return self._requests
def add_request(
self,
*,
param,
alias,
):
"""Add request info for a metadata.
Parameters
----------
param : str
The metadata for which a request is set.
alias : str, or {True, False, None}
Specifies which metadata should be routed to the method that owns this
`MethodMetadataRequest`.
- str: the name (or alias) of metadata given to a meta-estimator that
should be routed to the method that owns this `MethodMetadataRequest`.
- True: requested
- False: not requested
- None: error if passed
"""
if not request_is_alias(alias) and not request_is_valid(alias):
raise ValueError(
f"The alias you're setting for `{param}` should be either a "
"valid identifier or one of {None, True, False}, but given "
f"value is: `{alias}`"
)
if alias == param:
alias = True
if alias == UNUSED:
if param in self._requests:
del self._requests[param]
else:
raise ValueError(
f"Trying to remove parameter {param} with UNUSED which doesn't"
" exist."
)
else:
self._requests[param] = alias
return self
def _get_param_names(self, return_alias):
"""Get names of all metadata that can be consumed or routed by this method.
This method returns the names of all metadata, even the ``False``
ones.
Parameters
----------
return_alias : bool
Controls whether original or aliased names should be returned. If
``False``, aliases are ignored and original names are returned.
Returns
-------
names : set of str
A set of strings with the names of all metadata.
"""
return set(
alias if return_alias and not request_is_valid(alias) else prop
for prop, alias in self._requests.items()
if not request_is_valid(alias) or alias is not False
)
def _check_warnings(self, *, params):
"""Check whether metadata is passed which is marked as WARN.
If any metadata is passed which is marked as WARN, a warning is raised.
Parameters
----------
params : dict
The metadata passed to a method.
"""
params = {} if params is None else params
warn_params = {
prop
for prop, alias in self._requests.items()
if alias == WARN and prop in params
}
for param in warn_params:
warn(
f"Support for {param} has recently been added to {self.owner} class. "
"To maintain backward compatibility, it is ignored now. "
f"Using `set_{self.method}_request({param}={{True, False}})` "
"on this method of the class, you can set the request value "
"to False to silence this warning, or to True to consume and "
"use the metadata."
)
def _route_params(self, params, parent, caller):
"""Prepare the given metadata to be passed to the method.
The output of this method can be used directly as the input to the
corresponding method as **kwargs.
Parameters
----------
params : dict
A dictionary of provided metadata.
parent : object
Parent class object, that routes the metadata.
caller : str
Method from the parent class object, where the metadata is routed from.
Returns
-------
params : Bunch
A :class:`~sklearn.utils.Bunch` of {metadata: value} which can be
passed to the corresponding method.
"""
self._check_warnings(params=params)
unrequested = dict()
args = {arg: value for arg, value in params.items() if value is not None}
res = Bunch()
for prop, alias in self._requests.items():
if alias is False or alias == WARN:
continue
elif alias is True and prop in args:
res[prop] = args[prop]
elif alias is None and prop in args:
unrequested[prop] = args[prop]
elif alias in args:
res[prop] = args[alias]
if unrequested:
if self.method in COMPOSITE_METHODS:
callee_methods = COMPOSITE_METHODS[self.method]
else:
callee_methods = [self.method]
set_requests_on = "".join(
[
f".set_{method}_request({{metadata}}=True/False)"
for method in callee_methods
]
)
message = (
f"[{', '.join([key for key in unrequested])}] are passed but are not"
" explicitly set as requested or not requested for"
f" {_routing_repr(self.owner)}.{self.method}, which is used within"
f" {_routing_repr(parent)}.{caller}. Call `{_routing_repr(self.owner)}"
+ set_requests_on
+ "` for each metadata you want to request/ignore. See the"
" Metadata Routing User guide"
" <https://scikit-learn.org/stable/metadata_routing.html> for more"
" information."
)
raise UnsetMetadataPassedError(
message=message,
unrequested_params=unrequested,
routed_params=res,
)
return res
def _consumes(self, params):
"""Return subset of `params` consumed by the method that owns this instance.
Parameters
----------
params : iterable of str
An iterable of parameter names to test for consumption.
Returns
-------
consumed_params : set of str
A subset of parameters from `params` which are consumed by this method.
"""
params = set(params)
consumed_params = set()
for metadata_name, alias in self._requests.items():
if alias is True and metadata_name in params:
consumed_params.add(metadata_name)
elif isinstance(alias, str) and alias in params:
consumed_params.add(alias)
return consumed_params
def _serialize(self):
"""Serialize the object.
Returns
-------
obj : dict
A serialized version of the instance in the form of a dictionary.
"""
return self._requests
def __repr__(self):
return str(self._serialize())
def __str__(self):
return str(repr(self))
| MethodMetadataRequest |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 9895,
"end": 10081
} | class ____(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ("body",)
body: list[Node]
| Template |
python | PyCQA__pylint | tests/functional/r/regression_02/regression_dynamic_getitiem.py | {
"start": 60,
"end": 296
} | class ____:
def __getitem__(self, key):
if key == 'attributes':
return []
return {'world': 123}
ex = DynamicGetitem()
a = ex['hello']['world'] # [invalid-sequence-index] known false-positive
| DynamicGetitem |
python | pypa__twine | twine/auth.py | {
"start": 2688,
"end": 10847
} | class ____:
_tp_token: t.Optional[TrustedPublishingToken] = None
_expires: t.Optional[int] = None
def __init__(
self,
config: utils.RepositoryConfig,
input: CredentialInput,
) -> None:
self.config = config
self.input = input
@property
@functools.lru_cache()
def authenticator(self) -> "requests.auth.AuthBase":
username = self.username
password = self.password
if self._tp_token:
# If `self.password` ended up getting a Trusted Publishing token,
# we've cached it here so we should use that as the authenticator.
# We have a custom authenticator so we can repeatedly invoke
# `make_trusted_publishing_token` which if the token is 10 minutes
# old or more, we should get a new one automatically.
return TrustedPublishingAuthenticator(resolver=self)
if username and password:
return requests.auth.HTTPBasicAuth(
username=username,
password=password,
)
raise exceptions.InvalidConfiguration(
"could not determine credentials for configured repository"
)
@classmethod
def choose(cls, interactive: bool) -> t.Type["Resolver"]:
return cls if interactive else Private
@property
@functools.lru_cache()
def username(self) -> t.Optional[str]:
if self.is_pypi() and not self.input.username:
# Default username.
self.input.username = TOKEN_USERNAME
return utils.get_userpass_value(
self.input.username,
self.config,
key="username",
prompt_strategy=self.username_from_keyring_or_prompt,
)
@property
@functools.lru_cache()
def password(self) -> t.Optional[str]:
return utils.get_userpass_value(
self.input.password,
self.config,
key="password",
prompt_strategy=self.password_from_keyring_or_trusted_publishing_or_prompt,
)
def _has_valid_cached_tp_token(self) -> bool:
return self._tp_token is not None and (
int(time.time()) + TOKEN_RENEWAL_THRESHOLD.seconds
< cast(int, self._tp_token.get("expires", self._expires))
)
def _make_trusted_publishing_token(self) -> t.Optional[TrustedPublishingToken]:
if self._has_valid_cached_tp_token():
return self._tp_token
# Trusted publishing (OpenID Connect): get one token from the CI
# system, and exchange that for a PyPI token.
repository_domain = cast(str, urlparse(self.system).netloc)
session = utils.make_requests_session()
# Indices are expected to support `https://{domain}/_/oidc/audience`,
# which tells OIDC exchange clients which audience to use.
audience_url = f"https://{repository_domain}/_/oidc/audience"
resp = session.get(audience_url, timeout=5)
resp.raise_for_status()
audience = cast(str, resp.json()["audience"])
try:
oidc_token = detect_credential(audience)
except AmbientCredentialError as e:
# If we get here, we're on a supported CI platform for trusted
# publishing, and we have not been given any token, so we can error.
raise exceptions.TrustedPublishingFailure(
"Unable to retrieve an OIDC token from the CI platform for "
f"trusted publishing {e}"
)
if oidc_token is None:
logger.warning("This environment is not supported for trusted publishing")
if self._tp_token and int(time.time()) > cast(
int, self._tp_token.get("expires", self._expires)
):
return None # Fall back to prompting for a token (if possible)
# The cached trusted publishing token may still be valid for a
# while longer, let's continue using it instead of prompting
return self._tp_token
logger.warning("Got OIDC token for audience %s", audience)
token_exchange_url = f"https://{repository_domain}/_/oidc/mint-token"
mint_token_resp = session.post(
token_exchange_url,
json={"token": oidc_token},
timeout=5, # S113 wants a timeout
)
try:
mint_token_payload = mint_token_resp.json()
except json.JSONDecodeError:
raise exceptions.TrustedPublishingFailure(
"The token-minting request returned invalid JSON"
)
if not mint_token_resp.ok:
reasons = "\n".join(
f'* `{error["code"]}`: {error["description"]}'
for error in mint_token_payload["errors"]
)
raise exceptions.TrustedPublishingFailure(
"The token request failed; the index server gave the following"
f" reasons:\n\n{reasons}"
)
logger.warning("Minted upload token for trusted publishing")
self._tp_token = cast(TrustedPublishingToken, mint_token_payload)
self._expires = int(time.time()) + 900
return self._tp_token
def make_trusted_publishing_token(self) -> t.Optional[str]:
mint_token_payload = self._make_trusted_publishing_token()
if not mint_token_payload:
return None
return cast(str, mint_token_payload["token"])
@property
def system(self) -> t.Optional[str]:
return self.config["repository"]
def get_username_from_keyring(self) -> t.Optional[str]:
if keyring is None:
logger.info("keyring module is not available")
return None
try:
system = cast(str, self.system)
logger.info("Querying keyring for username")
creds = keyring.get_credential(system, None)
if creds:
return creds.username
except AttributeError:
# To support keyring prior to 15.2
pass
except Exception as exc:
logger.warning("Error getting username from keyring", exc_info=exc)
return None
def get_password_from_keyring(self) -> t.Optional[str]:
if keyring is None:
logger.info("keyring module is not available")
return None
try:
system = cast(str, self.system)
username = cast(str, self.username)
logger.info("Querying keyring for password")
return cast(str, keyring.get_password(system, username))
except NoKeyringError:
logger.info("No keyring backend found")
except Exception as exc:
logger.warning("Error getting password from keyring", exc_info=exc)
return None
def username_from_keyring_or_prompt(self) -> str:
username = self.get_username_from_keyring()
if username:
logger.info("username set from keyring")
return username
return self.prompt("username", input)
def password_from_keyring_or_trusted_publishing_or_prompt(self) -> str:
password = self.get_password_from_keyring()
if password:
logger.info("password set from keyring")
return password
if self.is_pypi() and self.username == TOKEN_USERNAME:
logger.info(
"Trying to use trusted publishing (no token was explicitly provided)"
)
if (token := self.make_trusted_publishing_token()) is not None:
return token
# Prompt for API token when required.
what = "API token" if self.is_pypi() else "password"
return self.prompt(what, getpass.getpass)
def prompt(self, what: str, how: t.Callable[..., str]) -> str:
return how(f"Enter your {what}: ")
def is_pypi(self) -> bool:
"""As of 2024-01-01, PyPI requires API tokens for uploads."""
return cast(str, self.config["repository"]).startswith(
(
utils.DEFAULT_REPOSITORY,
utils.TEST_REPOSITORY,
)
)
| Resolver |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/prompt.py | {
"start": 10414,
"end": 12462
} | class ____(PromptBase[bool]):
"""A yes / no confirmation prompt.
Example:
>>> if Confirm.ask("Continue"):
run_job()
"""
response_type = bool
validate_error_message = "[prompt.invalid]Please enter Y or N"
choices: List[str] = ["y", "n"]
def render_default(self, default: DefaultType) -> Text:
"""Render the default as (y) or (n) rather than True/False."""
yes, no = self.choices
return Text(f"({yes})" if default else f"({no})", style="prompt.default")
def process_response(self, value: str) -> bool:
"""Convert choices to a bool."""
value = value.strip().lower()
if value not in self.choices:
raise InvalidResponse(self.validate_error_message)
return value == self.choices[0]
if __name__ == "__main__": # pragma: no cover
from pipenv.patched.pip._vendor.rich import print
if Confirm.ask("Run [i]prompt[/i] tests?", default=True):
while True:
result = IntPrompt.ask(
":rocket: Enter a number between [b]1[/b] and [b]10[/b]", default=5
)
if result >= 1 and result <= 10:
break
print(":pile_of_poo: [prompt.invalid]Number must be between 1 and 10")
print(f"number={result}")
while True:
password = Prompt.ask(
"Please enter a password [cyan](must be at least 5 characters)",
password=True,
)
if len(password) >= 5:
break
print("[prompt.invalid]password too short")
print(f"password={password!r}")
fruit = Prompt.ask("Enter a fruit", choices=["apple", "orange", "pear"])
print(f"fruit={fruit!r}")
doggie = Prompt.ask(
"What's the best Dog? (Case INSENSITIVE)",
choices=["Border Terrier", "Collie", "Labradoodle"],
case_sensitive=False,
)
print(f"doggie={doggie!r}")
else:
print("[b]OK :loudly_crying_face:")
| Confirm |
python | pytorch__pytorch | test/test_fx.py | {
"start": 151116,
"end": 152810
} | class ____(JitTestCase):
def setUp(self):
# Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = (
torch.fx.proxy.TracerBase.check_mutable_operations
)
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = (
self.orig_tracer_mutable_flag
)
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError("No Schemas Returned")
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(
sample_input.input, *sample_input.args, **sample_input.kwargs
)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f"Did not match any schemas for op {op.name}!")
| TestOperatorSignatures |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/models/ci_requirements.py | {
"start": 148,
"end": 700
} | class ____:
"""
A dataclass to store the CI requirements.
It used to make airbyte-ci client define the CI runners it will run on.
"""
dagger_version = metadata.version("dagger-io")
@property
def dagger_engine_image(self) -> str:
return f"registry.dagger.io/engine:v{self.dagger_version}"
def to_json(self) -> str:
return json.dumps(
{
"dagger_version": self.dagger_version,
"dagger_engine_image": self.dagger_engine_image,
}
)
| CIRequirements |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/mapped_covariant.py | {
"start": 1294,
"end": 1467
} | class ____(Base):
__tablename__ = "parent"
name: Mapped[str] = mapped_column(primary_key=True)
children: Mapped[Sequence["Child"]] = relationship("Child")
| Parent |
python | doocs__leetcode | solution/1800-1899/1883.Minimum Skips to Arrive at Meeting On Time/Solution.py | {
"start": 0,
"end": 601
} | class ____:
def minSkips(self, dist: List[int], speed: int, hoursBefore: int) -> int:
n = len(dist)
f = [[inf] * (n + 1) for _ in range(n + 1)]
f[0][0] = 0
eps = 1e-8
for i, x in enumerate(dist, 1):
for j in range(i + 1):
if j < i:
f[i][j] = min(f[i][j], ceil(f[i - 1][j] + x / speed - eps))
if j:
f[i][j] = min(f[i][j], f[i - 1][j - 1] + x / speed)
for j in range(n + 1):
if f[n][j] <= hoursBefore + eps:
return j
return -1
| Solution |
python | ray-project__ray | python/ray/tune/tests/test_tuner_restore.py | {
"start": 5348,
"end": 37746
} | class ____:
def __init__(self):
import numpy as np
self.data = np.random.rand((2 * 1024 * 1024))
def test_tuner_restore_num_trials(ray_start_2_cpus, tmpdir):
"""Number of trials after restoring a finished run should be the same"""
tuner = Tuner(
_dummy_train_fn,
tune_config=TuneConfig(num_samples=4, metric="_metric", mode="max"),
run_config=RunConfig(
name="test_tuner_restore_num_trials", storage_path=str(tmpdir)
),
)
results = tuner.fit()
assert len(results) == 4
assert results.get_best_result().metrics["_metric"] == 1
del tuner
tuner = Tuner.restore(
str(tmpdir / "test_tuner_restore_num_trials"), trainable=_dummy_train_fn
)
# Check restored results
results = tuner.get_results()
assert len(results) == 4
assert results.get_best_result().metrics["_metric"] == 1
results = tuner.fit()
assert len(results) == 4
assert results.get_best_result().metrics["_metric"] == 1
def test_tuner_restore_resume_errored(ray_start_2_cpus, tmpdir):
"""Resuming errored trials should pick up from previous state"""
fail_marker = tmpdir / "fail_marker"
fail_marker.write_text("", encoding="utf-8")
failing_hanging = [
(None, None),
(fail_marker, None),
(None, None),
(fail_marker, None),
]
tuner = Tuner(
_train_fn_sometimes_failing,
tune_config=TuneConfig(
num_samples=1,
),
run_config=RunConfig(
name="test_tuner_restore_resume_errored", storage_path=str(tmpdir)
),
param_space={
"id": tune.grid_search([0, 1, 2, 3]),
# Second and third trial fail
"failing_hanging": tune.sample_from(
lambda config: failing_hanging[config["id"]]
),
},
)
results = tuner.fit()
assert len(results) == 4
assert len(results.errors) == 2
ordered_results = sorted(results, key=lambda r: r.config["id"])
# Second and third trial are at iter 1 because they failed after first report
assert [r.metrics["it"] for r in ordered_results] == [2, 1, 2, 1]
del tuner
fail_marker.remove(ignore_errors=True)
tuner = Tuner.restore(
str(tmpdir / "test_tuner_restore_resume_errored"),
trainable=_train_fn_sometimes_failing,
resume_errored=True,
)
# Check restored results
results = tuner.get_results()
assert len(results) == 4
assert len(results.errors) == 2
# Second and third trial are at iter 1 because they failed after first report
ordered_results = sorted(results, key=lambda r: r.config["id"])
assert [r.metrics["it"] for r in ordered_results] == [2, 1, 2, 1]
# Get new results
results = tuner.fit()
assert len(results) == 4
assert len(results.errors) == 0
ordered_results = sorted(results, key=lambda r: r.config["id"])
# Since the errored trials are being resumed from previous state and then report
# two more times, we should observe 3 here.
assert [r.metrics["it"] for r in ordered_results] == [2, 3, 2, 3]
def test_tuner_restore_restart_errored(ray_start_2_cpus, tmpdir):
"""Restarting errored trials should re-start from scratch"""
fail_marker = tmpdir / "fail_marker"
fail_marker.write_text("", encoding="utf-8")
failing_hanging = [
(None, None),
(fail_marker, None),
(None, None),
(fail_marker, None),
]
tuner = Tuner(
_train_fn_sometimes_failing,
tune_config=TuneConfig(num_samples=1),
run_config=RunConfig(
name="test_tuner_restore_restart_errored",
storage_path=str(tmpdir),
),
param_space={
"id": tune.grid_search([0, 1, 2, 3]),
# Second and third trial fail
"failing_hanging": tune.sample_from(
lambda config: failing_hanging[config["id"]]
),
},
)
results = tuner.fit()
assert len(results) == 4
assert len(results.errors) == 2
ordered_results = sorted(results, key=lambda r: r.config["id"])
assert [r.metrics["it"] for r in ordered_results] == [2, 1, 2, 1]
del tuner
fail_marker.remove(ignore_errors=True)
tuner = Tuner.restore(
str(tmpdir / "test_tuner_restore_restart_errored"),
trainable=_train_fn_sometimes_failing,
restart_errored=True,
)
# Check restored results
results = tuner.get_results()
assert len(results) == 4
assert len(results.errors) == 2
ordered_results = sorted(results, key=lambda r: r.config["id"])
assert [r.metrics["it"] for r in ordered_results] == [2, 1, 2, 1]
# Get new results
results = tuner.fit()
assert len(results) == 4
assert len(results.errors) == 0
ordered_results = sorted(results, key=lambda r: r.config["id"])
# Since the errored trials are being restarted from scratch, they should report 2
assert [r.metrics["it"] for r in ordered_results] == [2, 2, 2, 2]
def test_tuner_resume_unfinished(ray_start_2_cpus, tmpdir, monkeypatch):
    """Resuming unfinished trials should pick up existing state"""
    # Snapshot the experiment state frequently so the driver checkpoint is
    # fresh when the deliberate crash below happens.
    monkeypatch.setenv("TUNE_GLOBAL_CHECKPOINT_S", "0.1")
    # Make sure that only one trial is pending at a time to prevent
    # the trial order from getting shuffled around.
    monkeypatch.setenv("TUNE_MAX_PENDING_TRIALS_PG", "1")
    # Marker files drive the dummy trainable: a fail marker makes a trial
    # raise, a hang marker makes it block.
    fail_marker = tmpdir / "fail_marker"
    fail_marker.write_text("", encoding="utf-8")
    hang_marker = tmpdir / "hang_marker"
    hang_marker.write_text("", encoding="utf-8")
    param_space = {
        # First trial succeeds, second hangs, third fails, fourth hangs
        "failing_hanging": tune.grid_search(
            [
                (None, None),
                (None, hang_marker),
                (fail_marker, None),
                (None, hang_marker),
            ]
        ),
    }
    # These tests need driver syncing to happen before the crash happens
    # so that they can pick up from the *exact* state it left off at.
    # We do this by failing after a delay of 0.3s > TUNE_GLOBAL_CHECKPOINT_S
    tuner = Tuner(
        _train_fn_sometimes_failing,
        tune_config=TuneConfig(num_samples=1),
        run_config=RunConfig(
            name="test_tuner_resume_unfinished",
            storage_path=str(tmpdir),
            failure_config=FailureConfig(fail_fast=False),
            callbacks=[_FailOnStats(num_trials=4, num_finished=2, delay_s=0.3)],
        ),
        param_space=param_space,
    )
    # Catch the FailOnStats error
    with pytest.raises(RuntimeError):
        tuner.fit()
    # After this run we have the following trial states (status, metric):
    # [('TERMINATED', 2), ('RUNNING', 1), ('ERROR', 1), ('PENDING', None)]
    # Restarting without hanging/failing should lead to the results:
    # [2, 3, 1, 2], because:
    # the TERMINATED trial is finished (state = 2),
    # the RUNNING trial is continued (and picks up from state = 1 for 2 iterations),
    # the ERROR trial is not continued (remains at 1 and errored)
    # and the PENDING trial has no state yet.
    del tuner
    # Remove the markers so the resumed run neither fails nor hangs.
    fail_marker.remove(ignore_errors=True)
    hang_marker.remove(ignore_errors=True)
    tuner = Tuner.restore(
        str(tmpdir / "test_tuner_resume_unfinished"),
        trainable=_train_fn_sometimes_failing,
        param_space=param_space,
    )
    # Drop the crashing callback so the resumed run can finish.
    tuner._local_tuner._run_config.callbacks = None
    results = tuner.fit()
    assert len(results) == 4
    assert len(results.errors) == 1
    assert sorted([r.metrics["it"] for r in results]) == sorted([2, 3, 1, 2])
def test_tuner_resume_errored_only(ray_start_2_cpus, tmpdir, monkeypatch):
    """Not resuming unfinished trials (but only errored and pending) should work"""
    # Snapshot the experiment state frequently so the driver checkpoint is
    # fresh when the deliberate crash below happens.
    monkeypatch.setenv("TUNE_GLOBAL_CHECKPOINT_S", "0.1")
    # Marker files drive the dummy trainable: fail marker -> raise,
    # hang marker -> block.
    fail_marker = tmpdir / "fail_marker"
    fail_marker.write_text("", encoding="utf-8")
    hang_marker = tmpdir / "hang_marker"
    hang_marker.write_text("", encoding="utf-8")
    tuner = Tuner(
        _train_fn_sometimes_failing,
        tune_config=TuneConfig(num_samples=1),
        run_config=RunConfig(
            name="test_tuner_resume_errored_only",
            storage_path=str(tmpdir),
            failure_config=FailureConfig(fail_fast=False),
            callbacks=[_FailOnStats(num_trials=4, num_finished=2, delay_s=0.3)],
        ),
        param_space={
            # First trial succeeds, second hangs, third fails, fourth hangs.
            "failing_hanging": tune.grid_search(
                [
                    (None, None),
                    (None, hang_marker),
                    (fail_marker, None),
                    (None, hang_marker),
                ]
            ),
        },
    )
    # Catch the FailOnStats error
    with pytest.raises(RuntimeError):
        tuner.fit()
    # After this run we have the following trial states (status, metric):
    # [('TERMINATED', 2), ('RUNNING', 1), ('ERROR', 1), ('PENDING', None)]
    # Restarting without continuing existing trials should lead to the results
    # [2, 1, 3, 0], because
    # the TERMINATED trial is finished (state = 2),
    # the RUNNING trial is not continued (marked as terminated, remains at 1),
    # the ERROR trial is resumed (picks up from state = 1 for 2 more iterations -> 3)
    # and the PENDING trial is not continued (marked as terminated, no state -> 0).
    del tuner
    # Clear the markers so the resumed errored trial can run to completion.
    fail_marker.remove(ignore_errors=True)
    hang_marker.remove(ignore_errors=True)
    tuner = Tuner.restore(
        str(tmpdir / "test_tuner_resume_errored_only"),
        trainable=_train_fn_sometimes_failing,
        resume_unfinished=False,
        resume_errored=True,
    )
    # Drop the crashing callback so this run can finish.
    tuner._local_tuner._run_config.callbacks = None
    results = tuner.fit()
    assert len(results) == 4
    assert len(results.errors) == 0
    assert sorted([r.metrics.get("it", 0) for r in results]) == sorted([2, 1, 3, 0])
def _test_tuner_restore_from_cloud(tmpdir, configure_storage_path, storage_path):
    """Check that restoring Tuner() objects from cloud storage works"""
    tuner = Tuner(
        _dummy_train_fn,
        run_config=RunConfig(name="exp_dir", storage_path=configure_storage_path),
    )
    tuner.fit()
    # Download the remote experiment dir and sanity-check its contents.
    check_path = tmpdir / "check_save"
    fs, fs_path = get_fs_and_path(storage_path)
    _download_from_fs_path(fs=fs, fs_path=fs_path, local_path=str(check_path))
    remote_contents = os.listdir(check_path / "exp_dir")
    assert "tuner.pkl" in remote_contents
    # Remember the newest experiment checkpoint (name + size) before restoring,
    # to verify below that restoring writes a *new* checkpoint.
    prev_cp = _find_newest_experiment_checkpoint(str(check_path / "exp_dir"))
    prev_lstat = os.lstat(prev_cp)
    tuner2 = Tuner.restore(
        str(URI(storage_path) / "exp_dir"), trainable=_dummy_train_fn
    )
    results = tuner2.fit()
    assert results[0].metrics["_metric"] == 1
    # Re-download and compare against the pre-restore snapshot.
    check_path_2 = tmpdir / "check_save_2"
    _download_from_fs_path(fs=fs, fs_path=fs_path, local_path=str(check_path_2))
    after_cp = _find_newest_experiment_checkpoint(str(check_path_2 / "exp_dir"))
    after_lstat = os.lstat(after_cp)
    # Experiment checkpoint was updated
    assert os.path.basename(prev_cp) != os.path.basename(after_cp)
    # Old experiment checkpoint still exists in dir
    assert os.path.basename(prev_cp) in os.listdir(check_path_2 / "exp_dir")
    # Contents changed
    assert prev_lstat.st_size != after_lstat.st_size
def test_tuner_restore_from_cloud_manual_path(
    ray_start_2_cpus, tmpdir, mock_s3_bucket_uri
):
    """Restore a Tuner from an explicitly configured (mock S3) storage path."""
    _test_tuner_restore_from_cloud(
        tmpdir,
        configure_storage_path=mock_s3_bucket_uri,
        storage_path=mock_s3_bucket_uri,
    )
# TODO(justinvyu): [fallback_to_latest]
@pytest.mark.skip("Fallback to latest checkpoint is not implemented.")
@pytest.mark.parametrize(
    "storage_path",
    [None, "/tmp/ray_results"],
)
def test_tuner_restore_latest_available_checkpoint(
    ray_start_2_cpus, monkeypatch, tmpdir, storage_path
):
    """Resuming errored trials should pick up from previous state"""
    # Intentionally empty: skipped until fallback-to-latest-checkpoint
    # restoration is implemented (see the TODO above).
@pytest.mark.parametrize("retry_num", [0, 2])
def test_restore_retry(ray_start_2_cpus, tmpdir, retry_num):
    """
    Test retrying restore on a trial level by setting `TUNE_RESTORE_RETRY_NUM`.
    This unit test holds the following hyperparameters:
    - `retry_num`: Maximum number of retry attempts for restoring a trial.
    This value is assigned to the environment variable `TUNE_RESTORE_RETRY_NUM`.
    If the restoration fails after retry_num attempts, the trial increments its
    counter of total number of failures by 1.
    - `retry_num_to_fail`: Number of restore attempts to fail. In this test,
    retry_num_to_fail is set to 2, causing the first two restore attempts to fail.
    - `max_failures`: Maximum allowable failures during training. Here, max_failures is
    set to 2, meaning the training process will terminate after two total failures.
    """
    class MockTrainable(Trainable):
        """A trainable that can generate one failure during training and
        another `config["retry_num_to_fail"]` times during restoring."""
        def setup(self, config):
            self.idx = 0
            # Path of the shared file tracking how many restore attempts happened.
            self.tag_file_path = config["tag_file_path"]
            # NOTE(review): despite the class docstring, the number of restore
            # attempts to fail is hard-coded here, not read from the config.
            self.retry_num_to_fail = 2
            self._is_restored = False
        def step(self):
            time.sleep(1)
            # A restored trial must resume from its checkpoint, never from idx=0.
            if self.idx == 0 and self._is_restored:
                raise RuntimeError(
                    "===== Restored trial cannot start from scratch ====="
                )
            # The initial (non-restored) run fails once, consuming one failure.
            elif self.idx == 2 and not self._is_restored:
                raise RuntimeError("===== First run fails at idx=2 =====")
            self.idx += 1
            return {"score": self.idx}
        def save_checkpoint(self, checkpoint_dir):
            path = os.path.join(checkpoint_dir, "checkpoint")
            with open(path, "w") as f:
                f.write(json.dumps({"idx": self.idx}))
        def load_checkpoint(self, checkpoint_dir):
            self._is_restored = True
            # Bump the attempt counter in the tag file, then fail until
            # `retry_num_to_fail` restore attempts have been consumed.
            with open(self.tag_file_path, "r") as f:
                retried_num = json.loads(f.read())["retried_num"]
            with open(self.tag_file_path, "w") as f:
                f.write(json.dumps({"retried_num": retried_num + 1}))
            if retried_num < self.retry_num_to_fail:
                raise RuntimeError(f"===== Failing restore #{retried_num + 1} =====")
            with open(os.path.join(checkpoint_dir, "checkpoint"), "r") as f:
                self.idx = json.loads(f.read())["idx"]
    # Set environment variable just for this test
    with unittest.mock.patch.dict(
        os.environ, {"TUNE_RESTORE_RETRY_NUM": str(retry_num)}
    ):
        tag_file = os.path.join(tmpdir, "tag")
        # set up tag file
        with open(tag_file, "w") as f:
            f.write(json.dumps({"retried_num": 0}))
        tuner = Tuner(
            MockTrainable,
            run_config=RunConfig(
                name="tryout_restore",
                stop={"training_iteration": 5},
                storage_path=str(tmpdir),
                failure_config=FailureConfig(max_failures=2),
                checkpoint_config=CheckpointConfig(checkpoint_frequency=1),
            ),
            param_space={"tag_file_path": tag_file},
        )
        results = tuner.fit()
        [result] = list(results)
        if retry_num > 0:
            # Restore retries eventually succeed -> training reaches the stop
            # criterion of 5 iterations.
            assert result.metrics["score"] == 5
        else:
            # No retries -> both allowed failures are consumed and the trial
            # terminates at the pre-failure score.
            assert result.metrics["score"] == 2
def test_restore_overwrite_trainable(ray_start_2_cpus, tmpdir):
    """Test validation for trainable compatibility, when re-specifying a trainable
    on restore."""
    def train_func_1(config):
        # Save one checkpoint, then fail so the experiment becomes restorable.
        data = {"data": config["data"]}
        with create_dict_checkpoint(data) as checkpoint:
            tune.report(data, checkpoint=checkpoint)
        raise RuntimeError("Failing!")
    tuner = Tuner(
        train_func_1,
        run_config=RunConfig(name="overwrite_trainable", storage_path=str(tmpdir)),
        param_space={"data": 1},
    )
    tuner.fit()
    del tuner
    # Can't overwrite with a different Trainable type
    with pytest.raises(ValueError):
        tuner = Tuner.restore(
            str(tmpdir / "overwrite_trainable"),
            trainable="abcd",
            resume_errored=True,
        )
    # Can't overwrite with a different Trainable name
    def train_func_2(config):
        raise RuntimeError("Should not run...")
    with pytest.raises(ValueError):
        tuner = Tuner.restore(
            str(tmpdir / "overwrite_trainable"),
            trainable=train_func_2,
            resume_errored=True,
        )
    # Can technically change trainable code (not recommended!)
    # Note: same function *name* as the original, so name validation passes.
    def train_func_1(config):
        checkpoint = tune.get_checkpoint()
        assert checkpoint and load_dict_checkpoint(checkpoint)["data"] == config["data"]
    tuner = Tuner.restore(
        str(tmpdir / "overwrite_trainable"),
        trainable=train_func_1,
        resume_errored=True,
    )
    results = tuner.fit()
    assert not results.errors
@pytest.mark.parametrize("use_function_trainable", [True, False])
def test_restore_with_parameters(ray_start_2_cpus, tmp_path, use_function_trainable):
    """Tests Tuner restoration for a `tune.with_parameters` wrapped trainable."""
    def train_func(config, data_str=None, data_obj=None):
        # Both extra parameters must have been bound by `with_parameters`.
        assert data_str is not None and data_obj is not None
        fail_marker = config.pop("fail_marker", None)
        config["failing_hanging"] = (fail_marker, None)
        _train_fn_sometimes_failing(config)
    class FailingTrainable(Trainable):
        """Class-trainable counterpart: fails while `fail_marker` exists."""
        def setup(self, config, data_str=None, data_obj=None):
            assert data_str is not None and data_obj is not None
            self.idx = 0
            self.fail_marker = config.get("fail_marker", None)
        def step(self):
            if self.fail_marker and self.fail_marker.exists():
                raise RuntimeError("==== Run is failing ====")
            self.idx += 1
            return {"score": self.idx}
        def save_checkpoint(self, checkpoint_dir):
            return {"idx": self.idx}
        def load_checkpoint(self, checkpoint_dict):
            self.idx = checkpoint_dict["idx"]
    trainable = train_func if use_function_trainable else FailingTrainable
    def create_trainable_with_params():
        # Fresh parameter objects each call -- restore must re-bind them.
        data = MockData()
        trainable_with_params = tune.with_parameters(
            trainable, data_str="data", data_obj=data
        )
        return trainable_with_params
    exp_name = f"restore_with_params-{use_function_trainable=}"
    fail_marker = tmp_path / "fail_marker"
    fail_marker.write_text("", encoding="utf-8")
    tuner = Tuner(
        create_trainable_with_params(),
        run_config=RunConfig(
            name=exp_name,
            storage_path=str(tmp_path),
            stop={"training_iteration": 3},
            failure_config=FailureConfig(max_failures=0),
            checkpoint_config=CheckpointConfig(
                checkpoint_frequency=0 if use_function_trainable else 1
            ),
        ),
        param_space={"fail_marker": fail_marker},
    )
    results = tuner.fit()
    assert results.errors
    # Remove the marker so the restored run can succeed.
    fail_marker.unlink()
    tuner = Tuner.restore(
        str(tmp_path / exp_name),
        trainable=create_trainable_with_params(),
        resume_errored=True,
    )
    results = tuner.fit()
    assert not results.errors
@pytest.mark.parametrize("use_tune_run", [True, False])
def test_tuner_restore_from_moved_experiment_path(
    ray_start_2_cpus, tmp_path, use_tune_run
):
    """Check that restoring a Tuner from a moved experiment directory works."""
    # Create a fail_marker dummy file that causes the first Tune run to fail and
    # the second run to succeed
    fail_marker = tmp_path / "fail_marker"
    fail_marker.write_text("", encoding="utf-8")
    old_storage_path = tmp_path / "ray_results"
    old_exp_name = "exp_dir"
    new_storage_path = tmp_path / "new_ray_results"
    new_exp_name = "new_exp_dir"
    # Initial training run (that errors out in the middle)
    num_to_keep = 2
    tuner = Tuner(
        _train_fn_sometimes_failing,
        tune_config=TuneConfig(
            num_samples=1,
        ),
        run_config=RunConfig(
            name=old_exp_name,
            storage_path=str(old_storage_path),
            checkpoint_config=CheckpointConfig(num_to_keep=num_to_keep),
        ),
        param_space={
            "failing_hanging": (fail_marker, None),
        },
    )
    tuner.fit()
    # Move experiment from `tmp_path/ray_results/exp_dir`
    # to `tmp_path/new_ray_results/new_exp_dir`, changing both `storage_path` and
    # the experiment `name`
    shutil.move(str(old_storage_path), str(new_storage_path))
    os.rename(
        str(new_storage_path / old_exp_name), str(new_storage_path / new_exp_name)
    )
    # Check that the results can be read from the new location.
    restore_path = str(new_storage_path / new_exp_name)
    results = ResultGrid(ExperimentAnalysis(restore_path))
    assert len(results.errors) == 1
    training_iteration = results[0].metrics["training_iteration"]
    assert (
        training_iteration == 1
    ), f"Should only have 1 tune.report before erroring, got {training_iteration}"
    # Checkpoint paths must already point into the moved directory.
    assert results[0].checkpoint.path.endswith("checkpoint_000000")
    assert "new_exp_dir" in results[0].checkpoint.path
    del tuner
    # Remove fail_marker so that the restored Tuner doesn't error again
    fail_marker.unlink()
    # Restore from moved experiment directory location, and launch resumed training
    if use_tune_run:
        analysis = tune.run(
            _train_fn_sometimes_failing,
            name=new_exp_name,
            storage_path=str(new_storage_path),
            resume="AUTO+ERRORED",
        )
        results = ResultGrid(analysis)
    else:
        tuner = Tuner.restore(
            restore_path, trainable=_train_fn_sometimes_failing, resume_errored=True
        )
        results = tuner.fit()
    assert len(results.errors) == 0
    # Check that we restored iter=1, then made 2 calls to tune.report -> iter=3
    training_iteration = results[0].metrics["training_iteration"]
    assert training_iteration == 3, training_iteration
    # Make sure that checkpoints are loaded properly
    assert results[0].checkpoint
    assert len(results[0].best_checkpoints) == num_to_keep
    checkpoint_dirs = [
        path for path in os.listdir(results[0].path) if path.startswith("checkpoint_")
    ]
    # num_to_keep=2 -> the oldest checkpoint (000000) was deleted.
    assert sorted(checkpoint_dirs) == ["checkpoint_000001", "checkpoint_000002"]
    # Make sure that we did not create a logdir in the old location
    assert not old_storage_path.exists()
def test_custom_searcher_and_scheduler_restore(ray_start_2_cpus, tmpdir):
    """Check that a restored Tune experiment uses the original searcher/scheduler."""
    fail_marker = tmpdir / "fail_marker"
    fail_marker.write_text("", encoding="utf-8")
    class MockSearcher(OptunaSearch):
        # Counts results seen so the test can tell that searcher state was
        # carried across the restore.
        def on_trial_result(self, trial_id: str, result: dict):
            super().on_trial_result(trial_id, result)
            if not hasattr(self, "_test_result_counter"):
                self._test_result_counter = 0
            self._test_result_counter += 1
    class MockScheduler(ASHAScheduler):
        # Same counting trick for the scheduler.
        def on_trial_result(self, runner, trial, result):
            decision = super().on_trial_result(runner, trial, result)
            if not hasattr(self, "_test_result_counter"):
                self._test_result_counter = 0
            self._test_result_counter += 1
            return decision
    tuner = Tuner(
        _train_fn_sometimes_failing,
        run_config=RunConfig(storage_path=str(tmpdir), name="exp_name"),
        tune_config=TuneConfig(
            search_alg=MockSearcher(),
            scheduler=MockScheduler(),
            metric="it",
            mode="max",
        ),
        param_space={"a": tune.uniform(0, 1), "failing_hanging": (fail_marker, None)},
    )
    tuner.fit()
    del tuner
    # Remove the marker so the restored run succeeds.
    fail_marker.remove(ignore_errors=True)
    tuner = Tuner.restore(
        str(tmpdir / "exp_name"),
        trainable=_train_fn_sometimes_failing,
        resume_errored=True,
    )
    tuner.fit()
    searcher = tuner._local_tuner._tune_config.search_alg
    scheduler = tuner._local_tuner._tune_config.scheduler
    assert isinstance(searcher, MockSearcher)
    assert isinstance(scheduler, MockScheduler)
    # Searcher state should get loaded correctly
    # Total of 3 reported results (1 from before failure, 2 after restore)
    assert searcher._test_result_counter == 3
    # Make sure that the restored scheduler is at least used
    assert (
        hasattr(scheduler, "_test_result_counter")
        and scheduler._test_result_counter > 0
    )
# TODO: [V2] Delete the `data_parallel` variant once V1 is fully removed.
@pytest.mark.parametrize("trainable_type", ["function", "class", "data_parallel"])
def test_checkpoints_saved_after_resume(ray_start_2_cpus, tmp_path, trainable_type):
    """Checkpoints saved after experiment restore should pick up at the correct
    iteration and should not overwrite the checkpoints from the original run.
    Old checkpoints should still be deleted if the total number of checkpoints
    (old + new) exceeds `num_to_keep`.
    In this test, `num_to_keep=4`:
    - Initial run saves checkpoint_000000 and checkpoint_000001
    - Restored run saves checkpoint_000002, checkpoint_000003, and checkpoint_000004
    - Checkpoint 000000 should be deleted.
    """
    def get_checkpoints(experiment_dir):
        # Collect a trial's checkpoint directory names (sorted by index)
        # together with the corresponding loaded Checkpoint objects.
        checkpoint_dirs = [
            path
            for path in os.listdir(experiment_dir)
            if path.startswith("checkpoint_")
        ]
        sorted_checkpoint_dirs = sorted(checkpoint_dirs)
        checkpoints = [
            Checkpoint.from_directory(os.path.join(experiment_dir, d))
            for d in sorted_checkpoint_dirs
        ]
        return sorted_checkpoint_dirs, checkpoints
    # The fail marker makes the initial run error out mid-training.
    fail_marker = tmp_path / "fail_marker"
    fail_marker.write_text("", encoding="utf-8")
    num_to_keep = 4
    checkpoint_config = CheckpointConfig(num_to_keep=num_to_keep)
    param_space = {
        "failing_hanging": (fail_marker, None),
        "num_epochs": 2,
    }
    if trainable_type == "function":
        trainable = _train_fn_sometimes_failing
    elif trainable_type == "class":
        trainable = _ClassTrainableSometimesFailing
        # Class trainables checkpoint via frequency instead of in-loop reports.
        checkpoint_config.checkpoint_frequency = 1
        param_space["num_epochs"] = 4
        param_space["fail_epochs"] = 2
    elif trainable_type == "data_parallel":
        trainable = DataParallelTrainer(
            _train_fn_sometimes_failing,
            scaling_config=ray.train.ScalingConfig(num_workers=1),
        )
        param_space = {"train_loop_config": param_space}
    else:
        raise ValueError(f"Invalid trainable type: {trainable_type}")
    exp_name = f"{trainable_type=}"
    tuner = Tuner(
        trainable,
        tune_config=TuneConfig(num_samples=1),
        run_config=RunConfig(
            name=exp_name,
            storage_path=str(tmp_path),
            checkpoint_config=checkpoint_config,
        ),
        param_space=param_space,
    )
    results = tuner.fit()
    training_iteration = results[0].metrics["training_iteration"]
    assert (
        training_iteration == 2
    ), f"Should be at 2 iters before erroring, got {training_iteration}"
    # Initial run saves the first 2 checkpoints
    checkpoint_dirs, checkpoints = get_checkpoints(results[0].path)
    assert checkpoint_dirs == ["checkpoint_000000", "checkpoint_000001"]
    assert [load_dict_checkpoint(ckpt)["it"] for ckpt in checkpoints] == [1, 2]
    # Remove the marker so the resumed run can finish.
    fail_marker.unlink()
    tuner = Tuner.restore(
        str(tmp_path / exp_name), trainable=trainable, resume_errored=True
    )
    results = tuner.fit()
    assert len(results.errors) == 0
    training_iteration = results[0].metrics["training_iteration"]
    # Restored at it=2, reported 3 more times -> should have it=5
    assert training_iteration == 5
    # Restored run saves the 3 more checkpoints, and first checkpoint should be deleted
    checkpoint_dirs, checkpoints = get_checkpoints(results[0].path)
    # Zero-padded formatting matches the `checkpoint_000000`-style names seen
    # above, and stays correct even if this range is extended past index 9
    # (the previous hard-coded `checkpoint_00000{i}` prefix would not).
    assert checkpoint_dirs == [f"checkpoint_{i:06d}" for i in range(1, 5)]
    assert [load_dict_checkpoint(ckpt)["it"] for ckpt in checkpoints] == [2, 3, 4, 5]
def test_tuner_can_restore(tmp_path):
    """`Tuner.can_restore` should report True only at the experiment dir root.

    Instantiating a Tuner is enough to lay down the experiment directory;
    no trials need to run for restorability to be detected.
    """
    exp_name = "exp_name"
    exp_dir = tmp_path / exp_name
    # Creating the Tuner writes the experiment state to disk.
    Tuner(
        lambda _: print("dummy"),
        run_config=RunConfig(name=exp_name, storage_path=str(tmp_path)),
    )
    # Detected at the experiment root, with and without an explicit filesystem.
    assert Tuner.can_restore(exp_dir)
    assert Tuner.can_restore(exp_dir, storage_filesystem=pyarrow.fs.LocalFileSystem())
    # Not detected one level above or below the experiment root.
    assert not Tuner.can_restore(tmp_path)
    assert not Tuner.can_restore(exp_dir / "other")
def testParamSpaceOverwriteValidation(ray_start_4_cpus, tmp_path):
    """Restoring with a param_space whose keys were added or removed must
    fail validation; restoring with the original param_space must succeed."""
    exp_name = "test_param_space_valid"
    original_space = {"a": 1, "b": {"c": tune.choice([0, 1])}, "d": tune.uniform(0, 1)}
    Tuner(
        lambda _: print("dummy"),
        param_space=original_space,
        run_config=RunConfig(storage_path=str(tmp_path), name=exp_name),
    ).fit()
    # Each mutated space either drops keys or introduces new ones.
    for mutated_space in (
        {},
        {"a": 1, "b": {}, "d": 2},
        {"a": 1, "b": {"c": 2, "e": 3}, "d": 4},
    ):
        with pytest.raises(ValueError):
            Tuner.restore(
                str(tmp_path / exp_name),
                lambda _: print("dummy"),
                param_space=mutated_space,
            )
    # Should work with the original param space
    Tuner.restore(
        str(tmp_path / exp_name),
        trainable=lambda _: print("dummy"),
        param_space=original_space,
    )
def testParamSpaceOverwrite(ray_start_4_cpus, tmp_path, monkeypatch):
    """Test that overwriting param space on restore propagates new refs to existing
    trials and newly generated trials."""
    # Limit the number of generated trial configs -- so restore tests
    # newly generated trials.
    monkeypatch.setenv("TUNE_MAX_PENDING_TRIALS_PG", "1")
    class FakeDataset:
        # Stand-in for an object reference embedded in the param space.
        def __init__(self, name):
            self.name = name
        def __repr__(self):
            return f"<FakeDataset {self.name}>"
    def train_fn(config):
        raise RuntimeError("Failing!")
    param_space = {
        "test": tune.grid_search(
            [FakeDataset("1"), FakeDataset("2"), FakeDataset("3")]
        ),
        "test2": tune.grid_search(
            [
                FakeDataset("4"),
                FakeDataset("5"),
                FakeDataset("6"),
                FakeDataset("7"),
            ]
        ),
    }
    tuner = Tuner(
        train_fn,
        param_space=param_space,
        tune_config=TuneConfig(num_samples=1),
        run_config=RunConfig(
            storage_path=str(tmp_path),
            name="param_space_overwrite",
            callbacks=[_FailOnStats(num_trials=4, num_finished=2)],
        ),
    )
    # The _FailOnStats callback crashes the run partway through.
    with pytest.raises(RuntimeError):
        tuner.fit()
    # Just suppress the error this time with a new trainable
    def train_fn(config):
        pass
    # New dataset objects ("8"-"14") replace the originals on restore.
    param_space = {
        "test": tune.grid_search(
            [FakeDataset("8"), FakeDataset("9"), FakeDataset("10")]
        ),
        "test2": tune.grid_search(
            [
                FakeDataset("11"),
                FakeDataset("12"),
                FakeDataset("13"),
                FakeDataset("14"),
            ]
        ),
    }
    tuner = Tuner.restore(
        str(tmp_path / "param_space_overwrite"),
        trainable=train_fn,
        param_space=param_space,
        resume_errored=True,
    )
    # Drop the crashing callback so the resumed run can finish.
    tuner._local_tuner._run_config.callbacks = None
    result_grid = tuner.fit()
    assert not result_grid.errors
    # 3 x 4 grid -> 12 trials in total.
    assert len(result_grid) == 12
    for r in result_grid:
        # Make sure that test and test2 are updated.
        assert r.config["test"].name in ["8", "9", "10"]
        assert r.config["test2"].name in ["11", "12", "13", "14"]
if __name__ == "__main__":
    import sys
    # Allow running this test module directly; exit with pytest's status code.
    sys.exit(pytest.main(["-v", __file__]))
| MockData |
python | doocs__leetcode | lcof/面试题67. 把字符串转换成整数/Solution.py | {
"start": 0,
"end": 764
} | class ____:
def strToInt(self, str: str) -> int:
if not str:
return 0
n = len(str)
if n == 0:
return 0
i = 0
while str[i] == ' ':
i += 1
# 仅包含空格
if i == n:
return 0
sign = -1 if str[i] == '-' else 1
if str[i] in ['-', '+']:
i += 1
res, flag = 0, (2**31 - 1) // 10
while i < n:
# 非数字,跳出循环体
if not str[i].isdigit():
break
c = int(str[i])
# 溢出判断
if res > flag or (res == flag and c > 7):
return 2**31 - 1 if sign > 0 else -(2**31)
res = res * 10 + c
i += 1
return sign * res
| Solution |
python | doocs__leetcode | solution/2900-2999/2973.Find Number of Coins to Place in Tree Nodes/Solution.py | {
"start": 0,
"end": 702
} | class ____:
def placedCoins(self, edges: List[List[int]], cost: List[int]) -> List[int]:
def dfs(a: int, fa: int) -> List[int]:
res = [cost[a]]
for b in g[a]:
if b != fa:
res.extend(dfs(b, a))
res.sort()
if len(res) >= 3:
ans[a] = max(res[-3] * res[-2] * res[-1], res[0] * res[1] * res[-1], 0)
if len(res) > 5:
res = res[:2] + res[-3:]
return res
n = len(cost)
g = [[] for _ in range(n)]
for a, b in edges:
g[a].append(b)
g[b].append(a)
ans = [1] * n
dfs(0, -1)
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/vaultgemma/modeling_vaultgemma.py | {
"start": 10815,
"end": 12622
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: VaultGemmaConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.config = config
self.attention_type = config.layer_types[layer_idx]
self.self_attn = VaultGemmaAttention(config=config, layer_idx=layer_idx)
self.mlp = VaultGemmaMLP(config)
self.input_layernorm = VaultGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.pre_feedforward_layernorm = VaultGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.pre_feedforward_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
| VaultGemmaDecoderLayer |
python | getsentry__sentry | src/sentry/issues/highlights.py | {
"start": 381,
"end": 1131
} | class ____(serializers.Field):
def to_internal_value(self, data: object) -> dict[str, list[str]]:
if not isinstance(data, dict):
raise serializers.ValidationError("Expected a dictionary.")
for key, value in data.items():
if not VALID_KEY_PATTERN.match(key):
raise serializers.ValidationError(f"Key '{key}' is invalid.")
if not isinstance(value, list) or not all(isinstance(item, str) for item in value):
raise serializers.ValidationError(f"Value for '{key}' must be a list of strings.")
# Remove duplicates
data[key] = list(set(value))
return data
def to_representation(self, value):
return value
| HighlightContextField |
python | bokeh__bokeh | tests/support/plugins/file_server.py | {
"start": 1707,
"end": 2436
} | class ____(BaseHTTPRequestHandler):
"""Http handler."""
def do_GET(self) -> None:
"""GET method handler."""
# depending on Python version, leading / may be present or not
path = self.path.split("?")[0].removeprefix("/")
try:
with open(HTML_ROOT / path, mode="rb") as f:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(f.read())
except OSError:
self.send_error(404, f"File Not Found: {path}")
def log_message(self, format: str, *args: Any) -> None:
"""Override default to avoid trashing stderr"""
pass
| HtmlOnlyHandler |
python | django__django | tests/db_functions/text/test_trim.py | {
"start": 207,
"end": 1416
} | class ____(TestCase):
def test_trim(self):
Author.objects.create(name=" John ", alias="j")
Author.objects.create(name="Rhonda", alias="r")
authors = Author.objects.annotate(
ltrim=LTrim("name"),
rtrim=RTrim("name"),
trim=Trim("name"),
)
self.assertQuerySetEqual(
authors.order_by("alias"),
[
("John ", " John", "John"),
("Rhonda", "Rhonda", "Rhonda"),
],
lambda a: (a.ltrim, a.rtrim, a.trim),
)
def test_trim_transform(self):
Author.objects.create(name=" John ")
Author.objects.create(name="Rhonda")
tests = (
(LTrim, "John "),
(RTrim, " John"),
(Trim, "John"),
)
for transform, trimmed_name in tests:
with self.subTest(transform=transform):
with register_lookup(CharField, transform):
authors = Author.objects.filter(
**{"name__%s" % transform.lookup_name: trimmed_name}
)
self.assertQuerySetEqual(authors, [" John "], lambda a: a.name)
| TrimTests |
python | tensorflow__tensorflow | tensorflow/python/distribute/reduce_util.py | {
"start": 882,
"end": 1673
} | class ____(enum.Enum):
"""Indicates how a set of values should be reduced.
* `SUM`: Add all the values.
* `MEAN`: Take the arithmetic mean ("average") of the values.
"""
# TODO(priyag): Add the following types:
# `MIN`: Return the minimum of all values.
# `MAX`: Return the maximum of all values.
SUM = "SUM"
MEAN = "MEAN"
@staticmethod
def from_variable_aggregation(aggregation):
mapping = {
variable_scope.VariableAggregation.SUM: ReduceOp.SUM,
variable_scope.VariableAggregation.MEAN: ReduceOp.MEAN,
}
reduce_op = mapping.get(aggregation)
if not reduce_op:
raise ValueError("Could not convert from `tf.VariableAggregation` %s to"
"`tf.distribute.ReduceOp` type" % aggregation)
return reduce_op
| ReduceOp |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/io_management/custom_io_manager.py | {
"start": 897,
"end": 1291
} | class ____(dg.IOManager):
def __init__(self, api_token):
self._api_token = api_token
# setup stateful cache
self._cache = {}
def handle_output(self, context: dg.OutputContext, obj): ...
def load_input(self, context: dg.InputContext):
if context.asset_key in self._cache:
return self._cache[context.asset_key]
...
| ExternalIOManager |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.