language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/0900-0999/0905.Sort Array By Parity/Solution.py | {
"start": 0,
"end": 372
} | class ____:
def sortArrayByParity(self, nums: List[int]) -> List[int]:
i, j = 0, len(nums) - 1
while i < j:
if nums[i] % 2 == 0:
i += 1
elif nums[j] % 2 == 1:
j -= 1
else:
nums[i], nums[j] = nums[j], nums[i]
i, j = i + 1, j - 1
return nums
| Solution |
python | PyCQA__pylint | pylint/extensions/empty_comment.py | {
"start": 1186,
"end": 1963
} | class ____(BaseRawFileChecker):
name = "empty-comment"
msgs = {
"R2044": (
"Line with empty comment",
"empty-comment",
(
"Used when a # symbol appears on a line not followed by an actual comment"
),
)
}
options = ()
def process_module(self, node: nodes.Module) -> None:
with node.stream() as stream:
for line_num, line in enumerate(stream):
line = line.rstrip()
if line.endswith(b"#"):
if not is_line_commented(line[:-1]):
self.add_message("empty-comment", line=line_num + 1)
def register(linter: PyLinter) -> None:
linter.register_checker(CommentChecker(linter))
| CommentChecker |
python | django-import-export__django-import-export | tests/core/tests/test_results.py | {
"start": 253,
"end": 1159
} | class ____(SimpleTestCase):
def test_repr_no_details(self):
try:
1 / 0
except Exception as exc:
error = Error(exc)
self.assertEqual(repr(error), "<Error: ZeroDivisionError('division by zero')>")
def test_repr_all_details(self):
try:
1 / 0
except Exception as exc:
error = Error(exc, row=1, number=2)
self.assertEqual(
repr(error),
"<Error: ZeroDivisionError('division by zero') at row 1 at number 2>",
)
def test_traceback(self):
try:
1 / 0
except Exception as exc:
error = Error(exc)
self.assertTrue(
error.traceback.startswith("Traceback (most recent call last):\n")
)
self.assertIn(
"ZeroDivisionError: division by zero\n",
error.traceback,
)
| ErrorTest |
python | mlflow__mlflow | dev/clint/src/clint/rules/no_shebang.py | {
"start": 36,
"end": 451
} | class ____(Rule):
def _message(self) -> str:
return "Python scripts should not contain shebang lines"
@staticmethod
def check(file_content: str) -> bool:
"""
Returns True if the file contains a shebang line at the beginning.
A shebang line is a line that starts with '#!' (typically #!/usr/bin/env python).
"""
return file_content.startswith("#!")
| NoShebang |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/linear.py | {
"start": 3695,
"end": 13612
} | class ____(WeightedQuantizedModule):
r"""
A quantized linear module with quantized tensor as inputs and outputs.
We adopt the same interface as `torch.nn.Linear`, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.
Similar to :class:`~torch.nn.Linear`, attributes will be randomly
initialized at module creation time and will be overwritten later
Attributes:
weight (Tensor): the non-learnable quantized weights of the module of
shape :math:`(\text{out\_features}, \text{in\_features})`.
bias (Tensor): the non-learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized to zero.
scale: `scale` parameter of output Quantized Tensor, type: double
zero_point: `zero_point` parameter for output Quantized Tensor, type: long
Examples::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_QENGINE)
>>> m = nn.quantized.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> # xdoctest: +SKIP
>>> input = torch.quantize_per_tensor(input, 1.0, 0, torch.quint8)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_version = 3
_FLOAT_MODULE = (nn.Linear, nn.modules.linear.NonDynamicallyQuantizableLinear)
def __init__(self, in_features, out_features, bias_=True, dtype=torch.qint8):
super().__init__()
# We don't muck around with buffers or attributes or anything here
# to keep the module simple. *everything* is simply a Python attribute.
# Serialization logic is explicitly handled in the below serialization and
# deserialization modules
self.in_features = in_features
self.out_features = out_features
bias = None
if bias_:
bias = torch.zeros(out_features, dtype=torch.float)
if dtype == torch.qint8:
qweight = torch._empty_affine_quantized(
[out_features, in_features], scale=1, zero_point=0, dtype=torch.qint8
)
elif dtype == torch.float16:
qweight = torch.zeros([out_features, in_features], dtype=torch.float)
else:
raise RuntimeError("Unsupported dtype specified for quantized Linear!")
self._packed_params = LinearPackedParams(dtype)
self._packed_params.set_weight_bias(qweight, bias)
self.scale = 1.0
self.zero_point = 0
def _get_name(self):
return "QuantizedLinear"
def extra_repr(self):
return (
f"in_features={self.in_features}, out_features={self.out_features}, scale={self.scale}, "
f"zero_point={self.zero_point}, qscheme={self.weight().qscheme()}"
)
def __repr__(self):
return _hide_packed_params_repr(self, LinearPackedParams)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.ops.quantized.linear(
x, self._packed_params._packed_params, self.scale, self.zero_point
)
# ===== Serialization methods =====
# The special consideration here is that we have to unpack the weights into their
# regular QTensor form for serialization. Packed weights should not live
# outside the process in which they were created, rather they should be derived
# from the QTensor weight.
#
# Version 1
# self
# |--- scale : float
# |--- zero_point : int
# |--- weight : Tensor
# |--- bias : Tensor
#
# Version 2
# self
# |--- scale : float
# |--- zero_point : int
# |--- _packed_params : Module
# |--- weight : Tensor
# |--- bias : Tensor
#
# Version 3
# self
# |--- scale : float
# |--- zero_point : int
# |--- _packed_params : Module
# |--- _packed_params : (Tensor, Tensor) representing weight, bias
# of LinearPackedParams C++ struct
#
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + "scale"] = torch.tensor(self.scale)
destination[prefix + "zero_point"] = torch.tensor(self.zero_point)
# ===== Deserialization methods =====
# Counterpart to the serialization methods, we must pack the serialized QTensor
# weight into its packed format for use by the FBGEMM ops.
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
self.scale = float(state_dict[prefix + "scale"])
state_dict.pop(prefix + "scale")
self.zero_point = int(state_dict[prefix + "zero_point"])
state_dict.pop(prefix + "zero_point")
version = local_metadata.get("version", None)
if version is None or version == 1:
# We moved the parameters into a LinearPackedParameters submodule
weight = state_dict.pop(prefix + "weight")
bias = state_dict.pop(prefix + "bias")
state_dict.update(
{
prefix + "_packed_params.weight": weight,
prefix + "_packed_params.bias": bias,
}
)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
# Function rather than property to make sure that JIT serialization doesn't
# register this as an attribute
def _weight_bias(self):
return self._packed_params._weight_bias()
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def set_weight_bias(self, w: torch.Tensor, b: torch.Tensor | None) -> None:
self._packed_params.set_weight_bias(w, b)
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
r"""Create a quantized module from an observed float module
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by the user
use_precomputed_fake_quant (bool): if True, the module will reuse min/max
values from the precomputed fake quant module.
"""
if hasattr(mod, "weight_fake_quant"):
if type_before_parametrizations(mod) == nniqat.LinearBn1d:
mod.weight, mod.bias = fuse_linear_bn_weights(
mod.weight,
mod.bias,
mod.bn.running_mean,
mod.bn.running_var,
mod.bn.eps,
mod.bn.weight,
mod.bn.bias,
)
weight_post_process = mod.weight_fake_quant
activation_post_process = mod.activation_post_process
else:
# This function does not participate in JIT, so it is OK to ignore
# the type mismatch in assignment. Also, mypy has an issue with
# iterables not being implemented, so we are ignoring those too.
if not isinstance(cls._FLOAT_MODULE, Iterable):
# pyrefly: ignore [bad-assignment]
cls._FLOAT_MODULE = [cls._FLOAT_MODULE]
supported_modules = ", ".join(
[float_mod.__name__ for float_mod in cls._FLOAT_MODULE]
)
error_msg = f"nnq.{cls.__name__}.from_float only works for {supported_modules}, but got: {type(mod)}"
assert type_before_parametrizations(mod) in cls._FLOAT_MODULE, (
error_msg.format()
)
assert hasattr(mod, "qconfig"), (
"Input float module must have qconfig defined"
)
activation_post_process = mod.activation_post_process
if type_before_parametrizations(mod) == nni.LinearReLU:
mod = mod[0]
weight_post_process = (
mod.qconfig.weight()
if not hasattr(mod, "weight_fake_quant")
else mod.weight_fake_quant
)
if not use_precomputed_fake_quant:
# Observer may not have been called yet
# Observer might have been called in the previous stage via PTQ algorithm e.g. AdaRound
weight_post_process(mod.weight)
dtype = weight_post_process.dtype
act_scale, act_zp = activation_post_process.calculate_qparams()
assert dtype == torch.qint8, "Weight observer must have dtype torch.qint8"
qweight = _quantize_weight(mod.weight.float(), weight_post_process)
qlinear = cls(mod.in_features, mod.out_features, dtype=dtype)
qlinear.set_weight_bias(qweight, mod.bias)
qlinear.scale = float(act_scale)
qlinear.zero_point = int(act_zp)
return qlinear
@classmethod
def from_reference(cls, ref_qlinear, output_scale, output_zero_point):
r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module
Args:
ref_qlinear (Module): a reference quantized linear module, either produced by torch.ao.quantization
utilities or provided by the user
output_scale (float): scale for output Tensor
output_zero_point (int): zero point for output Tensor
"""
qlinear = cls(ref_qlinear.in_features, ref_qlinear.out_features)
qweight = ref_qlinear.get_quantized_weight()
qlinear.set_weight_bias(qweight, ref_qlinear.bias)
qlinear.scale = float(output_scale)
qlinear.zero_point = int(output_zero_point)
return qlinear
| Linear |
python | pypa__pip | src/pip/_vendor/rich/table.py | {
"start": 5885,
"end": 6133
} | class ____(NamedTuple):
"""A single cell in a table."""
style: StyleType
"""Style to apply to cell."""
renderable: "RenderableType"
"""Cell renderable."""
vertical: VerticalAlignMethod
"""Cell vertical alignment."""
| _Cell |
python | django__django | tests/queries/tests.py | {
"start": 176112,
"end": 177974
} | class ____(TestCase):
def test_ticket_23605(self):
# Test filtering on a complicated q-object from ticket's report.
# The query structure is such that we have multiple nested subqueries.
# The original problem was that the inner queries weren't relabeled
# correctly.
# See also #24090.
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=10000.0)
Ticket23605B.objects.create(
field_b0=10000.0, field_b1=True, modelc_fk=c1, modela_fk=a1
)
complex_q = Q(
pk__in=Ticket23605A.objects.filter(
Q(
# True for a1 as field_b0 = 10000, field_c0=10000
# False for a2 as no ticket23605b found
ticket23605b__field_b0__gte=1000000
/ F("ticket23605b__modelc_fk__field_c0")
)
&
# True for a1 (field_b1=True)
Q(ticket23605b__field_b1=True)
& ~Q(
ticket23605b__pk__in=Ticket23605B.objects.filter(
~(
# Same filters as above commented filters, but
# double-negated (one for Q() above, one for
# parentheses). So, again a1 match, a2 not.
Q(field_b1=True)
& Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
)
)
)
).filter(ticket23605b__field_b1=True)
)
qs1 = Ticket23605A.objects.filter(complex_q)
self.assertSequenceEqual(qs1, [a1])
qs2 = Ticket23605A.objects.exclude(complex_q)
self.assertSequenceEqual(qs2, [a2])
| Ticket23605Tests |
python | huggingface__transformers | tests/models/big_bird/test_modeling_big_bird.py | {
"start": 24445,
"end": 42664
} | class ____(unittest.TestCase):
# we can have this true once block_sparse attn_probs works accurately
test_attention_probs = False
def _get_dummy_input_ids(self):
# fmt: off
ids = torch.tensor(
[[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], # noqa: E231
dtype=torch.long,
device=torch_device,
)
# fmt: on
return ids
def test_inference_block_sparse_pretraining(self):
model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="block_sparse")
model.to(torch_device)
input_ids = torch.tensor([[20920, 232, 328, 1437] * 1024], dtype=torch.long, device=torch_device)
with torch.no_grad():
outputs = model(input_ids)
prediction_logits = outputs.prediction_logits
seq_relationship_logits = outputs.seq_relationship_logits
self.assertEqual(prediction_logits.shape, torch.Size((1, 4096, 50358)))
self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2)))
expected_prediction_logits_slice = torch.tensor(
[
[-0.5583, 0.0475, -0.2508, 7.4423],
[0.7409, 1.4460, -0.7593, 7.7010],
[1.9150, 3.1395, 5.8840, 9.3498],
[-0.1854, -1.4640, -2.2052, 3.7968],
],
device=torch_device,
)
torch.testing.assert_close(
prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, rtol=1e-4, atol=1e-4
)
expected_seq_relationship_logits = torch.tensor([[46.9465, 47.9517]], device=torch_device)
torch.testing.assert_close(seq_relationship_logits, expected_seq_relationship_logits, rtol=1e-4, atol=1e-4)
def test_inference_full_pretraining(self):
model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")
model.to(torch_device)
input_ids = torch.tensor([[20920, 232, 328, 1437] * 512], dtype=torch.long, device=torch_device)
with torch.no_grad():
outputs = model(input_ids)
prediction_logits = outputs.prediction_logits
seq_relationship_logits = outputs.seq_relationship_logits
self.assertEqual(prediction_logits.shape, torch.Size((1, 512 * 4, 50358)))
self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2)))
expected_prediction_logits_slice = torch.tensor(
[
[0.1499, -1.1217, 0.1990, 8.4499],
[-2.7757, -3.0687, -4.8577, 7.5156],
[1.5446, 0.1982, 4.3016, 10.4281],
[-1.3705, -4.0130, -3.9629, 5.1526],
],
device=torch_device,
)
torch.testing.assert_close(
prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, rtol=1e-4, atol=1e-4
)
expected_seq_relationship_logits = torch.tensor([[41.4503, 41.2406]], device=torch_device)
torch.testing.assert_close(seq_relationship_logits, expected_seq_relationship_logits, rtol=1e-4, atol=1e-4)
def test_block_sparse_attention_probs(self):
"""
Asserting if outputted attention matrix is similar to hard coded attention matrix
"""
if not self.test_attention_probs:
self.skipTest("test_attention_probs is set to False")
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
model.eval()
config = model.config
input_ids = self._get_dummy_input_ids()
hidden_states = model.embeddings(input_ids)
batch_size, seqlen, _ = hidden_states.size()
attn_mask = torch.ones(batch_size, seqlen, device=torch_device, dtype=torch.float)
to_seq_length = from_seq_length = seqlen
from_block_size = to_block_size = config.block_size
blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn(
attn_mask, config.block_size
)
from_blocked_mask = to_blocked_mask = blocked_mask
for i in range(config.num_hidden_layers):
pointer = model.encoder.layer[i].attention.self
query_layer = pointer.transpose_for_scores(pointer.query(hidden_states))
key_layer = pointer.transpose_for_scores(pointer.key(hidden_states))
value_layer = pointer.transpose_for_scores(pointer.value(hidden_states))
context_layer, attention_probs = pointer.bigbird_block_sparse_attention(
query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
pointer.num_attention_heads,
pointer.num_random_blocks,
pointer.attention_head_size,
from_block_size,
to_block_size,
batch_size,
from_seq_length,
to_seq_length,
seed=pointer.seed,
plan_from_length=None,
plan_num_rand_blocks=None,
output_attentions=True,
)
context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)
cl = torch.einsum("bhqk,bhkd->bhqd", attention_probs, value_layer)
cl = cl.view(context_layer.size())
torch.testing.assert_close(context_layer, cl, rtol=0.001, atol=0.001)
def test_block_sparse_context_layer(self):
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
model.eval()
config = model.config
input_ids = self._get_dummy_input_ids()
dummy_hidden_states = model.embeddings(input_ids)
attn_mask = torch.ones_like(input_ids, device=torch_device)
blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn(
attn_mask, config.block_size
)
targeted_cl = torch.tensor(
[
[0.1870, 1.5248, 0.2333, -0.0483, -0.0952, 1.8359, -0.0142, 0.1239, 0.0083, -0.0045],
[-0.0601, 0.1243, 0.1329, -0.1524, 0.2347, 0.0894, -0.2248, -0.2461, -0.0645, -0.0109],
[-0.0418, 0.1463, 0.1290, -0.1638, 0.2489, 0.0799, -0.2341, -0.2406, -0.0524, 0.0106],
[0.1859, 1.5182, 0.2324, -0.0473, -0.0952, 1.8295, -0.0148, 0.1242, 0.0080, -0.0045],
[0.1879, 1.5300, 0.2334, -0.0480, -0.0967, 1.8428, -0.0137, 0.1256, 0.0087, -0.0050],
[0.1852, 1.5149, 0.2330, -0.0492, -0.0936, 1.8236, -0.0154, 0.1210, 0.0080, -0.0048],
[0.1857, 1.5186, 0.2331, -0.0484, -0.0940, 1.8285, -0.0148, 0.1224, 0.0077, -0.0045],
[0.1884, 1.5336, 0.2334, -0.0469, -0.0974, 1.8477, -0.0132, 0.1266, 0.0085, -0.0046],
[0.1881, 1.5308, 0.2334, -0.0479, -0.0969, 1.8438, -0.0136, 0.1258, 0.0088, -0.0050],
[0.1849, 1.5143, 0.2329, -0.0491, -0.0930, 1.8230, -0.0156, 0.1209, 0.0074, -0.0047],
[0.1878, 1.5299, 0.2333, -0.0472, -0.0967, 1.8434, -0.0137, 0.1257, 0.0084, -0.0048],
[0.1873, 1.5260, 0.2333, -0.0478, -0.0961, 1.8383, -0.0142, 0.1245, 0.0083, -0.0048],
[0.1849, 1.5145, 0.2327, -0.0491, -0.0935, 1.8237, -0.0156, 0.1215, 0.0083, -0.0046],
[0.1866, 1.5232, 0.2332, -0.0488, -0.0950, 1.8342, -0.0143, 0.1237, 0.0084, -0.0047],
],
device=torch_device,
)
context_layer = model.encoder.layer[0].attention.self(
dummy_hidden_states,
band_mask=band_mask,
from_mask=from_mask,
to_mask=to_mask,
from_blocked_mask=blocked_mask,
to_blocked_mask=blocked_mask,
)
context_layer = context_layer[0]
self.assertEqual(context_layer.shape, torch.Size((1, 128, 768)))
torch.testing.assert_close(context_layer[0, 64:78, 300:310], targeted_cl, rtol=0.0001, atol=0.0001)
def test_tokenizer_inference(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
text = [
"Transformer-based models are unable to process long sequences due to their self-attention operation,"
" which scales quadratically with the sequence length. To address this limitation, we introduce the"
" Longformer with an attention mechanism that scales linearly with sequence length, making it easy to"
" process documents of thousands of tokens or longer. Longformer’s attention mechanism is a drop-in"
" replacement for the standard self-attention and combines a local windowed attention with a task"
" motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer"
" on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In"
" contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream"
" tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new"
" state-of-the-art results on WikiHop and TriviaQA."
]
inputs = tokenizer(text)
for k in inputs:
inputs[k] = torch.tensor(inputs[k], device=torch_device, dtype=torch.long)
prediction = model(**inputs)
prediction = prediction[0]
self.assertEqual(prediction.shape, torch.Size((1, 199, 768)))
expected_prediction = torch.tensor(
[
[0.1887, -0.0474, 0.2604, 0.1453],
[0.0651, 0.1999, 0.1797, 0.1161],
[0.2833, -0.3036, 0.6910, 0.1123],
[0.2836, -0.4644, -0.0111, 0.1530],
[0.3919, -0.2823, 0.4192, 0.1687],
[0.2168, -0.1956, 0.4050, 0.0925],
[0.2597, -0.0884, 0.1258, 0.1119],
[0.1127, -0.1203, 0.1924, 0.2859],
[0.1362, -0.1315, 0.2693, 0.1027],
[-0.3169, -0.2266, 0.4419, 0.6740],
[0.2366, -0.1452, 0.2589, 0.0579],
[0.0358, -0.2021, 0.3112, -0.1392],
],
device=torch_device,
)
torch.testing.assert_close(prediction[0, 52:64, 320:324], expected_prediction, rtol=1e-4, atol=1e-4)
def test_inference_question_answering(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-base-trivia-itc")
model = BigBirdForQuestionAnswering.from_pretrained(
"google/bigbird-base-trivia-itc", attention_type="block_sparse", block_size=16, num_random_blocks=3
)
model.to(torch_device)
context = (
"The BigBird model was proposed in Big Bird: Transformers for Longer Sequences by Zaheer, Manzil and"
" Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago"
" and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird, is a"
" sparse-attention based transformer which extends Transformer based models, such as BERT to much longer"
" sequences. In addition to sparse attention, BigBird also applies global attention as well as random"
" attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and"
" random attention approximates full attention, while being computationally much more efficient for longer"
" sequences. As a consequence of the capability to handle longer context, BigBird has shown improved"
" performance on various long document NLP tasks, such as question answering and summarization, compared"
" to BERT or RoBERTa."
)
question = [
"Which is better for longer sequences- BigBird or BERT?",
"What is the benefit of using BigBird over BERT?",
]
inputs = tokenizer(
question,
[context, context],
padding=True,
return_tensors="pt",
add_special_tokens=True,
max_length=256,
truncation=True,
)
inputs = {k: v.to(torch_device) for k, v in inputs.items()}
start_logits, end_logits = model(**inputs).to_tuple()
# fmt: off
target_start_logits = torch.tensor(
[[-8.5622, -9.6209, -14.3351, -8.7032, -11.8596, -7.7446, -9.6730, -13.6063, -8.9651, -11.7417, -8.2641, -8.7056, -13.4116, -5.6600, -8.8316, -10.4148, -12.2180, -7.7979, -12.5274, -6.0685, -10.3373, -11.3128, -6.6456, -14.4030, -6.8292, -14.5383, -11.5638, -6.3326, 11.5293, -1.8434, -10.0013, -7.6150], [-10.7384, -13.1179, -10.1837, -13.7700, -10.0186, -11.7335, -13.3411, -10.0188, -13.4235, -9.9381, -10.4252, -13.1281, -8.2022, -10.4326, -11.5542, -14.1549, -10.7546, -13.4691, -8.2744, -11.4324, -13.3773, -9.8284, -14.5825, -8.7471, -14.7050, -8.0364, -11.3627, -6.4638, -11.7031, -14.3446, -9.9425, -8.0088]], # noqa: E231
device=torch_device,
)
target_end_logits = torch.tensor(
[[-12.1736, -8.8487, -14.8877, -11.6713, -15.1165, -12.2396, -7.6828, -15.4153, -12.2528, -14.3671, -12.3596, -7.4272, -14.9615, -13.6356, -11.7939, -9.9767, -14.8112, -8.9567, -15.8798, -11.5291, -9.4249, -14.7544, -7.9387, -16.2789, -8.9702, -15.3111, -11.5585, -7.9992, -4.1127, 10.3209, -8.3926, -10.2005], [-11.1375, -15.4027, -12.6861, -16.9884, -13.7093, -10.3560, -15.7228, -12.9290, -15.8519, -13.7953, -10.2460, -15.7198, -14.2078, -12.8477, -11.4861, -16.1017, -11.8900, -16.4488, -13.2959, -10.3980, -15.4874, -10.3539, -16.8263, -10.9973, -17.0344, -9.2751, -10.1196, -13.8907, -12.1025, -13.0628, -12.8530, -13.8173]],
device=torch_device,
)
# fmt: on
torch.testing.assert_close(start_logits[:, 64:96], target_start_logits, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(end_logits[:, 64:96], target_end_logits, rtol=1e-4, atol=1e-4)
input_ids = inputs["input_ids"].tolist()
answer = [
input_ids[i][torch.argmax(start_logits, dim=-1)[i] : torch.argmax(end_logits, dim=-1)[i] + 1]
for i in range(len(input_ids))
]
answer = tokenizer.batch_decode(answer)
self.assertTrue(answer == ["BigBird", "global attention"])
def test_fill_mask(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base")
model.to(torch_device)
input_ids = tokenizer("The goal of life is [MASK] .", return_tensors="pt").input_ids.to(torch_device)
logits = model(input_ids).logits
# [MASK] is token at 6th position
pred_token = tokenizer.decode(torch.argmax(logits[0, 6:7], axis=-1))
self.assertEqual(pred_token, "happiness")
def test_auto_padding(self):
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
model.eval()
input_ids = torch.tensor([200 * [10] + 40 * [2] + [1]], device=torch_device, dtype=torch.long)
with torch.no_grad():
output = model(input_ids).to_tuple()[0]
# fmt: off
target = torch.tensor(
[[-0.129420, -0.164740, 0.042422, -0.336030, 0.094379, 0.033794, 0.384590, 0.229660, -0.196500, 0.108020], [-0.000154, -0.168800, 0.165820, -0.313670, 0.101240, 0.035145, 0.381880, 0.213730, -0.201080, 0.077443], [0.053754, -0.166350, 0.225520, -0.272900, 0.119670, 0.019987, 0.348670, 0.199190, -0.181600, 0.084640], [0.063636, -0.187110, 0.237010, -0.297380, 0.126300, 0.020025, 0.268490, 0.191820, -0.192300, 0.035077], [0.073893, -0.184790, 0.188870, -0.297860, 0.134280, 0.028972, 0.174650, 0.186890, -0.180530, 0.006851], [0.005253, -0.169360, 0.123100, -0.302550, 0.126930, 0.024188, 0.133410, 0.200600, -0.168210, -0.001006], [-0.093336, -0.175370, -0.004768, -0.333170, 0.114330, 0.034168, 0.120960, 0.203570, -0.162810, -0.005757], [-0.160210, -0.169310, -0.049064, -0.331950, 0.115730, 0.027062, 0.143600, 0.205310, -0.144580, 0.026746], [-0.193200, -0.156820, -0.079422, -0.351600, 0.106450, 0.032174, 0.245690, 0.210250, -0.173480, 0.043914], [-0.167980, -0.153050, -0.059764, -0.357890,0.103910, 0.031481, 0.334190, 0.208960,-0.178180, 0.072165], [-0.136990, -0.156950, -0.012099, -0.353140,0.096996, 0.025864, 0.376340, 0.216050, -0.171820, 0.089963], [-0.041143, -0.167060, 0.079754, -0.353220, 0.093247, 0.019867, 0.385810, 0.214340, -0.191800, 0.065946],[0.040373, -0.158610, 0.152570, -0.312930, 0.110590, 0.012282, 0.345270, 0.204040, -0.176500, 0.064972], [0.043762, -0.166450, 0.179500, -0.317930, 0.117280, -0.004040, 0.304490, 0.201380, -0.182780, 0.044000]], # noqa: E231
device=torch_device,
)
# fmt: on
self.assertEqual(output.shape, torch.Size((1, 241, 768)))
torch.testing.assert_close(output[0, 64:78, 300:310], target, rtol=0.0001, atol=0.0001)
| BigBirdModelIntegrationTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1422,
"end": 1826
} | class ____(sgqlc.types.Enum):
"""Represents an annotation's information level.
Enumeration Choices:
* `FAILURE`: An annotation indicating an inescapable error.
* `NOTICE`: An annotation indicating some information.
* `WARNING`: An annotation indicating an ignorable error.
"""
__schema__ = github_schema
__choices__ = ("FAILURE", "NOTICE", "WARNING")
| CheckAnnotationLevel |
python | PrefectHQ__prefect | src/integrations/prefect-email/tests/conftest.py | {
"start": 661,
"end": 1211
} | class ____(MagicMock):
def __init__(self, server, port, context=None):
super().__init__()
self.server = server
self.port = port
self.context = context
def login(self, username, password):
self.username = username
self.password = password
def starttls(self, context=None):
self.context = context
@pytest.fixture
def smtp(monkeypatch):
monkeypatch.setattr("prefect_email.credentials.SMTP", SMTPMock)
monkeypatch.setattr("prefect_email.credentials.SMTP_SSL", SMTPMock)
| SMTPMock |
python | run-llama__llama_index | llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-s3/llama_index/storage/kvstore/s3/base.py | {
"start": 189,
"end": 4800
} | class ____(BaseKVStore):
"""
S3 Key-Value store.
Stores key-value pairs in a S3 bucket. Can optionally specify a path to a folder
where KV data is stored.
The KV data is further divided into collections, which are subfolders in the path.
Each key-value pair is stored as a JSON file.
Args:
s3_bucket (Any): boto3 S3 Bucket instance
path (Optional[str]): path to folder in S3 bucket where KV data is stored
"""
def __init__(
self,
bucket: Any,
path: Optional[str] = "./",
) -> None:
"""Init a S3DBKVStore."""
self._bucket = bucket
self._path = path or "./"
@classmethod
def from_s3_location(
cls,
bucket_name: str,
path: Optional[str] = None,
) -> "S3DBKVStore":
"""
Load a S3DBKVStore from a S3 URI.
Args:
bucket_name (str): S3 bucket name
path (Optional[str]): path to folder in S3 bucket where KV data is stored
"""
s3 = boto3.resource("s3")
bucket = s3.Bucket(bucket_name)
return cls(
bucket,
path=path,
)
def _get_object_key(self, collection: str, key: str) -> str:
return str(PurePath(f"{self._path}/{collection}/{key}.json"))
def put(
self,
key: str,
val: dict,
collection: str = DEFAULT_COLLECTION,
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
obj_key = self._get_object_key(collection, key)
self._bucket.put_object(
Key=obj_key,
Body=json.dumps(val),
)
async def aput(
self,
key: str,
val: dict,
collection: str = DEFAULT_COLLECTION,
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
raise NotImplementedError
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
obj_key = self._get_object_key(collection, key)
try:
obj = next(iter(self._bucket.objects.filter(Prefix=obj_key).limit(1)))
except StopIteration:
return None
body = obj.get()["Body"].read()
return json.loads(body)
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
raise NotImplementedError
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""
Get all values from the store.
Args:
collection (str): collection name
"""
collection_path = str(PurePath(f"{self._path}/{collection}/"))
collection_kv_dict = {}
for obj in self._bucket.objects.filter(Prefix=collection_path):
body = obj.get()["Body"].read()
json_filename = os.path.split(obj.key)[-1]
key = os.path.splitext(json_filename)[0]
value = json.loads(body)
collection_kv_dict[key] = value
return collection_kv_dict
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""
Get all values from the store.
Args:
collection (str): collection name
"""
raise NotImplementedError
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
obj_key = self._get_object_key(collection, key)
matched_objs = list(self._bucket.objects.filter(Prefix=obj_key).limit(1))
if len(matched_objs) == 0:
return False
obj = matched_objs[0]
obj.delete()
return True
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
raise NotImplementedError
| S3DBKVStore |
python | pypa__pip | src/pip/_vendor/pygments/lexer.py | {
"start": 1457,
"end": 10741
} | class ____(metaclass=LexerMeta):
"""
Lexer for a specific language.
See also :doc:`lexerdevelopment`, a high-level guide to writing
lexers.
Lexer classes have attributes used for choosing the most appropriate
lexer based on various criteria.
.. autoattribute:: name
:no-value:
.. autoattribute:: aliases
:no-value:
.. autoattribute:: filenames
:no-value:
.. autoattribute:: alias_filenames
.. autoattribute:: mimetypes
:no-value:
.. autoattribute:: priority
Lexers included in Pygments should have two additional attributes:
.. autoattribute:: url
:no-value:
.. autoattribute:: version_added
:no-value:
Lexers included in Pygments may have additional attributes:
.. autoattribute:: _example
:no-value:
You can pass options to the constructor. The basic options recognized
by all lexers and processed by the base `Lexer` class are:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
.. versionadded:: 1.3
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
Latin1 detection. Can also be ``'chardet'`` to use the chardet
library, if it is installed.
``inencoding``
Overrides the ``encoding`` if given.
"""
#: Full name of the lexer, in human-readable form
name = None
#: A list of short, unique identifiers that can be used to look
#: up the lexer from a list, e.g., using `get_lexer_by_name()`.
aliases = []
#: A list of `fnmatch` patterns that match filenames which contain
#: content for this lexer. The patterns in this list should be unique among
#: all lexers.
filenames = []
#: A list of `fnmatch` patterns that match filenames which may or may not
#: contain content for this lexer. This list is used by the
#: :func:`.guess_lexer_for_filename()` function, to determine which lexers
#: are then included in guessing the correct one. That means that
#: e.g. every lexer for HTML and a template language should include
#: ``\*.html`` in this list.
alias_filenames = []
#: A list of MIME types for content that can be lexed with this lexer.
mimetypes = []
#: Priority, should multiple lexers match and no content is provided
priority = 0
#: URL of the language specification/definition. Used in the Pygments
#: documentation. Set to an empty string to disable.
url = None
#: Version of Pygments in which the lexer was added.
version_added = None
#: Example file name. Relative to the ``tests/examplefiles`` directory.
#: This is used by the documentation generator to show an example.
_example = None
def __init__(self, **options):
"""
This constructor takes arbitrary options as keyword arguments.
Every subclass must first process its own options and then call
the `Lexer` constructor, since it processes the basic
options like `stripnl`.
An example looks like this:
.. sourcecode:: python
def __init__(self, **options):
self.compress = options.get('compress', '')
Lexer.__init__(self, **options)
As these options must all be specifiable as strings (due to the
command line usage), there are various utility functions
available to help with that, see `Utilities`_.
"""
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'guess')
self.encoding = options.get('inencoding') or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return f'<pygments.lexers.{self.__class__.__name__} with {self.options!r}>'
else:
return f'<pygments.lexers.{self.__class__.__name__}>'
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
A static method which is called for lexer guessing.
It should analyse the text and return a float in the range
from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
will not be selected as the most probable one, if it returns
``1.0``, it will be selected immediately. This is used by
`guess_lexer`.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
it's the same as if the return values was ``0.0``.
"""
def _preprocess_lexer_input(self, text):
"""Apply preprocessing such as decoding the input, removing BOM and normalizing newlines."""
if not isinstance(text, str):
if self.encoding == 'guess':
text, _ = guess_decode(text)
elif self.encoding == 'chardet':
try:
# pip vendoring note: this code is not reachable by pip,
# removed import of chardet to make it clear.
raise ImportError('chardet is not vendored by pip')
except ImportError as e:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/') from e
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
decoded = text[len(bom):].decode(encoding, 'replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = text.decode(enc.get('encoding') or 'utf-8',
'replace')
text = decoded
else:
text = text.decode(self.encoding)
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
else:
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
return text
def get_tokens(self, text, unfiltered=False):
"""
This method is the basic interface of a lexer. It is called by
the `highlight()` function. It must process the text and return an
iterable of ``(tokentype, value)`` pairs from `text`.
Normally, you don't need to override this method. The default
implementation processes the options recognized by all lexers
(`stripnl`, `stripall` and so on), and then yields all tokens
from `get_tokens_unprocessed()`, with the ``index`` dropped.
If `unfiltered` is set to `True`, the filtering mechanism is
bypassed even if filters are defined.
"""
text = self._preprocess_lexer_input(text)
def streamer():
for _, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
This method should process the text and return an iterable of
``(index, tokentype, value)`` tuples where ``index`` is the starting
position of the token within the input text.
It must be overridden by subclasses. It is recommended to
implement it as a generator to maximize effectiveness.
"""
raise NotImplementedError
| Lexer |
python | instagram__MonkeyType | monkeytype/db/sqlite.py | {
"start": 1927,
"end": 3753
} | class ____(CallTraceStore):
def __init__(self, conn: sqlite3.Connection, table: str = DEFAULT_TABLE) -> None:
self.conn = conn
self.table = table
@classmethod
def make_store(cls, connection_string: str) -> "CallTraceStore":
conn = sqlite3.connect(connection_string)
create_call_trace_table(conn)
return cls(conn)
def add(self, traces: Iterable[CallTrace]) -> None:
values = []
for row in serialize_traces(traces):
values.append(
(
datetime.datetime.now(),
row.module,
row.qualname,
row.arg_types,
row.return_type,
row.yield_type,
)
)
with self.conn:
self.conn.executemany(
"INSERT INTO {table} VALUES (?, ?, ?, ?, ?, ?)".format(
table=self.table
),
values,
)
def filter(
self, module: str, qualname_prefix: Optional[str] = None, limit: int = 2000
) -> List[CallTraceThunk]:
sql_query, values = make_query(self.table, module, qualname_prefix, limit)
with self.conn:
cur = self.conn.cursor()
cur.execute(sql_query, values)
return [CallTraceRow(*row) for row in cur.fetchall()]
def list_modules(self) -> List[str]:
with self.conn:
cur = self.conn.cursor()
cur.execute(
"""
SELECT module FROM {table}
GROUP BY module
ORDER BY date(created_at) DESC
""".format(table=self.table)
)
return [row[0] for row in cur.fetchall() if row[0]]
| SQLiteStore |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_sqs.py | {
"start": 1081,
"end": 1259
} | class ____:
@mock_aws
def test_get_conn(self):
hook = SqsHook(aws_conn_id="aws_default")
assert hook.get_conn() is not None
@pytest.mark.asyncio
| TestSqsHook |
python | great-expectations__great_expectations | scripts/gen_stub.py | {
"start": 508,
"end": 3948
} | class ____(Protocol):
__signature__: Signature
__call__: Callable
def _print_method( # noqa: C901, PLR0912
method: _Callable,
method_name: str | None = None,
default_override: str = "...",
return_type_override: str = "",
):
if method_name:
print(f"def {method_name}(")
signature: Signature = method.__signature__
for name, param in signature.parameters.items():
# ignore kwargs
if param.kind == Parameter.VAR_KEYWORD:
continue
annotation = param.annotation
if isinstance(annotation, ForwardRef):
annotation = annotation.__forward_arg__
elif getattr(annotation, "__name__", None):
annotation = annotation.__name__
if name in ["self"]:
print(f"\t{name}", end="")
else:
print(f"\t{name}: {annotation}", end="")
if param.kind == Parameter.KEYWORD_ONLY:
if default_override:
default = default_override
elif param.default is Parameter.empty:
default = "..."
else:
default = param.default
if isinstance(default, str):
default = f"'{default}'"
print(f" = {default}", end="")
print(",")
if return_type_override:
return_type = return_type_override
else:
return_type = getattr(signature.return_annotation, "__name__", signature.return_annotation)
print(f") -> {return_type}:\n\t...")
def print_add_asset_method_signatures(
datasource_class: Type[Datasource],
method_name_template_str: str = "add_{0}_asset",
default_override: str = "...",
):
"""
Prints out all of the asset methods for a given datasource in a format that be used
for defining methods in stub files.
"""
type_lookup: TypeLookup = datasource_class._type_lookup
for asset_type_name in type_lookup.type_names():
asset_type = type_lookup[asset_type_name]
method_name = method_name_template_str.format(asset_type_name)
method = getattr(datasource_class, method_name)
_print_method(
method,
method_name=method_name,
default_override=default_override,
return_type_override=asset_type.__name__,
)
def print_datasource_crud_signatures(
source_factories: DataSourceManager,
method_name_templates: tuple[str, ...] = (
"add_{0}",
"update_{0}",
"add_or_update_{0}",
"delete_{0}",
),
default_override: str = "...",
):
"""
Prints out all of the CRUD methods for a given datasource in a format that be used
for defining methods in stub files.
"""
datasource_type_lookup = source_factories.type_lookup
for datasource_name in datasource_type_lookup.type_names():
for method_name_tmplt in method_name_templates:
method_name = method_name_tmplt.format(datasource_name)
_print_method(
getattr(source_factories, method_name),
method_name=method_name,
default_override=default_override,
)
if __name__ == "__main__":
# replace the provided dataclass as needed
print_add_asset_method_signatures(PandasFilesystemDatasource)
print_datasource_crud_signatures(
source_factories=DataSourceManager("dummy_context"), # type: ignore[arg-type]
)
| _Callable |
python | conda__conda | conda/core/path_actions.py | {
"start": 5421,
"end": 5558
} | class ____(Action, metaclass=ABCMeta):
@abstractproperty
def target_full_path(self):
raise NotImplementedError()
| PathAction |
python | conda__conda | conda/auxlib/entity.py | {
"start": 17571,
"end": 17642
} | class ____(Field):
_type = int
IntField = IntegerField
| IntegerField |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 979130,
"end": 979870
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for SponsorsTier."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("SponsorsTierEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("SponsorsTier"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| SponsorsTierConnection |
python | redis__redis-py | redis/_parsers/encoders.py | {
"start": 37,
"end": 1734
} | class ____:
"Encode strings to bytes-like and decode bytes-like to strings"
__slots__ = "encoding", "encoding_errors", "decode_responses"
def __init__(self, encoding, encoding_errors, decode_responses):
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
def encode(self, value):
"Return a bytestring or bytes-like representation of the value"
if isinstance(value, (bytes, memoryview)):
return value
elif isinstance(value, bool):
# special case bool since it is a subclass of int
raise DataError(
"Invalid input of type: 'bool'. Convert to a "
"bytes, string, int or float first."
)
elif isinstance(value, (int, float)):
value = repr(value).encode()
elif not isinstance(value, str):
# a value we don't know how to deal with. throw an error
typename = type(value).__name__
raise DataError(
f"Invalid input of type: '{typename}'. "
f"Convert to a bytes, string, int or float first."
)
if isinstance(value, str):
value = value.encode(self.encoding, self.encoding_errors)
return value
def decode(self, value, force=False):
"Return a unicode string from the bytes-like representation"
if self.decode_responses or force:
if isinstance(value, memoryview):
value = value.tobytes()
if isinstance(value, bytes):
value = value.decode(self.encoding, self.encoding_errors)
return value
| Encoder |
python | python-pillow__Pillow | Tests/test_image.py | {
"start": 903,
"end": 37986
} | class ____:
@pytest.mark.parametrize("mode", Image.MODES)
def test_image_modes_success(self, mode: str) -> None:
Image.new(mode, (1, 1))
@pytest.mark.parametrize("mode", ("", "bad", "very very long"))
def test_image_modes_fail(self, mode: str) -> None:
with pytest.raises(ValueError, match="unrecognized image mode"):
Image.new(mode, (1, 1))
def test_exception_inheritance(self) -> None:
assert issubclass(UnidentifiedImageError, OSError)
def test_sanity(self) -> None:
im = Image.new("L", (100, 100))
assert repr(im).startswith("<PIL.Image.Image image mode=L size=100x100 at")
assert im.mode == "L"
assert im.size == (100, 100)
im = Image.new("RGB", (100, 100))
assert repr(im).startswith("<PIL.Image.Image image mode=RGB size=100x100 ")
assert im.mode == "RGB"
assert im.size == (100, 100)
Image.new("L", (100, 100), None)
im2 = Image.new("L", (100, 100), 0)
im3 = Image.new("L", (100, 100), "black")
assert im2.getcolors() == [(10000, 0)]
assert im3.getcolors() == [(10000, 0)]
with pytest.raises(ValueError):
Image.new("X", (100, 100))
with pytest.raises(ValueError):
Image.new("", (100, 100))
# with pytest.raises(MemoryError):
# Image.new("L", (1000000, 1000000))
@pytest.mark.skipif(PrettyPrinter is None, reason="IPython is not installed")
def test_repr_pretty(self) -> None:
im = Image.new("L", (100, 100))
output = io.StringIO()
assert PrettyPrinter is not None
p = PrettyPrinter(output)
im._repr_pretty_(p, False)
assert output.getvalue() == "<PIL.Image.Image image mode=L size=100x100>"
def test_open_formats(self) -> None:
PNGFILE = "Tests/images/hopper.png"
JPGFILE = "Tests/images/hopper.jpg"
with pytest.raises(TypeError):
with Image.open(PNGFILE, formats=123): # type: ignore[arg-type]
pass
format_list: list[list[str] | tuple[str, ...]] = [
["JPEG"],
("JPEG",),
["jpeg"],
["Jpeg"],
["jPeG"],
["JpEg"],
]
for formats in format_list:
with pytest.raises(UnidentifiedImageError):
with Image.open(PNGFILE, formats=formats):
pass
with Image.open(JPGFILE, formats=formats) as im:
assert im.mode == "RGB"
assert im.size == (128, 128)
for file in [PNGFILE, JPGFILE]:
with Image.open(file, formats=None) as im:
assert im.mode == "RGB"
assert im.size == (128, 128)
def test_open_verbose_failure(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(Image, "WARN_POSSIBLE_FORMATS", True)
im = io.BytesIO(b"")
with pytest.raises(UnidentifiedImageError):
with pytest.warns(UserWarning, match="opening failed"):
with Image.open(im):
pass
def test_width_height(self) -> None:
im = Image.new("RGB", (1, 2))
assert im.width == 1
assert im.height == 2
with pytest.raises(AttributeError):
im.size = (3, 4) # type: ignore[misc]
def test_set_mode(self) -> None:
im = Image.new("RGB", (1, 1))
with pytest.raises(AttributeError):
im.mode = "P" # type: ignore[misc]
def test_empty_path(self) -> None:
with pytest.raises(FileNotFoundError):
Image.open("")
def test_invalid_image(self) -> None:
im = io.BytesIO(b"")
with pytest.raises(UnidentifiedImageError):
with Image.open(im):
pass
def test_bad_mode(self) -> None:
with pytest.raises(ValueError):
with Image.open("filename", "bad mode"): # type: ignore[arg-type]
pass
def test_stringio(self) -> None:
with pytest.raises(ValueError):
with Image.open(io.StringIO()): # type: ignore[arg-type]
pass
def test_string(self, tmp_path: Path) -> None:
out = str(tmp_path / "temp.png")
im = hopper()
im.save(out)
with Image.open(out) as reloaded:
assert_image_equal(im, reloaded)
def test_pathlib(self, tmp_path: Path) -> None:
with Image.open(Path("Tests/images/multipage-mmap.tiff")) as im:
assert im.mode == "P"
assert im.size == (10, 10)
with Image.open(Path("Tests/images/hopper.jpg")) as im:
assert im.mode == "RGB"
assert im.size == (128, 128)
for ext in (".jpg", ".jp2"):
if ext == ".jp2" and not features.check_codec("jpg_2000"):
pytest.skip("jpg_2000 not available")
im.save(tmp_path / ("temp." + ext))
def test_fp_name(self, tmp_path: Path) -> None:
temp_file = tmp_path / "temp.jpg"
class FP(io.BytesIO):
name: Path
if sys.version_info >= (3, 12):
from collections.abc import Buffer
def write(self, data: Buffer) -> int:
return len(data)
else:
def write(self, data: Any) -> int:
return len(data)
fp = FP()
fp.name = temp_file
im = hopper()
im.save(fp)
def test_tempfile(self) -> None:
# see #1460, pathlib support breaks tempfile.TemporaryFile on py27
# Will error out on save on 3.0.0
im = hopper()
with tempfile.TemporaryFile() as fp:
im.save(fp, "JPEG")
fp.seek(0)
with Image.open(fp) as reloaded:
assert_image_similar(im, reloaded, 20)
def test_unknown_extension(self, tmp_path: Path) -> None:
temp_file = tmp_path / "temp.unknown"
with hopper() as im:
with pytest.raises(ValueError):
im.save(temp_file)
def test_internals(self) -> None:
im = Image.new("L", (100, 100))
im.readonly = 1
im._copy()
assert not im.readonly
im.readonly = 1
im.paste(0, (0, 0, 100, 100))
assert not im.readonly
@pytest.mark.skipif(is_win32(), reason="Test requires opening tempfile twice")
@pytest.mark.skipif(
sys.platform == "cygwin",
reason="Test requires opening an mmaped file for writing",
)
def test_readonly_save(self, tmp_path: Path) -> None:
temp_file = tmp_path / "temp.bmp"
shutil.copy("Tests/images/rgb32bf-rgba.bmp", temp_file)
with Image.open(temp_file) as im:
assert im.readonly
im.save(temp_file)
def test_save_without_changing_readonly(self, tmp_path: Path) -> None:
temp_file = tmp_path / "temp.bmp"
with Image.open("Tests/images/rgb32bf-rgba.bmp") as im:
assert im.readonly
im.save(temp_file)
assert im.readonly
def test_dump(self, tmp_path: Path) -> None:
im = Image.new("L", (10, 10))
im._dump(str(tmp_path / "temp_L.ppm"))
im = Image.new("RGB", (10, 10))
im._dump(str(tmp_path / "temp_RGB.ppm"))
im = Image.new("HSV", (10, 10))
with pytest.raises(ValueError):
im._dump(str(tmp_path / "temp_HSV.ppm"))
def test_comparison_with_other_type(self) -> None:
# Arrange
item = Image.new("RGB", (25, 25), "#000")
num = 12
# Act/Assert
# Shouldn't cause AttributeError (#774)
assert item is not None
assert item != num
def test_getbands(self) -> None:
# Assert
assert hopper("RGB").getbands() == ("R", "G", "B")
assert hopper("YCbCr").getbands() == ("Y", "Cb", "Cr")
def test_getchannel_wrong_params(self) -> None:
im = hopper()
with pytest.raises(ValueError):
im.getchannel(-1)
with pytest.raises(ValueError):
im.getchannel(3)
with pytest.raises(ValueError):
im.getchannel("Z")
with pytest.raises(ValueError):
im.getchannel("1")
def test_getchannel(self) -> None:
im = hopper("YCbCr")
Y, Cb, Cr = im.split()
assert_image_equal(Y, im.getchannel(0))
assert_image_equal(Y, im.getchannel("Y"))
assert_image_equal(Cb, im.getchannel(1))
assert_image_equal(Cb, im.getchannel("Cb"))
assert_image_equal(Cr, im.getchannel(2))
assert_image_equal(Cr, im.getchannel("Cr"))
def test_getbbox(self) -> None:
# Arrange
im = hopper()
# Act
bbox = im.getbbox()
# Assert
assert bbox == (0, 0, 128, 128)
def test_ne(self) -> None:
# Arrange
im1 = Image.new("RGB", (25, 25), "black")
im2 = Image.new("RGB", (25, 25), "white")
# Act / Assert
assert im1 != im2
def test_alpha_composite(self) -> None:
# https://stackoverflow.com/questions/3374878
# Arrange
expected_colors = sorted(
[
(1122, (128, 127, 0, 255)),
(1089, (0, 255, 0, 255)),
(3300, (255, 0, 0, 255)),
(1156, (170, 85, 0, 192)),
(1122, (0, 255, 0, 128)),
(1122, (255, 0, 0, 128)),
(1089, (0, 255, 0, 0)),
]
)
dst = Image.new("RGBA", size=(100, 100), color=(0, 255, 0, 255))
draw = ImageDraw.Draw(dst)
draw.rectangle((0, 33, 100, 66), fill=(0, 255, 0, 128))
draw.rectangle((0, 67, 100, 100), fill=(0, 255, 0, 0))
src = Image.new("RGBA", size=(100, 100), color=(255, 0, 0, 255))
draw = ImageDraw.Draw(src)
draw.rectangle((33, 0, 66, 100), fill=(255, 0, 0, 128))
draw.rectangle((67, 0, 100, 100), fill=(255, 0, 0, 0))
# Act
img = Image.alpha_composite(dst, src)
# Assert
img_colors = img.getcolors()
assert img_colors is not None
assert sorted(img_colors) == expected_colors
def test_alpha_composite_la(self) -> None:
# Arrange
expected_colors = sorted(
[
(3300, (255, 255)),
(1156, (170, 192)),
(1122, (128, 255)),
(1089, (0, 0)),
(1122, (255, 128)),
(1122, (0, 128)),
(1089, (0, 255)),
]
)
dst = Image.new("LA", size=(100, 100), color=(0, 255))
draw = ImageDraw.Draw(dst)
draw.rectangle((0, 33, 100, 66), fill=(0, 128))
draw.rectangle((0, 67, 100, 100), fill=(0, 0))
src = Image.new("LA", size=(100, 100), color=(255, 255))
draw = ImageDraw.Draw(src)
draw.rectangle((33, 0, 66, 100), fill=(255, 128))
draw.rectangle((67, 0, 100, 100), fill=(255, 0))
# Act
img = Image.alpha_composite(dst, src)
# Assert
img_colors = img.getcolors()
assert img_colors is not None
assert sorted(img_colors) == expected_colors
def test_alpha_inplace(self) -> None:
src = Image.new("RGBA", (128, 128), "blue")
over = Image.new("RGBA", (128, 128), "red")
mask = hopper("L")
over.putalpha(mask)
target = Image.alpha_composite(src, over)
# basic
full = src.copy()
full.alpha_composite(over)
assert_image_equal(full, target)
# with offset down to right
offset = src.copy()
offset.alpha_composite(over, (64, 64))
assert_image_equal(offset.crop((64, 64, 127, 127)), target.crop((0, 0, 63, 63)))
assert offset.size == (128, 128)
# with negative offset
offset = src.copy()
offset.alpha_composite(over, (-64, -64))
assert_image_equal(offset.crop((0, 0, 63, 63)), target.crop((64, 64, 127, 127)))
assert offset.size == (128, 128)
# offset and crop
box = src.copy()
box.alpha_composite(over, (64, 64), (0, 0, 32, 32))
assert_image_equal(box.crop((64, 64, 96, 96)), target.crop((0, 0, 32, 32)))
assert_image_equal(box.crop((96, 96, 128, 128)), src.crop((0, 0, 32, 32)))
assert box.size == (128, 128)
# source point
source = src.copy()
source.alpha_composite(over, (32, 32), (32, 32, 96, 96))
assert_image_equal(source.crop((32, 32, 96, 96)), target.crop((32, 32, 96, 96)))
assert source.size == (128, 128)
# errors
with pytest.raises(ValueError):
source.alpha_composite(over, "invalid destination") # type: ignore[arg-type]
with pytest.raises(ValueError):
source.alpha_composite(over, (0, 0), "invalid source") # type: ignore[arg-type]
with pytest.raises(ValueError):
source.alpha_composite(over, 0) # type: ignore[arg-type]
with pytest.raises(ValueError):
source.alpha_composite(over, (0, 0), 0) # type: ignore[arg-type]
with pytest.raises(ValueError):
source.alpha_composite(over, (0, 0), (0, -1))
def test_register_open_duplicates(self) -> None:
# Arrange
factory, accept = Image.OPEN["JPEG"]
id_length = len(Image.ID)
# Act
Image.register_open("JPEG", factory, accept)
# Assert
assert len(Image.ID) == id_length
def test_registered_extensions_uninitialized(self) -> None:
# Arrange
Image._initialized = 0
# Act
Image.registered_extensions()
# Assert
assert Image._initialized == 2
def test_registered_extensions(self) -> None:
# Arrange
# Open an image to trigger plugin registration
with Image.open("Tests/images/rgb.jpg"):
pass
# Act
extensions = Image.registered_extensions()
# Assert
assert extensions
for ext in [".cur", ".icns", ".tif", ".tiff"]:
assert ext in extensions
def test_effect_mandelbrot(self) -> None:
# Arrange
size = (512, 512)
extent = (-3, -2.5, 2, 2.5)
quality = 100
# Act
im = Image.effect_mandelbrot(size, extent, quality)
# Assert
assert im.size == (512, 512)
assert_image_equal_tofile(im, "Tests/images/effect_mandelbrot.png")
def test_effect_mandelbrot_bad_arguments(self) -> None:
# Arrange
size = (512, 512)
# Get coordinates the wrong way round:
extent = (+3, +2.5, -2, -2.5)
# Quality < 2:
quality = 1
# Act/Assert
with pytest.raises(ValueError):
Image.effect_mandelbrot(size, extent, quality)
def test_effect_noise(self) -> None:
# Arrange
size = (100, 100)
sigma = 128
# Act
im = Image.effect_noise(size, sigma)
# Assert
assert im.size == (100, 100)
assert im.mode == "L"
p0 = im.getpixel((0, 0))
p1 = im.getpixel((0, 1))
p2 = im.getpixel((0, 2))
p3 = im.getpixel((0, 3))
p4 = im.getpixel((0, 4))
assert_not_all_same([p0, p1, p2, p3, p4])
def test_effect_spread(self) -> None:
# Arrange
im = hopper()
distance = 10
# Act
im2 = im.effect_spread(distance)
# Assert
assert im.size == (128, 128)
assert_image_similar_tofile(im2, "Tests/images/effect_spread.png", 110)
def test_effect_spread_zero(self) -> None:
# Arrange
im = hopper()
distance = 0
# Act
im2 = im.effect_spread(distance)
# Assert
assert_image_equal(im, im2)
def test_check_size(self) -> None:
# Checking that the _check_size function throws value errors when we want it to
with pytest.raises(ValueError):
# not a tuple
Image.new("RGB", 0) # type: ignore[arg-type]
with pytest.raises(ValueError):
# tuple too short
Image.new("RGB", (0,)) # type: ignore[arg-type]
with pytest.raises(ValueError):
Image.new("RGB", (-1, -1)) # w,h < 0
# this should pass with 0 sized images, #2259
im = Image.new("L", (0, 0))
assert im.size == (0, 0)
im = Image.new("L", (0, 100))
assert im.size == (0, 100)
im = Image.new("L", (100, 0))
assert im.size == (100, 0)
assert Image.new("RGB", (1, 1))
# Should pass lists too
i = Image.new("RGB", [1, 1])
assert isinstance(i.size, tuple)
@timeout_unless_slower_valgrind(0.75)
@pytest.mark.parametrize("size", ((0, 100000000), (100000000, 0)))
def test_empty_image(self, size: tuple[int, int]) -> None:
Image.new("RGB", size)
def test_storage_neg(self) -> None:
# Storage.c accepted negative values for xsize, ysize. Was
# test_neg_ppm, but the core function for that has been
# removed Calling directly into core to test the error in
# Storage.c, rather than the size check above
with pytest.raises(ValueError):
Image.core.fill("RGB", (2, -2), (0, 0, 0))
def test_one_item_tuple(self) -> None:
for mode in ("I", "F", "L"):
im = Image.new(mode, (100, 100), (5,))
assert im.getpixel((0, 0)) == 5
def test_linear_gradient_wrong_mode(self) -> None:
# Arrange
wrong_mode = "RGB"
# Act / Assert
with pytest.raises(ValueError):
Image.linear_gradient(wrong_mode)
@pytest.mark.parametrize("mode", ("L", "P", "I", "F"))
def test_linear_gradient(self, mode: str) -> None:
# Arrange
target_file = "Tests/images/linear_gradient.png"
# Act
im = Image.linear_gradient(mode)
# Assert
assert im.size == (256, 256)
assert im.mode == mode
assert im.getpixel((0, 0)) == 0
assert im.getpixel((255, 255)) == 255
with Image.open(target_file) as target:
target = target.convert(mode)
assert_image_equal(im, target)
def test_radial_gradient_wrong_mode(self) -> None:
# Arrange
wrong_mode = "RGB"
# Act / Assert
with pytest.raises(ValueError):
Image.radial_gradient(wrong_mode)
@pytest.mark.parametrize("mode", ("L", "P", "I", "F"))
def test_radial_gradient(self, mode: str) -> None:
# Arrange
target_file = "Tests/images/radial_gradient.png"
# Act
im = Image.radial_gradient(mode)
# Assert
assert im.size == (256, 256)
assert im.mode == mode
assert im.getpixel((0, 0)) == 255
assert im.getpixel((128, 128)) == 0
with Image.open(target_file) as target:
target = target.convert(mode)
assert_image_equal(im, target)
def test_register_extensions(self) -> None:
test_format = "a"
exts = ["b", "c"]
for ext in exts:
Image.register_extension(test_format, ext)
ext_individual = Image.EXTENSION.copy()
for ext in exts:
del Image.EXTENSION[ext]
Image.register_extensions(test_format, exts)
ext_multiple = Image.EXTENSION.copy()
for ext in exts:
del Image.EXTENSION[ext]
assert ext_individual == ext_multiple
def test_remap_palette(self) -> None:
# Test identity transform
with Image.open("Tests/images/hopper.gif") as im:
assert_image_equal(im, im.remap_palette(list(range(256))))
# Test identity transform with an RGBA palette
im = Image.new("P", (256, 1))
for x in range(256):
im.putpixel((x, 0), x)
im.putpalette(list(range(256)) * 4, "RGBA")
im_remapped = im.remap_palette(list(range(256)))
assert_image_equal(im, im_remapped)
assert im.palette is not None
assert im_remapped.palette is not None
assert im.palette.palette == im_remapped.palette.palette
# Test illegal image mode
with hopper() as im:
with pytest.raises(ValueError):
im.remap_palette([])
def test_remap_palette_transparency(self) -> None:
im = Image.new("P", (1, 2), (0, 0, 0))
im.putpixel((0, 1), (255, 0, 0))
im.info["transparency"] = 0
im_remapped = im.remap_palette([1, 0])
assert im_remapped.info["transparency"] == 1
palette = im_remapped.getpalette()
assert palette is not None
assert len(palette) == 6
# Test unused transparency
im.info["transparency"] = 2
im_remapped = im.remap_palette([1, 0])
assert "transparency" not in im_remapped.info
def test__new(self) -> None:
im = hopper("RGB")
im_p = hopper("P")
blank_p = Image.new("P", (10, 10))
blank_pa = Image.new("PA", (10, 10))
blank_p.palette = None
blank_pa.palette = None
def _make_new(
base_image: Image.Image,
image: Image.Image,
palette_result: ImagePalette.ImagePalette | None = None,
) -> None:
new_image = base_image._new(image.im)
assert new_image.mode == image.mode
assert new_image.size == image.size
assert new_image.info == base_image.info
if palette_result is not None:
assert new_image.palette is not None
assert new_image.palette.tobytes() == palette_result.tobytes()
else:
assert new_image.palette is None
_make_new(im, im_p, ImagePalette.ImagePalette("RGB"))
_make_new(im_p, im, None)
_make_new(im, blank_p, ImagePalette.ImagePalette())
_make_new(im, blank_pa, ImagePalette.ImagePalette())
@pytest.mark.parametrize(
"mode, color",
(
("RGB", "#DDEEFF"),
("RGB", (221, 238, 255)),
("RGBA", (221, 238, 255, 255)),
),
)
def test_p_from_rgb_rgba(self, mode: str, color: str | tuple[int, ...]) -> None:
im = Image.new("P", (100, 100), color)
expected = Image.new(mode, (100, 100), color)
assert_image_equal(im.convert(mode), expected)
def test_no_resource_warning_on_save(self, tmp_path: Path) -> None:
# https://github.com/python-pillow/Pillow/issues/835
# Arrange
test_file = "Tests/images/hopper.png"
temp_file = tmp_path / "temp.jpg"
# Act/Assert
with Image.open(test_file) as im:
with warnings.catch_warnings():
warnings.simplefilter("error")
im.save(temp_file)
def test_no_new_file_on_error(self, tmp_path: Path) -> None:
temp_file = tmp_path / "temp.jpg"
im = Image.new("RGB", (0, 0))
with pytest.raises(ValueError):
im.save(temp_file)
assert not os.path.exists(temp_file)
def test_load_on_nonexclusive_multiframe(self) -> None:
with open("Tests/images/frozenpond.mpo", "rb") as fp:
def act(fp: IO[bytes]) -> None:
im = Image.open(fp)
im.load()
act(fp)
with Image.open(fp) as im:
im.load()
assert not fp.closed
def test_empty_exif(self) -> None:
with Image.open("Tests/images/exif.png") as im:
exif = im.getexif()
assert dict(exif)
# Test that exif data is cleared after another load
exif.load(b"")
assert not dict(exif)
# Test loading just the EXIF header
exif.load(b"Exif\x00\x00")
assert not dict(exif)
def test_duplicate_exif_header(self) -> None:
with Image.open("Tests/images/exif.png") as im:
im.load()
im.info["exif"] = b"Exif\x00\x00" + im.info["exif"]
exif = im.getexif()
assert exif[274] == 1
def test_empty_get_ifd(self) -> None:
exif = Image.Exif()
ifd = exif.get_ifd(0x8769)
assert ifd == {}
ifd[36864] = b"0220"
assert exif.get_ifd(0x8769) == {36864: b"0220"}
reloaded_exif = Image.Exif()
reloaded_exif.load(exif.tobytes())
assert reloaded_exif.get_ifd(0x8769) == {36864: b"0220"}
@mark_if_feature_version(
pytest.mark.valgrind_known_error, "libjpeg_turbo", "2.0", reason="Known Failing"
)
def test_exif_jpeg(self, tmp_path: Path) -> None:
with Image.open("Tests/images/exif-72dpi-int.jpg") as im: # Little endian
exif = im.getexif()
assert 258 not in exif
assert 274 in exif
assert 282 in exif
assert exif[296] == 2
assert exif[11] == "gThumb 3.0.1"
out = tmp_path / "temp.jpg"
exif[258] = 8
del exif[274]
del exif[282]
exif[296] = 455
exif[11] = "Pillow test"
im.save(out, exif=exif)
with Image.open(out) as reloaded:
reloaded_exif = reloaded.getexif()
assert reloaded_exif[258] == 8
assert 274 not in reloaded_exif
assert 282 not in reloaded_exif
assert reloaded_exif[296] == 455
assert reloaded_exif[11] == "Pillow test"
with Image.open("Tests/images/no-dpi-in-exif.jpg") as im: # Big endian
exif = im.getexif()
assert 258 not in exif
assert 306 in exif
assert exif[274] == 1
assert exif[305] == "Adobe Photoshop CC 2017 (Macintosh)"
out = tmp_path / "temp.jpg"
exif[258] = 8
del exif[306]
exif[274] = 455
exif[305] = "Pillow test"
im.save(out, exif=exif)
with Image.open(out) as reloaded:
reloaded_exif = reloaded.getexif()
assert reloaded_exif[258] == 8
assert 306 not in reloaded_exif
assert reloaded_exif[274] == 455
assert reloaded_exif[305] == "Pillow test"
@skip_unless_feature("webp")
def test_exif_webp(self, tmp_path: Path) -> None:
with Image.open("Tests/images/hopper.webp") as im:
exif = im.getexif()
assert exif == {}
out = tmp_path / "temp.webp"
exif[258] = 8
exif[40963] = 455
exif[305] = "Pillow test"
def check_exif() -> None:
with Image.open(out) as reloaded:
reloaded_exif = reloaded.getexif()
assert reloaded_exif[258] == 8
assert reloaded_exif[40963] == 455
assert reloaded_exif[305] == "Pillow test"
im.save(out, exif=exif)
check_exif()
im.save(out, exif=exif, save_all=True)
check_exif()
def test_exif_png(self, tmp_path: Path) -> None:
with Image.open("Tests/images/exif.png") as im:
exif = im.getexif()
assert exif == {274: 1}
out = tmp_path / "temp.png"
exif[258] = 8
del exif[274]
exif[40963] = 455
exif[305] = "Pillow test"
im.save(out, exif=exif)
with Image.open(out) as reloaded:
reloaded_exif = reloaded.getexif()
assert reloaded_exif == {258: 8, 40963: 455, 305: "Pillow test"}
def test_exif_interop(self) -> None:
with Image.open("Tests/images/flower.jpg") as im:
exif = im.getexif()
assert exif.get_ifd(0xA005) == {
1: "R98",
2: b"0100",
4097: 2272,
4098: 1704,
}
reloaded_exif = Image.Exif()
reloaded_exif.load(exif.tobytes())
assert reloaded_exif.get_ifd(0xA005) == exif.get_ifd(0xA005)
def test_exif_ifd1(self) -> None:
with Image.open("Tests/images/flower.jpg") as im:
exif = im.getexif()
assert exif.get_ifd(ExifTags.IFD.IFD1) == {
513: 2036,
514: 5448,
259: 6,
296: 2,
282: 180.0,
283: 180.0,
}
def test_exif_ifd(self) -> None:
with Image.open("Tests/images/flower.jpg") as im:
exif = im.getexif()
del exif.get_ifd(0x8769)[0xA005]
reloaded_exif = Image.Exif()
reloaded_exif.load(exif.tobytes())
assert reloaded_exif.get_ifd(0x8769) == exif.get_ifd(0x8769)
def test_delete_ifd_tag(self) -> None:
with Image.open("Tests/images/flower.jpg") as im:
exif = im.getexif()
exif.get_ifd(0x8769)
assert 0x8769 in exif
del exif[0x8769]
reloaded_exif = Image.Exif()
reloaded_exif.load(exif.tobytes())
assert 0x8769 not in reloaded_exif
def test_exif_load_from_fp(self) -> None:
with Image.open("Tests/images/flower.jpg") as im:
data = im.info["exif"]
if data.startswith(b"Exif\x00\x00"):
data = data[6:]
fp = io.BytesIO(data)
exif = Image.Exif()
exif.load_from_fp(fp)
assert exif == {
271: "Canon",
272: "Canon PowerShot S40",
274: 1,
282: 180.0,
283: 180.0,
296: 2,
306: "2003:12:14 12:01:44",
531: 1,
34665: 196,
}
def test_exif_hide_offsets(self) -> None:
with Image.open("Tests/images/flower.jpg") as im:
exif = im.getexif()
# Check offsets are present initially
assert 0x8769 in exif
for tag in (0xA005, 0x927C):
assert tag in exif.get_ifd(0x8769)
assert exif.get_ifd(0xA005)
loaded_exif = exif
with Image.open("Tests/images/flower.jpg") as im:
new_exif = im.getexif()
for exif in (loaded_exif, new_exif):
exif.hide_offsets()
# Assert they are hidden afterwards,
# but that the IFDs are still available
assert 0x8769 not in exif
assert exif.get_ifd(0x8769)
for tag in (0xA005, 0x927C):
assert tag not in exif.get_ifd(0x8769)
assert exif.get_ifd(0xA005)
def test_exif_from_xmp_bytes(self) -> None:
im = Image.new("RGB", (1, 1))
im.info["xmp"] = b'\xff tiff:Orientation="2"'
assert im.getexif()[274] == 2
def test_empty_xmp(self) -> None:
with Image.open("Tests/images/hopper.gif") as im:
if ElementTree is None:
with pytest.warns(
UserWarning,
match="XMP data cannot be read without defusedxml dependency",
):
xmp = im.getxmp()
else:
xmp = im.getxmp()
assert xmp == {}
def test_getxmp_padded(self) -> None:
im = Image.new("RGB", (1, 1))
im.info["xmp"] = (
b'<?xpacket begin="\xef\xbb\xbf" id="W5M0MpCehiHzreSzNTczkc9d"?>\n'
b'<x:xmpmeta xmlns:x="adobe:ns:meta/" />\n<?xpacket end="w"?>\x00\x00 '
)
if ElementTree is None:
with pytest.warns(
UserWarning,
match="XMP data cannot be read without defusedxml dependency",
):
assert im.getxmp() == {}
else:
assert im.getxmp() == {"xmpmeta": None}
def test_get_child_images(self) -> None:
im = Image.new("RGB", (1, 1))
with pytest.warns(DeprecationWarning, match="Image.Image.get_child_images"):
assert im.get_child_images() == []
def test_show(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(ImageShow, "_viewers", [])
im = Image.new("RGB", (1, 1))
with pytest.warns(DeprecationWarning, match="Image._show"):
Image._show(im)
@pytest.mark.parametrize("size", ((1, 0), (0, 1), (0, 0)))
def test_zero_tobytes(self, size: tuple[int, int]) -> None:
im = Image.new("RGB", size)
assert im.tobytes() == b""
@pytest.mark.parametrize("size", ((1, 0), (0, 1), (0, 0)))
def test_zero_frombytes(self, size: tuple[int, int]) -> None:
Image.frombytes("RGB", size, b"")
im = Image.new("RGB", size)
im.frombytes(b"")
def test_has_transparency_data(self) -> None:
for mode in ("1", "L", "P", "RGB"):
im = Image.new(mode, (1, 1))
assert not im.has_transparency_data
for mode in ("LA", "La", "PA", "RGBA", "RGBa"):
im = Image.new(mode, (1, 1))
assert im.has_transparency_data
# P mode with "transparency" info
with Image.open("Tests/images/first_frame_transparency.gif") as im:
assert "transparency" in im.info
assert im.has_transparency_data
# RGB mode with "transparency" info
with Image.open("Tests/images/rgb_trns.png") as im:
assert "transparency" in im.info
assert im.has_transparency_data
# P mode with RGBA palette
im = Image.new("RGBA", (1, 1)).convert("P")
assert im.mode == "P"
assert im.palette is not None
assert im.palette.mode == "RGBA"
assert im.has_transparency_data
def test_apply_transparency(self) -> None:
im = Image.new("P", (1, 1))
im.putpalette((0, 0, 0, 1, 1, 1))
assert im.palette is not None
assert im.palette.colors == {(0, 0, 0): 0, (1, 1, 1): 1}
# Test that no transformation is applied without transparency
im.apply_transparency()
assert im.palette.colors == {(0, 0, 0): 0, (1, 1, 1): 1}
# Test that a transparency index is applied
im.info["transparency"] = 0
im.apply_transparency()
assert "transparency" not in im.info
assert im.palette.colors == {(0, 0, 0, 0): 0, (1, 1, 1, 255): 1}
# Test that existing transparency is kept
im = Image.new("P", (1, 1))
im.putpalette((0, 0, 0, 255, 1, 1, 1, 128), "RGBA")
im.info["transparency"] = 0
im.apply_transparency()
assert im.palette is not None
assert im.palette.colors == {(0, 0, 0, 0): 0, (1, 1, 1, 128): 1}
# Test that transparency bytes are applied
with Image.open("Tests/images/pil123p.png") as im:
assert isinstance(im.info["transparency"], bytes)
assert im.palette is not None
assert im.palette.colors[(27, 35, 6)] == 24
im.apply_transparency()
assert im.palette is not None
assert im.palette.colors[(27, 35, 6, 214)] == 24
def test_merge_pa(self) -> None:
p = hopper("P")
a = Image.new("L", p.size)
pa = Image.merge("PA", (p, a))
assert p.getpalette() == pa.getpalette()
def test_constants(self) -> None:
for enum in (
Image.Transpose,
Image.Transform,
Image.Resampling,
Image.Dither,
Image.Palette,
Image.Quantize,
):
for name in enum.__members__:
assert getattr(Image, name) == enum[name]
@pytest.mark.parametrize(
"path",
[
"fli_overrun.bin",
"sgi_overrun.bin",
"sgi_overrun_expandrow.bin",
"sgi_overrun_expandrow2.bin",
"pcx_overrun.bin",
"pcx_overrun2.bin",
"ossfuzz-4836216264589312.pcx",
"01r_00.pcx",
],
)
def test_overrun(self, path: str) -> None:
"""For overrun completeness, test as:
valgrind pytest -qq Tests/test_image.py::TestImage::test_overrun | grep decode.c
"""
with Image.open(os.path.join("Tests/images", path)) as im:
with pytest.raises(OSError) as e:
im.load()
buffer_overrun = str(e.value) == "buffer overrun when reading image file"
truncated = "image file is truncated" in str(e.value)
assert buffer_overrun or truncated
def test_fli_overrun2(self) -> None:
with Image.open("Tests/images/fli_overrun2.bin") as im:
with pytest.raises(OSError, match="buffer overrun when reading image file"):
im.seek(1)
def test_exit_fp(self) -> None:
with Image.new("L", (1, 1)) as im:
pass
assert not hasattr(im, "fp")
def test_close_graceful(self, caplog: pytest.LogCaptureFixture) -> None:
with Image.open("Tests/images/hopper.jpg") as im:
copy = im.copy()
with caplog.at_level(logging.DEBUG):
im.close()
copy.close()
assert len(caplog.records) == 0
assert im.fp is None
| TestImage |
python | ApeWorX__ape | src/ape/utils/os.py | {
"start": 11697,
"end": 13889
} | class ____:
"""
A directory for caching data where each data item is named
``<key>.json`` and is in the directory. You can access the
items by their key like a dictionary. This type is used
in Ape's contract-caching for ContractTypes, ProxyInfoAPI,
and other model types.
"""
def __init__(self, path: Path):
if path.is_file():
raise ValueError("Expecting directory.")
self._path = path
def __getitem__(self, key: str) -> dict:
"""
Get the data from ``base_path / <key>.json``.
Returns:
The JSON dictionary
"""
return self.get_data(key)
def __setitem__(self, key: str, value: dict):
"""
Cache the given data to ``base_path / <key>.json``.
Args:
key (str): The key, used as the file name ``{key}.json``.
value (dict): The JSON dictionary to cache.
"""
self.cache_data(key, value)
def __delitem__(self, key: str):
"""
Delete the cache-file.
Args:
key (str): The file stem of the JSON.
"""
self.delete_data(key)
def get_file(self, key: str) -> Path:
return self._path / f"{key}.json"
def cache_data(self, key: str, data: dict):
json_str = json.dumps(data)
file = self.get_file(key)
file.unlink(missing_ok=True)
file.parent.mkdir(parents=True, exist_ok=True)
file.write_text(json_str)
def get_data(self, key: str) -> dict:
file = self.get_file(key)
if not file.is_file():
return {}
json_str = file.read_text(encoding="utf8")
return json.loads(json_str)
def delete_data(self, key: str):
file = self.get_file(key)
file.unlink(missing_ok=True)
@contextmanager
def within_directory(directory: Path):
"""
A context-manager for changing the cwd to the given path.
Args:
directory (Path): The directory to change.
"""
here = Path.cwd()
if directory != here:
os.chdir(directory)
try:
yield
finally:
if Path.cwd() != here:
os.chdir(here)
| CacheDirectory |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 96931,
"end": 99275
} | class ____(system_info):
section = 'Numeric'
modulename = 'Numeric'
notfounderror = NumericNotFoundError
def __init__(self):
include_dirs = []
try:
module = __import__(self.modulename)
prefix = []
for name in module.__file__.split(os.sep):
if name == 'lib':
break
prefix.append(name)
# Ask numpy for its own include path before attempting
# anything else
try:
include_dirs.append(getattr(module, 'get_include')())
except AttributeError:
pass
include_dirs.append(sysconfig.get_path('include'))
except ImportError:
pass
py_incl_dir = sysconfig.get_path('include')
include_dirs.append(py_incl_dir)
py_pincl_dir = sysconfig.get_path('platinclude')
if py_pincl_dir not in include_dirs:
include_dirs.append(py_pincl_dir)
for d in default_include_dirs:
d = os.path.join(d, os.path.basename(py_incl_dir))
if d not in include_dirs:
include_dirs.append(d)
system_info.__init__(self,
default_lib_dirs=[],
default_include_dirs=include_dirs)
def calc_info(self):
try:
module = __import__(self.modulename)
except ImportError:
return
info = {}
macros = []
for v in ['__version__', 'version']:
vrs = getattr(module, v, None)
if vrs is None:
continue
macros = [(self.modulename.upper() + '_VERSION',
_c_string_literal(vrs)),
(self.modulename.upper(), None)]
break
dict_append(info, define_macros=macros)
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
if self.combine_paths(d,
os.path.join(self.modulename,
'arrayobject.h')):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
if info:
self.set_info(**info)
return
| _numpy_info |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/asset_backfill.py | {
"start": 3465,
"end": 4355
} | class ____(
NamedTuple(
"_PartitionedAssetBackfillStatus",
[
("asset_key", AssetKey),
("num_targeted_partitions", int),
("partitions_counts_by_status", Mapping[AssetBackfillStatus, int]),
],
)
):
def __new__(
cls,
asset_key: AssetKey,
num_targeted_partitions: int,
partitions_counts_by_status: Mapping[AssetBackfillStatus, int],
):
return super().__new__(
cls,
check.inst_param(asset_key, "asset_key", AssetKey),
check.int_param(num_targeted_partitions, "num_targeted_partitions"),
check.mapping_param(
partitions_counts_by_status,
"partitions_counts_by_status",
key_type=AssetBackfillStatus,
value_type=int,
),
)
| PartitionedAssetBackfillStatus |
python | bokeh__bokeh | tests/unit/bokeh/core/test_has_props.py | {
"start": 13980,
"end": 14588
} | class ____(hp.HasProps, hp.NonQualified):
foo = Int()
def test_qualified() -> None:
class InnerQualified(hp.HasProps, hp.Qualified):
foo = Int()
class InnerNonQualified(hp.HasProps, hp.NonQualified):
foo = Int()
assert TopLevelQualified.__qualified_model__ == "test_has_props.TopLevelQualified"
assert TopLevelNonQualified.__qualified_model__ == "TopLevelNonQualified"
assert InnerQualified.__qualified_model__ == "test_has_props.test_qualified.InnerQualified"
assert InnerNonQualified.__qualified_model__ == "test_qualified.InnerNonQualified"
| TopLevelNonQualified |
python | ansible__ansible | hacking/create-bulk-issues.py | {
"start": 6402,
"end": 6551
} | class ____:
create: bool
verbose: bool
def run(self) -> None:
raise NotImplementedError()
@dataclasses.dataclass(frozen=True)
| Args |
python | kamyu104__LeetCode-Solutions | Python/number-of-subarrays-with-and-value-of-k.py | {
"start": 70,
"end": 633
} | class ____(object):
def countSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
result = 0
dp = collections.defaultdict(int)
for x in nums:
new_dp = collections.defaultdict(int)
if x&k == k:
new_dp[x] += 1
for y, c in dp.iteritems():
new_dp[y&x] += c
if k in new_dp:
result += new_dp[k]
dp = new_dp
return result
| Solution |
python | plotly__plotly.py | plotly/graph_objs/indicator/delta/_font.py | {
"start": 233,
"end": 9883
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "indicator.delta"
_path_str = "indicator.delta.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Set the font used to display the delta
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.indicator.delta.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.indicator.delta.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.delta.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | tiangolo__fastapi | tests/test_serialize_response.py | {
"start": 155,
"end": 1414
} | class ____(BaseModel):
name: str
price: Optional[float] = None
owner_ids: Optional[List[int]] = None
@app.get("/items/valid", response_model=Item)
def get_valid():
return {"name": "valid", "price": 1.0}
@app.get("/items/coerce", response_model=Item)
def get_coerce():
return {"name": "coerce", "price": "1.0"}
@app.get("/items/validlist", response_model=List[Item])
def get_validlist():
return [
{"name": "foo"},
{"name": "bar", "price": 1.0},
{"name": "baz", "price": 2.0, "owner_ids": [1, 2, 3]},
]
client = TestClient(app)
def test_valid():
response = client.get("/items/valid")
response.raise_for_status()
assert response.json() == {"name": "valid", "price": 1.0, "owner_ids": None}
def test_coerce():
response = client.get("/items/coerce")
response.raise_for_status()
assert response.json() == {"name": "coerce", "price": 1.0, "owner_ids": None}
def test_validlist():
response = client.get("/items/validlist")
response.raise_for_status()
assert response.json() == [
{"name": "foo", "price": None, "owner_ids": None},
{"name": "bar", "price": 1.0, "owner_ids": None},
{"name": "baz", "price": 2.0, "owner_ids": [1, 2, 3]},
]
| Item |
python | spack__spack | lib/spack/spack/multimethod.py | {
"start": 6298,
"end": 11484
} | class ____:
"""This is a multi-purpose class, which can be used
1. As a context manager to **group directives together** that share the same ``when=``
argument.
2. As a **decorator** for defining multi-methods (multiple methods with the same name are
defined, but the version that is called depends on the condition of the package's spec)
As a **context manager** it groups directives together. It allows you to write::
with when("+nvptx"):
conflicts("@:6", msg="NVPTX only supported from gcc 7")
conflicts("languages=ada")
conflicts("languages=brig")
instead of the more repetitive::
conflicts("@:6", when="+nvptx", msg="NVPTX only supported from gcc 7")
conflicts("languages=ada", when="+nvptx")
conflicts("languages=brig", when="+nvptx")
This context manager is composable both with nested ``when`` contexts and with other ``when=``
arguments in directives. For example::
with when("+foo"):
with when("+bar"):
depends_on("dependency", when="+baz")
is equilavent to::
depends_on("dependency", when="+foo +bar +baz")
As a **decorator**, it allows packages to declare multiple versions of methods like
``install()`` that depend on the package's spec. For example::
class SomePackage(Package):
...
def install(self, spec: Spec, prefix: Prefix):
# Do default install
@when("target=x86_64:")
def install(self, spec: Spec, prefix: Prefix):
# This will be executed instead of the default install if
# the package's target is in the x86_64 family.
@when("target=aarch64:")
def install(self, spec: Spec, prefix: Prefix):
# This will be executed if the package's target is in
# the aarch64 family
This allows each package to have a default version of ``install()`` AND
specialized versions for particular platforms. The version that is
called depends on the architecture of the instantiated package.
Note that this works for methods other than install, as well. So,
if you only have part of the install that is platform specific, you
could do this:
.. code-block:: python
class SomePackage(Package):
...
# virtual dependence on MPI.
# could resolve to mpich, mpich2, OpenMPI
depends_on("mpi")
def setup(self):
# do nothing in the default case
pass
@when("^openmpi")
def setup(self):
# do something special when this is built with OpenMPI for its MPI implementations.
pass
def install(self, prefix):
# Do common install stuff
self.setup()
# Do more common install stuff
Note that the default version of decorated methods must *always* come first. Otherwise it will
override all of the decorated versions. This is a limitation of the Python language.
"""
def __init__(self, condition: Union[str, bool]):
"""Can be used both as a decorator, for multimethods, or as a context
manager to group ``when=`` arguments together.
Args:
condition (str): condition to be met
"""
if isinstance(condition, bool):
self.spec = spack.spec.Spec() if condition else None
else:
self.spec = spack.spec.Spec(condition)
def __call__(self, method):
assert (
MultiMethodMeta._locals is not None
), "cannot use multimethod, missing MultiMethodMeta metaclass?"
# Create a multimethod with this name if there is not one already
original_method = MultiMethodMeta._locals.get(method.__name__)
if not isinstance(original_method, SpecMultiMethod):
original_method = SpecMultiMethod(original_method)
if self.spec is not None:
original_method.register(self.spec, method)
return original_method
def __enter__(self):
spack.directives_meta.DirectiveMeta.push_to_context(str(self.spec))
def __exit__(self, exc_type, exc_val, exc_tb):
spack.directives_meta.DirectiveMeta.pop_from_context()
@contextmanager
def default_args(**kwargs):
"""Context manager to override the default arguments of directives.
Example::
with default_args(type=("build", "run")):
depends_on("py-foo")
depends_on("py-bar")
depends_on("py-baz")
Notice that unlike then :func:`when` context manager, this one is *not* composable, as it
merely overrides the default argument values for the duration of the context. For example::
with default_args(when="+foo"):
depends_on("pkg-a")
depends_on("pkg-b", when="+bar")
is equivalent to::
depends_on("pkg-a", when="+foo")
depends_on("pkg-b", when="+bar")
"""
spack.directives_meta.DirectiveMeta.push_default_args(kwargs)
yield
spack.directives_meta.DirectiveMeta.pop_default_args()
| when |
python | huggingface__transformers | tests/quantization/bnb/test_4bit.py | {
"start": 23365,
"end": 25358
} | class ____(Base4bitTest):
def setUp(self):
self.model_name = "facebook/opt-350m"
super().setUp()
def test_training(self):
# Step 1: freeze all parameters
model = AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=BitsAndBytesConfig(load_in_4bit=True), revision="refs/pr/40"
)
if torch_device in ["cuda", "xpu"]:
self.assertEqual(
set(model.hf_device_map.values()), {backend_torch_accelerator_module(torch_device).current_device()}
)
else:
self.assertTrue(all(param.device.type == "cpu" for param in model.parameters()))
for param in model.parameters():
param.requires_grad = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
param.data = param.data.to(torch.float32)
# Step 2: add adapters
for _, module in model.named_modules():
if isinstance(module, OPTAttention):
module.q_proj = LoRALayer(module.q_proj, rank=16)
module.k_proj = LoRALayer(module.k_proj, rank=16)
module.v_proj = LoRALayer(module.v_proj, rank=16)
# Step 3: dummy batch
batch = self.tokenizer("Test batch ", return_tensors="pt").to(torch_device)
# Step 4: Check if the gradient is not None
with torch.autocast(torch_device):
out = model.forward(**batch)
out.logits.norm().backward()
for module in model.modules():
if isinstance(module, LoRALayer):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(module, nn.Embedding):
self.assertTrue(module.weight.grad is None)
@apply_skip_if_not_implemented
| Bnb4BitTestTraining |
python | getsentry__sentry | src/sentry/notifications/notification_action/grouptype.py | {
"start": 684,
"end": 2682
} | class ____(GroupType):
type_id = 9001
slug = "send-test-notification"
description = "Send test notification"
category = GroupCategory.TEST_NOTIFICATION.value
category_v2 = GroupCategory.TEST_NOTIFICATION.value
released = False
in_default_search = False
enable_auto_resolve = True
enable_escalation_detection = False
enable_status_change_workflow_notifications = True
creation_quota = Quota(3600, 60, 1000) # 1000 per hour, sliding window of 60 seconds
@classmethod
def allow_post_process_group(cls, organization: Organization) -> bool:
return False
@classmethod
def allow_ingest(cls, organization: Organization) -> bool:
return True
def get_test_notification_event_data(project) -> GroupEvent | None:
occurrence = IssueOccurrence(
id=uuid4().hex,
project_id=project.id,
event_id=uuid4().hex,
fingerprint=[md5(str(uuid4()).encode("utf-8")).hexdigest()],
issue_title="Test Issue",
subtitle="Test issue created to test a notification related action",
resource_id=None,
evidence_data={},
evidence_display=[],
type=SendTestNotification,
detection_time=datetime.now(UTC),
level="error",
culprit="Test notification",
)
# Load mock data
event_data = load_data(
platform=project.platform,
default="javascript",
event_id=occurrence.event_id,
)
# Setting this tag shows the sample event banner in the UI
set_tag(event_data, "sample_event", "yes")
event_data["project_id"] = occurrence.project_id
occurrence, group_info = process_event_and_issue_occurrence(occurrence.to_dict(), event_data)
if group_info is None:
return None
generic_group = group_info.group
event = fetch_event(occurrence.event_id, occurrence.project_id)
if event is None:
return None
return GroupEvent.from_event(event, generic_group)
| SendTestNotification |
python | numba__numba | numba/core/typing/npydecl.py | {
"start": 24251,
"end": 24548
} | class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
arr, = args
if isinstance(arr, types.Array):
enumerate_type = types.NumpyNdEnumerateType(arr)
return signature(enumerate_type, *args)
@infer_global(np.nditer)
| NdEnumerate |
python | django__django | tests/staticfiles_tests/test_management.py | {
"start": 1139,
"end": 2153
} | class ____(StaticFilesTestCase):
@override_settings(MIDDLEWARE=["django.middleware.common.CommonMiddleware"])
def test_middleware_loaded_only_once(self):
command = runserver.Command()
with mock.patch("django.middleware.common.CommonMiddleware") as mocked:
command.get_handler(use_static_handler=True, insecure_serving=True)
self.assertEqual(mocked.call_count, 1)
def test_404_response(self):
command = runserver.Command()
handler = command.get_handler(use_static_handler=True, insecure_serving=True)
missing_static_file = os.path.join(settings.STATIC_URL, "unknown.css")
req = RequestFactory().get(missing_static_file)
with override_settings(DEBUG=False):
response = handler.get_response(req)
self.assertEqual(response.status_code, 404)
with override_settings(DEBUG=True):
response = handler.get_response(req)
self.assertEqual(response.status_code, 404)
| TestRunserver |
python | tensorflow__tensorflow | tensorflow/compiler/tests/einsum_op_test.py | {
"start": 983,
"end": 3332
} | class ____(xla_test.XLATestCase):
"""Test cases for einsum op."""
def _testUnary(self, op, inp, expected):
"""Verifies that unary 'op' produces 'expected' when fed input 'inp'."""
with self.session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(inp.dtype), inp.shape, name='a')
output = op(pinp)
result = session.run(output, {pinp: inp})
self.assertEqual(output.dtype, expected.dtype)
self.assertAllCloseAccordingToType(
expected, result, rtol=1e-3, atol=1e-5, bfloat16_rtol=0.03)
def _testBinary(self, op, a, b, expected):
"""Verifies that binary 'op' produces 'expected' when fed 'a' and 'b'."""
with self.session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name='a')
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name='b')
output = op(pa, pb)
result = session.run(output, {pa: a, pb: b})
self.assertAllCloseAccordingToType(result, expected, rtol=1e-3)
def testMatMul(self):
for dtype in self.float_types:
self._testBinary(
lambda x, y: special_math_ops.einsum('ij,jk->ik', x, y),
np.array([[-0.25]], dtype=dtype),
np.array([[8]], dtype=dtype),
expected=np.array([[-2]], dtype=dtype))
def testImplicitForm(self):
for dtype in self.float_types:
self._testBinary(
lambda x, y: special_math_ops.einsum('ijk,kji', x, y),
np.array([[[1, 3], [2, 5], [6, 8]]], dtype=dtype),
np.array([[[1], [3], [2]], [[5], [6], [8]]], dtype=dtype),
expected=np.array(128, dtype=dtype))
def testReducedIndices(self):
for dtype in self.float_types:
self._testBinary(
lambda x, y: special_math_ops.einsum('ij,j->', x, y),
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
np.array([3, 2], dtype=dtype),
expected=np.array(59, dtype=dtype))
def testUnary(self):
for dtype in self.float_types:
self._testUnary(
lambda x: special_math_ops.einsum('ijk->kji', x),
np.array([[[1, 3], [2, 5], [6, 8]]], dtype=dtype),
expected=np.array([[[1], [2], [6]], [[3], [5], [8]]], dtype=dtype))
if __name__ == '__main__':
googletest.main()
| EinsumOpTest |
python | pytorch__pytorch | test/distributed/elastic/rendezvous/rendezvous_backend_test.py | {
"start": 527,
"end": 3411
} | class ____(ABC):
_backend: RendezvousBackend
# Type hints
assertEqual: Callable
assertNotEqual: Callable
assertIsNone: Callable
assertIsNotNone: Callable
assertRaises: Callable
@abstractmethod
def _corrupt_state(self) -> None:
"""Corrupts the state stored in the backend."""
def _set_state(
self, state: bytes, token: Optional[Any] = None
) -> tuple[bytes, Token, bool]:
result = self._backend.set_state(state, token)
self.assertIsNotNone(result)
return cast(tuple[bytes, Token, bool], result)
def test_get_state_returns_backend_state(self) -> None:
self._backend.set_state(b"x")
result = self._backend.get_state()
self.assertIsNotNone(result)
state, token = cast(tuple[bytes, Token], result)
self.assertEqual(b"x", state)
self.assertIsNotNone(token)
def test_get_state_returns_none_if_backend_state_does_not_exist(self) -> None:
result = self._backend.get_state()
self.assertIsNone(result)
def test_get_state_raises_error_if_backend_state_is_corrupt(self) -> None:
self._corrupt_state()
with self.assertRaises(RendezvousStateError):
self._backend.get_state()
def test_set_state_sets_backend_state_if_it_does_not_exist(self) -> None:
state, token, has_set = self._set_state(b"x")
self.assertEqual(b"x", state)
self.assertIsNotNone(token)
self.assertTrue(has_set)
def test_set_state_sets_backend_state_if_token_is_current(self) -> None:
_, token1, has_set1 = self._set_state(b"x")
state2, token2, has_set2 = self._set_state(b"y", token1)
self.assertEqual(b"y", state2)
self.assertNotEqual(token1, token2)
self.assertTrue(has_set1)
self.assertTrue(has_set2)
def test_set_state_returns_current_backend_state_if_token_is_old(self) -> None:
_, token1, _ = self._set_state(b"x")
state2, token2, _ = self._set_state(b"y", token1)
state3, token3, has_set = self._set_state(b"z", token1)
self.assertEqual(state2, state3)
self.assertEqual(token2, token3)
self.assertFalse(has_set)
def test_set_state_returns_current_backend_state_if_token_is_none(self) -> None:
state1, token1, _ = self._set_state(b"x")
state2, token2, has_set = self._set_state(b"y")
self.assertEqual(state1, state2)
self.assertEqual(token1, token2)
self.assertFalse(has_set)
def test_set_state_returns_current_backend_state_if_token_is_invalid(self) -> None:
state1, token1, _ = self._set_state(b"x")
state2, token2, has_set = self._set_state(b"y", token="invalid")
self.assertEqual(state1, state2)
self.assertEqual(token1, token2)
self.assertFalse(has_set)
| RendezvousBackendTestMixin |
python | django-debug-toolbar__django-debug-toolbar | tests/base.py | {
"start": 1960,
"end": 3688
} | class ____:
_is_async = False
client_class = ToolbarTestClient
async_client_class = AsyncToolbarTestClient
panel: Panel | None = None
panel_id = None
def setUp(self):
super().setUp()
self._get_response = lambda request: HttpResponse()
self.request = rf.get("/")
if self._is_async:
self.request = arf.get("/")
self.toolbar = DebugToolbar(self.request, self.get_response_async)
else:
self.toolbar = DebugToolbar(self.request, self.get_response)
self.toolbar.stats = {}
if self.panel_id:
self.panel = self.toolbar.get_panel_by_id(self.panel_id)
self.panel.enable_instrumentation()
else:
self.panel = None
def tearDown(self):
if self.panel:
self.panel.disable_instrumentation()
super().tearDown()
def get_response(self, request):
return self._get_response(request)
async def get_response_async(self, request):
return self._get_response(request)
def assertValidHTML(self, content):
parser = html5lib.HTMLParser()
parser.parseFragment(content)
if parser.errors:
msg_parts = ["Invalid HTML:"]
lines = content.split("\n")
for position, errorcode, datavars in parser.errors:
msg_parts.append(f" {html5lib.constants.E[errorcode]}" % datavars)
msg_parts.append(f" {lines[position[0] - 1]}")
raise self.failureException("\n".join(msg_parts))
def reload_stats(self):
data = self.toolbar.store.panel(self.toolbar.request_id, self.panel_id)
self.panel.load_stats_from_store(data)
| BaseMixin |
python | google__pytype | pytype/tests/test_import2.py | {
"start": 98,
"end": 4434
} | class ____(test_base.BaseTest):
"""Tests for import."""
def test_module_attributes(self):
ty = self.Infer("""
import os
f = os.__file__
n = os.__name__
d = os.__doc__
p = os.__package__
""")
self.assertTypesMatchPytd(
ty,
"""
import os
from typing import Optional
f = ... # type: str
n = ... # type: str
d = ... # type: str
p = ... # type: Optional[str]
""",
)
def test_import_sys2(self):
ty = self.Infer(
"""
import sys
import bad_import # doesn't exist
def f():
return sys.stderr
def g():
return sys.maxsize
def h():
return sys.getrecursionlimit()
""",
report_errors=False,
)
self.assertTypesMatchPytd(
ty,
"""
import sys
from typing import Any, TextIO
bad_import = ... # type: Any
def f() -> TextIO: ...
def g() -> int: ...
def h() -> int: ...
""",
)
def test_relative_priority(self):
with test_utils.Tempdir() as d:
d.create_file("a.pyi", "x = ... # type: int")
d.create_file("b/a.pyi", "x = ... # type: complex")
ty = self.Infer(
"""
import a
x = a.x
""",
pythonpath=[d.path],
module_name="b.main",
)
self.assertTypesMatchPytd(
ty,
"""
import a
x = ... # type: int
""",
)
def test_import_attribute_error(self):
self.CheckWithErrors("""
try:
import nonexistent # import-error
except ImportError as err:
print(err.name)
""")
def test_datetime_datetime(self):
with self.DepTree([("foo.py", "from datetime import datetime")]):
self.Check("""
import foo
assert_type(foo.datetime(1, 1, 1), "datetime.datetime")
""")
def test_cycle(self):
# See https://github.com/google/pytype/issues/1028. This can happen when a
# file needs to be analyzed twice due to a dependency cycle.
with self.DepTree([
(
"components.pyi",
"""
import loaders
from typing import Dict, Type
Foo: Type[loaders.Foo]
class Component:
def __init__(self, foos: Dict[int, loaders.Foo]) -> None: ...
""",
),
(
"loaders.pyi",
"""
from typing import Any, NamedTuple
Component: Any
class Foo(NamedTuple):
foo: int
def load() -> Any: ...
""",
),
]):
self.Infer(
"""
from typing import Dict, NamedTuple
from components import Component
class Foo(NamedTuple):
foo: int
def load() -> Component:
foos: Dict[int, Foo] = {}
return Component(foos=foos)
""",
module_name="loaders",
)
def test_import_any(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Any
dep: Any
x: dep.Thing
class A(dep.Base):
def get(self) -> dep.Got: ...
""",
)]):
self.Check("""
from typing import Any
import foo
assert_type(foo.dep, Any)
assert_type(foo.x, Any)
assert_type(foo.A(), foo.A)
assert_type(foo.A().get(), Any)
""")
def test_alias_in_dep_of_dep(self):
# Regression test: `depofdep.Magic.HTMLParser` would be treated as the
# attribute HTMLParser on the class Magic in the module depofdep by
# visitors.LookupExternalTypes. In actuality, Magic is a pytd.Alias to a
# pytd.Module, not a class at all.
# The different import styles produce different ASTs, so we need to check
# that both are supported.
with self.DepTree([
("depofdep.pyi", "import html.parser as Magic"),
(
"dep.pyi",
"""
from depofdep import Magic
class A(Magic.HTMLParser): ...""",
),
]):
self.Check("import dep")
with self.DepTree([
("depofdep.pyi", "from html import parser as Magic"),
(
"dep.pyi",
"""
from depofdep import Magic
class A(Magic.HTMLParser): ...""",
),
]):
self.Check("import dep")
if __name__ == "__main__":
test_base.main()
| ImportTest |
python | modin-project__modin | modin/core/execution/ray/common/engine_wrapper.py | {
"start": 8800,
"end": 9953
} | class ____:
"""The Hook is called during the materialization and allows performing pre/post computations."""
def pre_materialize(self):
"""
Get an object reference to be materialized or a pre-computed value.
Returns
-------
ray.ObjectRef or object
"""
raise NotImplementedError()
def post_materialize(self, materialized):
"""
Perform computations on the materialized object.
Parameters
----------
materialized : object
The materialized object to be post-computed.
Returns
-------
object
The post-computed object.
"""
raise NotImplementedError()
def __reduce__(self):
"""
Replace this hook with the materialized object on serialization.
Returns
-------
tuple
"""
data = RayWrapper.materialize(self)
if not isinstance(data, int):
raise NotImplementedError("Only integers are currently supported")
return int, (data,)
ObjectRefTypes = (ray.ObjectRef, MaterializationHook)
| MaterializationHook |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 2854,
"end": 2949
} | class ____(AuditBase):
class Meta(AuditBase.Meta):
abstract = True
| CertificationAudit |
python | huggingface__transformers | src/transformers/models/qwen2_vl/configuration_qwen2_vl.py | {
"start": 11116,
"end": 14835
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen2VLModel`]. It is used to instantiate a
Qwen2-VL model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of
Qwen2-VL-7B-Instruct [Qwen/Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen2VLTextConfig`):
The config object or dictionary of the text backbone.
vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen2VLVisionConfig`):
The config object or dictionary of the vision backbone.
image_token_id (`int`, *optional*, defaults to 151655):
The image token index to encode the image prompt.
video_token_id (`int`, *optional*, defaults to 151656):
The video token index to encode the image prompt.
vision_start_token_id (`int`, *optional*, defaults to 151652):
The token index to denote start of vision input.
vision_end_token_id (`int`, *optional*, defaults to 151653):
The token index to denote end of vision input.
```python
>>> from transformers import Qwen2VLForConditionalGeneration, Qwen2VLConfig
>>> # Initializing a Qwen2VL style configuration
>>> configuration = Qwen2VLConfig()
>>> # Initializing a model from the Qwen2-VL-7B style configuration
>>> model = Qwen2VLForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "qwen2_vl"
sub_configs = {"vision_config": Qwen2VLVisionConfig, "text_config": Qwen2VLTextConfig}
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
text_config=None,
vision_config=None,
image_token_id=151655,
video_token_id=151656,
vision_start_token_id=151652,
vision_end_token_id=151653,
**kwargs,
):
if isinstance(vision_config, dict):
self.vision_config = self.sub_configs["vision_config"](**vision_config)
elif vision_config is None:
self.vision_config = self.sub_configs["vision_config"]()
if isinstance(text_config, dict):
self.text_config = self.sub_configs["text_config"](**text_config)
elif text_config is None:
# Hub configs are saved as flat dicts so we pop some of kwargs to init `TextConfig`
text_params = inspect.signature(self.sub_configs["text_config"].__init__).parameters.keys()
text_params = list(text_params) + ["rope_scaling", "rope_theta"]
text_config = {key: kwargs.pop(key) for key in text_params if key in kwargs}
text_config["dtype"] = kwargs.get("torch_dtype", kwargs.get("dtype")) # don't pop the dtype
self.text_config = self.sub_configs["text_config"](**text_config)
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.vision_start_token_id = vision_start_token_id
self.vision_end_token_id = vision_end_token_id
# FIXME: arthur/cyril - tying has to be used from the text config
kwargs["tie_word_embeddings"] = self.text_config.tie_word_embeddings
super().__init__(**kwargs)
__all__ = ["Qwen2VLConfig", "Qwen2VLTextConfig"]
| Qwen2VLConfig |
python | numpy__numpy | numpy/_core/tests/test_umath_complex.py | {
"start": 1205,
"end": 4913
} | class ____:
def test_simple(self):
check = check_complex_value
f = np.exp
check(f, 1, 0, np.exp(1), 0, False)
check(f, 0, 1, np.cos(1), np.sin(1), False)
ref = np.exp(1) * complex(np.cos(1), np.sin(1))
check(f, 1, 1, ref.real, ref.imag, False)
@platform_skip
def test_special_values(self):
# C99: Section G 6.3.1
check = check_complex_value
f = np.exp
# cexp(+-0 + 0i) is 1 + 0i
check(f, ncu.PZERO, 0, 1, 0, False)
check(f, ncu.NZERO, 0, 1, 0, False)
# cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
# exception
check(f, 1, np.inf, np.nan, np.nan)
check(f, -1, np.inf, np.nan, np.nan)
check(f, 0, np.inf, np.nan, np.nan)
# cexp(inf + 0i) is inf + 0i
check(f, np.inf, 0, np.inf, 0)
# cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
check(f, -np.inf, 1, ncu.PZERO, ncu.PZERO)
check(f, -np.inf, 0.75 * np.pi, ncu.NZERO, ncu.PZERO)
# cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
check(f, np.inf, 1, np.inf, np.inf)
check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf)
# cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
def _check_ninf_inf(dummy):
msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(-np.inf, np.inf)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
_check_ninf_inf(None)
# cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex.
def _check_inf_inf(dummy):
msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(np.inf, np.inf)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
_check_inf_inf(None)
# cexp(-inf + nan i) is +-0 +- 0i
def _check_ninf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(-np.inf, np.nan)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
_check_ninf_nan(None)
# cexp(inf + nan i) is +-inf + nan
def _check_inf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
z = f(np.array(complex(np.inf, np.nan)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
_check_inf_nan(None)
# cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
# ex)
check(f, np.nan, 1, np.nan, np.nan)
check(f, np.nan, -1, np.nan, np.nan)
check(f, np.nan, np.inf, np.nan, np.nan)
check(f, np.nan, -np.inf, np.nan, np.nan)
# cexp(nan + nani) is nan + nani
check(f, np.nan, np.nan, np.nan, np.nan)
# TODO This can be xfail when the generator functions are got rid of.
@pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms")
def test_special_values2(self):
# XXX: most implementations get it wrong here (including glibc <= 2.10)
# cexp(nan + 0i) is nan + 0i
check = check_complex_value
f = np.exp
check(f, np.nan, 0, np.nan, 0)
| TestCexp |
python | h5py__h5py | h5py/tests/test_group.py | {
"start": 11915,
"end": 14847
} | class ____(BaseGroup):
"""
Feature: The Python "in" builtin tests for membership
"""
def test_contains(self):
""" "in" builtin works for membership (byte and Unicode) """
name = make_name()
self.f.create_group(name)
self.assertIn(name.encode("utf-8"), self.f)
self.assertIn(name, self.f)
self.assertIn(f"/{name}".encode("utf-8"), self.f)
self.assertIn(f"/{name}", self.f)
self.assertNotIn(b'mongoose', self.f)
self.assertNotIn('mongoose', self.f)
def test_closed(self):
""" "in" on closed File returns False (see also issue 174) """
f = File(self.mktemp(), 'w')
f.create_group('a')
self.assertTrue(b'a' in f)
self.assertTrue('a' in f)
f.close()
self.assertFalse(b'a' in f)
self.assertFalse('a' in f)
def test_empty(self):
""" Empty strings work properly and aren't contained """
self.assertNotIn('', self.f)
self.assertNotIn(b'', self.f)
def test_dot(self):
""" Current group "." is always contained """
self.assertIn(b'.', self.f)
self.assertIn('.', self.f)
def test_root(self):
""" Root group (by itself) is contained """
self.assertIn(b'/', self.f)
self.assertIn('/', self.f)
def test_trailing_slash(self):
""" Trailing slashes are unconditionally ignored """
g = make_name("g")
d = make_name("d")
self.f.create_group(g)
self.f[d] = 42
self.assertIn(f"/{g}/", self.f)
self.assertIn(f"{g}/", self.f)
self.assertIn(f"/{d}/", self.f)
self.assertIn(f"{d}/", self.f)
def test_softlinks(self):
""" Broken softlinks are contained, but their members are not """
name = make_name()
self.f.create_group(name)
self.f[f'/{name}/soft'] = h5py.SoftLink('/mongoose')
self.f[f'/{name}/external'] = h5py.ExternalLink('mongoose.hdf5', '/mongoose')
self.assertIn(f"/{name}/soft", self.f)
self.assertNotIn(f"/{name}/soft/something", self.f)
self.assertIn(f"/{name}/external", self.f)
self.assertNotIn(f"/{name}/external/something", self.f)
def test_oddball_paths(self):
""" Technically legitimate (but odd-looking) paths """
x = make_name('x')
dset = make_name("dset")
self.f.create_group(f"{x}/y/z")
self.f[dset] = 42
self.assertIn('/', self.f)
self.assertIn('//', self.f)
self.assertIn('///', self.f)
self.assertIn('.///', self.f)
self.assertIn('././/', self.f)
grp = self.f[x]
self.assertIn(f'.//{x}/y/z', self.f)
self.assertNotIn(f'.//{x}/y/z', grp)
self.assertIn(f'{x}///', self.f)
self.assertIn(f'./{x}///', self.f)
self.assertIn(f'{dset}///', self.f)
self.assertIn(f'/{dset}//', self.f)
| TestContains |
python | automl__auto-sklearn | test/test_pipeline/components/feature_preprocessing/test_truncatedSVD.py | {
"start": 304,
"end": 1960
} | class ____(PreprocessingTestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(TruncatedSVD)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertFalse((transformation == 0).all())
def test_default_configuration_classify(self):
for i in range(2):
X_train, Y_train, X_test, Y_test = get_dataset(
dataset="digits", make_sparse=True
)
configuration_space = TruncatedSVD.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = TruncatedSVD(
random_state=1,
**{
hp_name: default[hp_name]
for hp_name in default
if default[hp_name] is not None
},
)
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a classifier on top
classifier = RidgeClassifier()
predictor = classifier.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
accuracy = sklearn.metrics.accuracy_score(predictions, Y_test)
self.assertAlmostEqual(accuracy, 0.44201578627808136, places=2)
@unittest.skip("Truncated SVD returns np.float64.")
def test_preprocessing_dtype(self):
super(TruncatedSVDComponentTest, self)._test_preprocessing_dtype(
TruncatedSVD, test_sparse=False
)
| TruncatedSVDComponentTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 13133,
"end": 15478
} | class ____(HasExpressionLookup, TypeEngineMixin, Generic[_N]):
"""common mixin for the :class:`.Numeric` and :class:`.Float` types.
.. versionadded:: 2.1
"""
_default_decimal_return_scale = 10
operator_classes = OperatorClass.NUMERIC
if TYPE_CHECKING:
@util.ro_memoized_property
def _type_affinity(self) -> Type[Union[Numeric[_N], Float[_N]]]: ...
def __init__(
self,
*,
precision: Optional[int],
scale: Optional[int],
decimal_return_scale: Optional[int],
asdecimal: bool,
):
self.precision = precision
self.scale = scale
self.decimal_return_scale = decimal_return_scale
self.asdecimal = asdecimal
@property
def _effective_decimal_return_scale(self):
if self.decimal_return_scale is not None:
return self.decimal_return_scale
elif getattr(self, "scale", None) is not None:
return self.scale
else:
return self._default_decimal_return_scale
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
def literal_processor(self, dialect):
def process(value):
return str(value)
return process
@property
def python_type(self):
if self.asdecimal:
return decimal.Decimal
else:
return float
def bind_processor(self, dialect):
if dialect.supports_native_decimal:
return None
else:
return processors.to_float
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul: {
Interval: Interval,
Numeric: self.__class__,
Float: self.__class__,
Integer: self.__class__,
},
operators.truediv: {
Numeric: self.__class__,
Float: self.__class__,
Integer: self.__class__,
},
operators.add: {
Numeric: self.__class__,
Float: self.__class__,
Integer: self.__class__,
},
operators.sub: {
Numeric: self.__class__,
Float: self.__class__,
Integer: self.__class__,
},
}
| NumericCommon |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/basic.py | {
"start": 6782,
"end": 7104
} | class ____(DefaultComponent):
type = "dag"
def __init__(self, title=None, subtitle=None, data={}):
super().__init__(title=title, subtitle=subtitle)
self._data = data
def render(self):
datadict = super().render()
datadict["data"] = self._data
return datadict
| DagComponent |
python | conda__conda | conda/cli/conda_argparse.py | {
"start": 8734,
"end": 13424
} | class ____(argparse._SubParsersAction):
"""A custom subparser action to conditionally act as a greedy consumer.
This is a workaround since argparse.REMAINDER does not work as expected,
see https://github.com/python/cpython/issues/61252.
"""
def __call__(self, parser, namespace, values, option_string=None):
super().__call__(parser, namespace, values, option_string)
parser = self._name_parser_map[values[0]]
# if the parser has a greedy=True attribute we want to consume all arguments
# i.e. all unknown args should be passed to the subcommand as is
if getattr(parser, "greedy", False):
try:
unknown = getattr(namespace, argparse._UNRECOGNIZED_ARGS_ATTR)
delattr(namespace, argparse._UNRECOGNIZED_ARGS_ATTR)
except AttributeError:
unknown = ()
# underscore prefixed indicating this is not a normal argparse argument
namespace._args = tuple(unknown)
def _get_subactions(self):
"""Sort actions for subcommands to appear alphabetically in help blurb."""
return sorted(self._choices_actions, key=lambda action: action.dest)
def _exec(executable_args, env_vars):
return (_exec_win if on_win else _exec_unix)(executable_args, env_vars)
def _exec_win(executable_args, env_vars):
p = Popen(executable_args, env=env_vars)
try:
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
sys.exit(p.returncode)
def _exec_unix(executable_args, env_vars):
os.execvpe(executable_args[0], executable_args, env_vars)
def configure_parser_plugins(sub_parsers) -> None:
"""
For each of the provided plugin-based subcommands, we'll create
a new subparser for an improved help printout and calling the
:meth:`~conda.plugins.types.CondaSubcommand.configure_parser`
with the newly created subcommand specific argument parser.
"""
plugin_subcommands = context.plugin_manager.get_subcommands()
for name, plugin_subcommand in plugin_subcommands.items():
# if the name of the plugin-based subcommand overlaps a built-in
# subcommand, we print an error
if name in BUILTIN_COMMANDS:
log.error(
dals(
f"""
The plugin '{name}' is trying to override the built-in command
with the same name, which is not allowed.
Please uninstall the plugin to stop seeing this error message.
"""
)
)
continue
parser = sub_parsers.add_parser(
name,
description=plugin_subcommand.summary,
help=plugin_subcommand.summary,
add_help=False, # defer to subcommand's help processing
)
# case 1: plugin extends the parser
if plugin_subcommand.configure_parser:
plugin_subcommand.configure_parser(parser)
# attempt to add standard help processing, will fail if plugin defines their own
try:
add_parser_help(parser)
except argparse.ArgumentError:
pass
# case 2: plugin has their own parser, see _GreedySubParsersAction
else:
parser.greedy = True
# underscore prefixed indicating this is not a normal argparse argument
parser.set_defaults(_plugin_subcommand=plugin_subcommand)
if context.no_plugins:
return
# Ignore the legacy `conda-env` entrypoints since we already register `env`
# as a subcommand in `generate_parser` above
legacy = set(find_commands()).difference(plugin_subcommands) - {"env"}
for name in legacy:
# if the name of the plugin-based subcommand overlaps a built-in
# subcommand, we print an error
if name in BUILTIN_COMMANDS:
log.error(
dals(
f"""
The (legacy) plugin '{name}' is trying to override the built-in command
with the same name, which is not allowed.
Please uninstall the plugin to stop seeing this error message.
"""
)
)
continue
parser = sub_parsers.add_parser(
name,
description=f"See `conda {name} --help`.",
help=f"See `conda {name} --help`.",
add_help=False, # defer to subcommand's help processing
)
# case 3: legacy plugins are always greedy
parser.greedy = True
parser.set_defaults(_executable=name)
| _GreedySubParsersAction |
python | wandb__wandb | wandb/sdk/artifacts/_generated/input_types.py | {
"start": 7697,
"end": 8273
} | class ____(GQLInput):
team_id: GQLId = Field(alias="teamId")
project_id: GQLId = Field(alias="projectId")
team_project_role: str = Field(alias="teamProjectRole")
client_mutation_id: Optional[str] = Field(alias="clientMutationId", default=None)
UpsertModelInput.model_rebuild()
UpdateArtifactInput.model_rebuild()
LinkArtifactInput.model_rebuild()
AddAliasesInput.model_rebuild()
DeleteAliasesInput.model_rebuild()
CreateArtifactCollectionTagAssignmentsInput.model_rebuild()
DeleteArtifactCollectionTagAssignmentsInput.model_rebuild()
| UpdateProjectTeamMemberInput |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/models.py | {
"start": 5943,
"end": 11148
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncModelsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return AsyncModelsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncModelsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return AsyncModelsWithStreamingResponse(self)
async def retrieve(
self,
model_id: str,
*,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ModelInfo:
"""
Get a specific model.
The Models API response can be used to determine information about a specific
model or resolve a model alias to a model ID.
Args:
model_id: Model identifier or alias.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not model_id:
raise ValueError(f"Expected a non-empty value for `model_id` but received {model_id!r}")
extra_headers = {
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
**(extra_headers or {}),
}
return await self._get(
f"/v1/models/{model_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ModelInfo,
)
def list(
self,
*,
after_id: str | Omit = omit,
before_id: str | Omit = omit,
limit: int | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[ModelInfo, AsyncPage[ModelInfo]]:
"""
List available models.
The Models API response can be used to determine which models are available for
use in the API. More recently released models are listed first.
Args:
after_id: ID of the object to use as a cursor for pagination. When provided, returns the
page of results immediately after this object.
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
page of results immediately before this object.
limit: Number of items to return per page.
Defaults to `20`. Ranges from `1` to `1000`.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {
**strip_not_given({"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}),
**(extra_headers or {}),
}
return self._get_api_list(
"/v1/models",
page=AsyncPage[ModelInfo],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after_id": after_id,
"before_id": before_id,
"limit": limit,
},
model_list_params.ModelListParams,
),
),
model=ModelInfo,
)
| AsyncModels |
python | huggingface__transformers | src/transformers/models/sam/modeling_sam.py | {
"start": 1688,
"end": 2344
} | class ____(ModelOutput):
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
"""
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Segment-Anything model's output
"""
)
| SamVisionEncoderOutput |
python | Textualize__textual | docs/examples/guide/widgets/checker04.py | {
"start": 329,
"end": 3465
} | class ____(ScrollView):
COMPONENT_CLASSES = {
"checkerboard--white-square",
"checkerboard--black-square",
"checkerboard--cursor-square",
}
DEFAULT_CSS = """
CheckerBoard > .checkerboard--white-square {
background: #A5BAC9;
}
CheckerBoard > .checkerboard--black-square {
background: #004578;
}
CheckerBoard > .checkerboard--cursor-square {
background: darkred;
}
"""
cursor_square = var(Offset(0, 0))
def __init__(self, board_size: int) -> None:
super().__init__()
self.board_size = board_size
# Each square is 4 rows and 8 columns
self.virtual_size = Size(board_size * 8, board_size * 4)
def on_mouse_move(self, event: events.MouseMove) -> None:
"""Called when the user moves the mouse over the widget."""
mouse_position = event.offset + self.scroll_offset
self.cursor_square = Offset(mouse_position.x // 8, mouse_position.y // 4)
def watch_cursor_square(
self, previous_square: Offset, cursor_square: Offset
) -> None:
"""Called when the cursor square changes."""
def get_square_region(square_offset: Offset) -> Region:
"""Get region relative to widget from square coordinate."""
x, y = square_offset
region = Region(x * 8, y * 4, 8, 4)
# Move the region into the widgets frame of reference
region = region.translate(-self.scroll_offset)
return region
# Refresh the previous cursor square
self.refresh(get_square_region(previous_square))
# Refresh the new cursor square
self.refresh(get_square_region(cursor_square))
def render_line(self, y: int) -> Strip:
"""Render a line of the widget. y is relative to the top of the widget."""
scroll_x, scroll_y = self.scroll_offset # The current scroll position
y += scroll_y # The line at the top of the widget is now `scroll_y`, not zero!
row_index = y // 4 # four lines per row
white = self.get_component_rich_style("checkerboard--white-square")
black = self.get_component_rich_style("checkerboard--black-square")
cursor = self.get_component_rich_style("checkerboard--cursor-square")
if row_index >= self.board_size:
return Strip.blank(self.size.width)
is_odd = row_index % 2
def get_square_style(column: int, row: int) -> Style:
"""Get the cursor style at the given position on the checkerboard."""
if self.cursor_square == Offset(column, row):
square_style = cursor
else:
square_style = black if (column + is_odd) % 2 else white
return square_style
segments = [
Segment(" " * 8, get_square_style(column, row_index))
for column in range(self.board_size)
]
strip = Strip(segments, self.board_size * 8)
# Crop the strip so that is covers the visible area
strip = strip.crop(scroll_x, scroll_x + self.size.width)
return strip
| CheckerBoard |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/config_builder.py | {
"start": 92,
"end": 514
} | class ____:
def __init__(self):
self._config = {"enable_experimental_streams": True}
def with_start_date(self, start_date: str):
self._config["start_date"] = start_date
return self
def with_auth(self, credentials: Mapping[str, str]):
self._config["credentials"] = credentials
return self
def build(self) -> Mapping[str, Any]:
return self._config
| ConfigBuilder |
python | altair-viz__altair | tools/vega_expr.py | {
"start": 6308,
"end": 7014
} | class ____({base}, metaclass={metaclass}):
"""{doc}\n{links}"""
@override
def __new__(cls: type[{base}], expr: str) -> {base}: {type_ignore}
return {base}(expr=expr)
'''
METHOD_SIGNATURE = (
"""def {title}(cls{sep}{param_list}{marker}) -> {return_ann}:{type_ignore}"""
)
METHOD_TEMPLATE = '''\
{decorator}
{signature}
"""
{doc}
"""
return {return_wrapper}({name}, {body_params})
'''
def _override_predicate(obj: Any, /) -> bool:
return callable(obj) and not (name := obj.__name__).startswith("_") # noqa: F841
_SCHEMA_BASE_MEMBERS: frozenset[str] = frozenset(
nm for nm, _ in getmembers(_SchemaBase, _override_predicate)
)
| expr |
python | cython__cython | Cython/Compiler/Tests/TestBuiltin.py | {
"start": 1558,
"end": 3453
} | class ____(unittest.TestCase):
def test_python_builtin_compatibility(self):
expected_builtins = set(KNOWN_PYTHON_BUILTINS)
if sys.platform != 'win32':
expected_builtins.discard("WindowsError")
runtime_builtins = frozenset(
name for name in dir(builtins)
if name not in ('__doc__', '__loader__', '__name__', '__package__', '__spec__'))
if sys.version_info < KNOWN_PYTHON_BUILTINS_VERSION:
missing_builtins = expected_builtins - runtime_builtins
if missing_builtins:
self.skipTest(f'skipping test, older Python release found. Missing builtins: {", ".join(sorted(missing_builtins))}')
self.skipTest('skipping test, older Python release found.')
self.assertSetEqual(runtime_builtins, expected_builtins)
def test_unsafe_compile_time_methods(self):
"""Validate the table of builtin methods that are not safe for compile time evaluation
against the table of known builtin methods (and their types).
"""
for builtin_type_name, unsafe_methods in unsafe_compile_time_methods.items():
self.assertIsInstance(unsafe_methods, set)
builtin_type = getattr(builtins, builtin_type_name) # All named types must exist as builtin types.
known_methods = sorted(
inferred_method_return_types[builtin_type_name]) # All types are also in "inferred_method_return_types".
self.assertFalse(unsafe_methods.difference(known_methods)) # Only known methods are listed.
for method_name in known_methods:
builtin_method = getattr(builtin_type, method_name, None)
if builtin_method is None:
self.assertIn(method_name, unsafe_methods) # Non-portable methods are always unsafe.
continue
| TestBuiltinCompatibility |
python | python-visualization__folium | folium/vector_layers.py | {
"start": 11722,
"end": 13194
} | class ____(Marker):
"""
A circle of a fixed size with radius specified in pixels.
See :func:`folium.vector_layers.path_options` for the `Path` options.
Parameters
----------
location: tuple[float, float]
Latitude and Longitude pair (Northing, Easting)
popup: string or folium.Popup, default None
Input text or visualization for object displayed when clicking.
tooltip: str or folium.Tooltip, default None
Display a text when hovering over the object.
radius: float, default 10
Radius of the circle marker, in pixels.
**kwargs
Other valid (possibly inherited) options. See:
https://leafletjs.com/reference.html#circlemarker
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = L.circleMarker(
{{ this.location|tojson }},
{{ this.options|tojson }}
).addTo({{ this._parent.get_name() }});
{% endmacro %}
"""
)
def __init__(
self,
location: Optional[Sequence[float]] = None,
radius: float = 10,
popup: Union[Popup, str, None] = None,
tooltip: Union[Tooltip, str, None] = None,
**kwargs: TypePathOptions,
):
super().__init__(location, popup=popup, tooltip=tooltip)
self._name = "CircleMarker"
self.options = path_options(line=False, radius=radius, **kwargs)
| CircleMarker |
python | pytorch__pytorch | torch/distributed/_tools/fsdp2_mem_tracker.py | {
"start": 2360,
"end": 2454
} | class ____(NamedTuple):
pre_backward: Callable
post_backward: Callable
| _SavedFSDPMethods |
python | prabhupant__python-ds | data_structures/bst/dfs_iterative.py | {
"start": 0,
"end": 1502
} | class ____():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def inorder(root):
if not root:
return None
stack = []
# Keep adding left until there is none
while True:
if root:
stack.append(root)
root = root.left
else:
if not stack:
break
root = stack.pop()
print(root.val, end=" ")
root = root.right
def postorder(root):
if not root:
return None
stack = []
curr = root
while curr or stack:
if curr:
stack.append(curr)
curr = curr.left
else:
temp = stack[-1].right
if not temp:
temp = stack.pop()
print(temp.val, end=" ")
while stack and temp == stack[-1].right:
temp = stack.pop()
print(temp.val, end=" ")
else:
curr = temp
def preorder(root):
if not root:
return None
stack = [root]
while stack:
root = stack.pop()
print(root.val, end=" ")
if root.right:
stack.append(root.right)
if root.left:
stack.append(root.left)
root = Node(5)
root.left = Node(2)
root.right = Node(7)
root.left.left = Node(1)
root.left.right = Node(3)
root.right.right = Node(8)
root.right.left = Node(6)
#inorder(root)
#preorder(root)
postorder(root)
| Node |
python | keon__algorithms | algorithms/compression/huffman_coding.py | {
"start": 6280,
"end": 10199
} | class ____:
def __init__(self):
pass
@staticmethod
def decode_file(file_in_name, file_out_name):
with open(file_in_name, "rb") as file_in, open(file_out_name, "wb") as file_out:
reader = HuffmanReader(file_in)
additional_bits = reader.get_number_of_additional_bits_in_the_last_byte()
tree = reader.load_tree()
HuffmanCoding._decode_and_write_signs_to_file(file_out, reader, tree, additional_bits)
print("File decoded.")
@staticmethod
def _decode_and_write_signs_to_file(file, reader: HuffmanReader, tree: Node, additional_bits: int):
tree_finder = TreeFinder(tree)
is_end_of_file = False
while not is_end_of_file:
bit = reader.get_bit()
if bit != -1:
while not tree_finder.find(bit): # read whole code
bit = reader.get_bit(0)
file.write(bytes([tree_finder.found]))
else: # There is last byte in buffer to parse
is_end_of_file = True
last_byte = reader.buffer
last_byte = last_byte[:-additional_bits] # remove additional "0" used to fill byte
for bit in last_byte:
if tree_finder.find(bit):
file.write(bytes([tree_finder.found]))
@staticmethod
def encode_file(file_in_name, file_out_name):
with open(file_in_name, "rb") as file_in, open(file_out_name, mode="wb+") as file_out:
signs_frequency = HuffmanCoding._get_char_frequency(file_in)
file_in.seek(0)
tree = HuffmanCoding._create_tree(signs_frequency)
codes = HuffmanCoding._generate_codes(tree)
writer = HuffmanWriter(file_out)
writer.write_bits("000") # leave space to save how many bits will be appended to fill the last byte
writer.save_tree(tree)
HuffmanCoding._encode_and_write_signs_to_file(file_in, writer, codes)
writer.close()
print("File encoded.")
@staticmethod
def _encode_and_write_signs_to_file(file, writer: HuffmanWriter, codes: dict):
sign = file.read(1)
while sign:
int_char = int.from_bytes(sign, "big")
writer.write_bits(codes[int_char])
sign = file.read(1)
@staticmethod
def _get_char_frequency(file) -> dict:
is_end_of_file = False
signs_frequency = defaultdict(lambda: 0)
while not is_end_of_file:
prev_pos = file.tell()
sign = file.read(1)
curr_pos = file.tell()
if prev_pos == curr_pos:
is_end_of_file = True
else:
signs_frequency[int.from_bytes(sign, "big")] += 1
return signs_frequency
@staticmethod
def _generate_codes(tree: Node) -> dict:
codes = dict()
HuffmanCoding._go_through_tree_and_create_codes(tree, "", codes)
return codes
@staticmethod
def _create_tree(signs_frequency: dict) -> Node:
nodes = [Node(frequency=frequency, sign=char_int) for char_int, frequency in signs_frequency.items()]
heapq.heapify(nodes)
while len(nodes) > 1:
left = heapq.heappop(nodes)
right = heapq.heappop(nodes)
new_node = Node(frequency=left.frequency + right.frequency, left=left, right=right)
heapq.heappush(nodes, new_node)
return nodes[0] # root
@staticmethod
def _go_through_tree_and_create_codes(tree: Node, code: str, dict_codes: dict):
if tree.sign is not None:
dict_codes[tree.sign] = code
if tree.left:
HuffmanCoding._go_through_tree_and_create_codes(tree.left, code + "0", dict_codes)
if tree.right:
HuffmanCoding._go_through_tree_and_create_codes(tree.right, code + "1", dict_codes)
| HuffmanCoding |
python | ray-project__ray | python/ray/train/v2/_internal/callbacks/accelerators.py | {
"start": 779,
"end": 5548
} | class ____(WorkerGroupCallback):
"""Perform accelerator setup for workers.
For example, this callback can be used to share CUDA_VISIBLE_DEVICES
among workers on the same node.
"""
def __init__(self, backend_config: BackendConfig, scaling_config: ScalingConfig):
self._backend = backend_config.backend_cls()
self._scaling_config = scaling_config
def before_init_train_context(
self, workers: List["Worker"]
) -> Dict[str, List[Any]]:
self._maybe_share_cuda_visible_devices(workers)
# TODO: Add support for sharing other accelerator resources.
return {}
def _maybe_share_cuda_visible_devices(self, workers: List["Worker"]):
"""Set CUDA visible devices environment variables on workers."""
share_cuda_visible_devices_enabled = env_bool(
ENABLE_SHARE_CUDA_VISIBLE_DEVICES_ENV,
self._backend.share_cuda_visible_devices,
)
if (
self._scaling_config._resources_per_worker_not_none.get("GPU", 0) > 0
and share_cuda_visible_devices_enabled
):
_share_cuda_visible_devices(workers)
def _share_cuda_visible_devices(workers: List["Worker"]):
"""Sets CUDA_VISIBLE_DEVICES on all workers.
For each worker, CUDA_VISIBLE_DEVICES will be set to the GPU IDs
visible to all workers on that worker's node.
This allows GPU workers on the same node to communicate with one
another.
Example:
Setup:
- Node1:
- Worker1: {0, 1}
- Worker2: {2, 3}
- Node2:
- Worker3: {0, 1}
CUDA_VISIBLE_DEVICES:
- Worker1: "0,1,2,3"
- Worker2: "0,1,2,3"
- Worker3: "0,1"
Args:
workers: List of worker objects.
"""
_share_accelerator_ids(workers, ray_constants.GPU, CUDA_VISIBLE_DEVICES_ENV_VAR)
def _share_accelerator_ids(
workers: List["Worker"], accelerator_name: str, env_var: str
):
"""Sets the given env_var on all workers.
For each worker, the cores/devices are visible to all the
workers on that worker's node. This allows workers on the
same node to communicate with one another.
Example:
Setup:
- Node1:
- Worker1: {0, 1}
- Worker2: {2, 3}
- Node2:
- Worker3: {0, 1}
NEURON_RT_VISIBLE_CORES/TPU_VISIBLE_CHIPS/...:
- Worker1: "0,1,2,3"
- Worker2: "0,1,2,3"
- Worker3: "0,1"
Args:
workers: List of worker objects.
accelerator_name: The name of the accelerator.
env_var: The name of the environment variable to set.
"""
worker_metadatas = [worker.metadata for worker in workers]
visible_accelerator_ids_per_worker = _get_visible_accelerator_ids_per_worker(
worker_metadatas=worker_metadatas, accelerator_name=accelerator_name
)
def set_accelerator_ids(accelerator_ids):
os.environ[env_var] = accelerator_ids
futures = []
for rank, visible_accelerator_ids in enumerate(visible_accelerator_ids_per_worker):
futures.append(
workers[rank].execute_async(
set_accelerator_ids, accelerator_ids=visible_accelerator_ids
)
)
ray_get_safe(futures)
def _get_visible_accelerator_ids_per_worker(
worker_metadatas: List[ActorMetadata], accelerator_name: str
) -> List[str]:
"""Returns a list of comma-separated accelerator IDs visible to each worker.
All workers on a node should have the same set of visible accelerators,
which is the union of accelerator ids of the workers.
Returns:
visible_accelerator_ids_per_worker: A list of comma-separated accelerator ID
strings. This list is the same length as the number of workers.
"""
for metadata in worker_metadatas:
if accelerator_name not in metadata.accelerator_ids:
raise ValueError(
f"Accelerator '{accelerator_name}' is not available on all workers. "
f"Got these available accelerators instead: {metadata.accelerator_ids}"
)
node_id_to_accelerator_ids = defaultdict(set)
for metadata in worker_metadatas:
node_id_to_accelerator_ids[metadata.node_id].update(
metadata.accelerator_ids[accelerator_name]
)
visible_accelerator_ids_per_worker = []
for worker_id in range(len(worker_metadatas)):
node_id = worker_metadatas[worker_id].node_id
accelerator_ids = sorted(node_id_to_accelerator_ids[node_id])
all_resource_ids = ",".join([str(id) for id in accelerator_ids])
visible_accelerator_ids_per_worker.append(all_resource_ids)
return visible_accelerator_ids_per_worker
| AcceleratorSetupCallback |
python | allegroai__clearml | clearml/backend_api/services/v2_23/workers.py | {
"start": 50545,
"end": 53854
} | class ____(Request):
"""
Returns information on all registered workers.
:param last_seen: Filter out workers not active for more than last_seen
seconds. A value or 0 or 'none' will disable the filter.
:type last_seen: int
:param tags: The list of allowed worker tags. Prepend tag value with '-' in
order to exclude
:type tags: Sequence[str]
:param system_tags: The list of allowed worker system tags. Prepend tag value
with '-' in order to exclude
:type system_tags: Sequence[str]
"""
_service = "workers"
_action = "get_all"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"last_seen": {
"default": 3600,
"description": "Filter out workers not active for more than last_seen seconds.\n A value or 0 or 'none' will disable the filter.",
"type": ["integer", "null"],
},
"system_tags": {
"description": "The list of allowed worker system tags. Prepend tag value with '-' in order to exclude",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "The list of allowed worker tags. Prepend tag value with '-' in order to exclude",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
last_seen: Optional[int] = 3600,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
**kwargs: Any
) -> None:
super(GetAllRequest, self).__init__(**kwargs)
self.last_seen = last_seen
self.tags = tags
self.system_tags = system_tags
@schema_property("last_seen")
def last_seen(self) -> Optional[int]:
return self._property_last_seen
@last_seen.setter
def last_seen(self, value: Optional[int]) -> None:
if value is None:
self._property_last_seen = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "last_seen", six.integer_types)
self._property_last_seen = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
| GetAllRequest |
python | keon__algorithms | tests/test_strings.py | {
"start": 9005,
"end": 9564
} | class ____(unittest.TestCase):
"""[summary]
Test for the file one_edit_distance.py
Arguments:
unittest {[type]} -- [description]
"""
def test_is_one_edit(self):
self.assertTrue(is_one_edit("abc", "abd"))
self.assertFalse(is_one_edit("abc", "aed"))
self.assertFalse(is_one_edit("abcd", "abcd"))
def test_is_one_edit2(self):
self.assertTrue(is_one_edit2("abc", "abd"))
self.assertFalse(is_one_edit2("abc", "aed"))
self.assertFalse(is_one_edit2("abcd", "abcd"))
| TestOneEditDistance |
python | apache__airflow | airflow-core/tests/unit/core/test_settings.py | {
"start": 7823,
"end": 8588
} | class ____:
@staticmethod
@patch("airflow.settings.conf")
@patch("airflow.settings.is_sqlalchemy_v1")
def test_encoding_present_in_v1(is_v1, mock_conf):
from airflow import settings
is_v1.return_value = True
mock_conf.getjson.return_value = {}
engine_args = settings.prepare_engine_args()
assert "encoding" in engine_args
@staticmethod
@patch("airflow.settings.conf")
@patch("airflow.settings.is_sqlalchemy_v1")
def test_encoding_absent_in_v2(is_v1, mock_conf):
from airflow import settings
is_v1.return_value = False
mock_conf.getjson.return_value = {}
engine_args = settings.prepare_engine_args()
assert "encoding" not in engine_args
| TestEngineArgs |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-ragie/unit_tests/test_ragie_writer.py | {
"start": 480,
"end": 4784
} | class ____(unittest.TestCase):
def setUp(self):
self.mock_client = Mock()
self.mock_config = Mock()
# Mock config values
self.mock_config.metadata_static_dict = {"source": "airbyte"}
self.mock_config.content_fields = ["message"]
self.mock_config.document_name_field = "doc.name"
self.mock_config.metadata_fields = ["meta.author", "meta.tags"]
self.mock_config.external_id_field = "external_id"
self.mock_config.processing_mode = "fast"
self.mock_config.partition = "test-partition"
# Create a stream with proper AirbyteStream object
stream = ConfiguredAirbyteStream(
stream=AirbyteStream(
name="my_stream", namespace="default", json_schema={"type": "object"}, supported_sync_modes=[SyncMode.incremental]
),
destination_sync_mode=DestinationSyncMode.append_dedup,
sync_mode=SyncMode.incremental,
)
self.mock_catalog = ConfiguredAirbyteCatalog(streams=[stream])
# Setup writer
self.writer = RagieWriter(client=self.mock_client, config=self.mock_config, catalog=self.mock_catalog)
# Initialize write_buffer if not set in __init__
if not hasattr(self.writer, "write_buffer"):
self.writer.write_buffer = []
def _make_record(self, message="hello", author="john", tags=None):
if tags is None:
tags = ["tag1", "tag2"]
return AirbyteRecordMessage(
stream="my_stream",
namespace="default",
emitted_at=int(datetime.datetime.now().timestamp() * 1000),
data={"message": message, "doc": {"name": "my_doc"}, "meta": {"author": author, "tags": tags}},
)
def test_get_value_from_path(self):
data = {"a": {"b": {"c": 123}}}
result = self.writer._get_value_from_path(data, "a.b.c")
self.assertEqual(result, 123)
def test_calculate_content_hash_consistency(self):
content = {"msg": "abc"}
metadata = {"meta": "xyz"}
hash1 = self.writer._calculate_content_hash(content, metadata)
hash2 = self.writer._calculate_content_hash(content, metadata)
self.assertEqual(hash1, hash2)
def test_stream_tuple_to_id(self):
result = self.writer._stream_tuple_to_id("namespace", "name")
self.assertEqual(result, "namespace_name")
def test_queue_write_operation_skips_duplicates(self):
record = self._make_record()
hash_val = self.writer._calculate_content_hash(
{"message": "hello"},
{
"source": "airbyte",
"meta_author": "john",
"meta_tags": ["tag1", "tag2"],
"airbyte_stream": "default_my_stream",
"airbyte_content_hash": "dummy",
},
)
self.writer.seen_hashes["default_my_stream"] = {hash_val}
self.mock_client.find_docs_by_metadata.return_value = [{"metadata": {"airbyte_content_hash": hash_val}}]
self.writer.queue_write_operation(record)
self.assertEqual(len(self.writer.write_buffer), 0)
def test_preload_hashes_if_needed_loads_hashes(self):
self.mock_client.find_docs_by_metadata.return_value = [
{"metadata": {"airbyte_content_hash": "abc"}},
{"metadata": {"airbyte_content_hash": "def"}},
]
self.writer._preload_hashes_if_needed("my_stream_id")
self.assertIn("abc", self.writer.seen_hashes["my_stream_id"])
self.assertIn("def", self.writer.seen_hashes["my_stream_id"])
def test_delete_streams_to_overwrite_calls_delete(self):
stream = ConfiguredAirbyteStream(
stream=AirbyteStream(name="test", json_schema={"type": "object"}, supported_sync_modes=[SyncMode.full_refresh]),
sync_mode=SyncMode.full_refresh,
destination_sync_mode=DestinationSyncMode.overwrite,
)
self.writer.streams = {"default_overwrite_stream": stream}
self.mock_client.find_ids_by_metadata.return_value = ["id1", "id2"]
self.writer.delete_streams_to_overwrite()
self.assertCountEqual(self.mock_client.delete_documents_by_id.call_args[0][0], ["id1", "id2"])
if __name__ == "__main__":
unittest.main()
| TestRagieWriter |
python | tensorflow__tensorflow | tensorflow/python/training/server_lib_sparse_job_test.py | {
"start": 1008,
"end": 1581
} | class ____(test.TestCase):
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
@test_util.run_deprecated_v1
def testSparseJob(self):
server = server_lib.Server({"local": {37: "localhost:0"}})
with ops.device("/job:local/task:37"):
a = constant_op.constant(1.0)
with session.Session(server.target) as sess:
self.assertEqual(1.0, self.evaluate(a))
if __name__ == "__main__":
test.main()
| SparseJobTest |
python | huggingface__transformers | src/transformers/models/layoutlmv2/configuration_layoutlmv2.py | {
"start": 902,
"end": 11124
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LayoutLMv2Model`]. It is used to instantiate an
LayoutLMv2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the LayoutLMv2
[microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the LayoutLMv2 model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`LayoutLMv2Model`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv2Model`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum value that the 2D position embedding might ever be used with. Typically set this to something
large just in case (e.g., 1024).
max_rel_pos (`int`, *optional*, defaults to 128):
The maximum number of relative positions to be used in the self-attention mechanism.
rel_pos_bins (`int`, *optional*, defaults to 32):
The number of relative position bins to be used in the self-attention mechanism.
fast_qkv (`bool`, *optional*, defaults to `True`):
Whether or not to use a single matrix for the queries, keys, values in the self-attention layers.
max_rel_2d_pos (`int`, *optional*, defaults to 256):
The maximum number of relative 2D positions in the self-attention mechanism.
rel_2d_pos_bins (`int`, *optional*, defaults to 64):
The number of 2D relative position bins in the self-attention mechanism.
convert_sync_batchnorm (`bool`, *optional*, defaults to `True`):
Whether or not to convert batch normalization layers to synchronized batch normalization layers.
image_feature_pool_shape (`list[int]`, *optional*, defaults to `[7, 7, 256]`):
The shape of the average-pooled feature map.
coordinate_size (`int`, *optional*, defaults to 128):
Dimension of the coordinate embeddings.
shape_size (`int`, *optional*, defaults to 128):
Dimension of the width and height embeddings.
has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use a relative attention bias in the self-attention mechanism.
has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use a spatial attention bias in the self-attention mechanism.
has_visual_segment_embedding (`bool`, *optional*, defaults to `False`):
Whether or not to add visual segment embeddings.
detectron2_config_args (`dict`, *optional*):
Dictionary containing the configuration arguments of the Detectron2 visual backbone. Refer to [this
file](https://github.com/microsoft/unilm/blob/master/layoutlmft/layoutlmft/models/layoutlmv2/detectron2_config.py)
for details regarding default values.
Example:
```python
>>> from transformers import LayoutLMv2Config, LayoutLMv2Model
>>> # Initializing a LayoutLMv2 microsoft/layoutlmv2-base-uncased style configuration
>>> configuration = LayoutLMv2Config()
>>> # Initializing a model (with random weights) from the microsoft/layoutlmv2-base-uncased style configuration
>>> model = LayoutLMv2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "layoutlmv2"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
max_2d_position_embeddings=1024,
max_rel_pos=128,
rel_pos_bins=32,
fast_qkv=True,
max_rel_2d_pos=256,
rel_2d_pos_bins=64,
convert_sync_batchnorm=True,
image_feature_pool_shape=[7, 7, 256],
coordinate_size=128,
shape_size=128,
has_relative_attention_bias=True,
has_spatial_attention_bias=True,
has_visual_segment_embedding=False,
detectron2_config_args=None,
**kwargs,
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
**kwargs,
)
self.max_2d_position_embeddings = max_2d_position_embeddings
self.max_rel_pos = max_rel_pos
self.rel_pos_bins = rel_pos_bins
self.fast_qkv = fast_qkv
self.max_rel_2d_pos = max_rel_2d_pos
self.rel_2d_pos_bins = rel_2d_pos_bins
self.convert_sync_batchnorm = convert_sync_batchnorm
self.image_feature_pool_shape = image_feature_pool_shape
self.coordinate_size = coordinate_size
self.shape_size = shape_size
self.has_relative_attention_bias = has_relative_attention_bias
self.has_spatial_attention_bias = has_spatial_attention_bias
self.has_visual_segment_embedding = has_visual_segment_embedding
self.detectron2_config_args = (
detectron2_config_args if detectron2_config_args is not None else self.get_default_detectron2_config()
)
@classmethod
def get_default_detectron2_config(cls):
return {
"MODEL.MASK_ON": True,
"MODEL.PIXEL_STD": [57.375, 57.120, 58.395],
"MODEL.BACKBONE.NAME": "build_resnet_fpn_backbone",
"MODEL.FPN.IN_FEATURES": ["res2", "res3", "res4", "res5"],
"MODEL.ANCHOR_GENERATOR.SIZES": [[32], [64], [128], [256], [512]],
"MODEL.RPN.IN_FEATURES": ["p2", "p3", "p4", "p5", "p6"],
"MODEL.RPN.PRE_NMS_TOPK_TRAIN": 2000,
"MODEL.RPN.PRE_NMS_TOPK_TEST": 1000,
"MODEL.RPN.POST_NMS_TOPK_TRAIN": 1000,
"MODEL.POST_NMS_TOPK_TEST": 1000,
"MODEL.ROI_HEADS.NAME": "StandardROIHeads",
"MODEL.ROI_HEADS.NUM_CLASSES": 5,
"MODEL.ROI_HEADS.IN_FEATURES": ["p2", "p3", "p4", "p5"],
"MODEL.ROI_BOX_HEAD.NAME": "FastRCNNConvFCHead",
"MODEL.ROI_BOX_HEAD.NUM_FC": 2,
"MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION": 14,
"MODEL.ROI_MASK_HEAD.NAME": "MaskRCNNConvUpsampleHead",
"MODEL.ROI_MASK_HEAD.NUM_CONV": 4,
"MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION": 7,
"MODEL.RESNETS.DEPTH": 101,
"MODEL.RESNETS.SIZES": [[32], [64], [128], [256], [512]],
"MODEL.RESNETS.ASPECT_RATIOS": [[0.5, 1.0, 2.0]],
"MODEL.RESNETS.OUT_FEATURES": ["res2", "res3", "res4", "res5"],
"MODEL.RESNETS.NUM_GROUPS": 32,
"MODEL.RESNETS.WIDTH_PER_GROUP": 8,
"MODEL.RESNETS.STRIDE_IN_1X1": False,
}
def get_detectron2_config(self):
detectron2_config = detectron2.config.get_cfg()
for k, v in self.detectron2_config_args.items():
attributes = k.split(".")
to_set = detectron2_config
for attribute in attributes[:-1]:
to_set = getattr(to_set, attribute)
setattr(to_set, attributes[-1], v)
return detectron2_config
__all__ = ["LayoutLMv2Config"]
| LayoutLMv2Config |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pep8_naming/N815.py | {
"start": 542,
"end": 648
} | class ____(D):
lower: int
CONSTANT: str
mixedCase: bool
_mixedCase: list
mixed_Case: set
| E |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels09.py | {
"start": 315,
"end": 1636
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels09.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "scatter"})
chart.axis_ids = [45740416, 45705856]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"data_labels": {"value": 1, "position": "above"},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
"data_labels": {"value": 1, "position": "below"},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/migrations/0967_large_tables_legacy_json_field.py | {
"start": 188,
"end": 3939
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0966_groupopenperiod_data_pending_inc_detector_id_index"),
]
operations = [
migrations.AlterField(
model_name="controlfile",
name="headers",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict),
),
migrations.AlterField(
model_name="externalissue",
name="metadata",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict, null=True),
),
migrations.AlterField(
model_name="featureadoption",
name="data",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict),
),
migrations.AlterField(
model_name="file",
name="headers",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict),
),
migrations.AlterField(
model_name="grouphashmetadata",
name="hashing_metadata",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(null=True),
),
migrations.AlterField(
model_name="groupinbox",
name="reason_details",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict, null=True),
),
migrations.AlterField(
model_name="organizationonboardingtask",
name="data",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict),
),
migrations.AlterField(
model_name="projectdebugfile",
name="data",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict, null=True),
),
migrations.AlterField(
model_name="projectkey",
name="data",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict),
),
migrations.AlterField(
model_name="promptsactivity",
name="data",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict),
),
migrations.AlterField(
model_name="pullrequestcomment",
name="reactions",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(null=True),
),
migrations.AlterField(
model_name="release",
name="data",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict),
),
migrations.AlterField(
model_name="repository",
name="config",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict),
),
]
| Migration |
python | django-import-export__django-import-export | import_export/tmp_storages.py | {
"start": 1189,
"end": 1710
} | class ____(BaseStorage):
"""
By default memcache maximum size per key is 1MB, be careful with large files.
"""
CACHE_LIFETIME = 86400
CACHE_PREFIX = "django-import-export-"
def save(self, data):
if not self.name:
self.name = uuid4().hex
cache.set(self.CACHE_PREFIX + self.name, data, self.CACHE_LIFETIME)
def read(self):
return cache.get(self.CACHE_PREFIX + self.name)
def remove(self):
cache.delete(self.CACHE_PREFIX + self.name)
| CacheStorage |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/sanity/__init__.py | {
"start": 12285,
"end": 21236
} | class ____:
"""Parser for the consolidated sanity test ignore file."""
NO_CODE = '_'
def __init__(self, args: SanityConfig) -> None:
if data_context().content.collection:
ansible_version = '%s.%s' % tuple(get_ansible_version().split('.')[:2])
ansible_label = 'Ansible %s' % ansible_version
file_name = 'ignore-%s.txt' % ansible_version
else:
ansible_label = 'Ansible'
file_name = 'ignore.txt'
self.args = args
self.relative_path = os.path.join(data_context().content.sanity_path, file_name)
self.path = os.path.join(data_context().content.root, self.relative_path)
self.ignores: dict[str, dict[str, dict[str, int]]] = collections.defaultdict(lambda: collections.defaultdict(dict))
self.skips: dict[str, dict[str, int]] = collections.defaultdict(lambda: collections.defaultdict(int))
self.parse_errors: list[tuple[int, int, str]] = []
self.file_not_found_errors: list[tuple[int, str]] = []
lines = read_lines_without_comments(self.path, optional=True)
targets = SanityTargets.get_targets()
paths = set(target.path for target in targets)
tests_by_name: dict[str, SanityTest] = {}
versioned_test_names: set[str] = set()
unversioned_test_names: dict[str, str] = {}
directories = paths_to_dirs(list(paths))
paths_by_test: dict[str, set[str]] = {}
display.info('Read %d sanity test ignore line(s) for %s from: %s' % (len(lines), ansible_label, self.relative_path), verbosity=1)
for test in sanity_get_tests():
test_targets = SanityTargets.filter_and_inject_targets(test, targets)
if isinstance(test, SanityMultipleVersion):
versioned_test_names.add(test.name)
for python_version in test.supported_python_versions:
test_name = '%s-%s' % (test.name, python_version)
paths_by_test[test_name] = set(target.path for target in test.filter_targets_by_version(args, test_targets, python_version))
tests_by_name[test_name] = test
else:
unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS))
paths_by_test[test.name] = set(target.path for target in test.filter_targets_by_version(args, test_targets, ''))
tests_by_name[test.name] = test
for line_no, line in enumerate(lines, start=1):
if not line:
self.parse_errors.append((line_no, 1, "Line cannot be empty or contain only a comment"))
continue
parts = line.split(' ')
path = parts[0]
codes = parts[1:]
if not path:
self.parse_errors.append((line_no, 1, "Line cannot start with a space"))
continue
if path.endswith(os.path.sep):
if path not in directories:
self.file_not_found_errors.append((line_no, path))
continue
else:
if path not in paths:
self.file_not_found_errors.append((line_no, path))
continue
if not codes:
self.parse_errors.append((line_no, len(path), "Error code required after path"))
continue
code = codes[0]
if not code:
self.parse_errors.append((line_no, len(path) + 1, "Error code after path cannot be empty"))
continue
if len(codes) > 1:
self.parse_errors.append((line_no, len(path) + len(code) + 2, "Error code cannot contain spaces"))
continue
parts = code.split('!')
code = parts[0]
commands = parts[1:]
parts = code.split(':')
test_name = parts[0]
error_codes = parts[1:]
test = tests_by_name.get(test_name)
if not test:
unversioned_name = unversioned_test_names.get(test_name)
if unversioned_name:
self.parse_errors.append((line_no, len(path) + len(unversioned_name) + 2, "Sanity test '%s' cannot use a Python version like '%s'" % (
unversioned_name, test_name)))
elif test_name in versioned_test_names:
self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires a Python version like '%s-%s'" % (
test_name, test_name, args.controller_python.version)))
else:
self.parse_errors.append((line_no, len(path) + 2, "Sanity test '%s' does not exist" % test_name))
continue
if path.endswith(os.path.sep) and not test.include_directories:
self.parse_errors.append((line_no, 1, "Sanity test '%s' does not support directory paths" % test_name))
continue
if path not in paths_by_test[test_name] and not test.no_targets:
self.parse_errors.append((line_no, 1, "Sanity test '%s' does not test path '%s'" % (test_name, path)))
continue
if commands and error_codes:
self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Error code cannot contain both '!' and ':' characters"))
continue
if commands:
command = commands[0]
if len(commands) > 1:
self.parse_errors.append((line_no, len(path) + len(test_name) + len(command) + 3, "Error code cannot contain multiple '!' characters"))
continue
if command == 'skip':
if not test.can_skip:
self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' cannot be skipped" % test_name))
continue
existing_line_no = self.skips.get(test_name, {}).get(path)
if existing_line_no:
self.parse_errors.append((line_no, 1, "Duplicate '%s' skip for path '%s' first found on line %d" % (test_name, path, existing_line_no)))
continue
self.skips[test_name][path] = line_no
continue
self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Command '!%s' not recognized" % command))
continue
if not test.can_ignore:
self.parse_errors.append((line_no, len(path) + 1, "Sanity test '%s' cannot be ignored" % test_name))
continue
if test.error_code:
if not error_codes:
self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires an error code" % test_name))
continue
error_code = error_codes[0]
if len(error_codes) > 1:
self.parse_errors.append((line_no, len(path) + len(test_name) + len(error_code) + 3, "Error code cannot contain multiple ':' characters"))
continue
if error_code in test.optional_error_codes:
self.parse_errors.append((line_no, len(path) + len(test_name) + 3, "Optional error code '%s' cannot be ignored" % (
error_code)))
continue
else:
if error_codes:
self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' does not support error codes" % test_name))
continue
error_code = self.NO_CODE
existing = self.ignores.get(test_name, {}).get(path, {}).get(error_code)
if existing:
if test.error_code:
self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for error code '%s' for path '%s' first found on line %d" % (
test_name, error_code, path, existing)))
else:
self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for path '%s' first found on line %d" % (
test_name, path, existing)))
continue
self.ignores[test_name][path][error_code] = line_no
@staticmethod
def load(args: SanityConfig) -> SanityIgnoreParser:
"""Return the current SanityIgnore instance, initializing it if needed."""
try:
return SanityIgnoreParser.instance # type: ignore[attr-defined]
except AttributeError:
pass
instance = SanityIgnoreParser(args)
SanityIgnoreParser.instance = instance # type: ignore[attr-defined]
return instance
| SanityIgnoreParser |
python | celery__celery | t/unit/worker/test_components.py | {
"start": 343,
"end": 520
} | class ____:
def test_create__eventloop(self):
w = Mock(name='w')
w.use_eventloop = True
Timer(w).create(w)
assert not w.timer.queue
| test_Timer |
python | kamyu104__LeetCode-Solutions | Python/short-encoding-of-words.py | {
"start": 145,
"end": 634
} | class ____(object):
def minimumLengthEncoding(self, words):
"""
:type words: List[str]
:rtype: int
"""
words = list(set(words))
_trie = lambda: collections.defaultdict(_trie)
trie = _trie()
nodes = [functools.reduce(dict.__getitem__, word[::-1], trie)
for word in words]
return sum(len(word) + 1
for i, word in enumerate(words)
if len(nodes[i]) == 0)
| Solution |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/utils.py | {
"start": 26267,
"end": 41799
} | class ____(Provider):
def __init__(self) -> None:
try:
from oci.generative_ai_inference import models
except ImportError as ex:
raise ModuleNotFoundError(
"Could not import oci python package. "
"Please make sure you have the oci package installed."
) from ex
# Completion
self.oci_completion_request = models.LlamaLlmInferenceRequest
# Generic chat request and message models
self.oci_chat_request = models.GenericChatRequest
self.oci_chat_message = {
"USER": models.UserMessage,
"SYSTEM": models.SystemMessage,
"ASSISTANT": models.AssistantMessage,
"TOOL": models.ToolMessage,
}
# Content models
self.oci_chat_message_text_content = models.TextContent
self.oci_chat_message_image_content = models.ImageContent
self.oci_chat_message_image_url = models.ImageUrl
# Tooling models for Generic format
self.oci_tool = models.FunctionDefinition
self.oci_tool_call = models.FunctionCall
self.chat_api_format = models.BaseChatRequest.API_FORMAT_GENERIC
def completion_response_to_text(self, response: Any) -> str:
return response.data.inference_response.choices[0].text
def completion_stream_to_text(self, event_data: Any) -> str:
return event_data["text"]
def chat_response_to_text(self, response: Any) -> str:
message = response.data.chat_response.choices[0].message
content = message.content[0] if getattr(message, "content", None) else None
return content.text if content else ""
def chat_stream_to_text(self, event_data: Dict) -> str:
content = event_data.get("message", {}).get("content", None)
if not content:
return ""
return content[0]["text"]
def chat_generation_info(self, response: Any) -> Dict[str, Any]:
chat_resp = response.data.chat_response
info: Dict[str, Any] = {
"finish_reason": chat_resp.choices[0].finish_reason,
}
if hasattr(chat_resp, "time_created"):
try:
info["time_created"] = str(chat_resp.time_created)
except Exception:
pass
# Extract tool calls from assistant message (Generic format)
try:
print("Hello")
assistant_message = chat_resp.choices[0].message
tool_calls = getattr(assistant_message, "tool_calls", None)
if tool_calls:
formatted: List[Dict[str, Any]] = []
for tc in tool_calls:
name = getattr(tc, "name", None)
arguments = getattr(tc, "arguments", None) or getattr(
tc, "parameters", None
)
tc_id = getattr(tc, "id", None) or uuid.uuid4().hex[:]
if name is None:
continue
if isinstance(arguments, dict):
input_str = json.dumps(arguments)
else:
input_str = arguments
formatted.append(
{"toolUseId": tc_id, "name": name, "input": input_str}
)
if formatted:
info["tool_calls"] = formatted
except Exception:
pass
return info
def chat_stream_generation_info(self, event_data: Dict) -> Dict[str, Any]:
info: Dict[str, Any] = {
"finish_reason": event_data.get("finishReason"),
}
tc_list = (
event_data.get("functionCalls")
or event_data.get("toolCalls")
or event_data.get("tool_calls")
)
if tc_list:
formatted: List[Dict[str, Any]] = []
for tc in tc_list:
if isinstance(tc, dict):
name = tc.get("name")
arguments = tc.get("arguments") or tc.get("parameters")
tc_id = tc.get("id") or uuid.uuid4().hex[:]
else:
name = getattr(tc, "name", None)
arguments = getattr(tc, "arguments", None) or getattr(
tc, "parameters", None
)
tc_id = getattr(tc, "id", None) or uuid.uuid4().hex[:]
if name is None:
continue
if isinstance(arguments, dict):
input_str = json.dumps(arguments)
else:
input_str = arguments
formatted.append({"toolUseId": tc_id, "name": name, "input": input_str})
if formatted:
info["tool_calls"] = formatted
return {k: v for k, v in info.items() if v is not None}
def messages_to_oci_params(self, messages: Sequence[ChatMessage]) -> Dict[str, Any]:
role_map = {
"user": "USER",
"system": "SYSTEM",
"chatbot": "ASSISTANT",
"assistant": "ASSISTANT",
"tool": "TOOL",
"function": "TOOL",
}
oci_messages: List[Any] = []
for msg in messages:
content_blocks = getattr(msg, "blocks", None) or msg.content or ""
role_key = msg.role if isinstance(msg.role, str) else msg.role.value
role = role_map[role_key]
if role == "TOOL":
tool_call_id = msg.additional_kwargs.get("tool_call_id")
if tool_call_id is None:
continue
contents = self._process_message_content(content_blocks)
oci_messages.append(
self.oci_chat_message[role](
role="TOOL", tool_call_id=tool_call_id, content=contents
)
)
continue
if role == "ASSISTANT":
contents = (
self._process_message_content(content_blocks)
if content_blocks
else None
)
tool_calls_li = msg.additional_kwargs.get("tool_calls", [])
oci_tool_calls = None
if tool_calls_li and self.oci_tool_call is not None:
oci_tool_calls = []
for tc in tool_calls_li:
validate_tool_call(tc)
name = tc.get("name")
arguments = tc.get("input")
if not isinstance(arguments, str):
arguments = json.dumps(arguments)
tc_id = tc.get("toolUseId")
try:
if tc_id is not None:
oci_tool_calls.append(
self.oci_tool_call(
name=name, arguments=arguments, id=tc_id
)
)
else:
oci_tool_calls.append(
self.oci_tool_call(name=name, arguments=arguments)
)
except TypeError:
oci_tool_calls.append(
self.oci_tool_call(name=name, arguments=arguments)
)
kwargs: Dict[str, Any] = {}
if contents is not None:
kwargs["content"] = contents
if oci_tool_calls:
kwargs["tool_calls"] = oci_tool_calls
oci_messages.append(self.oci_chat_message[role](**kwargs))
continue
contents = self._process_message_content(content_blocks)
oci_messages.append(self.oci_chat_message[role](content=contents))
return {
"messages": oci_messages,
"api_format": self.chat_api_format,
"top_k": -1,
}
def _process_message_content(
self, content: Union[str, List[Union[TextBlock, ImageBlock]]]
) -> List[Any]:
if isinstance(content, str):
return [self.oci_chat_message_text_content(text=content)]
if not isinstance(content, list):
raise ValueError("Message content must be a string or blocks.")
processed: List[Any] = []
for item in content:
if isinstance(item, TextBlock):
processed.append(self.oci_chat_message_text_content(text=item.text))
elif isinstance(item, ImageBlock):
processed.append(
self.oci_chat_message_image_content(
image_url=self.oci_chat_message_image_url(url=str(item.url))
)
)
else:
raise ValueError(
f"Items in blocks must be TextBlock or ImageBlock, got: {type(item)}"
)
return processed
def convert_to_oci_tool(
self,
tool: Union[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
) -> Dict[str, Any]:
try:
from oci.generative_ai_inference import models
except ImportError as ex:
raise ModuleNotFoundError(
"Could not import oci python package. "
"Please make sure you have the oci package installed."
) from ex
def _fn_properties_from_callable(fn: Callable) -> Dict[str, Any]:
sig = inspect.signature(fn)
properties: Dict[str, Any] = {}
required: List[str] = []
py_to_json = {
str: "string",
int: "integer",
float: "number",
bool: "boolean",
}
for name, param in sig.parameters.items():
ann = param.annotation
json_type = (
py_to_json.get(ann, "string") if isinstance(ann, type) else "string"
)
if param.default is inspect._empty:
required.append(name)
properties[name] = {
"type": json_type,
"description": f"Parameter: {name}",
}
return {"properties": properties, "required": required}
if isinstance(tool, BaseTool):
tool_name = getattr(tool, "name", None) or getattr(
tool.metadata, "name", None
)
tool_desc = getattr(tool, "description", None)
if tool_desc is None and (tool_fn := getattr(tool, "fn", None)) is not None:
tool_desc = tool_fn.__doc__
if tool_name is None:
tool_name = tool_fn.__name__
if tool_desc is None:
tool_desc = getattr(tool.metadata, "description", None)
if not tool_name or not tool_desc:
raise ValueError(f"Tool {tool} does not have a name or description.")
params_dict = tool.metadata.get_parameters_dict() or {}
properties = params_dict.get("properties", {})
required = params_dict.get("required", [])
return models.FunctionDefinition(
type="FUNCTION",
name=tool_name,
description=tool_desc,
parameters={
"type": "object",
"properties": properties,
"required": required,
},
)
if isinstance(tool, dict):
if not all(k in tool for k in ("title", "description", "properties")):
raise ValueError(
"Unsupported dict type. Tool must be passed in as a BaseTool instance, JSON schema dict, or BaseModel type."
)
return models.FunctionDefinition(
type="FUNCTION",
name=tool.get("title"),
description=tool.get("description"),
parameters={
"type": "object",
"properties": tool.get("properties", {}),
"required": tool.get("required", []),
},
)
if isinstance(tool, type) and issubclass(tool, BaseModel):
schema = tool.model_json_schema()
return models.FunctionDefinition(
type="FUNCTION",
name=schema.get("title", tool.__name__),
description=schema.get("description", tool.__name__),
parameters={
"type": "object",
"properties": schema.get("properties", {}),
"required": schema.get("required", []),
},
)
if callable(tool):
schema_like = _fn_properties_from_callable(tool)
return models.FunctionDefinition(
type="FUNCTION",
name=tool.__name__,
description=tool.__doc__ or f"Callable function: {tool.__name__}",
parameters={
"type": "object",
"properties": schema_like["properties"],
"required": schema_like["required"],
},
)
raise ValueError(
f"Unsupported tool type {type(tool)}. Tool must be passed in as a BaseTool instance, JSON schema dict, or BaseModel type."
)
PROVIDERS = {"cohere": CohereProvider(), "meta": MetaProvider(), "xai": XAIProvider()}
def get_provider(model: str, provider_name: str = None) -> Any:
if provider_name is None:
provider_name = model.split(".")[0].lower()
if provider_name not in PROVIDERS:
raise ValueError(
f"Invalid provider derived from model_id: {model} "
"Please explicitly pass in the supported provider "
"when using custom endpoint"
)
return PROVIDERS[provider_name]
def get_context_size(model: str, context_size: int = None) -> int:
if context_size is None:
try:
return OCIGENAI_LLMS[model]
except KeyError as e:
if model.startswith(CUSTOM_ENDPOINT_PREFIX):
raise ValueError(
f"Invalid context size derived from model_id: {model} "
"Please explicitly pass in the context size "
"when using custom endpoint",
e,
) from e
else:
raise ValueError(
f"Invalid model name {model} "
"Please double check the following OCI documentation if the model is supported "
"https://docs.public.oneportal.content.oci.oraclecloud.com/en-us/iaas/Content/generative-ai/pretrained-models.htm#pretrained-models",
e,
) from e
else:
return context_size
def validate_tool_call(tool_call: Dict[str, Any]):
if (
"input" not in tool_call
or "toolUseId" not in tool_call
or "name" not in tool_call
):
raise ValueError("Invalid tool call.")
def force_single_tool_call(response: ChatResponse) -> None:
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) > 1:
response.message.additional_kwargs["tool_calls"] = [tool_calls[0]]
| XAIProvider |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 26106,
"end": 26723
} | class ____(TestBooleanField):
"""
Valid and invalid values for `BooleanField` when `allow_null=True`.
"""
valid_inputs = {
'true': True,
'false': False,
'null': None,
True: True,
False: False,
None: None
}
invalid_inputs = {
'foo': ['Must be a valid boolean.'],
}
outputs = {
'true': True,
'false': False,
'null': None,
True: True,
False: False,
None: None,
'other': True
}
field = serializers.BooleanField(allow_null=True)
# String types...
| TestNullableBooleanField |
python | allegroai__clearml | clearml/backend_api/session/client/client.py | {
"start": 1174,
"end": 2934
} | class ____(Exception):
"""
Class for representing an API error.
self.data - ``dict`` of all returned JSON data
self.code - HTTP response code
self.subcode - server response subcode
self.codes - (self.code, self.subcode) tuple
self.message - result message sent from server
"""
def __init__(self, response: CallResult, extra_info: Any = None) -> None:
"""
Create a new APIError from a server response
"""
super(APIError, self).__init__()
self._response: CallResult = response
self.extra_info = extra_info
self.data: Dict = response.response_data
self.meta: ResponseMeta = response.meta
self.code: int = response.meta.result_code
self.subcode: int = response.meta.result_subcode
self.message: Text = response.meta.result_msg
self.codes: Tuple[int, int] = (self.code, self.subcode)
def get_traceback(self) -> Optional[List[str]]:
"""
Return server traceback for error, or None if doesn't exist.
"""
try:
return self.meta.error_stack
except AttributeError:
return None
def __str__(self) -> str:
message = "{}: ".format(type(self).__name__)
if self.extra_info:
message += "{}: ".format(self.extra_info)
if not self.meta:
message += "no meta available"
return message
if not self.code:
message += "no error code available"
return message
message += "code {0.code}".format(self)
if self.subcode:
message += "/{.subcode}".format(self)
if self.message:
message += ": {.message}".format(self)
return message
| APIError |
python | jazzband__django-simple-history | simple_history/tests/admin.py | {
"start": 1756,
"end": 2585
} | class ____(SimpleHistoryAdmin):
def get_historical_record_context_helper(self, request, historical_record):
return HistoricalPollWithManyToManyContextHelper(self.model, historical_record)
admin.site.register(Book, SimpleHistoryAdmin)
admin.site.register(Choice, ChoiceAdmin)
admin.site.register(ConcreteExternal, SimpleHistoryAdmin)
admin.site.register(Document, SimpleHistoryAdmin)
admin.site.register(Employee, SimpleHistoryAdmin)
admin.site.register(ExternalModelWithCustomUserIdField, SimpleHistoryAdmin)
admin.site.register(FileModel, FileModelAdmin)
admin.site.register(Paper, SimpleHistoryAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(Planet, PlanetAdmin)
admin.site.register(Poll, SimpleHistoryAdmin)
admin.site.register(PollWithManyToMany, PollWithManyToManyAdmin)
| PollWithManyToManyAdmin |
python | pytorch__pytorch | torch/_higher_order_ops/schema.py | {
"start": 791,
"end": 1503
} | class ____:
@staticmethod
def from_example(
example_value: Any,
*,
name: str = "",
default_value: Optional[Any] = None,
is_mutated: bool = False,
kw_only: bool = False,
) -> HopArgumentInfo:
if default_value is not None:
assert type(example_value) is type(default_value), (
f"example_value type {type(example_value)} doesn't match default_value type: {type(default_value)}"
)
return HopArgumentInfo(
name=name,
example_value=example_value,
default_value=default_value,
is_mutated=is_mutated,
kw_only=kw_only,
)
| HopArgumentInfoGen |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_blocks/test_admonitions.py | {
"start": 66,
"end": 5878
} | class ____(util.MdCase):
"""Test Blocks admonitions cases."""
extension = ['pymdownx.blocks.admonition']
extension_configs = {
'pymdownx.blocks.admonition': {
'types': [
'note',
'custom',
{'name': 'custom2'},
{'name': 'custom3', 'class': 'different'},
{'name': 'custom4', 'class': 'different', 'title': 'Default'},
{'name': 'custom5', 'title': 'Default'}
]
}
}
def test_optional_title(self):
"""Test that tab is not processed if title is omitted."""
self.check_markdown(
R'''
/// admonition
Some *content*
///
''',
r'''
<div class="admonition">
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_type_no_title(self):
"""Test test type as title."""
self.check_markdown(
R'''
/// admonition
type: note
attrs: {class: other}
Some *content*
///
''',
r'''
<div class="admonition note other">
<p class="admonition-title">Note</p>
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_type_empty_title(self):
"""Test test empty title."""
self.check_markdown(
R'''
/// admonition |
type: note
attrs: {class: other}
Some *content*
///
''',
r'''
<div class="admonition note other">
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_admonition(self):
"""Test admonition with title."""
self.check_markdown(
R'''
/// admonition | A Title
Some *content*
///
''',
r'''
<div class="admonition">
<p class="admonition-title">A Title</p>
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_note_no_title(self):
"""Test note with no title."""
self.check_markdown(
R'''
/// note
Some *content*
///
''',
r'''
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_note_with_title(self):
"""Test note with no title."""
self.check_markdown(
R'''
/// note | A Title
Some *content*
///
''',
r'''
<div class="admonition note">
<p class="admonition-title">A Title</p>
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_custom(self):
"""Test custom type (one not shipped by default)."""
self.check_markdown(
R'''
/// custom | A Title
Some *content*
///
''',
r'''
<div class="admonition custom">
<p class="admonition-title">A Title</p>
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_custom_title(self):
"""Test custom title."""
self.check_markdown(
R'''
/// custom
Some *content*
///
''',
r'''
<div class="admonition custom">
<p class="admonition-title">Custom</p>
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_custom_dict_title(self):
"""Test custom title with dictionary form."""
self.check_markdown(
R'''
/// custom2
Some *content*
///
''',
r'''
<div class="admonition custom2">
<p class="admonition-title">Custom2</p>
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_custom_explicit_title(self):
"""Test custom with an explicit, default title."""
self.check_markdown(
R'''
/// custom5
Some *content*
///
''',
r'''
<div class="admonition custom5">
<p class="admonition-title">Default</p>
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_custom_with_class(self):
"""Test custom title with configured custom class."""
self.check_markdown(
R'''
/// custom3
Some *content*
///
''',
r'''
<div class="admonition different">
<p class="admonition-title">Different</p>
<p>Some <em>content</em></p>
</div>
''',
True
)
def test_custom_with_class_and_title(self):
"""Test custom title with configured custom class and title."""
self.check_markdown(
R'''
/// custom4
Some *content*
///
''',
r'''
<div class="admonition different">
<p class="admonition-title">Default</p>
<p>Some <em>content</em></p>
</div>
''',
True
)
| TestBlocksAdmonitions |
python | h5py__h5py | examples/swmr_multiprocess.py | {
"start": 1900,
"end": 3865
} | class ____(Process):
def __init__(self, event, fname, dsetname):
super().__init__()
self._event = event
self._fname = fname
self._dsetname = dsetname
def run(self):
self.log = logging.getLogger('writer')
self.log.info("Creating file %s", self._fname)
f = h5py.File(self._fname, 'w', libver='latest')
try:
arr = np.array([1,2,3,4])
dset = f.create_dataset(self._dsetname, chunks=(2,), maxshape=(None,), data=arr)
assert not f.swmr_mode
self.log.info("SWMR mode")
f.swmr_mode = True
assert f.swmr_mode
self.log.debug("Sending initial event")
self._event.set()
# Write loop
for i in range(5):
new_shape = ((i+1) * len(arr), )
self.log.info("Resizing dset shape: %s"%str(new_shape))
dset.resize( new_shape )
self.log.debug("Writing data")
dset[i*len(arr):] = arr
#dset.write_direct( arr, np.s_[:], np.s_[i*len(arr):] )
self.log.debug("Flushing data")
dset.flush()
self.log.info("Sending event")
self._event.set()
finally:
f.close()
if __name__ == "__main__":
logging.basicConfig(format='%(levelname)10s %(asctime)s %(name)10s %(message)s',level=logging.INFO)
fname = 'swmrmp.h5'
dsetname = 'data'
if len(sys.argv) > 1:
fname = sys.argv[1]
if len(sys.argv) > 2:
dsetname = sys.argv[2]
event = Event()
reader = SwmrReader(event, fname, dsetname)
writer = SwmrWriter(event, fname, dsetname)
logging.info("Starting reader")
reader.start()
logging.info("Starting reader")
writer.start()
logging.info("Waiting for writer to finish")
writer.join()
logging.info("Waiting for reader to finish")
reader.join()
| SwmrWriter |
python | pyinstaller__pyinstaller | tests/functional/modules/pyi_testmod_relimp3a/aa/pyi_testmod_relimp3c.py | {
"start": 509,
"end": 547
} | class ____:
string = "... and this"
| c1 |
python | pandas-dev__pandas | pandas/tests/io/formats/test_to_latex.py | {
"start": 10593,
"end": 11573
} | class ____:
def test_to_latex_bold_rows(self):
# GH 16707
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(bold_rows=True)
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
& a & b \\
\midrule
\textbf{0} & 1 & b1 \\
\textbf{1} & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_no_bold_rows(self):
# GH 16707
df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
result = df.to_latex(bold_rows=False)
expected = _dedent(
r"""
\begin{tabular}{lrl}
\toprule
& a & b \\
\midrule
0 & 1 & b1 \\
1 & 2 & b2 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
| TestToLatexBold |
python | django__django | tests/fixtures/models.py | {
"start": 3990,
"end": 4236
} | class ____(models.Model):
key = models.CharField(max_length=3, unique=True)
obj = models.ForeignKey("CircularB", models.SET_NULL, null=True)
objects = NaturalKeyManager()
def natural_key(self):
return (self.key,)
| CircularA |
python | py-pdf__pypdf | pypdf/generic/_link.py | {
"start": 2771,
"end": 4951
} | class ____:
"""Direct reference link being preserved until we can resolve it correctly."""
def __init__(self, reference: ArrayObject) -> None:
"""reference: an ArrayObject whose first element is the Page indirect object"""
self._reference = reference
def find_referenced_page(self) -> IndirectObject:
return self._reference[0]
def patch_reference(self, target_pdf: "PdfWriter", new_page: IndirectObject) -> None:
"""target_pdf: PdfWriter which the new link went into"""
self._reference[0] = new_page
ReferenceLink = Union[NamedReferenceLink, DirectReferenceLink]
def extract_links(new_page: "PageObject", old_page: "PageObject") -> list[tuple[ReferenceLink, ReferenceLink]]:
"""Extracts links from two pages on the assumption that the two pages are
the same. Produces one list of (new link, old link) tuples.
"""
new_links = [_build_link(link, new_page) for link in new_page.get("/Annots", [])]
old_links = [_build_link(link, old_page) for link in old_page.get("/Annots", [])]
return [
(new_link, old_link) for (new_link, old_link)
in zip(new_links, old_links)
if new_link and old_link
]
def _build_link(indirect_object: IndirectObject, page: "PageObject") -> Optional[ReferenceLink]:
src = cast("PdfReader", page.pdf)
link = cast(DictionaryObject, indirect_object.get_object())
if (not isinstance(link, DictionaryObject)) or link.get("/Subtype") != "/Link":
return None
if "/A" in link:
action = cast(DictionaryObject, link["/A"])
if action.get("/S") != "/GoTo":
return None
if "/D" not in action:
return None
return _create_link(action["/D"], src)
if "/Dest" in link:
return _create_link(link["/Dest"], src)
return None # Nothing to do here
def _create_link(reference: PdfObject, source_pdf: "PdfReader")-> Optional[ReferenceLink]:
if isinstance(reference, TextStringObject):
return NamedReferenceLink(reference, source_pdf)
if isinstance(reference, ArrayObject):
return DirectReferenceLink(reference)
return None
| DirectReferenceLink |
python | getsentry__sentry | tests/sentry/deletions/test_project.py | {
"start": 10640,
"end": 13573
} | class ____(DeleteProjectTest):
def setUp(self) -> None:
self.workflow_engine_project = self.create_project(name="workflow_engine_test")
self.snuba_query = self.create_snuba_query()
self.subscription = QuerySubscription.objects.create(
project=self.workflow_engine_project,
status=QuerySubscription.Status.ACTIVE.value,
subscription_id="123",
snuba_query=self.snuba_query,
)
self.data_source = self.create_data_source(
organization=self.organization, source_id=self.subscription.id
)
self.detector_data_condition_group = self.create_data_condition_group(
organization=self.organization
)
(
self.workflow,
self.detector,
self.detector_workflow,
_, # the workflow trigger group for a migrated metric alert rule is None
) = self.create_detector_and_workflow(project=self.workflow_engine_project)
self.detector.update(workflow_condition_group=self.detector_data_condition_group)
self.detector_trigger = self.create_data_condition(
comparison=200,
condition_result=DetectorPriorityLevel.HIGH,
type=Condition.GREATER_OR_EQUAL,
condition_group=self.detector_data_condition_group,
)
self.data_source_detector = self.create_data_source_detector(
data_source=self.data_source, detector=self.detector
)
def test_delete_detector_data_source(self) -> None:
self.ScheduledDeletion.schedule(instance=self.workflow_engine_project, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Detector.objects.filter(id=self.detector.id).exists()
assert not DataSource.objects.filter(id=self.data_source.id).exists()
assert not DataSourceDetector.objects.filter(id=self.data_source_detector.id).exists()
assert not QuerySubscription.objects.filter(id=self.subscription.id).exists()
assert not SnubaQuery.objects.filter(id=self.snuba_query.id).exists()
def test_delete_detector_data_conditions(self) -> None:
self.ScheduledDeletion.schedule(instance=self.workflow_engine_project, days=0)
with self.tasks():
run_scheduled_deletions()
assert not DataConditionGroup.objects.filter(
id=self.detector_data_condition_group.id
).exists()
assert not DataCondition.objects.filter(id=self.detector_trigger.id).exists()
def test_not_delete_workflow(self) -> None:
self.ScheduledDeletion.schedule(instance=self.workflow_engine_project, days=0)
with self.tasks():
run_scheduled_deletions()
assert not DetectorWorkflow.objects.filter(id=self.detector_workflow.id).exists()
assert Workflow.objects.filter(id=self.workflow.id).exists()
| DeleteWorkflowEngineModelsTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor33.py | {
"start": 533,
"end": 573
} | class ____(TD1):
b: str
@dataclass
| TD2 |
python | jazzband__django-oauth-toolkit | oauth2_provider/oauth2_validators.py | {
"start": 2196,
"end": 42797
} | class ____(RequestValidator):
# Return the given claim only if the given scope is present.
# Extended as needed for non-standard OIDC claims/scopes.
# Override by setting to None to ignore scopes.
# see https://openid.net/specs/openid-connect-core-1_0.html#ScopeClaims
# For example, for the "nickname" claim, you need the "profile" scope.
oidc_claim_scope = {
"sub": "openid",
"name": "profile",
"family_name": "profile",
"given_name": "profile",
"middle_name": "profile",
"nickname": "profile",
"preferred_username": "profile",
"profile": "profile",
"picture": "profile",
"website": "profile",
"gender": "profile",
"birthdate": "profile",
"zoneinfo": "profile",
"locale": "profile",
"updated_at": "profile",
"email": "email",
"email_verified": "email",
"address": "address",
"phone_number": "phone",
"phone_number_verified": "phone",
}
def _extract_basic_auth(self, request):
"""
Return authentication string if request contains basic auth credentials,
otherwise return None
"""
auth = request.headers.get("HTTP_AUTHORIZATION", None)
if not auth:
return None
split = auth.split(" ", 1)
if len(split) != 2:
return None
auth_type, auth_string = split
if auth_type != "Basic":
return None
return auth_string
def _check_secret(self, provided_secret, stored_secret):
"""
Checks whether the provided client secret is valid.
Supports both hashed and unhashed secrets.
"""
try:
identify_hasher(stored_secret)
return check_password(provided_secret, stored_secret)
except ValueError: # Raised if the stored_secret is not hashed.
return constant_time_compare(provided_secret, stored_secret)
def _authenticate_basic_auth(self, request):
"""
Authenticates with HTTP Basic Auth.
Note: as stated in rfc:`2.3.1`, client_id and client_secret must be encoded with
"application/x-www-form-urlencoded" encoding algorithm.
"""
auth_string = self._extract_basic_auth(request)
if not auth_string:
return False
try:
encoding = request.encoding or settings.DEFAULT_CHARSET or "utf-8"
except AttributeError:
encoding = "utf-8"
try:
b64_decoded = base64.b64decode(auth_string)
except (TypeError, binascii.Error):
log.debug("Failed basic auth: %r can't be decoded as base64", auth_string)
return False
try:
auth_string_decoded = b64_decoded.decode(encoding)
except UnicodeDecodeError:
log.debug("Failed basic auth: %r can't be decoded as unicode by %r", auth_string, encoding)
return False
try:
client_id, client_secret = map(unquote_plus, auth_string_decoded.split(":", 1))
except ValueError:
log.debug("Failed basic auth, Invalid base64 encoding.")
return False
if self._load_application(client_id, request) is None:
log.debug("Failed basic auth: Application %s does not exist" % client_id)
return False
elif request.client.client_id != client_id:
log.debug("Failed basic auth: wrong client id %s" % client_id)
return False
elif (
request.client.client_type == "public"
and request.grant_type == "urn:ietf:params:oauth:grant-type:device_code"
):
return True
elif not self._check_secret(client_secret, request.client.client_secret):
log.debug("Failed basic auth: wrong client secret %s" % client_secret)
return False
else:
return True
def _authenticate_request_body(self, request):
"""
Try to authenticate the client using client_id and client_secret
parameters included in body.
Remember that this method is NOT RECOMMENDED and SHOULD be limited to
clients unable to directly utilize the HTTP Basic authentication scheme.
See rfc:`2.3.1` for more details.
"""
# TODO: check if oauthlib has already unquoted client_id and client_secret
try:
client_id = request.client_id
client_secret = getattr(request, "client_secret", "") or ""
except AttributeError:
return False
if self._load_application(client_id, request) is None:
log.debug("Failed body auth: Application %s does not exists" % client_id)
return False
elif (
request.client.client_type == "public"
and request.grant_type == "urn:ietf:params:oauth:grant-type:device_code"
):
return True
elif not self._check_secret(client_secret, request.client.client_secret):
log.debug("Failed body auth: wrong client secret %s" % client_secret)
return False
else:
return True
def _load_application(self, client_id, request):
"""
If request.client was not set, load application instance for given
client_id and store it in request.client
"""
if request.client:
# check for cached client, to save the db hit if this has already been loaded
if not isinstance(request.client, Application):
# resetting request.client (client_id=%r): not an Application, something else set request.client erroneously
request.client = None
elif request.client.client_id != client_id:
# resetting request.client (client_id=%r): request.client.client_id does not match the given client_id
request.client = None
elif not request.client.is_usable(request):
# resetting request.client (client_id=%r): request.client is a valid Application, but is not usable
request.client = None
else:
# request.client is a valid Application, reusing it
return request.client
try:
# cache not hit, loading application from database for client_id %r
client = Application.objects.get(client_id=client_id)
if not client.is_usable(request):
# Failed to load application: Application %r is not usable
return None
request.client = client
# Loaded application with client_id %r from database
return request.client
except Application.DoesNotExist:
# Failed to load application: Application with client_id %r does not exist
return None
def _set_oauth2_error_on_request(self, request, access_token, scopes):
if access_token is None:
error = OrderedDict(
[
("error", "invalid_token"),
("error_description", _("The access token is invalid.")),
]
)
elif access_token.is_expired():
error = OrderedDict(
[
("error", "invalid_token"),
("error_description", _("The access token has expired.")),
]
)
elif not access_token.allow_scopes(scopes):
error = OrderedDict(
[
("error", "insufficient_scope"),
("error_description", _("The access token is valid but does not have enough scope.")),
]
)
else:
log.warning("OAuth2 access token is invalid for an unknown reason.")
error = OrderedDict(
[
("error", "invalid_token"),
]
)
request.oauth2_error = error
return request
def client_authentication_required(self, request, *args, **kwargs):
"""
Determine if the client has to be authenticated
This method is called only for grant types that supports client authentication:
* Authorization code grant
* Resource owner password grant
* Refresh token grant
If the request contains authorization headers, always authenticate the client
no matter the grant type.
If the request does not contain authorization headers, proceed with authentication
only if the client is of type `Confidential`.
If something goes wrong, call oauthlib implementation of the method.
"""
if self._extract_basic_auth(request):
return True
try:
if request.client_id and request.client_secret:
return True
except AttributeError:
log.debug("Client ID or client secret not provided...")
pass
self._load_application(request.client_id, request)
log.debug("Determining if client authentication is required for client %r", request.client)
if request.client:
return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL
return super().client_authentication_required(request, *args, **kwargs)
def authenticate_client(self, request, *args, **kwargs):
"""
Check if client exists and is authenticating itself as in rfc:`3.2.1`
First we try to authenticate with HTTP Basic Auth, and that is the PREFERRED
authentication method.
Whether this fails we support including the client credentials in the request-body,
but this method is NOT RECOMMENDED and SHOULD be limited to clients unable to
directly utilize the HTTP Basic authentication scheme.
See rfc:`2.3.1` for more details
"""
authenticated = self._authenticate_basic_auth(request)
if not authenticated:
authenticated = self._authenticate_request_body(request)
return authenticated
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""
If we are here, the client did not authenticate itself as in rfc:`3.2.1` and we can
proceed only if the client exists and is not of type "Confidential".
"""
if self._load_application(client_id, request) is not None:
return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL
return False
def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
"""
Ensure the redirect_uri is listed in the Application instance redirect_uris field
"""
grant = Grant.objects.get(code=code, application=client)
return grant.redirect_uri_allowed(redirect_uri)
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""
Remove the temporary grant used to swap the authorization token.
:raises: InvalidGrantError if the grant does not exist.
"""
deleted_grant_count, _ = Grant.objects.filter(code=code, application=request.client).delete()
if not deleted_grant_count:
raise errors.InvalidGrantError(request=request)
def validate_client_id(self, client_id, request, *args, **kwargs):
"""
Ensure an Application exists with given client_id.
If it exists, it's assigned to request.client.
"""
return self._load_application(client_id, request) is not None
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
return request.client.default_redirect_uri
def get_or_create_user_from_content(self, content):
"""
An optional layer to define where to store the profile in `UserModel` or a separate model.
For example `UserOAuth`, where `user = models.OneToOneField(UserModel)` .
The function is called after checking that username is in the content.
Returns an UserModel instance;
"""
user, _ = UserModel.objects.get_or_create(**{UserModel.USERNAME_FIELD: content["username"]})
return user
def _get_token_from_authentication_server(
self, token, introspection_url, introspection_token, introspection_credentials
):
"""Use external introspection endpoint to "crack open" the token.
:param introspection_url: introspection endpoint URL
:param introspection_token: Bearer token
:param introspection_credentials: Basic Auth credentials (id,secret)
:return: :class:`models.AccessToken`
Some RFC 7662 implementations (including this one) use a Bearer token while others use Basic
Auth. Depending on the external AS's implementation, provide either the introspection_token
or the introspection_credentials.
If the resulting access_token identifies a username (e.g. Authorization Code grant), add
that user to the UserModel. Also cache the access_token up until its expiry time or a
configured maximum time.
"""
headers = None
if introspection_token:
headers = {"Authorization": "Bearer {}".format(introspection_token)}
elif introspection_credentials:
client_id = introspection_credentials[0].encode("utf-8")
client_secret = introspection_credentials[1].encode("utf-8")
basic_auth = base64.b64encode(client_id + b":" + client_secret)
headers = {"Authorization": "Basic {}".format(basic_auth.decode("utf-8"))}
try:
response = requests.post(introspection_url, data={"token": token}, headers=headers)
except requests.exceptions.RequestException:
log.exception("Introspection: Failed POST to %r in token lookup", introspection_url)
return None
# Log an exception when response from auth server is not successful
if response.status_code != http.client.OK:
log.exception(
"Introspection: Failed to get a valid response "
"from authentication server. Status code: {}, "
"Reason: {}.".format(response.status_code, response.reason)
)
return None
try:
content = response.json()
except ValueError:
log.exception("Introspection: Failed to parse response as json")
return None
if "active" in content and content["active"] is True:
if "username" in content:
user = self.get_or_create_user_from_content(content)
else:
user = None
max_caching_time = datetime.now() + timedelta(
seconds=oauth2_settings.RESOURCE_SERVER_TOKEN_CACHING_SECONDS
)
if "exp" in content:
expires = datetime.utcfromtimestamp(content["exp"])
if expires > max_caching_time:
expires = max_caching_time
else:
expires = max_caching_time
scope = content.get("scope", "")
if settings.USE_TZ:
expires = make_aware(
expires, timezone=get_timezone(oauth2_settings.AUTHENTICATION_SERVER_EXP_TIME_ZONE)
)
access_token, _created = AccessToken.objects.update_or_create(
token=token,
defaults={
"user": user,
"application": None,
"scope": scope,
"expires": expires,
},
)
return access_token
def validate_bearer_token(self, token, scopes, request):
"""
When users try to access resources, check that provided token is valid
"""
if not token:
return False
introspection_url = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_URL
introspection_token = oauth2_settings.RESOURCE_SERVER_AUTH_TOKEN
introspection_credentials = oauth2_settings.RESOURCE_SERVER_INTROSPECTION_CREDENTIALS
access_token = self._load_access_token(token)
# if there is no token or it's invalid then introspect the token if there's an external OAuth server
if not access_token or not access_token.is_valid(scopes):
if introspection_url and (introspection_token or introspection_credentials):
access_token = self._get_token_from_authentication_server(
token, introspection_url, introspection_token, introspection_credentials
)
if access_token and access_token.is_valid(scopes):
request.client = access_token.application
request.user = access_token.user
request.scopes = list(access_token.scopes)
# this is needed by django rest framework
request.access_token = access_token
return True
else:
self._set_oauth2_error_on_request(request, access_token, scopes)
return False
def _load_access_token(self, token):
token_checksum = hashlib.sha256(token.encode("utf-8")).hexdigest()
return (
AccessToken.objects.select_related("application", "user")
.filter(token_checksum=token_checksum)
.first()
)
def validate_code(self, client_id, code, client, request, *args, **kwargs):
try:
grant = Grant.objects.get(code=code, application=client)
if not grant.is_expired():
request.scopes = grant.scope.split(" ")
request.user = grant.user
if grant.nonce:
request.nonce = grant.nonce
if grant.claims:
request.claims = json.loads(grant.claims)
return True
return False
except Grant.DoesNotExist:
return False
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
"""
Validate both grant_type is a valid string and grant_type is allowed for current workflow
"""
assert grant_type in GRANT_TYPE_MAPPING # mapping misconfiguration
return request.client.allows_grant_type(*GRANT_TYPE_MAPPING[grant_type])
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
"""
We currently do not support the Authorization Endpoint Response Types registry as in
rfc:`8.4`, so validate the response_type only if it matches "code" or "token"
"""
if response_type == "code":
return client.allows_grant_type(AbstractApplication.GRANT_AUTHORIZATION_CODE)
elif response_type == "token":
return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)
elif response_type == "id_token":
return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)
elif response_type == "id_token token":
return client.allows_grant_type(AbstractApplication.GRANT_IMPLICIT)
elif response_type == "code id_token":
return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)
elif response_type == "code token":
return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)
elif response_type == "code id_token token":
return client.allows_grant_type(AbstractApplication.GRANT_OPENID_HYBRID)
else:
return False
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""
Ensure required scopes are permitted (as specified in the settings file)
"""
available_scopes = get_scopes_backend().get_available_scopes(application=client, request=request)
return set(scopes).issubset(set(available_scopes))
def get_default_scopes(self, client_id, request, *args, **kwargs):
default_scopes = get_scopes_backend().get_default_scopes(application=request.client, request=request)
return default_scopes
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
return request.client.redirect_uri_allowed(redirect_uri)
def is_pkce_required(self, client_id, request):
"""
Enables or disables PKCE verification.
Uses the setting PKCE_REQUIRED, which can be either a bool or a callable that
receives the client id and returns a bool.
"""
if callable(oauth2_settings.PKCE_REQUIRED):
return oauth2_settings.PKCE_REQUIRED(client_id)
return oauth2_settings.PKCE_REQUIRED
def get_code_challenge(self, code, request):
grant = Grant.objects.get(code=code, application=request.client)
return grant.code_challenge or None
def get_code_challenge_method(self, code, request):
grant = Grant.objects.get(code=code, application=request.client)
return grant.code_challenge_method or None
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
self._create_authorization_code(request, code)
def get_authorization_code_scopes(self, client_id, code, redirect_uri, request):
scopes = Grant.objects.filter(code=code).values_list("scope", flat=True).first()
if scopes:
return utils.scope_to_list(scopes)
return []
def rotate_refresh_token(self, request):
"""
Checks if rotate refresh token is enabled
"""
return oauth2_settings.ROTATE_REFRESH_TOKEN
def save_bearer_token(self, token, request, *args, **kwargs):
"""
Save access and refresh token.
Override _save_bearer_token and not this function when adding custom logic
for the storing of these token. This allows the transaction logic to be
separate from the token handling.
"""
# Use the AccessToken's database instead of making the assumption it is in 'default'.
with transaction.atomic(using=router.db_for_write(AccessToken)):
return self._save_bearer_token(token, request, *args, **kwargs)
def _save_bearer_token(self, token, request, *args, **kwargs):
"""
Save access and refresh token.
If refresh token is issued, remove or reuse old refresh token as in rfc:`6`.
@see: https://rfc-editor.org/rfc/rfc6749.html#section-6
"""
if "scope" not in token:
raise FatalClientError("Failed to renew access token: missing scope")
# expires_in is passed to Server on initialization
# custom server class can have logic to override this
expires = timezone.now() + timedelta(
seconds=token.get(
"expires_in",
oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS,
)
)
if request.grant_type == "client_credentials":
request.user = None
# This comes from OAuthLib:
# https://github.com/idan/oauthlib/blob/1.0.3/oauthlib/oauth2/rfc6749/tokens.py#L267
# Its value is either a new random code; or if we are reusing
# refresh tokens, then it is the same value that the request passed in
# (stored in `request.refresh_token`)
refresh_token_code = token.get("refresh_token", None)
if refresh_token_code:
# an instance of `RefreshToken` that matches the old refresh code.
# Set on the request in `validate_refresh_token`
refresh_token_instance = getattr(request, "refresh_token_instance", None)
# If we are to reuse tokens, and we can: do so
if (
not self.rotate_refresh_token(request)
and isinstance(refresh_token_instance, RefreshToken)
and refresh_token_instance.access_token
):
access_token = AccessToken.objects.select_for_update().get(
pk=refresh_token_instance.access_token.pk
)
access_token.user = request.user
access_token.scope = token["scope"]
access_token.expires = expires
access_token.token = token["access_token"]
access_token.application = request.client
access_token.save()
# else create fresh with access & refresh tokens
else:
# revoke existing tokens if possible to allow reuse of grant
if isinstance(refresh_token_instance, RefreshToken):
# First, to ensure we don't have concurrency issues, we refresh the refresh token
# from the db while acquiring a lock on it
# We also put it in the "request cache"
refresh_token_instance = RefreshToken.objects.select_for_update().get(
pk=refresh_token_instance.pk
)
request.refresh_token_instance = refresh_token_instance
previous_access_token = AccessToken.objects.filter(
source_refresh_token=refresh_token_instance
).first()
try:
refresh_token_instance.revoke()
except (AccessToken.DoesNotExist, RefreshToken.DoesNotExist):
pass
else:
setattr(request, "refresh_token_instance", None)
else:
previous_access_token = None
# If the refresh token has already been used to create an
# access token (ie it's within the grace period), return that
# access token
if not previous_access_token:
access_token = self._create_access_token(
expires,
request,
token,
source_refresh_token=refresh_token_instance,
)
self._create_refresh_token(
request, refresh_token_code, access_token, refresh_token_instance
)
else:
# make sure that the token data we're returning matches
# the existing token
token["access_token"] = previous_access_token.token
token["refresh_token"] = (
RefreshToken.objects.filter(access_token=previous_access_token).first().token
)
token["scope"] = previous_access_token.scope
# No refresh token should be created, just access token
else:
self._create_access_token(expires, request, token)
def _create_access_token(self, expires, request, token, source_refresh_token=None):
id_token = token.get("id_token", None)
if id_token:
id_token = self._load_id_token(id_token)
return AccessToken.objects.create(
user=request.user,
scope=token["scope"],
expires=expires,
token=token["access_token"],
id_token=id_token,
application=request.client,
source_refresh_token=source_refresh_token,
)
def _create_authorization_code(self, request, code, expires=None):
if not expires:
expires = timezone.now() + timedelta(seconds=oauth2_settings.AUTHORIZATION_CODE_EXPIRE_SECONDS)
return Grant.objects.create(
application=request.client,
user=request.user,
code=code["code"],
expires=expires,
redirect_uri=request.redirect_uri,
scope=" ".join(request.scopes),
code_challenge=request.code_challenge or "",
code_challenge_method=request.code_challenge_method or "",
nonce=request.nonce or "",
claims=json.dumps(request.claims or {}),
)
def _create_refresh_token(self, request, refresh_token_code, access_token, previous_refresh_token):
if previous_refresh_token:
token_family = previous_refresh_token.token_family
else:
token_family = uuid.uuid4()
return RefreshToken.objects.create(
user=request.user,
token=refresh_token_code,
application=request.client,
access_token=access_token,
token_family=token_family,
)
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""
Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: The HTTP Request (oauthlib.common.Request)
"""
if token_type_hint not in ["access_token", "refresh_token"]:
token_type_hint = None
token_types = {
"access_token": AccessToken,
"refresh_token": RefreshToken,
}
token_type = token_types.get(token_type_hint, AccessToken)
try:
token_type.objects.get(token=token).revoke()
except ObjectDoesNotExist:
for other_type in [_t for _t in token_types.values() if _t != token_type]:
# slightly inefficient on Python2, but the queryset contains only one instance
list(map(lambda t: t.revoke(), other_type.objects.filter(token=token)))
    def validate_user(self, username, password, client, request, *args, **kwargs):
        """
        Check username and password correspond to a valid and active User

        On success the authenticated user is attached to ``request.user`` and
        True is returned; otherwise False.
        """
        # Passing the optional HttpRequest adds compatibility for backends
        # which depend on its presence. Create one with attributes likely
        # to be used.
        http_request = HttpRequest()
        http_request.path = request.uri
        http_request.method = request.http_method
        # Populate the QueryDict matching the HTTP verb (http_request.GET or
        # http_request.POST) with the decoded form body parameters.
        getattr(http_request, request.http_method).update(dict(request.decoded_body))
        http_request.META = request.headers
        u = authenticate(http_request, username=username, password=password)
        if u is not None and u.is_active:
            request.user = u
            return True
        return False
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
# Avoid second query for RefreshToken since this method is invoked *after*
# validate_refresh_token.
rt = request.refresh_token_instance
if not rt.access_token_id:
try:
return AccessToken.objects.get(source_refresh_token_id=rt.pk).scope
except AccessToken.DoesNotExist:
return []
return rt.access_token.scope
    def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
        """
        Check refresh_token exists and refers to the right client.
        Also attach User instance to the request object
        """
        rt = RefreshToken.objects.filter(token=refresh_token).select_related("access_token").first()
        if not rt:
            return False
        # A token revoked longer ago than the configured grace period is no
        # longer usable. If reuse protection is enabled, presenting such a
        # token is treated as a reuse attack and the whole token family
        # (the rotation chain) is revoked.
        if rt.revoked is not None and rt.revoked <= timezone.now() - timedelta(
            seconds=oauth2_settings.REFRESH_TOKEN_GRACE_PERIOD_SECONDS
        ):
            if oauth2_settings.REFRESH_TOKEN_REUSE_PROTECTION and rt.token_family:
                rt_token_family = RefreshToken.objects.filter(token_family=rt.token_family)
                for related_rt in rt_token_family.all():
                    related_rt.revoke()
            return False
        request.user = rt.user
        request.refresh_token = rt.token
        # Temporary store RefreshToken instance to be reused by get_original_scopes and save_bearer_token.
        request.refresh_token_instance = rt
        return rt.application == client
def _save_id_token(self, jti, request, expires, *args, **kwargs):
scopes = request.scope or " ".join(request.scopes)
id_token = IDToken.objects.create(
user=request.user,
scope=scopes,
expires=expires,
jti=jti,
application=request.client,
)
return id_token
@classmethod
def _get_additional_claims_is_request_agnostic(cls):
return len(inspect.signature(cls.get_additional_claims).parameters) == 1
    def get_jwt_bearer_token(self, token, token_handler, request):
        # JWT bearer tokens are produced exactly like OIDC ID tokens here.
        return self.get_id_token(token, token_handler, request)
def get_claim_dict(self, request):
if self._get_additional_claims_is_request_agnostic():
claims = {"sub": lambda r: str(r.user.pk)}
else:
claims = {"sub": str(request.user.pk)}
# https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
if self._get_additional_claims_is_request_agnostic():
add = self.get_additional_claims()
else:
add = self.get_additional_claims(request)
claims.update(add)
return claims
    def get_discovery_claims(self, request):
        """Claim names advertised by the OIDC discovery endpoint."""
        claims = ["sub"]
        if self._get_additional_claims_is_request_agnostic():
            # Extra claim names can only be enumerated when get_additional_claims
            # does not depend on a concrete request.
            # NOTE(review): get_claim_dict() also contains "sub", so "sub" ends up
            # listed twice here -- confirm duplicates are harmless downstream.
            claims += list(self.get_claim_dict(request).keys())
        return claims
def get_oidc_claims(self, token, token_handler, request):
data = self.get_claim_dict(request)
claims = {}
# TODO if request.claims then return only the claims requested, but limited by granted scopes.
for k, v in data.items():
if not self.oidc_claim_scope or self.oidc_claim_scope.get(k) in request.scopes:
claims[k] = v(request) if callable(v) else v
return claims
    def get_id_token_dictionary(self, token, token_handler, request):
        """
        Get the claims to put in the ID Token.

        These claims are in addition to the claims automatically added by
        ``oauthlib`` - aud, iat, nonce, at_hash, c_hash.

        This function adds in iss, exp and auth_time, plus any claims added from
        calling ``get_oidc_claims()``

        Returns a ``(claims, expiration_time)`` tuple.
        """
        claims = self.get_oidc_claims(token, token_handler, request)
        expiration_time = timezone.now() + timedelta(seconds=oauth2_settings.ID_TOKEN_EXPIRE_SECONDS)
        # Required ID Token claims
        claims.update(
            **{
                "iss": self.get_oidc_issuer_endpoint(request),
                # dateformat "U" renders a datetime as seconds since the epoch.
                "exp": int(dateformat.format(expiration_time, "U")),
                "auth_time": int(dateformat.format(request.user.last_login, "U")),
                "jti": str(uuid.uuid4()),
            }
        )
        return claims, expiration_time
    def get_oidc_issuer_endpoint(self, request):
        # Delegated to settings so deployments can derive the issuer URL from
        # the incoming request.
        return oauth2_settings.oidc_issuer(request)
    def finalize_id_token(self, id_token, token, token_handler, request):
        """Populate, sign and persist the ID Token; return the serialized JWT."""
        claims, expiration_time = self.get_id_token_dictionary(token, token_handler, request)
        id_token.update(**claims)
        # Workaround for oauthlib bug #746
        # https://github.com/oauthlib/oauthlib/issues/746
        if "nonce" not in id_token and request.nonce:
            id_token["nonce"] = request.nonce
        header = {
            "typ": "JWT",
            "alg": request.client.algorithm,
        }
        # RS256 consumers expect a kid in the header for verifying the token
        if request.client.algorithm == AbstractApplication.RS256_ALGORITHM:
            header["kid"] = request.client.jwk_key.thumbprint()
        # default=str lets non-JSON-native values (datetimes, UUIDs) serialize.
        jwt_token = jwt.JWT(
            header=json.dumps(header, default=str),
            claims=json.dumps(id_token, default=str),
        )
        jwt_token.make_signed_token(request.client.jwk_key)
        # Use the IDToken's database instead of making the assumption it is in 'default'.
        with transaction.atomic(using=router.db_for_write(IDToken)):
            id_token = self._save_id_token(id_token["jti"], request, expiration_time)
            # this is needed by django rest framework
            request.access_token = id_token
            request.id_token = id_token
        return jwt_token.serialize()
    def validate_jwt_bearer_token(self, token, scopes, request):
        # JWT bearer tokens are validated exactly like OIDC ID tokens.
        return self.validate_id_token(token, scopes, request)
def validate_id_token(self, token, scopes, request):
"""
When users try to access resources, check that provided id_token is valid
"""
if not token:
return False
id_token = self._load_id_token(token)
if not id_token:
return False
if not id_token.allow_scopes(scopes):
return False
request.client = id_token.application
request.user = id_token.user
request.scopes = scopes
# this is needed by django rest framework
request.access_token = id_token
return True
    def _load_id_token(self, token):
        """Resolve a serialized ID-token JWT to its stored IDToken row.

        Returns None when the signing key cannot be determined, the signature
        or expiry check fails, or no matching row exists.
        """
        key = self._get_key_for_token(token)
        if not key:
            return None
        try:
            # jwt.JWT verifies the signature (and standard claims) on load.
            jwt_token = jwt.JWT(key=key, jwt=token)
            claims = json.loads(jwt_token.claims)
            return IDToken.objects.get(jti=claims["jti"])
        except (JWException, JWTExpired, IDToken.DoesNotExist):
            return None
    def _get_key_for_token(self, token):
        """
        Peek at the unvalidated token to discover who it was issued for
        and then use that to load that application and its key.
        """
        # Deserialize WITHOUT verification: we only need the payload's "aud"
        # claim to find the owning application (verification happens later).
        unverified_token = jws.JWS()
        unverified_token.deserialize(token)
        claims = json.loads(unverified_token.objects["payload"].decode("utf-8"))
        if "aud" not in claims:
            return None
        application = self._get_client_by_audience(claims["aud"])
        if application:
            return application.jwk_key
        # Implicitly returns None when no application matches the aud claim.
def _get_client_by_audience(self, audience):
"""
Load a client by the aud claim in a JWT.
aud may be multi-valued, if your provider makes it so.
This function is separate to allow further customization.
"""
if isinstance(audience, str):
audience = [audience]
return Application.objects.filter(client_id__in=audience).first()
    def validate_user_match(self, id_token_hint, scopes, claims, request):
        """Always accept; id_token_hint matching is not implemented yet."""
        # TODO: Fix to validate when necessary according
        # https://github.com/idan/oauthlib/blob/master/oauthlib/oauth2/rfc6749/request_validator.py#L556
        # http://openid.net/specs/openid-connect-core-1_0.html#AuthRequest id_token_hint section
        return True
    def get_authorization_code_nonce(self, client_id, code, redirect_uri, request):
        """Extracts nonce from saved authorization code.

        If present in the Authentication Request, Authorization
        Servers MUST include a nonce Claim in the ID Token with the
        Claim Value being the nonce value sent in the Authentication
        Request. Authorization Servers SHOULD perform no other
        processing on nonce values used. The nonce value is a
        case-sensitive string.

        Only code param should be sufficient to retrieve grant code from
        any storage you are using. However, `client_id` and `redirect_uri`
        have been validated and can be used also.

        :param client_id: Unicode client identifier
        :param code: Unicode authorization code grant
        :param redirect_uri: Unicode absolute URI
        :return: Unicode nonce

        Method is used by:
            - Authorization Token Grant Dispatcher
        """
        nonce = Grant.objects.filter(code=code).values_list("nonce", flat=True).first()
        if nonce:
            return nonce
        # Implicitly returns None when the code is unknown or stored no nonce.
    def get_userinfo_claims(self, request):
        """
        Return the OIDC claims for the current access token, as used by the
        UserInfo endpoint.
        """
        return self.get_oidc_claims(request.access_token, None, request)
    def get_additional_claims(self, request):
        # Override hook: subclasses return a mapping of extra OIDC claims
        # (values may be callables when the override is request-agnostic).
        return {}
    def is_origin_allowed(self, client_id, origin, request, *args, **kwargs):
        """Indicate if the given origin is allowed to access the token endpoint
        via Cross-Origin Resource Sharing (CORS). CORS is used by browser-based
        clients, such as Single-Page Applications, to perform the Authorization
        Code Grant.

        Verifies if request's origin is within Application's allowed origins list.
        """
        # request.client was resolved earlier in the oauthlib validation flow.
        return request.client.origin_allowed(origin)
| OAuth2Validator |
python | tensorflow__tensorflow | tensorflow/compiler/tests/xla_call_module_test.py | {
"start": 1691,
"end": 62337
} | class ____(xla_test.XLATestCase, parameterized.TestCase):
  def _assertOpOutputMatchesExpected(self,
                                     op,
                                     args,
                                     expected,
                                     equality_fn=None):
    """Asserts op(*args) == expected.

    Args:
      op: callable, traced with jit_compile=True inside the test scope.
      args: tuple of inputs passed to `op`.
      expected: expected outputs, compared via `equality_fn`.
      equality_fn: optional comparison; defaults to assertAllClose (rtol=1e-3).
    """
    with self.test_scope():
      tf_func = def_function.function(op, autograph=False, jit_compile=True)
      result = tf_func(*args)
      if not equality_fn:
        equality_fn = self.assertAllClose
      equality_fn(result, expected, rtol=1e-3)
def testing_platform(self):
"""Current testing platform, one of CPU, GPU, TPU."""
if self.device in ['CPU', 'XLA_CPU']:
return 'CPU'
elif self.device in ['GPU', 'XLA_GPU']:
if test.is_built_with_rocm():
return 'ROCM'
else:
return 'CUDA'
elif self.device in ['TPU', 'XLA_TPU']:
return 'TPU'
else:
assert False, f'Unexpected {self.device=}'
  def test_basic(self):
    """Smoke test: compile and run a static-shape StableHLO module, sin(cos(x))."""
    x = np.array([1., 2., 3.], dtype=np.float32)
    def f(x):
      # sin(cos(x))
      module, version = serialize("""
module @jit_f.0 {
  func.func public @main(%arg0: tensor<3xf32>) -> tensor<3xf32> {
    %0 = stablehlo.cosine %arg0 : tensor<3xf32>
    %1 = stablehlo.sine %0 : tensor<3xf32>
    return %1 : tensor<3xf32>
  }
}
""")
      return xla.call_module([x], version=version,
                             module=module, Tout=[x.dtype], Sout=[x.shape],
                             platforms=[self.testing_platform()])
    self._assertOpOutputMatchesExpected(f, (x,), (np.sin(np.cos(x)),))
  def test_basic_with_token_v8(self):
    """Version-8 modules take a single leading token when has_token_input_output is set."""
    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    def f(x):
      # sin(cos(x))
      module, _ = serialize("""
module @jit_f.0 {
  func.func public @main(%arg0: !stablehlo.token, %arg1: tensor<3xf32>) -> (!stablehlo.token, tensor<3xf32>) {
    %0 = stablehlo.cosine %arg1 : tensor<3xf32>
    %1 = stablehlo.sine %0 : tensor<3xf32>
    return %arg0, %1 : !stablehlo.token, tensor<3xf32>
  }
}
""")
      return xla.call_module(
          [x],
          version=8,  # Version 8 uses only one prefix token
          module=module,
          Tout=[x.dtype],
          Sout=[x.shape],
          has_token_input_output=True,  # Version 8 cares about this
          platforms=[self.testing_platform()],
      )
    self._assertOpOutputMatchesExpected(f, (x,), (np.sin(np.cos(x)),))
  def test_basic_with_multiple_tokens(self):
    """Current versions detect token args via the jax.token attribute; two tokens here."""
    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    def f(x):
      # sin(cos(x))
      module, version = serialize("""
module @jit_f.0 {
  func.func public @main(%arg0: !stablehlo.token {jax.token = true}, %arg1: !stablehlo.token {jax.token = true}, %arg2: tensor<3xf32>) -> (!stablehlo.token, !stablehlo.token, tensor<3xf32>) {
    %0 = stablehlo.cosine %arg2 : tensor<3xf32>
    %1 = stablehlo.sine %0 : tensor<3xf32>
    return %arg0, %arg1, %1 : !stablehlo.token, !stablehlo.token, tensor<3xf32>
  }
}
""")
      return xla.call_module(
          [x],
          version=version,
          module=module,
          Tout=[x.dtype],
          Sout=[x.shape],
          platforms=[self.testing_platform()],
      )
    self._assertOpOutputMatchesExpected(f, (x,), (np.sin(np.cos(x)),))
  def test_basic_with_tokens_preceeded_by_other_args(self):
    """Token arguments need not be first; here an i32 argument precedes them."""
    # NOTE(review): "preceeded" in the name is a typo for "preceded"; renaming
    # would change the test identifier, so it is left as-is.
    x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    def f(x):
      # sin(cos(x))
      module, version = serialize("""
module @jit_f.0 {
  func.func public @main(%arg0: tensor<i32>, %arg1: !stablehlo.token {jax.token = true}, %arg2: !stablehlo.token {jax.token = true}, %arg3: tensor<3xf32>) -> (!stablehlo.token, !stablehlo.token, tensor<3xf32>) {
    %0 = stablehlo.cosine %arg3 : tensor<3xf32>
    %1 = stablehlo.sine %0 : tensor<3xf32>
    return %arg1, %arg2, %1 : !stablehlo.token, !stablehlo.token, tensor<3xf32>
  }
}
""")
      return xla.call_module(
          [np.int32(0), x],
          version=version,
          module=module,
          Tout=[x.dtype],
          Sout=[x.shape],
          platforms=[self.testing_platform()],
      )
    self._assertOpOutputMatchesExpected(f, (x,), (np.sin(np.cos(x)),))
  def test_compare(self):
    """Unsigned comparison producing an i1 (bool) result."""
    x = np.uint32(2)
    res = np.bool_(True)
    def f(x):
      # return x >= 1
      module, version = serialize("""
module @jit_f_jax.0 {
  func.func public @main(%arg0: tensor<ui32>) -> tensor<i1> {
    %0 = stablehlo.constant dense<1> : tensor<ui32>
    %1 = "stablehlo.compare"(%arg0, %0) {compare_type = #stablehlo<comparison_type UNSIGNED>, comparison_direction = #stablehlo<comparison_direction GE>} : (tensor<ui32>, tensor<ui32>) -> tensor<i1>
    return %1 : tensor<i1>
  }
}
""")
      return xla.call_module([x], version=version,
                             module=module,
                             Tout=[res.dtype],
                             Sout=[res.shape],
                             platforms=[self.testing_platform()],)
    self._assertOpOutputMatchesExpected(f, (x,), (res,))
  def test_multiple_args_results(self):
    """Multiple inputs/outputs with mixed dtypes (f32 and f64)."""
    x = np.array([1., 2., 3.], dtype=np.float32)
    y = np.array([11., 12., 13., 14.], dtype=np.float64)
    def f(x, y):
      # (sin(x), cos(y))
      module, version = serialize("""
module @jit_f.0 {
  func.func public @main(%arg0: tensor<3xf32>, %arg1: tensor<4xf64>) -> (tensor<3xf32>, tensor<4xf64>) {
    %0 = stablehlo.sine %arg0 : tensor<3xf32>
    %1 = stablehlo.cosine %arg1 : tensor<4xf64>
    return %0, %1 : tensor<3xf32>, tensor<4xf64>
  }
}
""")
      return xla.call_module([x, y], version=version,
                             module=module,
                             Tout=[x.dtype, y.dtype],
                             Sout=[x.shape, y.shape],
                             platforms=[self.testing_platform()],)
    self._assertOpOutputMatchesExpected(f, (x, y), (np.sin(x), np.cos(y)))
  # TODO(b/305813026): asan test failure for the i64 test variant.
  @parameterized.named_parameters(
      dict(testcase_name='_' + dim_var_type,
           dim_var_type=dim_var_type)
      for dim_var_type in ('i32',)
  )
  def test_poly_basic(self, *, dim_var_type: str):
    """Shape polymorphism: a dimension variable is derived via get_dimension_size."""
    x = np.arange(6, dtype=np.float32).reshape((2, 3))
    def f(x):  # x: f32[2, b]
      # (sin(x), x.shape[1])
      module, version = serialize(f"""
module @jit_f.0 attributes {{jax.uses_shape_polymorphism = true}} {{
  func.func public @main(%arg1: tensor<2x?xf32>) -> (tensor<2x?xf32>, tensor<{dim_var_type}>) {{
    %arg0_new_i32 = "stablehlo.get_dimension_size"(%arg1) {{dimension = 1 : i64}} : (tensor<2x?xf32>) -> tensor<i32>
    %arg0_new = stablehlo.convert %arg0_new_i32 : (tensor<i32>) -> tensor<{dim_var_type}>
    %0, %1 = call @dyn_main(%arg0_new, %arg1) : (tensor<{dim_var_type}>, tensor<2x?xf32>) -> (tensor<2x?xf32>, tensor<{dim_var_type}>)
    return %0, %1 : tensor<2x?xf32>, tensor<{dim_var_type}>
  }}
  func.func private @dyn_main(%arg0: tensor<{dim_var_type}> {{jax.global_constant = "b"}}, %arg1: tensor<2x?xf32>) -> (tensor<2x?xf32>, tensor<{dim_var_type}>) {{
    %0 = stablehlo.sine %arg1 : tensor<2x?xf32>
    return %0, %arg0 : tensor<2x?xf32>, tensor<{dim_var_type}>
  }}
}}
""")
      return xla.call_module([x],
                             module=module, version=version,
                             Tout=[x.dtype, np.int32],
                             Sout=[(None, 3), ()],
                             platforms=[self.testing_platform()],)
    self._assertOpOutputMatchesExpected(f, (x,), (np.sin(x), x.shape[1]))
  def test_wrong_actual_args_errors(self):
    """Mismatched dtypes/static dims in actual args raise refinement errors."""
    x = np.arange(6, dtype=np.float32).reshape((3, 2))
    y = np.arange(6, dtype=np.int32).reshape((2, 3))
    # x: f32[a, 2], return x
    module, version = serialize("""
module @jit_f.0 attributes {jax.uses_shape_polymorphism = true} {
  func.func public @main(%arg0: tensor<?x2xf32>, %arg1: tensor<*xi32>) -> tensor<?x2xf32> {
    return %arg0 : tensor<?x2xf32>
  }
}
""")
    def f(x, y):
      return xla.call_module(
          [x, y],
          module=module,
          version=version,
          Tout=[x.dtype],
          Sout=[(None, 2)],
          platforms=[self.testing_platform()],
      )
    # Matching args succeed.
    self._assertOpOutputMatchesExpected(f, (x, y), (x,))
    # Wrong element type for a ranked argument.
    x_bad_etype = x.astype(np.int32)
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        re.escape(
            'invalid refinement for argument 0, refinement element types must'
            ' match in tensor<?x2xf32> -> tensor<3x2xi32>'
        ),
    ):
      self._assertOpOutputMatchesExpected(f, (x_bad_etype, y), (x_bad_etype,))
    # Wrong element type for an unranked argument.
    y_bad_etype = y.astype(np.float32)
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        re.escape(
            'invalid refinement for argument 1, refinement element types must'
            ' match in tensor<*xi32> -> tensor<2x3xf32>'
        ),
    ):
      self._assertOpOutputMatchesExpected(f, (x, y_bad_etype), (x,))
    # Wrong size for a static dimension.
    x_bad_shape = np.arange(15, dtype=np.float32).reshape(5, 3)
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        re.escape(
            'invalid refinement for argument 0, refinement dimension sizes must'
            ' match for static dimensions in tensor<?x2xf32> -> tensor<5x3xf32>'
        ),
    ):
      self._assertOpOutputMatchesExpected(f, (x_bad_shape, y), (x_bad_shape,))
  @parameterized.named_parameters(
      dict(testcase_name='_' + platform_idx_type,
           platform_idx_type=platform_idx_type)
      for platform_idx_type in ('i32', 'i64')
  )
  def test_platforms_basic(self, *, platform_idx_type: str):
    """Multi-platform module dispatches on the leading platform-index argument."""
    x = np.float32(0.)
    # returns x + 2. on CPU, x + 3. on GPU (CUDA or ROCM) and x + 4. on TPU
    module, version = serialize(f"""
module @jit_f.0 {{
  func.func public @main(%arg_platform_idx: tensor<{platform_idx_type}> {{jax.global_constant = "_platform_index"}}, %arg0: tensor<f32>) -> tensor<f32> {{
    %0 = stablehlo.convert %arg_platform_idx : (tensor<{platform_idx_type}>) -> tensor<i32>
    %to_add = "stablehlo.case"(%0) ({{
      %cpu_val = stablehlo.constant dense<2.> : tensor<f32>
      stablehlo.return %cpu_val : tensor<f32>
    }}, {{
      %gpu_val = stablehlo.constant dense<3.> : tensor<f32>
      stablehlo.return %gpu_val : tensor<f32>
    }}, {{
      %tpu_val = stablehlo.constant dense<4.> : tensor<f32>
      stablehlo.return %tpu_val : tensor<f32>
    }}) : (tensor<i32>) -> tensor<f32>
    %1 = stablehlo.add %arg0, %to_add : tensor<f32>
    return %1 : tensor<f32>
  }}
}}
""")
    platforms = ['CPU', 'CUDA', 'ROCM', 'TPU']
    def f(x):
      return xla.call_module([x], version=version,
                             module=module,
                             Tout=[np.float32],
                             Sout=[()],
                             platforms=platforms)
    expected_value = (
        x + dict(CPU=2.0, CUDA=3.0, ROCM=3.0, TPU=4.0)[self.testing_platform()]
    )
    self._assertOpOutputMatchesExpected(f, (x,), (expected_value,))
  def test_platforms_unknown_custom_call(self):
    """A non-selected platform branch may contain a custom call unknown elsewhere."""
    # One of the platform branches ("ROCM") has custom call unknown to other
    # platforms.
    if self.testing_platform() == 'ROCM':
      raise unittest.SkipTest('Not intended for ROCM')
    x = np.float32(0.)
    # returns x + 2. on CPU, x + 3. on GPU, and x + 4. on TPU
    module, version = serialize("""
module @jit_f.0 {
  func.func public @main(%arg_platform_idx: tensor<i32> {jax.global_constant = "_platform_index"}, %arg0: tensor<f32>) -> tensor<f32> {
    %to_add = "stablehlo.case"(%arg_platform_idx) ({
      %cpu_val = stablehlo.constant dense<2.> : tensor<f32>
      stablehlo.return %cpu_val : tensor<f32>
    }, {
      %gpu_val = stablehlo.constant dense<3.> : tensor<f32>
      stablehlo.return %gpu_val : tensor<f32>
    }, {
      %tpu_val = stablehlo.constant dense<4.> : tensor<f32>
      stablehlo.return %tpu_val : tensor<f32>
    }, {
      %rocm_val = stablehlo.custom_call @non_existent_target(%arg0) : (tensor<f32>) -> tensor<f32>
      stablehlo.return %rocm_val : tensor<f32>
    }) : (tensor<i32>) -> tensor<f32>
    %0 = stablehlo.add %arg0, %to_add : tensor<f32>
    return %0 : tensor<f32>
  }
}
""")
    platforms = ['CPU', 'CUDA', 'TPU', 'ROCM']
    def f(x):
      return xla.call_module([x], version=version,
                             module=module,
                             Tout=[np.float32],
                             Sout=[()],
                             platforms=platforms)
    expected_value = (
        x + dict(CPU=2.0, CUDA=3.0, TPU=4.0)[self.testing_platform()]
    )
    self._assertOpOutputMatchesExpected(f, (x,), (expected_value,))
  def test_platforms_and_poly(self):
    """Combines a platform-index argument with a shape-polymorphic dimension."""
    x = np.arange(6, dtype=np.float32)
    # returns x + 2. on CPU, x + 3. on GPU (CUDA or ROCM) and x + 4. on TPU
    module, version = serialize("""
module @jit_f_jax attributes {jax.uses_shape_polymorphism = true} {
  func.func public @main(%arg_platform_idx: tensor<i32> {jax.global_constant = "_platform_index"}, %arg0: tensor<?xf32>) -> (tensor<?xf32>) {
    %0 = stablehlo.get_dimension_size %arg0, dim = 0 : (tensor<?xf32>) -> tensor<i32>
    %5 = call @_wrapped_jax_export_main(%arg_platform_idx, %0, %arg0) : (tensor<i32>, tensor<i32>, tensor<?xf32>) -> tensor<?xf32>
    return %5 : tensor<?xf32>
  }
  func.func private @_wrapped_jax_export_main(%arg_platform_idx: tensor<i32> {jax.global_constant = "_platform_index"}, %arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?xf32>) -> (tensor<?xf32>) {
    %to_add = "stablehlo.case"(%arg_platform_idx) ({
      %cpu_val = stablehlo.constant dense<2.> : tensor<f32>
      stablehlo.return %cpu_val : tensor<f32>
    }, {
      %gpu_val = stablehlo.constant dense<3.> : tensor<f32>
      stablehlo.return %gpu_val : tensor<f32>
    }, {
      %tpu_val = stablehlo.constant dense<4.> : tensor<f32>
      stablehlo.return %tpu_val : tensor<f32>
    }) : (tensor<i32>) -> tensor<f32>
    %1 = stablehlo.reshape %arg0 : (tensor<i32>) -> tensor<1xi32>
    %3 = stablehlo.dynamic_broadcast_in_dim %to_add, %1, dims = [] : (tensor<f32>, tensor<1xi32>) -> tensor<?xf32>
    %4 = stablehlo.add %3, %arg1 : tensor<?xf32>
    return %4 : tensor<?xf32>
  }
}
""")
    platforms = ['CPU', 'CUDA', 'ROCM', 'TPU']
    def f(x):
      return xla.call_module([x], version=version,
                             module=module,
                             Tout=[np.float32],
                             Sout=[()],
                             platforms=platforms)
    expected_value = (
        x + dict(CPU=2.0, CUDA=3.0, ROCM=3.0, TPU=4.0)[self.testing_platform()]
    )
    self._assertOpOutputMatchesExpected(f, (x,), (expected_value,))
  def test_platforms_and_poly_and_tokens(self):
    """Platform index + polymorphic dimension + token argument, all together."""
    x = np.arange(6, dtype=np.float32)
    # returns x + 2. on CPU, x + 3. on GPU (CUDA or ROCM) and x + 4. on TPU
    module, version = serialize("""
module @jit_f_jax attributes {jax.uses_shape_polymorphism = true} {
  func.func public @main(%arg_platform_idx: tensor<i32> {jax.global_constant = "_platform_index"}, %arg_tok: !stablehlo.token {jax.token = true}, %arg0: tensor<?xf32>) -> (!stablehlo.token, tensor<?xf32>) {
    %0 = stablehlo.get_dimension_size %arg0, dim = 0 : (tensor<?xf32>) -> tensor<i32>
    %5:2 = call @_wrapped_jax_export_main(%arg_platform_idx, %0, %arg_tok, %arg0) : (tensor<i32>, tensor<i32>, !stablehlo.token, tensor<?xf32>) -> (!stablehlo.token, tensor<?xf32>)
    return %5#0, %5#1 : !stablehlo.token, tensor<?xf32>
  }
  func.func private @_wrapped_jax_export_main(%arg_platform_idx: tensor<i32> {jax.global_constant = "_platform_index"}, %arg0: tensor<i32> {jax.global_constant = "b"}, %arg_tok: !stablehlo.token {jax.token = true}, %arg1: tensor<?xf32>) -> (!stablehlo.token, tensor<?xf32>) {
    %to_add = "stablehlo.case"(%arg_platform_idx) ({
      %cpu_val = stablehlo.constant dense<2.> : tensor<f32>
      stablehlo.return %cpu_val : tensor<f32>
    }, {
      %gpu_val = stablehlo.constant dense<3.> : tensor<f32>
      stablehlo.return %gpu_val : tensor<f32>
    }, {
      %tpu_val = stablehlo.constant dense<4.> : tensor<f32>
      stablehlo.return %tpu_val : tensor<f32>
    }) : (tensor<i32>) -> tensor<f32>
    %1 = stablehlo.reshape %arg0 : (tensor<i32>) -> tensor<1xi32>
    %3 = stablehlo.dynamic_broadcast_in_dim %to_add, %1, dims = [] : (tensor<f32>, tensor<1xi32>) -> tensor<?xf32>
    %4 = stablehlo.add %3, %arg1 : tensor<?xf32>
    return %arg_tok, %4 : !stablehlo.token, tensor<?xf32>
  }
}
""")
    platforms = ['CPU', 'CUDA', 'ROCM', 'TPU']
    def f(x):
      return xla.call_module([x], version=version,
                             module=module,
                             Tout=[np.float32],
                             Sout=[()],
                             platforms=platforms)
    expected_value = (
        x + dict(CPU=2.0, CUDA=3.0, ROCM=3.0, TPU=4.0)[self.testing_platform()]
    )
    self._assertOpOutputMatchesExpected(f, (x,), (expected_value,))
# A module used for testing errors related to use of "platforms".
platforms_errors_module_str = """
module @jit_f.0 {
func.func public @main(%arg_platform_idx: tensor<i32>, %arg0: tensor<f32>) -> tensor<f32> {
return %arg0 : tensor<f32>
}
}
"""
  def platforms_errors_helper(
      self,
      *,
      module_str: str,
      platforms: Sequence[str] = ('CPU', 'CUDA', 'ROCM', 'TPU'),
      disabled_checks: Sequence[str] = (),
      expected_error: Optional[Exception] = None,
      expected_error_message: str = '',
  ):
    """Runs `module_str` through XlaCallModule and checks the expected outcome.

    When `expected_error` is None the call must succeed and return its input;
    otherwise it must raise `expected_error` matching `expected_error_message`.
    """
    module, version = serialize(module_str)
    x = np.float32(0.0)
    def f(x):
      return xla.call_module(
          [x],
          version=version,
          module=module,
          Tout=[np.float32],
          Sout=[()],
          platforms=platforms,
          disabled_checks=disabled_checks,
      )
    if expected_error is None:
      self._assertOpOutputMatchesExpected(f, (x,), (x,))
    else:
      with self.assertRaisesRegex(expected_error, expected_error_message):
        self._assertOpOutputMatchesExpected(f, (x,), (x,))
  def platforms_errors_singleton_platform(self):
    """Singleton `platforms` must not be paired with a platform-index argument."""
    # NOTE(review): name lacks the `test_` prefix, so the unittest runner will
    # not discover it automatically -- confirm it is invoked deliberately.
    # With singleton `platforms`, there should be no platform_index argument
    self.platforms_errors_helper(
        module_str=self.platforms_errors_module_str,
        platforms=(self.testing_platform(),),
        expected_error=errors.InvalidArgumentError,
        expected_error_message=(
            'Incorrect number of arguments passed to XlaCallModule = 1. The'
            ' module main function takes 2 arguments of which 0 platform index'
            ' arguments, 0 dimension arguments and 0 token arguments.'
        ),
    )
  def platforms_errors_no_platform_index_arg(self):
    """Multi-platform module without any argument is rejected."""
    # NOTE(review): name lacks the `test_` prefix, so the unittest runner will
    # not discover it automatically -- confirm it is invoked deliberately.
    module_str = self.platforms_errors_module_str.replace(
        '%arg_platform_idx: tensor<i32>, %arg0: tensor<f32>', ''
    )
    self.platforms_errors_helper(
        module_str=module_str,
        expected_error=errors.InvalidArgumentError,
        expected_error_message=(
            'The module should have a platform index argument but it has no '
            'arguments'
        ),
    )
  def platforms_errors_platform_index_i16(self):
    """Platform index must be 32- or 64-bit; i16 is rejected."""
    # NOTE(review): name lacks the `test_` prefix, so the unittest runner will
    # not discover it automatically -- confirm it is invoked deliberately.
    module_str = self.platforms_errors_module_str.replace('i32', 'i16')
    self.platforms_errors_helper(
        module_str=module_str,
        expected_error=errors.InvalidArgumentError,
        expected_error_message=(
            'Module argument at index 0 should be a 0-dimensional '
            '32-bit or 64-bit integer-tensor platform index argument '
            '.* has type tensor<i16>'
        ),
    )
  def platforms_errors_platform_index_non_scalar(self):
    """Platform index must be a scalar; a 1-D tensor is rejected."""
    # NOTE(review): name lacks the `test_` prefix, so the unittest runner will
    # not discover it automatically -- confirm it is invoked deliberately.
    module_str = self.platforms_errors_module_str.replace(
        'tensor<i32>', 'tensor<1xi32>'
    )
    self.platforms_errors_helper(
        module_str=module_str,
        expected_error=errors.InvalidArgumentError,
        expected_error_message=(
            'Module argument at index 0 should be a 0-dimensional '
            '32-bit integer-tensor platform index argument .* has type '
            'tensor<1xi32>'
        ),
    )
  def platforms_errors_platform_index_unranked(self):
    """Platform index must be ranked scalar; an unranked tensor is rejected."""
    # NOTE(review): name lacks the `test_` prefix, so the unittest runner will
    # not discover it automatically -- confirm it is invoked deliberately.
    module_str = self.platforms_errors_module_str.replace(
        'tensor<i32>', 'tensor<*xi32>'
    )
    self.platforms_errors_helper(
        module_str=module_str,
        expected_error=errors.InvalidArgumentError,
        expected_error_message=(
            'Module argument at index 0 should be a 0-dimensional '
            '32-bit integer-tensor platform index argument'
        ),
    )
  def platforms_errors_different_from_current(self):
    """Running on a platform absent from `platforms` fails unless checks are off."""
    # NOTE(review): name lacks the `test_` prefix, so the unittest runner will
    # not discover it automatically -- confirm it is invoked deliberately.
    platform_check_disabled_by_flags = (
        '--tf_xla_call_module_disabled_checks=platform'
        in os.getenv('TF_XLA_FLAGS', '')
    )
    self.platforms_errors_helper(
        module_str=self.platforms_errors_module_str,
        platforms=['RANDOM_PLATFORM_1', 'RANDOM_PLATFORM_2'],
        expected_error=(
            None if platform_check_disabled_by_flags else errors.NotFoundError
        ),
        expected_error_message='current platform .* is not among the platforms'
    )
  def platforms_errors_dissabled_check(self):
    """With the platform check disabled per-op, mismatched platforms succeed."""
    # NOTE(review): "dissabled" is a typo for "disabled", and the name lacks the
    # `test_` prefix so the runner will not discover it -- confirm intent.
    self.platforms_errors_helper(
        module_str=self.platforms_errors_module_str,
        platforms=('RANDOM_PLATFORM_1', 'RANDOM_PLATFORM_2'),
        disabled_checks=(xla.call_module_disable_check_platform(),),
        expected_error=None,
        expected_error_message='current platform .* is not among the platforms'
    )
  def platforms_errors_empty(self):
    """An empty `platforms` list succeeds when the platform check is disabled."""
    # NOTE(review): name lacks the `test_` prefix, so the unittest runner will
    # not discover it automatically -- confirm it is invoked deliberately.
    self.platforms_errors_helper(
        module_str=self.platforms_errors_module_str,
        platforms=[],
        disabled_checks=[xla.call_module_disable_check_platform()],
        expected_error=None,
        expected_error_message='current platform .* is not among the platforms'
    )
  def test_shape_assertion_success(self):
    """A shape_assertion custom call whose predicate holds passes silently."""
    x = np.ones((3, 5), dtype=np.int32)
    res = np.int32(x.shape[0])
    def f(x):  # x: f32[b, 5] and b = 3
      # return x.shape[0]
      module, version = serialize("""
module @jit_fun.1 attributes {jax.uses_shape_polymorphism = true} {
  func.func public @main(%arg1: tensor<?x5xi32>) -> tensor<i32> {
    %b = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?x5xi32>) -> tensor<i32>
    %3 = stablehlo.constant dense<3> : tensor<i32>
    %ok = stablehlo.compare EQ, %b, %3, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1>
    stablehlo.custom_call @shape_assertion(%ok) {
      error_message = "The error message",
      has_side_effect = true
    } : (tensor<i1>) -> ()
    return %b : tensor<i32>
  }
}
""")
      return xla.call_module([x,], version=version,
                             module=module,
                             Tout=[res.dtype],
                             Sout=[res.shape],
                             platforms=[self.testing_platform()],)
    self._assertOpOutputMatchesExpected(f, (x,), (res,))
  def test_shape_assertion_failure(self):
    """A failing shape_assertion raises with the formatted error message.

    The error_message supports positional specifiers ({0}, {1}, ...), format
    modifiers ({2,=5}), and escaped braces ({{0}); the behavior flips when
    shape_assertions checking is disabled via TF_XLA_FLAGS.
    """
    x = np.ones((3, 5), dtype=np.int32)
    res = np.int32(x.shape[0])
    def f(x):  # x: f32[b, 5] and b = 3, with a constraint b == 4.
      # return x.shape[0]
      module, version = serialize("""
module @jit_fun.1 attributes {jax.uses_shape_polymorphism = true} {
  func.func public @main(%arg1: tensor<?x5xi32>) -> tensor<i32> {
    %b = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?x5xi32>) -> tensor<i32>
    %4 = stablehlo.constant dense<4> : tensor<i32>
    %5 = stablehlo.constant dense<5> : tensor<i32>
    %11 = stablehlo.constant dense<11> : tensor<i32>
    %ok = stablehlo.compare EQ, %b, %4, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1>
    stablehlo.custom_call @shape_assertion(%ok, %b, %4, %5, %4, %5, %4, %5, %4, %5, %4, %5, %11) {
      error_message = "Expecting {0} == {1}. Extra {2,=5}, {3}, {{0}, {4}, {5}, {6}, {7}, {11}.",
      has_side_effect = true
    } : (tensor<i1>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>) -> ()
    return %b : tensor<i32>
  }
}
""")
      return xla.call_module([x,], version=version,
                             module=module,
                             Tout=[res.dtype],
                             Sout=[res.shape],
                             platforms=[self.testing_platform()],)
    # This test runs as part of two targets, with and without
    # disabling shape_assertions.
    disabled_shape_assertions_check = (
        '--tf_xla_call_module_disabled_checks=shape_assertions'
        in os.getenv('TF_XLA_FLAGS', ''))
    if disabled_shape_assertions_check:
      # No error even though the constraint is false.
      self._assertOpOutputMatchesExpected(f, (x,), (res,))
    else:
      with self.assertRaisesRegex(
          errors.InvalidArgumentError,
          re.escape('Expecting 3 == 4. Extra 5    , 4, {0}, 5, 4, 5, 4, 11.')):
        self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_invalid_shape_assertion(self):
arg_i1 = np.bool_(True)
arg_i32 = np.int32(2)
res = arg_i32
# This test runs as part of two targets, with and without
# disabling shape_assertions.
disabled_shape_assertions_check = (
'--tf_xla_call_module_disabled_checks=shape_assertions'
in os.getenv('TF_XLA_FLAGS', ''))
if disabled_shape_assertions_check:
self.skipTest('Test is N/A when shape_assertions are disabled')
subtest_count = 1
def one_subtest(error_msg: str, module_str: str):
def f(*args):
module, version = serialize(module_str)
return xla.call_module(
list(args),
version=version,
module=module,
Tout=[res.dtype],
Sout=[res.shape],
platforms=[self.testing_platform()],
)
nonlocal subtest_count
subtest_count += 1
with self.subTest(count=subtest_count, error_msg=error_msg):
with self.assertRaisesRegex(errors.InvalidArgumentError, error_msg):
self._assertOpOutputMatchesExpected(f, (arg_i1, arg_i32), (res,))
one_subtest(
'expects assert_what .* to be a constant of type tensor<i1>',
"""
module @jit_fun.1 attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg_i1: tensor<i1>, %arg_i32: tensor<i32>) -> tensor<i32> {
%ok = stablehlo.constant dense<0> : tensor<i32>
stablehlo.custom_call @shape_assertion(%ok) {
error_message = "Some error",
has_side_effect = true
} : (tensor<i32>) -> ()
return %arg_i32 : tensor<i32>
}
}
""",
)
one_subtest(
'expects static assert_what',
"""
module @jit_fun.1 attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg_i1: tensor<i1>, %arg_i32: tensor<i32>) -> tensor<i32> {
stablehlo.custom_call @shape_assertion(%arg_i1) {
error_message = "Some error",
has_side_effect = true
} : (tensor<i1>) -> ()
return %arg_i32 : tensor<i32>
}
}
""",
)
one_subtest(
'`shape_assertion` custom calls must set `has_side_effect = true`.',
"""
module @jit_fun.1 attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg_i1: tensor<i1>, %arg_i32: tensor<i32>) -> tensor<i32> {
%ok = stablehlo.constant dense<false> : tensor<i1>
stablehlo.custom_call @shape_assertion(%ok) {
error_message = "Some error",
has_side_effect = false
} : (tensor<i1>) -> ()
return %arg_i32 : tensor<i32>
}
}
""",
)
one_subtest(
'expects error_message .* Found specifier {0}',
"""
module @jit_fun.1 attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg_i1: tensor<i1>, %arg_i32: tensor<i32>) -> tensor<i32> {
%ok = stablehlo.constant dense<false> : tensor<i1>
stablehlo.custom_call @shape_assertion(%ok) {
error_message = "Some error {0}",
has_side_effect = true
} : (tensor<i1>) -> ()
return %arg_i32 : tensor<i32>
}
}
""",
)
one_subtest(
'expects static error_message_input',
"""
module @jit_fun.1 attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg_i1: tensor<i1>, %arg_i32: tensor<i32>) -> tensor<i32> {
%ok = stablehlo.constant dense<false> : tensor<i1>
stablehlo.custom_call @shape_assertion(%ok, %arg_i32) {
error_message = "Some error {0}",
has_side_effect = true
} : (tensor<i1>, tensor<i32>) -> ()
return %arg_i32 : tensor<i32>
}
}
""",
)
one_subtest(
'expects error_message_input .* to be a constant of type tensor<i32>',
"""
module @jit_fun.1 attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg_i1: tensor<i1>, %arg_i32: tensor<i32>) -> tensor<i32> {
%ok = stablehlo.constant dense<false> : tensor<i1>
%c = stablehlo.constant dense<2.0> : tensor<f32>
stablehlo.custom_call @shape_assertion(%ok, %c) {
error_message = "Some error {0}",
has_side_effect = true
} : (tensor<i1>, tensor<f32>) -> ()
return %arg_i32 : tensor<i32>
}
}
""",
)
def test_dynamic_iota(self):
x = np.ones((3, 5), dtype=np.int32)
res = np.arange(x.shape[0], dtype=np.int32)
def f(x): # x: f32[b, 5]
# return np.arange(x.shape[0], dtype=np.int32)
module, version = serialize("""
module @jit_fun.1 attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?x5xi32>) -> tensor<?xi32> {
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?x5xi32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?x5xi32>) -> tensor<?xi32>
return %0 : tensor<?xi32>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?x5xi32>) -> tensor<?xi32> {
%0 = stablehlo.reshape %arg0 : (tensor<i32>) -> tensor<1xi32>
%1 = "stablehlo.dynamic_iota"(%0) {iota_dimension = 0 : i64} : (tensor<1xi32>) -> tensor<?xi32>
return %1 : tensor<?xi32>
}
}
""")
return xla.call_module([x,], version=version,
module=module,
Tout=[res.dtype],
Sout=[(None,)],
platforms=[self.testing_platform()],)
self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_build_graph_with_any_platform(self):
"""We can construct the tf.Graph on all platforms."""
x = np.float32(0.)
module, version = serialize("""
module @jit_f.0 {
func.func public @main(%arg_platform_idx: tensor<i32>, %arg0: tensor<f32>) -> tensor<f32> {
return %arg0 : tensor<f32>
}
}
""")
platforms = ['TPU'] # the module is compilable only on TPU
def f(x):
return xla.call_module([x], version=version,
module=module,
Tout=[np.float32],
Sout=[()],
platforms=platforms)
tf_graph = def_function.function(f).get_concrete_function(x).graph
self.assertIn('XlaCallModule', str(tf_graph.as_graph_def()))
def test_dynamic_reshape(self):
x = np.ones((4, 3), dtype=np.float32)
res = x.reshape((-1,))
def f(x): # x: f32[b, 3]
module, version = serialize("""
module @jit_fun_flat_jax attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?x3xf32>) -> tensor<?xf32> {
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?x3xf32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?x3xf32>) -> tensor<?xf32>
return %0 : tensor<?xf32>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?x3xf32>) -> tensor<?xf32> {
%0 = stablehlo.constant dense<3> : tensor<i32>
%1 = stablehlo.multiply %arg0, %0 : tensor<i32>
%2 = stablehlo.reshape %1 : (tensor<i32>) -> tensor<1xi32>
%3 = stablehlo.dynamic_reshape %arg1, %2 : (tensor<?x3xf32>, tensor<1xi32>) -> tensor<?xf32>
return %3 : tensor<?xf32>
}
}
""")
return xla.call_module([x], version=version,
module=module,
Tout=[res.dtype],
Sout=[(None,)],
platforms=[self.testing_platform()],)
self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_dynamic_gather(self):
x = np.ones((3, 4), dtype=np.float32)
res = np.ones((3, 2), dtype=np.float32)
def f(x): # x: f32[b, 4]
module, version = serialize("""
module @jit_fun_flat_jax attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?x4xf32>) -> tensor<?x2xf32> {
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?x4xf32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?x4xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?x4xf32>) -> tensor<?x2xf32> {
%0 = stablehlo.constant dense<0> : tensor<i64>
%1 = stablehlo.constant dense<0> : tensor<1xi64>
%2 = stablehlo.reshape %arg0 : (tensor<i32>) -> tensor<1xi32>
%3 = stablehlo.constant dense<2> : tensor<1xi32>
%4 = stablehlo.concatenate %2, %3, dim = 0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<2xi32>
%5 = "stablehlo.dynamic_gather"(%arg1, %1, %4) {dimension_numbers = #stablehlo.gather<offset_dims = [0, 1], start_index_map = [1]>, indices_are_sorted = true} : (tensor<?x4xf32>, tensor<1xi64>, tensor<2xi32>) -> tensor<?x2xf32>
return %5 : tensor<?x2xf32>
}
}
""")
return xla.call_module([x], version=version,
module=module,
Tout=[res.dtype],
Sout=[(None, 2)],
platforms=[self.testing_platform()],)
self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_real_dynamic_slice(self):
x = np.ones((3, 4), dtype=np.float32)
res = x[-1, :]
def f(x): # x: f32[b, 4]
module, version = serialize("""
module @jit_fun_flat_jax attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?x4xf32>) -> tensor<4xf32> {
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?x4xf32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?x4xf32>) -> tensor<4xf32>
return %0 : tensor<4xf32>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?x4xf32>) -> tensor<4xf32> {
%0 = stablehlo.constant dense<-1> : tensor<i32>
%1 = stablehlo.add %arg0, %0 : tensor<i32>
%2 = stablehlo.reshape %1 : (tensor<i32>) -> tensor<1xi32>
%3 = stablehlo.constant dense<0> : tensor<1xi32>
%4 = stablehlo.concatenate %2, %3, dim = 0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<2xi32>
%5 = stablehlo.reshape %arg0 : (tensor<i32>) -> tensor<1xi32>
%6 = stablehlo.constant dense<4> : tensor<1xi32>
%7 = stablehlo.concatenate %5, %6, dim = 0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<2xi32>
%10 = stablehlo.constant dense<1> : tensor<2xi32>
%11 = stablehlo.real_dynamic_slice %arg1, %4, %7, %10 : (tensor<?x4xf32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<1x4xf32>
%12 = stablehlo.reshape %11 : (tensor<1x4xf32>) -> tensor<4xf32>
return %12 : tensor<4xf32>
}
}
""")
return xla.call_module([x], version=version,
module=module,
Tout=[x.dtype],
Sout=[(4,)],
platforms=[self.testing_platform()],)
self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_dynamic_update_slice(self):
x = np.ones((3, 4), dtype=np.float32)
idx = np.int32(-2)
res = x # The update should be a nop
def f(x, idx): # x: f32[b, 4] idx: i32
module, version = serialize("""
module @jit_fun_flat_jax attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?x4xf32>, %arg2: tensor<i32>) -> tensor<?x4xf32> {
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?x4xf32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1, %arg2) : (tensor<i32>, tensor<?x4xf32>, tensor<i32>) -> tensor<?x4xf32>
return %0 : tensor<?x4xf32>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?x4xf32>, %arg2: tensor<i32>) -> tensor<?x4xf32> {
%0 = stablehlo.constant dense<0> : tensor<i32>
%1 = stablehlo.compare LT, %arg2, %0, SIGNED : (tensor<i32>, tensor<i32>) -> tensor<i1>
%2 = stablehlo.add %arg2, %arg0 : tensor<i32>
%3 = stablehlo.select %1, %2, %arg2 : tensor<i1>, tensor<i32>
%4 = stablehlo.constant dense<0> : tensor<i32>
%5 = stablehlo.dynamic_update_slice %arg1, %arg1, %3, %4 : (tensor<?x4xf32>, tensor<?x4xf32>, tensor<i32>, tensor<i32>) -> tensor<?x4xf32>
return %5 : tensor<?x4xf32>
}
}
""")
return xla.call_module([x, idx], version=version,
module=module,
Tout=[res.dtype],
Sout=[(None, 4)],
platforms=[self.testing_platform()],)
self._assertOpOutputMatchesExpected(f, (x, idx), (res,))
def test_dynamic_broadcast_in_dim(self):
x = np.ones((3, 4), dtype=np.float32)
y = np.ones((2, 3, 4), dtype=np.float32)
res = (np.broadcast_to(x, y.shape), x + y)
def f(x, y): # x: f32[b, 4] y: f32[2, b, 4]
# return (np.broadcast_to(x, y.shape), x + y)
module, version = serialize("""
module @jit_fun.0 attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?x4xf32>, %arg2: tensor<2x?x4xf32>) -> (tensor<2x?x4xf32>, tensor<2x?x4xf32>) {
%arg0_new = "stablehlo.get_dimension_size"(%arg2) {dimension = 1 : i64} : (tensor<2x?x4xf32>) -> tensor<i32>
%0, %1 = call @dyn_main(%arg0_new, %arg1, %arg2) : (tensor<i32>, tensor<?x4xf32>, tensor<2x?x4xf32>) -> (tensor<2x?x4xf32>, tensor<2x?x4xf32>)
return %0, %1 : tensor<2x?x4xf32>, tensor<2x?x4xf32>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?x4xf32>, %arg2: tensor<2x?x4xf32>) -> (tensor<2x?x4xf32>, tensor<2x?x4xf32>) {
%0 = stablehlo.constant dense<2> : tensor<1xi32>
%2 = stablehlo.reshape %arg0 : (tensor<i32>) -> tensor<1xi32>
%3 = stablehlo.constant dense<4> : tensor<1xi32>
%4 = "stablehlo.concatenate"(%0, %2, %3) {dimension = 0 : i64} : (tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<3xi32>
%5 = "stablehlo.dynamic_broadcast_in_dim"(%arg1, %4) {broadcast_dimensions = array<i64: 1, 2>} : (tensor<?x4xf32>, tensor<3xi32>) -> tensor<2x?x4xf32>
%6 = stablehlo.add %5, %arg2 : (tensor<2x?x4xf32>, tensor<2x?x4xf32>) -> tensor<2x?x4xf32>
return %5, %6 : tensor<2x?x4xf32>, tensor<2x?x4xf32>
}
}
""")
return xla.call_module([x, y], version=version,
module=module,
Tout=[res[0].dtype, res[1].dtype],
Sout=[(2, None, 4), (2, None, 4)],
platforms=[self.testing_platform()],)
self._assertOpOutputMatchesExpected(f, (x, y), res)
@unittest.skip('TODO(necula): test is flaky')
def test_reduce(self):
x = np.arange(5, dtype=np.int32)
res = np.sum(x) * x.shape[0]
def f(x): # x: i32[b]
module, version = serialize("""
module @jit_fun attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?xi32>) -> tensor<i32> {
%arg0_new = "stablehlo.get_dimension_size"(%arg2) {dimension = 0 : i64} : (tensor<?xi32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?xi32>) -> tensor<i32>
return %0 : tensor<i32>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?xi32>) -> tensor<i32> {
%0 = stablehlo.constant dense<0> : tensor<i32>
%1 = stablehlo.reduce(%arg1 init: %0) across dimensions = [0] : (tensor<?xi32>, tensor<i32>) -> tensor<i32>
reducer(%arg2: tensor<i32>, %arg3: tensor<i32>) {
%4 = stablehlo.add %arg2, %arg3 : tensor<i32>
"stablehlo.return"(%4) : (tensor<i32>) -> ()
}
%2 = stablehlo.multiply %1, %arg0 : tensor<i32>
return %2 : tensor<i32>
}
}
""")
return xla.call_module([x], version=version,
module=module,
Tout=[res.dtype],
Sout=[res.shape],
platforms=[self.testing_platform()],)
self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_reduce_broadcast(self):
x = np.broadcast_to(np.arange(3, dtype=np.float32).reshape(3, 1), (3, 5))
res = np.arange(3, dtype=np.float32).reshape(3, 1) * 5
def f(x): # x: f32[b, 5]
module, version = serialize("""
module @jit_fun_flat_jax attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?x5xf32>) -> tensor<?x1xf32> {
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?x5xf32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?x5xf32>) -> tensor<?x1xf32>
return %0 : tensor<?x1xf32>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?x5xf32>) -> tensor<?x1xf32> {
%0 = stablehlo.constant dense<0.000000e+00> : tensor<f32>
%1 = stablehlo.reduce(%arg1 init: %0) across dimensions = [1] : (tensor<?x5xf32>, tensor<f32>) -> tensor<?xf32>
reducer(%arg2: tensor<f32>, %arg3: tensor<f32>) {
%6 = stablehlo.add %arg2, %arg3 : tensor<f32>
stablehlo.return %6 : tensor<f32>
}
%2 = stablehlo.reshape %arg0 : (tensor<i32>) -> tensor<1xi32>
%3 = stablehlo.constant dense<1> : tensor<1xi32>
%4 = stablehlo.concatenate %2, %3, dim = 0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<2xi32>
%5 = stablehlo.dynamic_broadcast_in_dim %1, %4, dims = [0] : (tensor<?xf32>, tensor<2xi32>) -> tensor<?x1xf32>
return %5 : tensor<?x1xf32>
}
}
""")
return xla.call_module([x,], version=version,
module=module,
Tout=[res.dtype],
Sout=[(None, 1)],
platforms=[self.testing_platform()],)
self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_call(self):
"""A chain of calls."""
x = np.ones((5,), dtype=np.float32)
res = np.arange(x.shape[0], dtype=np.int32)
def f(x): # x: f32[b]
module, version = serialize("""
module @jit_fun_3 attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?xf32>) -> tensor<?xi32> {
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?xf32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?xf32>) -> tensor<?xi32>
return %0 : tensor<?xi32>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?xf32>) -> tensor<?xi32> {
%0 = call @f(%arg0, %arg1) : (tensor<i32>, tensor<?xf32>) -> tensor<?xi32>
return %0 : tensor<?xi32>
}
func.func private @f(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?xf32>) -> tensor<?xi32> {
%0 = stablehlo.reshape %arg0 : (tensor<i32>) -> tensor<1xi32>
%1 = "stablehlo.dynamic_iota"(%0) {iota_dimension = 0 : i64} : (tensor<1xi32>) -> tensor<?xi32>
return %1 : tensor<?xi32>
}
}
""")
return xla.call_module([x,], version=version,
module=module,
Tout=[res.dtype],
Sout=[()],
platforms=[self.testing_platform()])
self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_identity(self):
x = np.ones((5,), dtype=np.float32)
res = x
def f(x): # x: f32[b]
module, version = serialize("""
module @jit_fun_3 attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?xf32>) -> tensor<?xf32> {
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?xf32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?xf32>) -> tensor<?xf32>
return %0 : tensor<?xf32>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?xf32>) -> tensor<?xf32> {
return %arg1 : tensor<?xf32>
}
}
""")
return xla.call_module([x], version=version,
module=module,
Tout=[res.dtype],
Sout=[()],
platforms=[self.testing_platform()])
self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_while(self):
"""A while loop with carryied dynamic shapes."""
x = np.ones((5,), dtype=np.float32)
# Compute the result in Python first
res0 = np.copy(x)
for _ in range(5):
res0 += np.arange(x.shape[0], dtype=np.float32)
res1 = np.int64(5)
def f(x): # x: f32[b]
module, version = serialize("""
module @jit_fun_flat_jax attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg1: tensor<?xf32>) -> (tensor<?xf32>, tensor<i64>) {
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?xf32>) -> tensor<i32>
%0, %1 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?xf32>) -> (tensor<?xf32>, tensor<i64>)
return %0, %1 : tensor<?xf32>, tensor<i64>
}
func.func private @dyn_main(%arg0: tensor<i32> {jax.global_constant = "b"}, %arg1: tensor<?xf32>) -> (tensor<?xf32>, tensor<i64>) {
%0 = stablehlo.constant dense<0> : tensor<i64>
%1:2 = "stablehlo.while"(%arg1, %0) ({
^bb0(%arg2: tensor<?xf32>, %arg3: tensor<i64>):
%2 = stablehlo.constant dense<5> : tensor<i64>
%3 = stablehlo.compare LT, %arg3, %2, SIGNED : (tensor<i64>, tensor<i64>) -> tensor<i1>
stablehlo.return %3 : tensor<i1>
}, {
^bb0(%arg2: tensor<?xf32>, %arg3: tensor<i64>):
%2 = stablehlo.reshape %arg0 : (tensor<i32>) -> tensor<1xi32>
%3 = stablehlo.dynamic_iota %2, dim = 0 : (tensor<1xi32>) -> tensor<?xf32>
%4 = stablehlo.add %arg2, %3 : tensor<?xf32>
%5 = stablehlo.constant dense<1> : tensor<i64>
%6 = stablehlo.add %arg3, %5 : tensor<i64>
stablehlo.return %4, %6 : tensor<?xf32>, tensor<i64>
}) : (tensor<?xf32>, tensor<i64>) -> (tensor<?xf32>, tensor<i64>)
return %1#0, %1#1 : tensor<?xf32>, tensor<i64>
}
}
""")
return xla.call_module([x,], version=version,
module=module,
Tout=[res0.dtype, res1.dtype],
Sout=[(None,), res1.shape],
platforms=[self.testing_platform()])
self._assertOpOutputMatchesExpected(f, (x,), (res0, res1))
def test_skip_shape_refinement(self):
# We skipped the shape refinement, but there are dynamic shapes.
x = np.ones((5,), dtype=np.float32)
res = x
module_attrs = '' # attribute is missing
def f(x): # x: f32[b]
module, version = serialize(f"""
module @jit_fun_3 {module_attrs} {{
func.func public @main(%arg1: tensor<?xf32>) -> tensor<?xf32> {{
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {{dimension = 0 : i64}} : (tensor<?xf32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?xf32>) -> tensor<?xf32>
return %0 : tensor<?xf32>
}}
func.func private @dyn_main(%arg0: tensor<i32> {{jax.global_constant = "b"}}, %arg1: tensor<?xf32>) -> tensor<?xf32> {{
return %arg1 : tensor<?xf32>
}}
}}
""")
return xla.call_module([x], version=version,
module=module,
Tout=[res.dtype],
Sout=[()],
platforms=[self.testing_platform()])
module_attrs = '' # attribute is missing
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Module has dynamic shapes'):
self._assertOpOutputMatchesExpected(f, (x,), (res,))
module_attrs = 'attributes {jax.uses_shape_polymorphism = false}'
with self.assertRaisesRegex(errors.InvalidArgumentError,
'Module has dynamic shapes'):
self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_uses_shape_polymorphism_before_version_8(self):
x = np.ones((5,), dtype=np.float32)
res = x
def f(x): # x: f32[b]
# No `uses_shape_polymorphism` attribute, but it default for version 7
version = 7
module, _ = serialize("""
module @jit_fun_3 {
func.func public @main(%arg1: tensor<?xf32>) -> tensor<?xf32> {
%arg0_new = "stablehlo.get_dimension_size"(%arg1) {dimension = 0 : i64} : (tensor<?xf32>) -> tensor<i32>
%0 = call @dyn_main(%arg0_new, %arg1) : (tensor<i32>, tensor<?xf32>) -> tensor<?xf32>
return %0 : tensor<?xf32>
}
func.func private @dyn_main(%arg0: tensor<i32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
return %arg1 : tensor<?xf32>
}
}
""")
return xla.call_module([x], version=version,
module=module,
Tout=[res.dtype],
Sout=[()],
platforms=[self.testing_platform()])
self._assertOpOutputMatchesExpected(f, (x,), (res,))
def test_tf_call_function(self):
"""A TensorFlow function call inside StableHLO."""
x = np.int32(2)
y = np.int32(3)
res = x + y
@function.Defun(dtypes.int32, dtypes.int32)
def foo(x, y):
return x + y
def f(x, y):
module, version = serialize("""
module @jit_fun_flat_jax {
func.func public @main(%arg0: tensor<i32>, %arg1: tensor<i32>) -> tensor<i32> {
%0 = stablehlo.custom_call @tf.call_tf_function(%arg0, %arg1) {
tf.backend_config = {called_index = 0}
} : (tensor<i32>, tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
}
""")
return xla.call_module(
[x, y],
version=version,
module=module,
Tout=[res.dtype],
Sout=[res.shape],
platforms=[self.testing_platform()],
function_list=(foo,),
)
self._assertOpOutputMatchesExpected(f, (x, y), (res,))
def test_tf_call_function_multiple_funcs(self):
"""Multiple TensorFlow function calls inside StableHLO."""
x = np.int32(2)
y = np.int32(3)
res = (x + y) + (x + y)
@function.Defun(dtypes.int32, dtypes.int32)
def foo(x, y):
return x + y
@function.Defun(dtypes.int32, dtypes.int32)
def bar(x, y):
return foo(x, y)
def f(x, y):
module, version = serialize("""
module @jit_fun_flat_jax {
func.func public @main(%arg0: tensor<i32>, %arg1: tensor<i32>) -> tensor<i32> {
%0 = stablehlo.custom_call @tf.call_tf_function(%arg0, %arg1) {
tf.backend_config = {called_index = 0}
} : (tensor<i32>, tensor<i32>) -> tensor<i32>
%1 = stablehlo.custom_call @tf.call_tf_function(%arg0, %arg1) {
tf.backend_config = {called_index = 1}
} : (tensor<i32>, tensor<i32>) -> tensor<i32>
%2 = stablehlo.custom_call @tf.call_tf_function(%0, %1) {
tf.backend_config = {called_index = 1}
} : (tensor<i32>, tensor<i32>) -> tensor<i32>
return %2 : tensor<i32>
}
}
""")
return xla.call_module(
[x, y],
version=version,
module=module,
Tout=[res.dtype],
Sout=[res.shape],
platforms=[self.testing_platform()],
function_list=(foo, bar),
)
self._assertOpOutputMatchesExpected(f, (x, y), (res,))
def test_shape_polymorphic_tf_call_function(self):
"""A TensorFlow function call inside StableHLO."""
x = np.full((2,), 2, dtype=np.int32)
y = np.full((2,), 3, dtype=np.int32)
res = x + y
@function.Defun(dtypes.int32, dtypes.int32)
def foo(x, y):
return x + y
def f(x, y):
module, version = serialize("""
module @jit_fun_flat_jax attributes {jax.uses_shape_polymorphism = true} {
func.func public @main(%arg0: tensor<?xi32>, %arg1: tensor<?xi32>) -> tensor<?xi32> {
%0 = stablehlo.get_dimension_size %arg0, dim = 0 : (tensor<?xi32>) -> tensor<i32>
%1 = stablehlo.custom_call @tf.call_tf_function(%arg0, %arg1, %0) {
tf.backend_config = {called_index = 0},
indices_of_shape_operands = dense<[2]> : tensor<1xi64>
} : (tensor<?xi32>, tensor<?xi32>, tensor<i32>) -> tensor<?xi32>
return %1 : tensor<?xi32>
}
}
""")
return xla.call_module(
[x, y],
version=version,
module=module,
Tout=[res.dtype],
Sout=[res.shape],
platforms=[self.testing_platform()],
function_list=(foo,),
)
self._assertOpOutputMatchesExpected(f, (x, y), (res,))
def test_tf_call_function_with_token(self):
"""A TensorFlow function call inside StableHLO."""
x = np.int32(2)
y = np.int32(3)
res = x + y
@function.Defun(dtypes.int32, dtypes.int32)
def foo(x, y):
return x + y
def f(x, y):
module, version = serialize("""
module @jit_fun_flat_jax {
func.func public @main(%arg0: !stablehlo.token, %arg1: tensor<i32>, %arg2: tensor<i32>) -> (!stablehlo.token, tensor<i32>) {
%0:2 = stablehlo.custom_call @tf.call_tf_function(%arg0, %arg1, %arg2) {
tf.backend_config = {called_index = 0, has_token_input_output = true}
} : (!stablehlo.token, tensor<i32>, tensor<i32>) -> (!stablehlo.token, tensor<i32>)
return %0#0, %0#1 : !stablehlo.token, tensor<i32>
}
}
""")
return xla.call_module(
[x, y],
version=version,
module=module,
Tout=[res.dtype],
Sout=[res.shape],
platforms=[self.testing_platform()],
function_list=(foo,),
)
self._assertOpOutputMatchesExpected(f, (x, y), (res,))
def test_tf_call_function_nested(self):
"""Nested XlaCallModule inside TensorFlow function calls."""
x = np.int32(2)
y = np.int32(3)
res = x + y
@function.Defun(dtypes.int32, dtypes.int32)
def add(x, y):
return x + y
@function.Defun(dtypes.int32, dtypes.int32)
def nested_xla_call(x, y):
module, version = serialize("""
module @jit_fun_flat_jax {
func.func public @main(%arg0: tensor<i32>, %arg1: tensor<i32>) -> tensor<i32> {
%0 = stablehlo.custom_call @tf.call_tf_function(%arg0, %arg1) {
tf.backend_config = {called_index = 0}
} : (tensor<i32>, tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
}
""")
return xla.call_module(
[x, y],
version=version,
module=module,
Tout=[res.dtype],
Sout=[res.shape],
platforms=[self.testing_platform()],
function_list=(add,),
)
@function.Defun(dtypes.int32, dtypes.int32)
def call(x, y):
return nested_xla_call(x, y)
def f(x, y):
module, version = serialize("""
module @jit_fun_flat_jax {
func.func public @main(%arg0: tensor<i32>, %arg1: tensor<i32>) -> tensor<i32> {
%0 = stablehlo.custom_call @tf.call_tf_function(%arg0, %arg1) {
tf.backend_config = {called_index = 0}
} : (tensor<i32>, tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
}
""")
return xla.call_module(
[x, y],
version=version,
module=module,
Tout=[res.dtype],
Sout=[res.shape],
platforms=[self.testing_platform()],
function_list=(call,),
)
self._assertOpOutputMatchesExpected(f, (x, y), (res,))
def test_tf_call_function_nested_func_renaming(self):
"""Multiple custom calls with identically named private functions."""
x = np.int32(2)
y = np.int32(3)
res0 = x + y
res1 = x - y
# Verify that multiple inner TF function calls with the same private
# functions are properly renamed during StableHLO import. This test case is
# carefully constructed such that one outer XlaCallModule op has two custom
# calls, each of which has the same private "@call" function with different
# body. This is to catch bugs in the func renaming logic.
@function.Defun(dtypes.int32, dtypes.int32)
def add(x, y):
module, version = serialize("""
module @jit_fun_flat_jax {
func.func private @call(%arg0: tensor<i32>, %arg1: tensor<i32>) -> tensor<i32> {
%0 = stablehlo.add %arg0, %arg1 : tensor<i32>
return %0 : tensor<i32>
}
func.func public @main(%arg0: tensor<i32>, %arg1: tensor<i32>) -> tensor<i32> {
%0 = func.call @call(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
}
""")
return xla.call_module(
[x, y],
version=version,
module=module,
Tout=[res0.dtype],
Sout=[res0.shape],
platforms=[self.testing_platform()],
)
@function.Defun(dtypes.int32, dtypes.int32)
def subtract(x, y):
module, version = serialize("""
module @jit_fun_flat_jax {
func.func private @call(%arg0: tensor<i32>, %arg1: tensor<i32>) -> tensor<i32> {
%0 = stablehlo.subtract %arg0, %arg1 : tensor<i32>
return %0 : tensor<i32>
}
func.func public @main(%arg0: tensor<i32>, %arg1: tensor<i32>) -> tensor<i32> {
%0 = func.call @call(%arg0, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i32>
return %0 : tensor<i32>
}
}
""")
return xla.call_module(
[x, y],
version=version,
module=module,
Tout=[res1.dtype],
Sout=[res1.shape],
platforms=[self.testing_platform()],
)
def f(x, y):
module, version = serialize("""
module @jit_fun_flat_jax {
func.func public @main(%arg0: tensor<i32>, %arg1: tensor<i32>) -> (tensor<i32>, tensor<i32>) {
%0 = stablehlo.custom_call @tf.call_tf_function(%arg0, %arg1) {
tf.backend_config = {called_index = 0}
} : (tensor<i32>, tensor<i32>) -> tensor<i32>
%1 = stablehlo.custom_call @tf.call_tf_function(%arg0, %arg1) {
tf.backend_config = {called_index = 1}
} : (tensor<i32>, tensor<i32>) -> tensor<i32>
return %0, %1 : tensor<i32>, tensor<i32>
}
}
""")
return xla.call_module(
[x, y],
version=version,
module=module,
Tout=[res0.dtype, res1.dtype],
Sout=[res0.shape, res1.shape],
platforms=[self.testing_platform()],
function_list=(add, subtract),
)
self._assertOpOutputMatchesExpected(f, (x, y), (res0, res1))
def test_op_backward_compatibility(self):
"""Test for ensuring XlaCallModuleOp backward compatibility."""
x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
def f(x):
# sin(cos(x))
module, version = serialize("""
module @jit_f.0 {
func.func public @main(%arg0: tensor<3xf32>) -> tensor<3xf32> {
%0 = stablehlo.cosine %arg0 : tensor<3xf32>
%1 = stablehlo.sine %0 : tensor<3xf32>
return %1 : tensor<3xf32>
}
}
""")
# Create the raw XlaCallModule op directly instead of calling
# `xla.call_module`, which handles default values for unpresent
# attributes.
return gen_xla_ops.xla_call_module(
[x],
version=version,
module=module,
Tout=[x.dtype],
Sout=[x.shape],
platforms=[self.testing_platform()],
)
self._assertOpOutputMatchesExpected(f, (x,), (np.sin(np.cos(x)),))
def test_op_backward_incompatibility(self):
"""Test for ensuring XlaCallModuleOp with invalid bytecode."""
x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
def f(x):
# Use an invalid MLIR string that will fail to parse when loading the
# call module op, emulating a backward incompatibility.
corrupted_module = 'stablehlo.invalid_op'
return gen_xla_ops.xla_call_module(
[x],
version=xla.call_module_maximum_supported_version(),
module=corrupted_module,
Tout=[x.dtype],
Sout=[x.shape],
platforms=[self.testing_platform()],
)
# Expect any error message to be included after `:`
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Cannot deserialize computation: .+',
):
f(x)
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(log_device_placement=True)
)
googletest.main()
| XlaCallModuleOpTest |
python | openai__openai-python | src/openai/types/conversations/conversation_item.py | {
"start": 1759,
"end": 2197
} | class ____(BaseModel):
id: str
"""The unique ID of the image generation call."""
result: Optional[str] = None
"""The generated image encoded in base64."""
status: Literal["in_progress", "completed", "generating", "failed"]
"""The status of the image generation call."""
type: Literal["image_generation_call"]
"""The type of the image generation call. Always `image_generation_call`."""
| ImageGenerationCall |
python | Pylons__pyramid | src/pyramid/testing.py | {
"start": 20096,
"end": 22096
} | class ____:
def __init__(self, response):
self._received = {}
self.response = response
def __getattr__(self, attrname):
return self
def __getitem__(self, attrname):
return self
def __call__(self, *arg, **kw):
self._received.update(kw)
return self.response
def skip_on(*platforms): # pragma: no cover
skip = False
for platform in platforms:
if skip_on.os_name.startswith(platform):
skip = True
if platform == 'pypy' and PYPY:
skip = True
def decorator(func):
if isinstance(func, type):
if skip:
return None
else:
return func
else:
def wrapper(*args, **kw):
if skip:
return
return func(*args, **kw)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
return decorator
skip_on.os_name = os.name # for testing
@contextmanager
def testConfig(
registry=None, request=None, hook_zca=True, autocommit=True, settings=None
):
"""Returns a context manager for test set up.
This context manager calls :func:`pyramid.testing.setUp` when
entering and :func:`pyramid.testing.tearDown` when exiting.
All arguments are passed directly to :func:`pyramid.testing.setUp`.
If the ZCA is hooked, it will always be un-hooked in tearDown.
This context manager allows you to write test code like this:
.. code-block:: python
:linenos:
with testConfig() as config:
config.add_route('bar', '/bar/{id}')
req = DummyRequest()
resp = myview(req)
"""
config = setUp(
registry=registry,
request=request,
hook_zca=hook_zca,
autocommit=autocommit,
settings=settings,
)
try:
yield config
finally:
tearDown(unhook_zca=hook_zca)
| MockTemplate |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 42442,
"end": 43036
} | class ____(BaseModel, extra="forbid"):
"""
Geo filter request Matches coordinates inside the rectangle, described by coordinates of lop-left and bottom-right edges
"""
top_left: "GeoPoint" = Field(
...,
description="Geo filter request Matches coordinates inside the rectangle, described by coordinates of lop-left and bottom-right edges",
)
bottom_right: "GeoPoint" = Field(
...,
description="Geo filter request Matches coordinates inside the rectangle, described by coordinates of lop-left and bottom-right edges",
)
| GeoBoundingBox |
python | kamyu104__LeetCode-Solutions | Python/expression-add-operators.py | {
"start": 31,
"end": 1727
} | class ____(object):
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
result, expr = [], []
val, i = 0, 0
val_str = ""
while i < len(num):
val = val * 10 + ord(num[i]) - ord('0')
val_str += num[i]
# Avoid "00...".
if str(val) != val_str:
break
expr.append(val_str)
self.addOperatorsDFS(num, target, i + 1, 0, val, expr, result)
expr.pop()
i += 1
return result
def addOperatorsDFS(self, num, target, pos, operand1, operand2, expr, result):
if pos == len(num) and operand1 + operand2 == target:
result.append("".join(expr))
else:
val, i = 0, pos
val_str = ""
while i < len(num):
val = val * 10 + ord(num[i]) - ord('0')
val_str += num[i]
# Avoid "00...".
if str(val) != val_str:
break
# Case '+':
expr.append("+" + val_str)
self.addOperatorsDFS(num, target, i + 1, operand1 + operand2, val, expr, result)
expr.pop()
# Case '-':
expr.append("-" + val_str)
self.addOperatorsDFS(num, target, i + 1, operand1 + operand2, -val, expr, result)
expr.pop()
# Case '*':
expr.append("*" + val_str)
self.addOperatorsDFS(num, target, i + 1, operand1, operand2 * val, expr, result)
expr.pop()
i += 1
| Solution |
python | ipython__ipython | IPython/utils/contexts.py | {
"start": 177,
"end": 1610
} | class ____:
"""Preserve a set of keys in a dictionary.
Upon entering the context manager the current values of the keys
will be saved. Upon exiting, the dictionary will be updated to
restore the original value of the preserved keys. Preserved keys
which did not exist when entering the context manager will be
deleted.
Examples
--------
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> with preserve_keys(d, 'b', 'c', 'd'):
... del d['a']
... del d['b'] # will be reset to 2
... d['c'] = None # will be reset to 3
... d['d'] = 4 # will be deleted
... d['e'] = 5
... print(sorted(d.items()))
...
[('c', None), ('d', 4), ('e', 5)]
>>> print(sorted(d.items()))
[('b', 2), ('c', 3), ('e', 5)]
"""
def __init__(self, dictionary, *keys):
self.dictionary = dictionary
self.keys = keys
def __enter__(self):
# Actions to perform upon exiting.
to_delete = []
to_update = {}
d = self.dictionary
for k in self.keys:
if k in d:
to_update[k] = d[k]
else:
to_delete.append(k)
self.to_delete = to_delete
self.to_update = to_update
def __exit__(self, *exc_info):
d = self.dictionary
for k in self.to_delete:
d.pop(k, None)
d.update(self.to_update)
| preserve_keys |
python | facebookresearch__faiss | contrib/rpc.py | {
"start": 1058,
"end": 2079
} | class ____:
" wraps a socket so that it is usable by pickle/cPickle "
def __init__(self,sock):
self.sock = sock
self.nr=0
def write(self, buf):
# print("sending %d bytes"%len(buf))
#self.sock.sendall(buf)
# print("...done")
bs = 512 * 1024
ns = 0
while ns < len(buf):
sent = self.sock.send(buf[ns:ns + bs])
ns += sent
def read(self,bs=512*1024):
#if self.nr==10000: pdb.set_trace()
self.nr+=1
# print("read bs=%d"%bs)
b = []
nb = 0
while len(b)<bs:
# print(' loop')
rb = self.sock.recv(bs - nb)
if not rb: break
b.append(rb)
nb += len(rb)
return b''.join(b)
def readline(self):
# print("readline!")
"""may be optimized..."""
s=bytes()
while True:
c=self.read(1)
s+=c
if len(c)==0 or chr(c[0])=='\n':
return s
| FileSock |
python | pypa__pip | src/pip/_vendor/rich/spinner.py | {
"start": 309,
"end": 4214
} | class ____:
"""A spinner animation.
Args:
name (str): Name of spinner (run python -m rich.spinner).
text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
style (StyleType, optional): Style for spinner animation. Defaults to None.
speed (float, optional): Speed factor for animation. Defaults to 1.0.
Raises:
KeyError: If name isn't one of the supported spinner animations.
"""
def __init__(
self,
name: str,
text: "RenderableType" = "",
*,
style: Optional["StyleType"] = None,
speed: float = 1.0,
) -> None:
try:
spinner = SPINNERS[name]
except KeyError:
raise KeyError(f"no spinner called {name!r}")
self.text: "Union[RenderableType, Text]" = (
Text.from_markup(text) if isinstance(text, str) else text
)
self.name = name
self.frames = cast(List[str], spinner["frames"])[:]
self.interval = cast(float, spinner["interval"])
self.start_time: Optional[float] = None
self.style = style
self.speed = speed
self.frame_no_offset: float = 0.0
self._update_speed = 0.0
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
yield self.render(console.get_time())
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
text = self.render(0)
return Measurement.get(console, options, text)
def render(self, time: float) -> "RenderableType":
"""Render the spinner for a given time.
Args:
time (float): Time in seconds.
Returns:
RenderableType: A renderable containing animation frame.
"""
if self.start_time is None:
self.start_time = time
frame_no = ((time - self.start_time) * self.speed) / (
self.interval / 1000.0
) + self.frame_no_offset
frame = Text(
self.frames[int(frame_no) % len(self.frames)], style=self.style or ""
)
if self._update_speed:
self.frame_no_offset = frame_no
self.start_time = time
self.speed = self._update_speed
self._update_speed = 0.0
if not self.text:
return frame
elif isinstance(self.text, (str, Text)):
return Text.assemble(frame, " ", self.text)
else:
table = Table.grid(padding=1)
table.add_row(frame, self.text)
return table
def update(
self,
*,
text: "RenderableType" = "",
style: Optional["StyleType"] = None,
speed: Optional[float] = None,
) -> None:
"""Updates attributes of a spinner after it has been started.
Args:
text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
style (StyleType, optional): Style for spinner animation. Defaults to None.
speed (float, optional): Speed factor for animation. Defaults to None.
"""
if text:
self.text = Text.from_markup(text) if isinstance(text, str) else text
if style:
self.style = style
if speed:
self._update_speed = speed
if __name__ == "__main__": # pragma: no cover
from time import sleep
from .console import Group
from .live import Live
all_spinners = Group(
*[
Spinner(spinner_name, text=Text(repr(spinner_name), style="green"))
for spinner_name in sorted(SPINNERS.keys())
]
)
with Live(all_spinners, refresh_per_second=20) as live:
while True:
sleep(0.1)
| Spinner |
python | tensorflow__tensorflow | third_party/xla/third_party/gpus/find_rocm_config.py | {
"start": 1423,
"end": 15131
} | class ____(Exception):
pass
def _get_default_rocm_path():
return "/opt/rocm"
def _get_rocm_install_path():
"""Determines and returns the ROCm installation path."""
rocm_install_path = _get_default_rocm_path()
if "ROCM_PATH" in os.environ:
rocm_install_path = os.environ["ROCM_PATH"]
# rocm_install_path = os.path.realpath(rocm_install_path)
return rocm_install_path
def _get_composite_version_number(major, minor, patch):
return 10000 * major + 100 * minor + patch
def _get_header_version(path, name):
"""Returns preprocessor defines in C header file."""
for line in io.open(path, "r", encoding="utf-8"):
match = re.match(r"#define %s +(\d+)" % name, line)
if match:
value = match.group(1)
return int(value)
raise ConfigError('#define "{}" is either\n'.format(name) +
" not present in file {} OR\n".format(path) +
" its value is not an integer literal")
def _find_rocm_config(rocm_install_path):
def rocm_version_numbers(path):
possible_version_files = [
"include/rocm-core/rocm_version.h", # ROCm 5.2
"include/rocm_version.h", # ROCm 5.1 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError(
"ROCm version file not found in {}".format(possible_version_files))
major = _get_header_version(version_file, "ROCM_VERSION_MAJOR")
minor = _get_header_version(version_file, "ROCM_VERSION_MINOR")
patch = _get_header_version(version_file, "ROCM_VERSION_PATCH")
return major, minor, patch
major, minor, patch = rocm_version_numbers(rocm_install_path)
rocm_config = {
"rocm_version_number": _get_composite_version_number(major, minor, patch)
}
return rocm_config
def _find_hipruntime_config(rocm_install_path):
def hipruntime_version_number(path):
possible_version_files = [
"include/hip/hip_version.h", # ROCm 5.2
"hip/include/hip/hip_version.h", # ROCm 5.1 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError("HIP Runtime version file not found in {}".format(
possible_version_files))
# This header file has an explicit #define for HIP_VERSION, whose value
# is (HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR)
# Retreive the major + minor and re-calculate here, since we do not
# want get into the business of parsing arith exprs
major = _get_header_version(version_file, "HIP_VERSION_MAJOR")
minor = _get_header_version(version_file, "HIP_VERSION_MINOR")
return 100 * major + minor
hipruntime_config = {
"hipruntime_version_number": hipruntime_version_number(rocm_install_path)
}
return hipruntime_config
def _find_miopen_config(rocm_install_path):
def miopen_version_numbers(path):
possible_version_files = [
"include/miopen/version.h", # ROCm 5.2 and prior
"miopen/include/miopen/version.h", # ROCm 5.1 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError(
'MIOpen version file "{}" not found'.format(version_file))
major = _get_header_version(version_file, "MIOPEN_VERSION_MAJOR")
minor = _get_header_version(version_file, "MIOPEN_VERSION_MINOR")
patch = _get_header_version(version_file, "MIOPEN_VERSION_PATCH")
return major, minor, patch
major, minor, patch = miopen_version_numbers(rocm_install_path)
miopen_config = {
"miopen_version_number":
_get_composite_version_number(major, minor, patch)
}
return miopen_config
def _find_rocblas_config(rocm_install_path):
def rocblas_version_numbers(path):
possible_version_files = [
"include/rocblas/internal/rocblas-version.h", # ROCm 5.2
"rocblas/include/internal/rocblas-version.h", # ROCm 5.1 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError(
"rocblas version file not found in {}".format(
possible_version_files))
major = _get_header_version(version_file, "ROCBLAS_VERSION_MAJOR")
minor = _get_header_version(version_file, "ROCBLAS_VERSION_MINOR")
patch = _get_header_version(version_file, "ROCBLAS_VERSION_PATCH")
return major, minor, patch
major, minor, patch = rocblas_version_numbers(rocm_install_path)
rocblas_config = {
"rocblas_version_number":
_get_composite_version_number(major, minor, patch)
}
return rocblas_config
def _find_rocrand_config(rocm_install_path):
def rocrand_version_number(path):
possible_version_files = [
"include/rocrand/rocrand_version.h", # ROCm 5.1
"rocrand/include/rocrand_version.h", # ROCm 5.0 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError(
"rocrand version file not found in {}".format(possible_version_files))
version_number = _get_header_version(version_file, "ROCRAND_VERSION")
return version_number
rocrand_config = {
"rocrand_version_number": rocrand_version_number(rocm_install_path)
}
return rocrand_config
def _find_rocfft_config(rocm_install_path):
def rocfft_version_numbers(path):
possible_version_files = [
"include/rocfft/rocfft-version.h", # ROCm 5.2
"rocfft/include/rocfft-version.h", # ROCm 5.1 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError(
"rocfft version file not found in {}".format(possible_version_files))
major = _get_header_version(version_file, "rocfft_version_major")
minor = _get_header_version(version_file, "rocfft_version_minor")
patch = _get_header_version(version_file, "rocfft_version_patch")
return major, minor, patch
major, minor, patch = rocfft_version_numbers(rocm_install_path)
rocfft_config = {
"rocfft_version_number":
_get_composite_version_number(major, minor, patch)
}
return rocfft_config
def _find_hipfft_config(rocm_install_path):
def hipfft_version_numbers(path):
possible_version_files = [
"include/hipfft/hipfft-version.h", # ROCm 5.2
"hipfft/include/hipfft-version.h", # ROCm 5.1 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError(
"hipfft version file not found in {}".format(possible_version_files))
major = _get_header_version(version_file, "hipfftVersionMajor")
minor = _get_header_version(version_file, "hipfftVersionMinor")
patch = _get_header_version(version_file, "hipfftVersionPatch")
return major, minor, patch
major, minor, patch = hipfft_version_numbers(rocm_install_path)
hipfft_config = {
"hipfft_version_number":
_get_composite_version_number(major, minor, patch)
}
return hipfft_config
def _find_roctracer_config(rocm_install_path):
def roctracer_version_numbers(path):
possible_version_files = [
"include/roctracer/roctracer.h", # ROCm 5.2
"roctracer/include/roctracer.h", # ROCm 5.1 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError("roctracer version file not found in {}".format(
possible_version_files))
major = _get_header_version(version_file, "ROCTRACER_VERSION_MAJOR")
minor = _get_header_version(version_file, "ROCTRACER_VERSION_MINOR")
# roctracer header does not have a patch version number
patch = 0
return major, minor, patch
major, minor, patch = roctracer_version_numbers(rocm_install_path)
roctracer_config = {
"roctracer_version_number":
_get_composite_version_number(major, minor, patch)
}
return roctracer_config
def _find_hipsparse_config(rocm_install_path):
def hipsparse_version_numbers(path):
possible_version_files = [
"include/hipsparse/hipsparse-version.h", # ROCm 5.2
"hipsparse/include/hipsparse-version.h", # ROCm 5.1 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError("hipsparse version file not found in {}".format(
possible_version_files))
major = _get_header_version(version_file, "hipsparseVersionMajor")
minor = _get_header_version(version_file, "hipsparseVersionMinor")
patch = _get_header_version(version_file, "hipsparseVersionPatch")
return major, minor, patch
major, minor, patch = hipsparse_version_numbers(rocm_install_path)
hipsparse_config = {
"hipsparse_version_number":
_get_composite_version_number(major, minor, patch)
}
return hipsparse_config
def _find_hipsolver_config(rocm_install_path):
def hipsolver_version_numbers(path):
possible_version_files = [
"include/hipsolver/internal/hipsolver-version.h", # ROCm 5.2
"hipsolver/include/internal/hipsolver-version.h", # ROCm 5.1
"hipsolver/include/hipsolver-version.h", # ROCm 5.0 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError("hipsolver version file not found in {}".format(
possible_version_files))
major = _get_header_version(version_file, "hipsolverVersionMajor")
minor = _get_header_version(version_file, "hipsolverVersionMinor")
patch = _get_header_version(version_file, "hipsolverVersionPatch")
return major, minor, patch
major, minor, patch = hipsolver_version_numbers(rocm_install_path)
hipsolver_config = {
"hipsolver_version_number":
_get_composite_version_number(major, minor, patch)
}
return hipsolver_config
def _find_rocsolver_config(rocm_install_path):
def rocsolver_version_numbers(path):
possible_version_files = [
"include/rocsolver/rocsolver-version.h", # ROCm 5.2
"rocsolver/include/rocsolver-version.h", # ROCm 5.1 and prior
]
version_file = None
for f in possible_version_files:
version_file_path = os.path.join(path, f)
if os.path.exists(version_file_path):
version_file = version_file_path
break
if not version_file:
raise ConfigError("rocsolver version file not found in {}".format(
possible_version_files))
major = _get_header_version(version_file, "ROCSOLVER_VERSION_MAJOR")
minor = _get_header_version(version_file, "ROCSOLVER_VERSION_MINOR")
patch = _get_header_version(version_file, "ROCSOLVER_VERSION_PATCH")
return major, minor, patch
major, minor, patch = rocsolver_version_numbers(rocm_install_path)
rocsolver_config = {
"rocsolver_version_number":
_get_composite_version_number(major, minor, patch)
}
return rocsolver_config
def find_rocm_config():
"""Returns a dictionary of ROCm components config info."""
rocm_install_path = _get_rocm_install_path()
if not os.path.exists(rocm_install_path):
raise ConfigError(
'Specified ROCM_PATH "{}" does not exist'.format(rocm_install_path))
result = {}
result["rocm_toolkit_path"] = rocm_install_path
result.update(_find_rocm_config(rocm_install_path))
result.update(_find_hipruntime_config(rocm_install_path))
result.update(_find_miopen_config(rocm_install_path))
result.update(_find_rocblas_config(rocm_install_path))
result.update(_find_rocrand_config(rocm_install_path))
result.update(_find_rocfft_config(rocm_install_path))
if result["rocm_version_number"] >= 40100:
result.update(_find_hipfft_config(rocm_install_path))
result.update(_find_roctracer_config(rocm_install_path))
result.update(_find_hipsparse_config(rocm_install_path))
if result["rocm_version_number"] >= 40500:
result.update(_find_hipsolver_config(rocm_install_path))
result.update(_find_rocsolver_config(rocm_install_path))
return result
def main():
try:
for key, value in sorted(find_rocm_config().items()):
print("%s: %s" % (key, value))
except ConfigError as e:
sys.stderr.write("\nERROR: {}\n\n".format(str(e)))
sys.exit(1)
if __name__ == "__main__":
main()
| ConfigError |
python | viewflow__viewflow | viewflow/workflow/flow/views/update.py | {
"start": 274,
"end": 1233
} | class ____(
FormLayoutMixin,
FormAjaxCompleteMixin,
FormDependentSelectMixin,
mixins.SuccessMessageMixin,
mixins.TaskSuccessUrlMixin,
mixins.TaskViewTemplateNames,
generic.UpdateView,
):
"""Default view to update a process"""
success_message = _("Task {task} has been completed.")
template_filename = "task.html"
def get_object(self):
"""Return the process for the task activation."""
return self.request.activation.process
def form_valid(self, form):
"""If the form is valid, save the associated model and finish the task."""
self.object = form.save()
if "seed" in form.cleaned_data:
self.object.seed = form.cleaned_data["seed"]
if "artifact" in form.cleaned_data:
self.object.artifact = form.cleaned_data["artifact"]
self.request.activation.execute()
return HttpResponseRedirect(self.get_success_url())
| UpdateProcessView |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/functionMember2.py | {
"start": 332,
"end": 913
} | class ____:
def method1(self) -> None: ...
@classmethod
def method2(cls) -> None: ...
@staticmethod
def method3() -> None: ...
s2 = A().method1.__self__
reveal_type(s2, expected_text="A")
s3 = A.method2.__self__
reveal_type(s3, expected_text="type[A]")
s3 = A.method2.__self__
reveal_type(s3, expected_text="type[A]")
s4 = A().method2.__self__
reveal_type(s4, expected_text="type[A]")
# This should generate an error because method3 is static.
s5 = A().method3.__self__
# This should generate an error because method3 is static.
s6 = A.method3.__self__
| A |
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 7847,
"end": 8958
} | class ____:
param_names = ["shapes", "data_type"]
params = [
get_benchmark_shapes("MergeCategoricals"),
["object", "category"],
]
def setup(self, shapes, data_type):
assert len(shapes) == 2
assert shapes[1] == 2
size = (shapes[0],)
self.left = IMPL.DataFrame(
{
"X": np.random.choice(range(0, 10), size=size),
"Y": np.random.choice(["one", "two", "three"], size=size),
}
)
self.right = IMPL.DataFrame(
{
"X": np.random.choice(range(0, 10), size=size),
"Z": np.random.choice(["jjj", "kkk", "sss"], size=size),
}
)
if data_type == "category":
self.left = self.left.assign(Y=self.left["Y"].astype("category"))
execute(self.left)
self.right = self.right.assign(Z=self.right["Z"].astype("category"))
execute(self.right)
def time_merge_categoricals(self, shapes, data_type):
execute(IMPL.merge(self.left, self.right, on="X"))
| TimeMergeCategoricals |
python | ansible__ansible | test/units/module_utils/basic/test_run_command.py | {
"start": 6971,
"end": 7650
} | class ____:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_check_rc_false(self, rc_am):
rc_am._subprocess.Popen.return_value.returncode = 1
(rc, stdout, stderr) = rc_am.run_command('/bin/false', check_rc=False)
assert rc == 1
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_check_rc_true(self, rc_am):
rc_am._subprocess.Popen.return_value.returncode = 1
with pytest.raises(SystemExit):
rc_am.run_command('/bin/false', check_rc=True)
assert rc_am.fail_json.called
args, kwargs = rc_am.fail_json.call_args
assert kwargs['rc'] == 1
| TestRunCommandRc |
python | openai__gym | gym/core.py | {
"start": 14519,
"end": 16876
} | class ____(Wrapper):
"""Superclass of wrappers that can modify observations using :meth:`observation` for :meth:`reset` and :meth:`step`.
If you would like to apply a function to the observation that is returned by the base environment before
passing it to learning code, you can simply inherit from :class:`ObservationWrapper` and overwrite the method
:meth:`observation` to implement that transformation. The transformation defined in that method must be
defined on the base environment’s observation space. However, it may take values in a different space.
In that case, you need to specify the new observation space of the wrapper by setting :attr:`self.observation_space`
in the :meth:`__init__` method of your wrapper.
For example, you might have a 2D navigation task where the environment returns dictionaries as observations with
keys ``"agent_position"`` and ``"target_position"``. A common thing to do might be to throw away some degrees of
freedom and only consider the position of the target relative to the agent, i.e.
``observation["target_position"] - observation["agent_position"]``. For this, you could implement an
observation wrapper like this::
class RelativePosition(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
self.observation_space = Box(shape=(2,), low=-np.inf, high=np.inf)
def observation(self, obs):
return obs["target"] - obs["agent"]
Among others, Gym provides the observation wrapper :class:`TimeAwareObservation`, which adds information about the
index of the timestep to the observation.
"""
def reset(self, **kwargs):
"""Resets the environment, returning a modified observation using :meth:`self.observation`."""
obs, info = self.env.reset(**kwargs)
return self.observation(obs), info
def step(self, action):
"""Returns a modified observation using :meth:`self.observation` after calling :meth:`env.step`."""
observation, reward, terminated, truncated, info = self.env.step(action)
return self.observation(observation), reward, terminated, truncated, info
def observation(self, observation):
"""Returns a modified observation."""
raise NotImplementedError
| ObservationWrapper |
python | sanic-org__sanic | guide/webapp/display/plugins/columns.py | {
"start": 288,
"end": 1344
} | class ____(DirectivePlugin):
def parse(
self, block: BlockParser, m: Match, state: BlockState
) -> dict[str, Any]:
info = m.groupdict()
new_state = block.state_cls()
new_state.process(dedent(info["text"]))
block.parse(new_state)
return {
"type": "column",
"text": info["text"],
"children": new_state.tokens,
"attrs": {},
}
def __call__( # type: ignore
self, directive: RSTDirective, md: Markdown
) -> None:
directive.register("column", self.parse)
if md.renderer.NAME == "html":
md.renderer.register("column", self._render_column)
def _render_column(self, renderer: HTMLRenderer, text: str, **attrs):
start = (
'<div class="columns mt-3 is-multiline">\n'
if attrs.get("first")
else ""
)
end = "</div>\n" if attrs.get("last") else ""
col = f'<div class="column is-half">{text}</div>\n'
return start + (col) + end
| Column |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.